@camstack/addon-vision 0.1.6 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. package/dist/addons/animal-classifier/index.d.mts +30 -0
  2. package/dist/addons/animal-classifier/index.d.ts +30 -0
  3. package/dist/addons/animal-classifier/index.js +822 -999
  4. package/dist/addons/animal-classifier/index.js.map +1 -1
  5. package/dist/addons/animal-classifier/index.mjs +7 -242
  6. package/dist/addons/animal-classifier/index.mjs.map +1 -1
  7. package/dist/addons/audio-classification/index.d.mts +36 -0
  8. package/dist/addons/audio-classification/index.d.ts +36 -0
  9. package/dist/addons/audio-classification/index.js +378 -501
  10. package/dist/addons/audio-classification/index.js.map +1 -1
  11. package/dist/addons/audio-classification/index.mjs +4 -224
  12. package/dist/addons/audio-classification/index.mjs.map +1 -1
  13. package/dist/addons/bird-global-classifier/index.d.mts +31 -0
  14. package/dist/addons/bird-global-classifier/index.d.ts +31 -0
  15. package/dist/addons/bird-global-classifier/index.js +825 -1002
  16. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  17. package/dist/addons/bird-global-classifier/index.mjs +7 -248
  18. package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
  19. package/dist/addons/bird-nabirds-classifier/index.d.mts +33 -0
  20. package/dist/addons/bird-nabirds-classifier/index.d.ts +33 -0
  21. package/dist/addons/bird-nabirds-classifier/index.js +825 -1002
  22. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  23. package/dist/addons/bird-nabirds-classifier/index.mjs +7 -289
  24. package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
  25. package/dist/addons/face-detection/index.d.mts +29 -0
  26. package/dist/addons/face-detection/index.d.ts +29 -0
  27. package/dist/addons/face-detection/index.js +934 -1196
  28. package/dist/addons/face-detection/index.js.map +1 -1
  29. package/dist/addons/face-detection/index.mjs +7 -227
  30. package/dist/addons/face-detection/index.mjs.map +1 -1
  31. package/dist/addons/face-recognition/index.d.mts +29 -0
  32. package/dist/addons/face-recognition/index.d.ts +29 -0
  33. package/dist/addons/face-recognition/index.js +807 -1003
  34. package/dist/addons/face-recognition/index.js.map +1 -1
  35. package/dist/addons/face-recognition/index.mjs +6 -197
  36. package/dist/addons/face-recognition/index.mjs.map +1 -1
  37. package/dist/addons/motion-detection/index.d.mts +28 -0
  38. package/dist/addons/motion-detection/index.d.ts +28 -0
  39. package/dist/addons/motion-detection/index.js +111 -214
  40. package/dist/addons/motion-detection/index.js.map +1 -1
  41. package/dist/addons/motion-detection/index.mjs +9 -12
  42. package/dist/addons/motion-detection/index.mjs.map +1 -1
  43. package/dist/addons/object-detection/index.d.mts +31 -0
  44. package/dist/addons/object-detection/index.d.ts +31 -0
  45. package/dist/addons/object-detection/index.js +1082 -1287
  46. package/dist/addons/object-detection/index.js.map +1 -1
  47. package/dist/addons/object-detection/index.mjs +7 -373
  48. package/dist/addons/object-detection/index.mjs.map +1 -1
  49. package/dist/addons/plate-detection/index.d.mts +30 -0
  50. package/dist/addons/plate-detection/index.d.ts +30 -0
  51. package/dist/addons/plate-detection/index.js +868 -1075
  52. package/dist/addons/plate-detection/index.js.map +1 -1
  53. package/dist/addons/plate-detection/index.mjs +7 -230
  54. package/dist/addons/plate-detection/index.mjs.map +1 -1
  55. package/dist/addons/plate-recognition/index.d.mts +31 -0
  56. package/dist/addons/plate-recognition/index.d.ts +31 -0
  57. package/dist/addons/plate-recognition/index.js +505 -684
  58. package/dist/addons/plate-recognition/index.js.map +1 -1
  59. package/dist/addons/plate-recognition/index.mjs +5 -244
  60. package/dist/addons/plate-recognition/index.mjs.map +1 -1
  61. package/dist/addons/segmentation-refiner/index.d.mts +30 -0
  62. package/dist/addons/segmentation-refiner/index.d.ts +30 -0
  63. package/dist/addons/segmentation-refiner/index.js +790 -967
  64. package/dist/addons/segmentation-refiner/index.js.map +1 -1
  65. package/dist/addons/segmentation-refiner/index.mjs +17 -21
  66. package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
  67. package/dist/addons/vehicle-classifier/index.d.mts +31 -0
  68. package/dist/addons/vehicle-classifier/index.d.ts +31 -0
  69. package/dist/addons/vehicle-classifier/index.js +410 -581
  70. package/dist/addons/vehicle-classifier/index.js.map +1 -1
  71. package/dist/addons/vehicle-classifier/index.mjs +16 -20
  72. package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
  73. package/dist/chunk-22BHCDT5.mjs +101 -0
  74. package/dist/{chunk-WG66JYYW.mjs.map → chunk-22BHCDT5.mjs.map} +1 -1
  75. package/dist/chunk-2IOKI4ES.mjs +335 -0
  76. package/dist/{chunk-PIFS7AIT.mjs.map → chunk-2IOKI4ES.mjs.map} +1 -1
  77. package/dist/chunk-7DYHXUPZ.mjs +36 -0
  78. package/dist/{chunk-BS4DKYGN.mjs.map → chunk-7DYHXUPZ.mjs.map} +1 -1
  79. package/dist/chunk-BJTO5JO5.mjs +11 -0
  80. package/dist/chunk-BP7H4NFS.mjs +412 -0
  81. package/dist/{chunk-MGT6RUVX.mjs.map → chunk-BP7H4NFS.mjs.map} +1 -1
  82. package/dist/chunk-BR2FPGOX.mjs +98 -0
  83. package/dist/{chunk-YYDM6V2F.mjs.map → chunk-BR2FPGOX.mjs.map} +1 -1
  84. package/dist/chunk-D6WEHN33.mjs +276 -0
  85. package/dist/chunk-D6WEHN33.mjs.map +1 -0
  86. package/dist/chunk-DRYFGARD.mjs +289 -0
  87. package/dist/chunk-DRYFGARD.mjs.map +1 -0
  88. package/dist/chunk-DUN6XU3N.mjs +72 -0
  89. package/dist/{chunk-XD7WGXHZ.mjs.map → chunk-DUN6XU3N.mjs.map} +1 -1
  90. package/dist/chunk-ESLHNWWE.mjs +387 -0
  91. package/dist/chunk-ESLHNWWE.mjs.map +1 -0
  92. package/dist/chunk-JUQEW6ON.mjs +256 -0
  93. package/dist/chunk-JUQEW6ON.mjs.map +1 -0
  94. package/dist/chunk-KUO2BVFY.mjs +90 -0
  95. package/dist/{chunk-DE7I3VHO.mjs.map → chunk-KUO2BVFY.mjs.map} +1 -1
  96. package/dist/chunk-R5J3WAUI.mjs +645 -0
  97. package/dist/chunk-R5J3WAUI.mjs.map +1 -0
  98. package/dist/chunk-XZ6ZMXXU.mjs +39 -0
  99. package/dist/{chunk-K36R6HWY.mjs.map → chunk-XZ6ZMXXU.mjs.map} +1 -1
  100. package/dist/chunk-YPU4WTXZ.mjs +269 -0
  101. package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
  102. package/dist/chunk-YUCD2TFH.mjs +242 -0
  103. package/dist/chunk-YUCD2TFH.mjs.map +1 -0
  104. package/dist/chunk-ZTJENCFC.mjs +379 -0
  105. package/dist/chunk-ZTJENCFC.mjs.map +1 -0
  106. package/dist/chunk-ZWYXXCXP.mjs +248 -0
  107. package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
  108. package/dist/index.d.mts +183 -0
  109. package/dist/index.d.ts +183 -0
  110. package/dist/index.js +3930 -4449
  111. package/dist/index.js.map +1 -1
  112. package/dist/index.mjs +250 -2698
  113. package/dist/index.mjs.map +1 -1
  114. package/package.json +5 -5
  115. package/dist/chunk-2YMA6QOV.mjs +0 -193
  116. package/dist/chunk-2YMA6QOV.mjs.map +0 -1
  117. package/dist/chunk-3IIFBJCD.mjs +0 -45
  118. package/dist/chunk-BS4DKYGN.mjs +0 -48
  119. package/dist/chunk-DE7I3VHO.mjs +0 -106
  120. package/dist/chunk-F6D2OZ36.mjs +0 -89
  121. package/dist/chunk-F6D2OZ36.mjs.map +0 -1
  122. package/dist/chunk-GAOIFQDX.mjs +0 -59
  123. package/dist/chunk-GAOIFQDX.mjs.map +0 -1
  124. package/dist/chunk-HUIX2XVR.mjs +0 -159
  125. package/dist/chunk-HUIX2XVR.mjs.map +0 -1
  126. package/dist/chunk-K36R6HWY.mjs +0 -51
  127. package/dist/chunk-MBTAI3WE.mjs +0 -78
  128. package/dist/chunk-MBTAI3WE.mjs.map +0 -1
  129. package/dist/chunk-MGT6RUVX.mjs +0 -423
  130. package/dist/chunk-PIFS7AIT.mjs +0 -446
  131. package/dist/chunk-WG66JYYW.mjs +0 -116
  132. package/dist/chunk-XD7WGXHZ.mjs +0 -82
  133. package/dist/chunk-YYDM6V2F.mjs +0 -113
  134. package/dist/chunk-ZK7P3TZN.mjs +0 -286
  135. package/dist/chunk-ZK7P3TZN.mjs.map +0 -1
  136. /package/dist/{chunk-3IIFBJCD.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
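Note on the overall shape of this release (inferred from the hunks below; no changelog is part of this diff): 0.1.6 embedded precompiled CommonJS modules and re-wrapped them with `__commonJS`/`require_*` factories, while 0.1.9 bundles the `.ts` sources directly as top-level bindings. That removes the CommonJS indirection, lets the bundler re-split shared chunks (hence the renamed `chunk-*.mjs` files), and shrinks every addon's `index.js`. A condensed, illustrative contrast of the two bundle shapes:

    // 0.1.6 shape: a lazy CommonJS factory the bundler cannot analyze
    var require_object_detection_models = __commonJS({
      "src/catalogs/object-detection-models.js"(exports2) {
        exports2.OBJECT_DETECTION_MODELS = [/* ... */];
      }
    });
    // 0.1.9 shape: a plain top-level binding, analyzable and tree-shakeable
    var OBJECT_DETECTION_MODELS = [/* ... */];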
@@ -5,9 +5,6 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
- var __commonJS = (cb, mod) => function __require() {
- return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
- };
  var __export = (target, all) => {
  for (var name in all)
  __defProp(target, name, { get: all[name], enumerable: true });
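The `__commonJS` helper deleted above is the bundler's lazy CommonJS wrapper (the tsup/esbuild-style helper preamble); 0.1.9 drops it because no embedded module needs CommonJS wrapping anymore. An annotated restatement of the removed one-liner, for readability only (same logic, expanded):

    // cb maps a module name to its body: (exports, module) => void
    var __commonJS = (cb, mod) => function __require() {
      if (!mod) {
        mod = { exports: {} };                               // create the cache slot once
        (0, cb[__getOwnPropNames(cb)[0]])(mod.exports, mod); // run the module body
      }
      return mod.exports;                                    // later calls hit the cache
    };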
@@ -30,1009 +27,835 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
  ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

- // src/catalogs/object-detection-models.js
- var require_object_detection_models = __commonJS({
- "src/catalogs/object-detection-models.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.OBJECT_DETECTION_MODELS = exports2.MLPACKAGE_FILES = void 0;
- var types_1 = require("@camstack/types");
- var HF_REPO = "camstack/camstack-models";
- exports2.MLPACKAGE_FILES = [
- "Manifest.json",
- "Data/com.apple.CoreML/model.mlmodel",
- "Data/com.apple.CoreML/weights/weight.bin"
- ];
- exports2.OBJECT_DETECTION_MODELS = [
- // ── YOLOv8 ──────────────────────────────────────────────────────
- {
- id: "yolov8n",
- name: "YOLOv8 Nano",
- description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
- sizeMB: 12
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
- sizeMB: 6,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
- sizeMB: 7,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
- sizeMB: 12,
- runtimes: ["python"]
- }
- }
+ // src/addons/segmentation-refiner/index.ts
+ var segmentation_refiner_exports = {};
+ __export(segmentation_refiner_exports, {
+ default: () => SegmentationRefinerAddon
+ });
+ module.exports = __toCommonJS(segmentation_refiner_exports);
+
+ // src/catalogs/segmentation-refiner-models.ts
+ var import_types2 = require("@camstack/types");
+
+ // src/catalogs/object-detection-models.ts
+ var import_types = require("@camstack/types");
+ var HF_REPO = "camstack/camstack-models";
+ var MLPACKAGE_FILES = [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ];
+ var OBJECT_DETECTION_MODELS = [
+ // ── YOLOv8 ──────────────────────────────────────────────────────
+ {
+ id: "yolov8n",
+ name: "YOLOv8 Nano",
+ description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+ sizeMB: 12
  },
- {
- id: "yolov8s",
- name: "YOLOv8 Small",
- description: "YOLOv8 Small \u2014 balanced speed and accuracy",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
- sizeMB: 43
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
- sizeMB: 21,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
- sizeMB: 22,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
- sizeMB: 43,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+ sizeMB: 6,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolov8s-relu",
- name: "YOLOv8 Small ReLU",
- description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
- sizeMB: 43
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+ sizeMB: 7,
+ runtimes: ["python"]
  },
- {
- id: "yolov8m",
- name: "YOLOv8 Medium",
- description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
- sizeMB: 99
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
- sizeMB: 49,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
- sizeMB: 50,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
- sizeMB: 99,
- runtimes: ["python"]
- }
- }
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+ sizeMB: 12,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8s",
+ name: "YOLOv8 Small",
+ description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+ sizeMB: 43
  },
- {
- id: "yolov8l",
- name: "YOLOv8 Large",
- description: "YOLOv8 Large \u2014 high-accuracy large model",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
- sizeMB: 167
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
- sizeMB: 83,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
- sizeMB: 84,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+ sizeMB: 21,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolov8x",
- name: "YOLOv8 Extra-Large",
- description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
- sizeMB: 260
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
- sizeMB: 130,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
- sizeMB: 131,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+ sizeMB: 22,
+ runtimes: ["python"]
  },
- // ── YOLOv9 ──────────────────────────────────────────────────────
- {
- id: "yolov9t",
- name: "YOLOv9 Tiny",
- description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
- sizeMB: 8
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
- sizeMB: 4,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
- sizeMB: 6,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
- sizeMB: 8,
- runtimes: ["python"]
- }
- }
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+ sizeMB: 43,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8s-relu",
+ name: "YOLOv8 Small ReLU",
+ description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+ sizeMB: 43
+ }
+ }
+ },
+ {
+ id: "yolov8m",
+ name: "YOLOv8 Medium",
+ description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+ sizeMB: 99
  },
- {
- id: "yolov9s",
- name: "YOLOv9 Small",
- description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
- sizeMB: 28
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
- sizeMB: 14,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
- sizeMB: 16,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
- sizeMB: 28,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolov9c",
- name: "YOLOv9 C",
- description: "YOLOv9 C \u2014 high-accuracy compact model",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
- sizeMB: 97
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
- sizeMB: 48,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
- sizeMB: 49,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
- sizeMB: 97,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+ sizeMB: 50,
+ runtimes: ["python"]
  },
- // ── YOLO11 ────────────────────────────────────────────────────
- {
- id: "yolo11n",
- name: "YOLO11 Nano",
- description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
- sizeMB: 10
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
- sizeMB: 5,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
- sizeMB: 5,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
- sizeMB: 10,
- runtimes: ["python"]
- }
- }
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+ sizeMB: 99,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8l",
+ name: "YOLOv8 Large",
+ description: "YOLOv8 Large \u2014 high-accuracy large model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+ sizeMB: 167
  },
- {
- id: "yolo11s",
- name: "YOLO11 Small",
- description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
- sizeMB: 36
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
- sizeMB: 18,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
- sizeMB: 18,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
- sizeMB: 36,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+ sizeMB: 83,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolo11m",
- name: "YOLO11 Medium",
- description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
- sizeMB: 77
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
- sizeMB: 39,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
- sizeMB: 39,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
- sizeMB: 77,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+ sizeMB: 84,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8x",
+ name: "YOLOv8 Extra-Large",
+ description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+ sizeMB: 260
  },
- {
- id: "yolo11l",
- name: "YOLO11 Large",
- description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
- sizeMB: 97
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
- sizeMB: 49,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
- sizeMB: 49,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
- sizeMB: 97,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+ sizeMB: 130,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolo11x",
- name: "YOLO11 Extra-Large",
- description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
- sizeMB: 218
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
- sizeMB: 109,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
- sizeMB: 109,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
- sizeMB: 218,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+ sizeMB: 131,
+ runtimes: ["python"]
  }
- ];
- }
- });
-
- // src/catalogs/segmentation-refiner-models.js
- var require_segmentation_refiner_models = __commonJS({
- "src/catalogs/segmentation-refiner-models.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.SEGMENTATION_REFINER_MODELS = void 0;
- var types_1 = require("@camstack/types");
- var object_detection_models_js_1 = require_object_detection_models();
- var HF_REPO = "camstack/camstack-models";
- var hf = (path) => (0, types_1.hfModelUrl)(HF_REPO, path);
- var MASK_LABELS2 = [
- { id: "mask", name: "Segmentation Mask" }
- ];
- exports2.SEGMENTATION_REFINER_MODELS = [
- {
- id: "u2netp",
- name: "U2-Net Portable",
- description: "U2-Net-P \u2014 ultra-lightweight salient object segmentation (4.7 MB), no prompt needed",
- inputSize: { width: 320, height: 320 },
- labels: MASK_LABELS2,
- formats: {
- onnx: { url: hf("segmentationRefiner/u2netp/onnx/camstack-u2netp.onnx"), sizeMB: 5 },
- coreml: {
- url: hf("segmentationRefiner/u2netp/coreml/camstack-u2netp.mlpackage"),
- sizeMB: 3,
- isDirectory: true,
- files: object_detection_models_js_1.MLPACKAGE_FILES,
- runtimes: ["python"]
- }
- // OpenVINO: not yet converted
- }
- }
- ];
- }
- });
-
- // src/shared/image-utils.js
- var require_image_utils = __commonJS({
- "src/shared/image-utils.js"(exports2) {
- "use strict";
- var __importDefault = exports2 && exports2.__importDefault || function(mod) {
- return mod && mod.__esModule ? mod : { "default": mod };
- };
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.jpegToRgb = jpegToRgb;
- exports2.cropRegion = cropRegion2;
- exports2.letterbox = letterbox;
- exports2.resizeAndNormalize = resizeAndNormalize2;
- exports2.rgbToGrayscale = rgbToGrayscale;
- var sharp_1 = __importDefault(require("sharp"));
- async function jpegToRgb(jpeg) {
- const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- return { data, width: info.width, height: info.height };
- }
- async function cropRegion2(jpeg, roi) {
- return (0, sharp_1.default)(jpeg).extract({
- left: Math.round(roi.x),
- top: Math.round(roi.y),
- width: Math.round(roi.w),
- height: Math.round(roi.h)
- }).jpeg().toBuffer();
  }
- async function letterbox(jpeg, targetSize) {
- const meta = await (0, sharp_1.default)(jpeg).metadata();
- const originalWidth = meta.width ?? 0;
- const originalHeight = meta.height ?? 0;
- const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
- const scaledWidth = Math.round(originalWidth * scale);
- const scaledHeight = Math.round(originalHeight * scale);
- const padX = Math.floor((targetSize - scaledWidth) / 2);
- const padY = Math.floor((targetSize - scaledHeight) / 2);
- const { data } = await (0, sharp_1.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
- top: padY,
- bottom: targetSize - scaledHeight - padY,
- left: padX,
- right: targetSize - scaledWidth - padX,
- background: { r: 114, g: 114, b: 114 }
- }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- const numPixels = targetSize * targetSize;
- const float32 = new Float32Array(3 * numPixels);
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- float32[0 * numPixels + i] = data[srcBase] / 255;
- float32[1 * numPixels + i] = data[srcBase + 1] / 255;
- float32[2 * numPixels + i] = data[srcBase + 2] / 255;
+ },
+ // ── YOLOv9 ──────────────────────────────────────────────────────
+ {
+ id: "yolov9t",
+ name: "YOLOv9 Tiny",
+ description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+ sizeMB: 8
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+ sizeMB: 4,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+ sizeMB: 6,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+ sizeMB: 8,
+ runtimes: ["python"]
  }
- return { data: float32, scale, padX, padY, originalWidth, originalHeight };
  }
- async function resizeAndNormalize2(jpeg, targetWidth, targetHeight, normalization, layout) {
- const { data } = await (0, sharp_1.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- const numPixels = targetWidth * targetHeight;
- const float32 = new Float32Array(3 * numPixels);
- const mean = [0.485, 0.456, 0.406];
- const std = [0.229, 0.224, 0.225];
- if (layout === "nchw") {
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- for (let c = 0; c < 3; c++) {
- const raw = data[srcBase + c] / 255;
- let val;
- if (normalization === "zero-one") {
- val = raw;
- } else if (normalization === "imagenet") {
- val = (raw - mean[c]) / std[c];
- } else {
- val = data[srcBase + c];
- }
- float32[c * numPixels + i] = val;
- }
- }
- } else {
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- for (let c = 0; c < 3; c++) {
- const raw = data[srcBase + c] / 255;
- let val;
- if (normalization === "zero-one") {
- val = raw;
- } else if (normalization === "imagenet") {
- val = (raw - mean[c]) / std[c];
- } else {
- val = data[srcBase + c];
- }
- float32[i * 3 + c] = val;
- }
- }
+ },
+ {
+ id: "yolov9s",
+ name: "YOLOv9 Small",
+ description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+ sizeMB: 28
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+ sizeMB: 14,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+ sizeMB: 16,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+ sizeMB: 28,
+ runtimes: ["python"]
  }
- return float32;
  }
- function rgbToGrayscale(rgb, width, height) {
- const numPixels = width * height;
- const gray = new Uint8Array(numPixels);
- for (let i = 0; i < numPixels; i++) {
- const r = rgb[i * 3];
- const g = rgb[i * 3 + 1];
- const b = rgb[i * 3 + 2];
- gray[i] = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
+ },
+ {
+ id: "yolov9c",
+ name: "YOLOv9 C",
+ description: "YOLOv9 C \u2014 high-accuracy compact model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+ sizeMB: 97
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+ sizeMB: 48,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+ sizeMB: 49,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+ sizeMB: 97,
+ runtimes: ["python"]
  }
- return gray;
  }
- }
- });
-
- // src/shared/node-engine.js
- var require_node_engine = __commonJS({
- "src/shared/node-engine.js"(exports2) {
- "use strict";
- var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() {
- return m[k];
- } };
- }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- o[k2] = m[k];
- }));
- var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
- Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
- o["default"] = v;
- });
- var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
- var ownKeys = function(o) {
- ownKeys = Object.getOwnPropertyNames || function(o2) {
- var ar = [];
- for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
- return ar;
- };
- return ownKeys(o);
- };
- return function(mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) {
- for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
- }
- __setModuleDefault(result, mod);
- return result;
- };
- })();
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.NodeInferenceEngine = void 0;
- var path = __importStar(require("path"));
- var BACKEND_TO_PROVIDER = {
- cpu: "cpu",
- coreml: "coreml",
- cuda: "cuda",
- tensorrt: "tensorrt",
- dml: "dml"
- };
- var BACKEND_TO_DEVICE = {
- cpu: "cpu",
- coreml: "gpu-mps",
- cuda: "gpu-cuda",
- tensorrt: "tensorrt"
- };
- var NodeInferenceEngine = class {
- modelPath;
- backend;
- runtime = "onnx";
- device;
- session = null;
- constructor(modelPath, backend) {
- this.modelPath = modelPath;
- this.backend = backend;
- this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
+ },
+ // ── YOLO11 ────────────────────────────────────────────────────
+ {
+ id: "yolo11n",
+ name: "YOLO11 Nano",
+ description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+ sizeMB: 10
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+ sizeMB: 5,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+ sizeMB: 5,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+ sizeMB: 10,
+ runtimes: ["python"]
  }
- async initialize() {
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
- const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
- const sessionOptions = {
- executionProviders: [provider]
- };
- this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
+ }
+ },
+ {
+ id: "yolo11s",
+ name: "YOLO11 Small",
+ description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+ sizeMB: 36
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+ sizeMB: 18,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+ sizeMB: 18,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+ sizeMB: 36,
+ runtimes: ["python"]
  }
- async run(input, inputShape) {
- if (!this.session) {
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
- }
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const sess = this.session;
- const inputName = sess.inputNames[0];
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
- const feeds = { [inputName]: tensor };
- const results = await sess.run(feeds);
- const outputName = sess.outputNames[0];
- const outputTensor = results[outputName];
- return outputTensor.data;
+ }
+ },
+ {
+ id: "yolo11m",
+ name: "YOLO11 Medium",
+ description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+ sizeMB: 77
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+ sizeMB: 39,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+ sizeMB: 39,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+ sizeMB: 77,
+ runtimes: ["python"]
  }
- async runMultiOutput(input, inputShape) {
- if (!this.session) {
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
- }
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const sess = this.session;
- const inputName = sess.inputNames[0];
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
- const feeds = { [inputName]: tensor };
- const results = await sess.run(feeds);
- const out = {};
- for (const name of sess.outputNames) {
- out[name] = results[name].data;
- }
- return out;
+ }
+ },
+ {
+ id: "yolo11l",
+ name: "YOLO11 Large",
+ description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+ sizeMB: 97
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+ sizeMB: 49,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+ sizeMB: 97,
+ runtimes: ["python"]
  }
- async dispose() {
- this.session = null;
+ }
+ },
+ {
+ id: "yolo11x",
+ name: "YOLO11 Extra-Large",
+ description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+ sizeMB: 218
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+ sizeMB: 109,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+ sizeMB: 109,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+ sizeMB: 218,
+ runtimes: ["python"]
  }
- };
- exports2.NodeInferenceEngine = NodeInferenceEngine;
+ }
  }
- });
+ ];

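Every entry in the catalogs above shares one shape: an `id`, display metadata, an `inputSize`, a `labels` list, and a `formats` map whose values carry a download `url` (built with `hfModelUrl`), a `sizeMB`, and optionally `runtimes`/`isDirectory`/`files`. A hedged sketch of a consumer-side lookup against that shape; the helper and type names here are hypothetical, not exports of this package:

    // Hypothetical helper: resolve a download for a model id, preferring lighter formats.
    type ModelFormat = { url: string; sizeMB: number; runtimes?: string[]; isDirectory?: boolean };
    type ModelEntry = { id: string; name: string; formats: Record<string, ModelFormat | undefined> };

    function pickFormat(models: ModelEntry[], id: string, prefer = ["onnx", "openvino", "tflite", "coreml"]) {
      const entry = models.find((m) => m.id === id);
      if (!entry) return undefined;
      for (const fmt of prefer) {
        const f = entry.formats[fmt];                      // skip formats the entry lacks
        if (f) return { format: fmt, url: f.url, sizeMB: f.sizeMB };
      }
      return undefined;
    }

    // e.g. pickFormat(OBJECT_DETECTION_MODELS, "yolov8n") would yield the 12 MB ONNX URL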
- // src/shared/python-engine.js
- var require_python_engine = __commonJS({
- "src/shared/python-engine.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.PythonInferenceEngine = void 0;
- exports2.resolvePythonBinary = resolvePythonBinary;
- var node_child_process_1 = require("child_process");
- var PythonInferenceEngine = class {
- pythonPath;
- scriptPath;
- modelPath;
- extraArgs;
- runtime;
- device;
- process = null;
- receiveBuffer = Buffer.alloc(0);
- pendingResolve = null;
- pendingReject = null;
- constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
- this.pythonPath = pythonPath;
- this.scriptPath = scriptPath;
- this.modelPath = modelPath;
- this.extraArgs = extraArgs;
- this.runtime = runtime;
- const runtimeDeviceMap = {
- onnx: "cpu",
- coreml: "gpu-mps",
- pytorch: "cpu",
- openvino: "cpu",
- tflite: "cpu"
- };
- this.device = runtimeDeviceMap[runtime];
- }
- async initialize() {
- const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
- this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
- stdio: ["pipe", "pipe", "pipe"]
- });
- if (!this.process.stdout || !this.process.stdin) {
- throw new Error("PythonInferenceEngine: failed to create process pipes");
- }
- this.process.stderr?.on("data", (chunk) => {
- process.stderr.write(`[python-engine] ${chunk.toString()}`);
- });
- this.process.on("error", (err) => {
- this.pendingReject?.(err);
- this.pendingReject = null;
- this.pendingResolve = null;
- });
- this.process.on("exit", (code) => {
- if (code !== 0) {
- const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
- this.pendingReject?.(err);
- this.pendingReject = null;
- this.pendingResolve = null;
- }
- });
- this.process.stdout.on("data", (chunk) => {
- this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
- this._tryReceive();
- });
- await new Promise((resolve, reject) => {
- const timeout = setTimeout(() => resolve(), 2e3);
- this.process?.on("error", (err) => {
- clearTimeout(timeout);
- reject(err);
- });
- this.process?.on("exit", (code) => {
- clearTimeout(timeout);
- if (code !== 0) {
- reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
- }
- });
- });
- }
- _tryReceive() {
- if (this.receiveBuffer.length < 4)
- return;
- const length = this.receiveBuffer.readUInt32LE(0);
- if (this.receiveBuffer.length < 4 + length)
- return;
- const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
- this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
- const resolve = this.pendingResolve;
- const reject = this.pendingReject;
- this.pendingResolve = null;
- this.pendingReject = null;
- if (!resolve)
- return;
- try {
- const parsed = JSON.parse(jsonBytes.toString("utf8"));
- resolve(parsed);
- } catch (err) {
- reject?.(err instanceof Error ? err : new Error(String(err)));
- }
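The `runJpeg`/`_tryReceive` pair removed just above documents the Python worker protocol, which the 0.1.9 build keeps (its rewritten `initialize` further down still calls `_tryReceive`): each message is a 4-byte little-endian length prefix followed by the payload, raw JPEG bytes toward the worker and UTF-8 JSON back. A minimal standalone sketch of that framing; the function names are hypothetical:

    // Frame one JPEG request for the worker's stdin.
    function frameJpeg(jpeg: Buffer): Buffer {
      const header = Buffer.allocUnsafe(4);
      header.writeUInt32LE(jpeg.length, 0);  // 4-byte LE length prefix
      return Buffer.concat([header, jpeg]);  // prefix + payload
    }

    // Pull one complete JSON response out of an accumulating stdout buffer.
    function tryParse(buf: Buffer): { msg?: unknown; rest: Buffer } {
      if (buf.length < 4) return { rest: buf };        // prefix incomplete
      const len = buf.readUInt32LE(0);
      if (buf.length < 4 + len) return { rest: buf };  // payload incomplete
      const msg = JSON.parse(buf.subarray(4, 4 + len).toString("utf8"));
      return { msg, rest: buf.subarray(4 + len) };
    }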
+ // src/catalogs/segmentation-refiner-models.ts
+ var HF_REPO2 = "camstack/camstack-models";
+ var hf = (path3) => (0, import_types2.hfModelUrl)(HF_REPO2, path3);
+ var MASK_LABELS = [
+ { id: "mask", name: "Segmentation Mask" }
+ ];
+ var SEGMENTATION_REFINER_MODELS = [
+ {
+ id: "u2netp",
+ name: "U2-Net Portable",
+ description: "U2-Net-P \u2014 ultra-lightweight salient object segmentation (4.7 MB), no prompt needed",
+ inputSize: { width: 320, height: 320 },
+ labels: MASK_LABELS,
+ formats: {
+ onnx: { url: hf("segmentationRefiner/u2netp/onnx/camstack-u2netp.onnx"), sizeMB: 5 },
+ coreml: {
+ url: hf("segmentationRefiner/u2netp/coreml/camstack-u2netp.mlpackage"),
+ sizeMB: 3,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  }
- /** Send JPEG buffer, receive JSON detection results */
- async runJpeg(jpeg) {
- if (!this.process?.stdin) {
- throw new Error("PythonInferenceEngine: process not initialized");
+ // OpenVINO: not yet converted
+ }
+ }
+ ];
+
+ // src/shared/image-utils.ts
+ var import_sharp = __toESM(require("sharp"));
+ async function cropRegion(jpeg, roi) {
+ return (0, import_sharp.default)(jpeg).extract({
+ left: Math.round(roi.x),
+ top: Math.round(roi.y),
+ width: Math.round(roi.w),
+ height: Math.round(roi.h)
+ }).jpeg().toBuffer();
+ }
+ async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
+ const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+ const numPixels = targetWidth * targetHeight;
+ const float32 = new Float32Array(3 * numPixels);
+ const mean = [0.485, 0.456, 0.406];
+ const std = [0.229, 0.224, 0.225];
+ if (layout === "nchw") {
+ for (let i = 0; i < numPixels; i++) {
+ const srcBase = i * 3;
+ for (let c = 0; c < 3; c++) {
+ const raw = data[srcBase + c] / 255;
+ let val;
+ if (normalization === "zero-one") {
+ val = raw;
+ } else if (normalization === "imagenet") {
+ val = (raw - mean[c]) / std[c];
+ } else {
+ val = data[srcBase + c];
  }
- return new Promise((resolve, reject) => {
- this.pendingResolve = resolve;
- this.pendingReject = reject;
- const lengthBuf = Buffer.allocUnsafe(4);
- lengthBuf.writeUInt32LE(jpeg.length, 0);
- this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
- });
+ float32[c * numPixels + i] = val;
  }
- /** IInferenceEngine.run — wraps runJpeg for compatibility */
- async run(_input, _inputShape) {
- throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
- }
- /** IInferenceEngine.runMultiOutput not supported by Python engine (operates on JPEG input) */
- async runMultiOutput(_input, _inputShape) {
- throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
- }
- async dispose() {
- if (this.process) {
- this.process.stdin?.end();
- this.process.kill("SIGTERM");
- this.process = null;
+ }
+ } else {
+ for (let i = 0; i < numPixels; i++) {
+ const srcBase = i * 3;
+ for (let c = 0; c < 3; c++) {
+ const raw = data[srcBase + c] / 255;
+ let val;
+ if (normalization === "zero-one") {
+ val = raw;
+ } else if (normalization === "imagenet") {
+ val = (raw - mean[c]) / std[c];
+ } else {
+ val = data[srcBase + c];
  }
+ float32[i * 3 + c] = val;
  }
+ }
+ }
+ return float32;
+ }
+
+ // src/shared/engine-resolver.ts
+ var fs = __toESM(require("fs"));
+ var path2 = __toESM(require("path"));
+
+ // src/shared/node-engine.ts
+ var path = __toESM(require("path"));
+ var BACKEND_TO_PROVIDER = {
+ cpu: "cpu",
+ coreml: "coreml",
+ cuda: "cuda",
+ tensorrt: "tensorrt",
+ dml: "dml"
+ };
+ var BACKEND_TO_DEVICE = {
+ cpu: "cpu",
+ coreml: "gpu-mps",
+ cuda: "gpu-cuda",
+ tensorrt: "tensorrt"
+ };
+ var NodeInferenceEngine = class {
+ constructor(modelPath, backend) {
+ this.modelPath = modelPath;
+ this.backend = backend;
+ this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
+ }
+ runtime = "onnx";
+ device;
+ session = null;
+ async initialize() {
+ const ort = await import("onnxruntime-node");
+ const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
+ const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
+ const sessionOptions = {
+ executionProviders: [provider]
  };
- exports2.PythonInferenceEngine = PythonInferenceEngine;
- async function resolvePythonBinary(configPath, deps) {
- if (configPath)
- return configPath;
- return deps.ensurePython();
+ this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
+ }
+ async run(input, inputShape) {
+ if (!this.session) {
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
  }
+ const ort = await import("onnxruntime-node");
+ const sess = this.session;
+ const inputName = sess.inputNames[0];
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
+ const feeds = { [inputName]: tensor };
+ const results = await sess.run(feeds);
+ const outputName = sess.outputNames[0];
+ const outputTensor = results[outputName];
+ return outputTensor.data;
  }
- });
+ async runMultiOutput(input, inputShape) {
+ if (!this.session) {
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
+ }
+ const ort = await import("onnxruntime-node");
+ const sess = this.session;
+ const inputName = sess.inputNames[0];
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
+ const feeds = { [inputName]: tensor };
+ const results = await sess.run(feeds);
+ const out = {};
+ for (const name of sess.outputNames) {
+ out[name] = results[name].data;
+ }
+ return out;
+ }
+ async dispose() {
+ this.session = null;
+ }
+ };

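`NodeInferenceEngine`, added above, is a thin wrapper over `onnxruntime-node`: `initialize()` creates the `InferenceSession`, `run()`/`runMultiOutput()` feed a flat `Float32Array` plus an explicit shape, and `dispose()` drops the session. A hedged usage sketch; the model path and the 1x3x640x640 shape are assumptions taken from the catalog entries above, not from package documentation:

    // Hypothetical caller code for the class shown in this hunk.
    const engine = new NodeInferenceEngine("./models/camstack-yolov8n.onnx", "cpu");
    await engine.initialize();                          // creates the ORT session
    const input = new Float32Array(1 * 3 * 640 * 640);  // preprocessed RGB, NCHW layout
    const output = await engine.run(input, [1, 3, 640, 640]);
    // output is the first output tensor's data (e.g. raw YOLO detections)
    await engine.dispose();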
- // src/shared/engine-resolver.js
- var require_engine_resolver = __commonJS({
- "src/shared/engine-resolver.js"(exports2) {
- "use strict";
- var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() {
- return m[k];
- } };
+ // src/shared/python-engine.ts
+ var import_node_child_process = require("child_process");
+ var PythonInferenceEngine = class {
+ constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
+ this.pythonPath = pythonPath;
+ this.scriptPath = scriptPath;
+ this.modelPath = modelPath;
+ this.extraArgs = extraArgs;
+ this.runtime = runtime;
+ const runtimeDeviceMap = {
+ onnx: "cpu",
+ coreml: "gpu-mps",
+ pytorch: "cpu",
+ openvino: "cpu",
+ tflite: "cpu"
+ };
+ this.device = runtimeDeviceMap[runtime];
+ }
+ runtime;
+ device;
+ process = null;
+ receiveBuffer = Buffer.alloc(0);
+ pendingResolve = null;
+ pendingReject = null;
+ async initialize() {
+ const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
+ this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
+ stdio: ["pipe", "pipe", "pipe"]
+ });
+ if (!this.process.stdout || !this.process.stdin) {
+ throw new Error("PythonInferenceEngine: failed to create process pipes");
+ }
+ this.process.stderr?.on("data", (chunk) => {
+ process.stderr.write(`[python-engine] ${chunk.toString()}`);
+ });
+ this.process.on("error", (err) => {
+ this.pendingReject?.(err);
+ this.pendingReject = null;
+ this.pendingResolve = null;
+ });
+ this.process.on("exit", (code) => {
+ if (code !== 0) {
+ const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
+ this.pendingReject?.(err);
+ this.pendingReject = null;
+ this.pendingResolve = null;
  }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- o[k2] = m[k];
- }));
- var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
- Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
- o["default"] = v;
  });
- var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
- var ownKeys = function(o) {
- ownKeys = Object.getOwnPropertyNames || function(o2) {
- var ar = [];
- for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
- return ar;
- };
- return ownKeys(o);
- };
- return function(mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) {
- for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ this.process.stdout.on("data", (chunk) => {
+ this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
+ this._tryReceive();
+ });
+ await new Promise((resolve2, reject) => {
+ const timeout = setTimeout(() => resolve2(), 2e3);
+ this.process?.on("error", (err) => {
+ clearTimeout(timeout);
+ reject(err);
+ });
+ this.process?.on("exit", (code) => {
+ clearTimeout(timeout);
+ if (code !== 0) {
+ reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
  }
- __setModuleDefault(result, mod);
- return result;
- };
- })();
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.resolveEngine = resolveEngine2;
- exports2.probeOnnxBackends = probeOnnxBackends;
- var fs = __importStar(require("fs"));
- var path = __importStar(require("path"));
- var node_engine_js_1 = require_node_engine();
- var python_engine_js_1 = require_python_engine();
- var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
- var BACKEND_TO_FORMAT = {
- cpu: "onnx",
- coreml: "onnx",
- cuda: "onnx",
- tensorrt: "onnx"
894
- };
895
- var RUNTIME_TO_FORMAT = {
896
- onnx: "onnx",
897
- coreml: "coreml",
898
- openvino: "openvino",
899
- tflite: "tflite",
900
- pytorch: "pt"
901
- };
902
- function modelFilePath(modelsDir, modelEntry, format) {
903
- const formatEntry = modelEntry.formats[format];
904
- if (!formatEntry) {
905
- throw new Error(`Model ${modelEntry.id} has no ${format} format`);
906
- }
907
- const urlParts = formatEntry.url.split("/");
908
- const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
909
- return path.join(modelsDir, filename);
661
+ });
662
+ });
663
+ }
664
+ _tryReceive() {
665
+ if (this.receiveBuffer.length < 4) return;
666
+ const length = this.receiveBuffer.readUInt32LE(0);
667
+ if (this.receiveBuffer.length < 4 + length) return;
668
+ const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
669
+ this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
670
+ const resolve2 = this.pendingResolve;
671
+ const reject = this.pendingReject;
672
+ this.pendingResolve = null;
673
+ this.pendingReject = null;
674
+ if (!resolve2) return;
675
+ try {
676
+ const parsed = JSON.parse(jsonBytes.toString("utf8"));
677
+ resolve2(parsed);
678
+ } catch (err) {
679
+ reject?.(err instanceof Error ? err : new Error(String(err)));
910
680
  }
911
- function modelExists(filePath) {
912
- try {
913
- return fs.existsSync(filePath);
914
- } catch {
915
- return false;
916
- }
681
+ }
682
+ /** Send JPEG buffer, receive JSON detection results */
683
+ async runJpeg(jpeg) {
684
+ if (!this.process?.stdin) {
685
+ throw new Error("PythonInferenceEngine: process not initialized");
917
686
  }
918
- async function resolveEngine2(options) {
919
- const { runtime, backend, modelEntry, modelsDir, models } = options;
920
- let selectedFormat;
921
- let selectedBackend;
922
- if (runtime === "auto") {
923
- const available = await probeOnnxBackends();
924
- let chosen = null;
925
- for (const b of AUTO_BACKEND_PRIORITY) {
926
- if (!available.includes(b))
927
- continue;
928
- const fmt = BACKEND_TO_FORMAT[b];
929
- if (!fmt)
930
- continue;
931
- if (!modelEntry.formats[fmt])
932
- continue;
933
- chosen = { backend: b, format: fmt };
934
- break;
935
- }
936
- if (!chosen) {
937
- throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
938
- }
939
- selectedFormat = chosen.format;
940
- selectedBackend = chosen.backend;
941
- } else {
942
- const fmt = RUNTIME_TO_FORMAT[runtime];
943
- if (!fmt) {
944
- throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
945
- }
946
- if (!modelEntry.formats[fmt]) {
947
- throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
948
- }
949
- selectedFormat = fmt;
950
- selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
951
- }
952
- let modelPath;
953
- if (models) {
954
- modelPath = await models.ensure(modelEntry.id, selectedFormat);
955
- } else {
956
- modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
957
- if (!modelExists(modelPath)) {
958
- throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
959
- }
960
- }
961
- if (selectedFormat === "onnx") {
962
- const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
963
- await engine.initialize();
964
- return { engine, format: selectedFormat, modelPath };
965
- }
966
- const { pythonPath } = options;
967
- const PYTHON_SCRIPT_MAP = {
968
- coreml: "coreml_inference.py",
969
- pytorch: "pytorch_inference.py",
970
- openvino: "openvino_inference.py"
971
- };
972
- const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
973
- const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
974
- if (scriptName && pythonPath) {
975
- const candidates = [
976
- path.join(__dirname, "../../python", scriptName),
977
- path.join(__dirname, "../python", scriptName),
978
- path.join(__dirname, "../../../python", scriptName)
979
- ];
980
- const scriptPath = candidates.find((p) => fs.existsSync(p));
981
- if (!scriptPath) {
982
- throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
983
- ${candidates.join("\n")}`);
984
- }
985
- const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
986
- const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
987
- `--input-size=${inputSize}`,
988
- `--confidence=0.25`
989
- ]);
990
- await engine.initialize();
991
- return { engine, format: selectedFormat, modelPath };
992
- }
993
- const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
994
- if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
995
- const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
996
- await engine.initialize();
997
- return { engine, format: "onnx", modelPath: fallbackPath };
998
- }
999
- throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
687
+ return new Promise((resolve2, reject) => {
688
+ this.pendingResolve = resolve2;
689
+ this.pendingReject = reject;
690
+ const lengthBuf = Buffer.allocUnsafe(4);
691
+ lengthBuf.writeUInt32LE(jpeg.length, 0);
692
+ this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
693
+ });
694
+ }
695
+   /** IInferenceEngine.run — not supported; use runJpeg() instead (this engine operates on JPEG input) */
+   async run(_input, _inputShape) {
+     throw new Error(
+       "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
+     );
+   }
+   /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
+   async runMultiOutput(_input, _inputShape) {
+     throw new Error(
+       "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
+     );
+   }
+   async dispose() {
+     if (this.process) {
+       this.process.stdin?.end();
+       this.process.kill("SIGTERM");
+       this.process = null;
      }
-     async function probeOnnxBackends() {
-       const available = ["cpu"];
-       try {
-         const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
-         const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
-         for (const p of providers) {
-           const normalized = p.toLowerCase().replace("executionprovider", "");
-           if (normalized === "coreml")
-             available.push("coreml");
-           else if (normalized === "cuda")
-             available.push("cuda");
-           else if (normalized === "tensorrt")
-             available.push("tensorrt");
-         }
-       } catch {
-       }
-       if (process.platform === "darwin" && !available.includes("coreml")) {
-         available.push("coreml");
-       }
-       return [...new Set(available)];
+   }
+ };
+
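`runJpeg` and `_tryReceive` together implement a single-request, length-prefixed protocol over the child process's stdio: each message is a 4-byte little-endian length followed by the payload (a raw JPEG going out, UTF-8 JSON coming back). A self-contained sketch of that framing; `encodeFrame` and `decodeFrames` are illustrative names, not package exports:

```ts
// Sketch of the 4-byte little-endian length-prefix framing used above.
// The package inlines this logic in PythonInferenceEngine; these helpers
// only demonstrate the wire format.

function encodeFrame(payload: Buffer): Buffer {
  const header = Buffer.allocUnsafe(4);
  header.writeUInt32LE(payload.length, 0); // length prefix first
  return Buffer.concat([header, payload]); // then the payload bytes
}

// Drains every complete frame from `buffer` and returns the unread remainder,
// mirroring how _tryReceive leaves a partial frame in receiveBuffer.
function decodeFrames(buffer: Buffer, onFrame: (frame: Buffer) => void): Buffer {
  while (buffer.length >= 4) {
    const length = buffer.readUInt32LE(0);
    if (buffer.length < 4 + length) break; // incomplete frame: wait for more data
    onFrame(buffer.subarray(4, 4 + length));
    buffer = buffer.subarray(4 + length);
  }
  return buffer;
}
```

One subtlety in the bundled code: `_tryReceive` parses at most one frame per stdout chunk, so a second buffered response would wait for the next data event. Since the class tracks only one pending request (a concurrent `runJpeg` call would overwrite `pendingResolve`), callers are expected to serialize requests.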
+ // src/shared/engine-resolver.ts
+ var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
+ var BACKEND_TO_FORMAT = {
+   cpu: "onnx",
+   coreml: "onnx",
+   cuda: "onnx",
+   tensorrt: "onnx"
+ };
+ var RUNTIME_TO_FORMAT = {
+   onnx: "onnx",
+   coreml: "coreml",
+   openvino: "openvino",
+   tflite: "tflite",
+   pytorch: "pt"
+ };
+ function modelFilePath(modelsDir, modelEntry, format) {
+   const formatEntry = modelEntry.formats[format];
+   if (!formatEntry) {
+     throw new Error(`Model ${modelEntry.id} has no ${format} format`);
+   }
+   const urlParts = formatEntry.url.split("/");
+   const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
+   return path2.join(modelsDir, filename);
+ }
+ function modelExists(filePath) {
+   try {
+     return fs.existsSync(filePath);
+   } catch {
+     return false;
+   }
+ }
+ async function resolveEngine(options) {
+   const { runtime, backend, modelEntry, modelsDir, models } = options;
+   let selectedFormat;
+   let selectedBackend;
+   if (runtime === "auto") {
+     const available = await probeOnnxBackends();
+     let chosen = null;
+     for (const b of AUTO_BACKEND_PRIORITY) {
+       if (!available.includes(b)) continue;
+       const fmt = BACKEND_TO_FORMAT[b];
+       if (!fmt) continue;
+       if (!modelEntry.formats[fmt]) continue;
+       chosen = { backend: b, format: fmt };
+       break;
+     }
+     if (!chosen) {
+       throw new Error(
+         `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
+       );
+     }
+     selectedFormat = chosen.format;
+     selectedBackend = chosen.backend;
+   } else {
+     const fmt = RUNTIME_TO_FORMAT[runtime];
+     if (!fmt) {
+       throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
+     }
+     if (!modelEntry.formats[fmt]) {
+       throw new Error(
+         `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
+       );
      }
+     selectedFormat = fmt;
+     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
    }
-   });
+   let modelPath;
+   if (models) {
+     modelPath = await models.ensure(modelEntry.id, selectedFormat);
+   } else {
+     modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+     if (!modelExists(modelPath)) {
+       throw new Error(
+         `resolveEngine: model file not found at ${modelPath} and no model service provided`
+       );
+     }
+   }
+   if (selectedFormat === "onnx") {
+     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
+     await engine.initialize();
+     return { engine, format: selectedFormat, modelPath };
+   }
+   const { pythonPath } = options;
+   const PYTHON_SCRIPT_MAP = {
+     coreml: "coreml_inference.py",
+     pytorch: "pytorch_inference.py",
+     openvino: "openvino_inference.py"
+   };
+   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
+   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
+   if (scriptName && pythonPath) {
+     const candidates = [
+       path2.join(__dirname, "../../python", scriptName),
+       path2.join(__dirname, "../python", scriptName),
+       path2.join(__dirname, "../../../python", scriptName)
+     ];
+     const scriptPath = candidates.find((p) => fs.existsSync(p));
+     if (!scriptPath) {
+       throw new Error(
+         `resolveEngine: Python script "${scriptName}" not found. Searched:
+ ${candidates.join("\n")}`
+       );
+     }
+     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
+     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
+       `--input-size=${inputSize}`,
+       `--confidence=0.25`
+     ]);
+     await engine.initialize();
+     return { engine, format: selectedFormat, modelPath };
+   }
+   const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
+   if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
+     const engine = new NodeInferenceEngine(fallbackPath, "cpu");
+     await engine.initialize();
+     return { engine, format: "onnx", modelPath: fallbackPath };
+   }
+   throw new Error(
+     `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
+   );
+ }
+ async function probeOnnxBackends() {
+   const available = ["cpu"];
+   try {
+     const ort = await import("onnxruntime-node");
+     const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
+     for (const p of providers) {
+       const normalized = p.toLowerCase().replace("executionprovider", "");
+       if (normalized === "coreml") available.push("coreml");
+       else if (normalized === "cuda") available.push("cuda");
+       else if (normalized === "tensorrt") available.push("tensorrt");
+     }
+   } catch {
+   }
+   if (process.platform === "darwin" && !available.includes("coreml")) {
+     available.push("coreml");
+   }
+   return [...new Set(available)];
+ }
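With `runtime: "auto"`, `resolveEngine` intersects `AUTO_BACKEND_PRIORITY` (coreml, then cuda, tensorrt, cpu) with whatever `probeOnnxBackends` reports and with the formats the model entry actually ships, then instantiates the matching engine. A hedged calling sketch; the model entry literal, URL, and directory below are invented for illustration, and `resolveEngine` is bundle-internal:

```ts
// Assumed in scope: the bundled resolver above; its options type is sketched here.
declare const resolveEngine: (options: {
  runtime: string;
  backend?: string;
  modelEntry: unknown;
  modelsDir: string;
  models?: { ensure(id: string, format: string): Promise<string> };
  pythonPath?: string;
}) => Promise<{ engine: unknown; format: string; modelPath: string }>;

// Illustrative model entry: every field value is an assumption, not package data.
const modelEntry = {
  id: "u2netp",
  inputSize: { width: 320, height: 320 },
  formats: {
    onnx: { url: "https://example.com/models/u2netp.onnx" } // hypothetical URL
  }
};

async function pickEngine() {
  const { engine, format, modelPath } = await resolveEngine({
    runtime: "auto",       // probe ONNX backends and pick by priority
    backend: "cpu",        // only consulted when runtime === "onnx"
    modelEntry,
    modelsDir: "./models", // searched when no model service is supplied
    pythonPath: undefined  // enables the Python path for non-ONNX formats
  });
  console.log(`resolved ${format} engine for ${modelPath}`);
  return engine;
}
```

Note the macOS special case in `probeOnnxBackends`: on `darwin` it reports `coreml` even when the provider probe fails, so `auto` resolution on a Mac prefers the CoreML execution provider whenever the model ships an ONNX format.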
 
  // src/addons/segmentation-refiner/index.ts
- var segmentation_refiner_exports = {};
- __export(segmentation_refiner_exports, {
-   default: () => SegmentationRefinerAddon
- });
- module.exports = __toCommonJS(segmentation_refiner_exports);
- var import_segmentation_refiner_models = __toESM(require_segmentation_refiner_models());
- var import_image_utils = __toESM(require_image_utils());
- var import_engine_resolver = __toESM(require_engine_resolver());
  var MASK_LABEL = { id: "mask", name: "Segmentation Mask" };
- var MASK_LABELS = [MASK_LABEL];
+ var MASK_LABELS2 = [MASK_LABEL];
  var MASK_CLASS_MAP = { mapping: {}, preserveOriginal: true };
  var DEFAULT_MASK_THRESHOLD = 0.5;
  var SegmentationRefinerAddon = class {
@@ -1064,7 +887,7 @@ var SegmentationRefinerAddon = class {
    resolvedConfig = null;
    ctx = null;
    getModelRequirements() {
-     return import_segmentation_refiner_models.SEGMENTATION_REFINER_MODELS.map((m) => ({
+     return SEGMENTATION_REFINER_MODELS.map((m) => ({
        modelId: m.id,
        name: m.name,
        minRAM_MB: 256,
@@ -1080,7 +903,7 @@ var SegmentationRefinerAddon = class {
      const cfg = ctx.addonConfig;
      const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "u2netp";
      this.maskThreshold = cfg["maskThreshold"] ?? DEFAULT_MASK_THRESHOLD;
-     const entry = import_segmentation_refiner_models.SEGMENTATION_REFINER_MODELS.find((m) => m.id === modelId);
+     const entry = SEGMENTATION_REFINER_MODELS.find((m) => m.id === modelId);
      if (!entry) {
        throw new Error(`SegmentationRefinerAddon: unknown modelId "${modelId}"`);
      }
@@ -1090,8 +913,8 @@ var SegmentationRefinerAddon = class {
      if (!this.engine) await this.ensureEngine();
      const start = Date.now();
      const { width: inputW, height: inputH } = this.modelEntry.inputSize;
-     const crop = await (0, import_image_utils.cropRegion)(input.frame.data, input.roi);
-     const normalized = await (0, import_image_utils.resizeAndNormalize)(crop, inputW, inputH, "zero-one", "nchw");
+     const crop = await cropRegion(input.frame.data, input.roi);
+     const normalized = await resizeAndNormalize(crop, inputW, inputH, "zero-one", "nchw");
      const rawOutput = await this.engine.run(normalized, [1, 3, inputH, inputW]);
      const maskSize = inputW * inputH;
      const mask = new Uint8Array(maskSize);
@@ -1113,13 +936,13 @@ var SegmentationRefinerAddon = class {
      const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
      const backend = config?.backend ?? "cpu";
      const format = config?.format ?? "onnx";
-     const entry = import_segmentation_refiner_models.SEGMENTATION_REFINER_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+     const entry = SEGMENTATION_REFINER_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
      this.modelEntry = entry;
      const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
      if (this.ctx.models) {
        await this.ctx.models.ensure(modelId, format);
      }
-     const resolved = await (0, import_engine_resolver.resolveEngine)({
+     const resolved = await resolveEngine({
        runtime,
        backend,
        modelEntry: entry,
@@ -1143,7 +966,7 @@ var SegmentationRefinerAddon = class {
        key: "modelId",
        label: "Model",
        type: "model-selector",
-       catalog: [...import_segmentation_refiner_models.SEGMENTATION_REFINER_MODELS],
+       catalog: [...SEGMENTATION_REFINER_MODELS],
        allowCustom: false,
        allowConversion: false,
        acceptFormats: ["onnx", "coreml", "openvino"],
@@ -1205,13 +1028,13 @@ var SegmentationRefinerAddon = class {
      return MASK_CLASS_MAP;
    }
    getModelCatalog() {
-     return [...import_segmentation_refiner_models.SEGMENTATION_REFINER_MODELS];
+     return [...SEGMENTATION_REFINER_MODELS];
    }
    getAvailableModels() {
      return [];
    }
    getActiveLabels() {
-     return MASK_LABELS;
+     return MASK_LABELS2;
    }
    async probe() {
      return {