@camstack/addon-vision 0.1.7 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. package/dist/addons/animal-classifier/index.d.mts +30 -0
  2. package/dist/addons/animal-classifier/index.d.ts +30 -0
  3. package/dist/addons/animal-classifier/index.js +822 -999
  4. package/dist/addons/animal-classifier/index.js.map +1 -1
  5. package/dist/addons/animal-classifier/index.mjs +7 -242
  6. package/dist/addons/animal-classifier/index.mjs.map +1 -1
  7. package/dist/addons/audio-classification/index.d.mts +36 -0
  8. package/dist/addons/audio-classification/index.d.ts +36 -0
  9. package/dist/addons/audio-classification/index.js +378 -501
  10. package/dist/addons/audio-classification/index.js.map +1 -1
  11. package/dist/addons/audio-classification/index.mjs +4 -224
  12. package/dist/addons/audio-classification/index.mjs.map +1 -1
  13. package/dist/addons/bird-global-classifier/index.d.mts +31 -0
  14. package/dist/addons/bird-global-classifier/index.d.ts +31 -0
  15. package/dist/addons/bird-global-classifier/index.js +825 -1002
  16. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  17. package/dist/addons/bird-global-classifier/index.mjs +7 -248
  18. package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
  19. package/dist/addons/bird-nabirds-classifier/index.d.mts +33 -0
  20. package/dist/addons/bird-nabirds-classifier/index.d.ts +33 -0
  21. package/dist/addons/bird-nabirds-classifier/index.js +825 -1002
  22. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  23. package/dist/addons/bird-nabirds-classifier/index.mjs +7 -289
  24. package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
  25. package/dist/addons/face-detection/index.d.mts +29 -0
  26. package/dist/addons/face-detection/index.d.ts +29 -0
  27. package/dist/addons/face-detection/index.js +934 -1196
  28. package/dist/addons/face-detection/index.js.map +1 -1
  29. package/dist/addons/face-detection/index.mjs +7 -227
  30. package/dist/addons/face-detection/index.mjs.map +1 -1
  31. package/dist/addons/face-recognition/index.d.mts +29 -0
  32. package/dist/addons/face-recognition/index.d.ts +29 -0
  33. package/dist/addons/face-recognition/index.js +807 -1003
  34. package/dist/addons/face-recognition/index.js.map +1 -1
  35. package/dist/addons/face-recognition/index.mjs +6 -197
  36. package/dist/addons/face-recognition/index.mjs.map +1 -1
  37. package/dist/addons/motion-detection/index.d.mts +28 -0
  38. package/dist/addons/motion-detection/index.d.ts +28 -0
  39. package/dist/addons/motion-detection/index.js +111 -214
  40. package/dist/addons/motion-detection/index.js.map +1 -1
  41. package/dist/addons/motion-detection/index.mjs +9 -12
  42. package/dist/addons/motion-detection/index.mjs.map +1 -1
  43. package/dist/addons/object-detection/index.d.mts +31 -0
  44. package/dist/addons/object-detection/index.d.ts +31 -0
  45. package/dist/addons/object-detection/index.js +1082 -1287
  46. package/dist/addons/object-detection/index.js.map +1 -1
  47. package/dist/addons/object-detection/index.mjs +7 -373
  48. package/dist/addons/object-detection/index.mjs.map +1 -1
  49. package/dist/addons/plate-detection/index.d.mts +30 -0
  50. package/dist/addons/plate-detection/index.d.ts +30 -0
  51. package/dist/addons/plate-detection/index.js +868 -1075
  52. package/dist/addons/plate-detection/index.js.map +1 -1
  53. package/dist/addons/plate-detection/index.mjs +7 -230
  54. package/dist/addons/plate-detection/index.mjs.map +1 -1
  55. package/dist/addons/plate-recognition/index.d.mts +31 -0
  56. package/dist/addons/plate-recognition/index.d.ts +31 -0
  57. package/dist/addons/plate-recognition/index.js +505 -684
  58. package/dist/addons/plate-recognition/index.js.map +1 -1
  59. package/dist/addons/plate-recognition/index.mjs +5 -244
  60. package/dist/addons/plate-recognition/index.mjs.map +1 -1
  61. package/dist/addons/segmentation-refiner/index.d.mts +30 -0
  62. package/dist/addons/segmentation-refiner/index.d.ts +30 -0
  63. package/dist/addons/segmentation-refiner/index.js +790 -967
  64. package/dist/addons/segmentation-refiner/index.js.map +1 -1
  65. package/dist/addons/segmentation-refiner/index.mjs +17 -21
  66. package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
  67. package/dist/addons/vehicle-classifier/index.d.mts +31 -0
  68. package/dist/addons/vehicle-classifier/index.d.ts +31 -0
  69. package/dist/addons/vehicle-classifier/index.js +410 -581
  70. package/dist/addons/vehicle-classifier/index.js.map +1 -1
  71. package/dist/addons/vehicle-classifier/index.mjs +16 -20
  72. package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
  73. package/dist/chunk-22BHCDT5.mjs +101 -0
  74. package/dist/{chunk-WG66JYYW.mjs.map → chunk-22BHCDT5.mjs.map} +1 -1
  75. package/dist/chunk-2IOKI4ES.mjs +335 -0
  76. package/dist/{chunk-PIFS7AIT.mjs.map → chunk-2IOKI4ES.mjs.map} +1 -1
  77. package/dist/chunk-7DYHXUPZ.mjs +36 -0
  78. package/dist/{chunk-BS4DKYGN.mjs.map → chunk-7DYHXUPZ.mjs.map} +1 -1
  79. package/dist/chunk-BJTO5JO5.mjs +11 -0
  80. package/dist/chunk-BP7H4NFS.mjs +412 -0
  81. package/dist/{chunk-MGT6RUVX.mjs.map → chunk-BP7H4NFS.mjs.map} +1 -1
  82. package/dist/chunk-BR2FPGOX.mjs +98 -0
  83. package/dist/{chunk-YYDM6V2F.mjs.map → chunk-BR2FPGOX.mjs.map} +1 -1
  84. package/dist/chunk-D6WEHN33.mjs +276 -0
  85. package/dist/chunk-D6WEHN33.mjs.map +1 -0
  86. package/dist/chunk-DRYFGARD.mjs +289 -0
  87. package/dist/chunk-DRYFGARD.mjs.map +1 -0
  88. package/dist/chunk-DUN6XU3N.mjs +72 -0
  89. package/dist/{chunk-XD7WGXHZ.mjs.map → chunk-DUN6XU3N.mjs.map} +1 -1
  90. package/dist/chunk-ESLHNWWE.mjs +387 -0
  91. package/dist/chunk-ESLHNWWE.mjs.map +1 -0
  92. package/dist/chunk-JUQEW6ON.mjs +256 -0
  93. package/dist/chunk-JUQEW6ON.mjs.map +1 -0
  94. package/dist/chunk-KUO2BVFY.mjs +90 -0
  95. package/dist/{chunk-DE7I3VHO.mjs.map → chunk-KUO2BVFY.mjs.map} +1 -1
  96. package/dist/chunk-R5J3WAUI.mjs +645 -0
  97. package/dist/chunk-R5J3WAUI.mjs.map +1 -0
  98. package/dist/chunk-XZ6ZMXXU.mjs +39 -0
  99. package/dist/{chunk-K36R6HWY.mjs.map → chunk-XZ6ZMXXU.mjs.map} +1 -1
  100. package/dist/chunk-YPU4WTXZ.mjs +269 -0
  101. package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
  102. package/dist/chunk-YUCD2TFH.mjs +242 -0
  103. package/dist/chunk-YUCD2TFH.mjs.map +1 -0
  104. package/dist/chunk-ZTJENCFC.mjs +379 -0
  105. package/dist/chunk-ZTJENCFC.mjs.map +1 -0
  106. package/dist/chunk-ZWYXXCXP.mjs +248 -0
  107. package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
  108. package/dist/index.d.mts +183 -0
  109. package/dist/index.d.ts +183 -0
  110. package/dist/index.js +3930 -4449
  111. package/dist/index.js.map +1 -1
  112. package/dist/index.mjs +250 -2698
  113. package/dist/index.mjs.map +1 -1
  114. package/package.json +5 -5
  115. package/dist/chunk-2YMA6QOV.mjs +0 -193
  116. package/dist/chunk-2YMA6QOV.mjs.map +0 -1
  117. package/dist/chunk-3IIFBJCD.mjs +0 -45
  118. package/dist/chunk-BS4DKYGN.mjs +0 -48
  119. package/dist/chunk-DE7I3VHO.mjs +0 -106
  120. package/dist/chunk-F6D2OZ36.mjs +0 -89
  121. package/dist/chunk-F6D2OZ36.mjs.map +0 -1
  122. package/dist/chunk-GAOIFQDX.mjs +0 -59
  123. package/dist/chunk-GAOIFQDX.mjs.map +0 -1
  124. package/dist/chunk-HUIX2XVR.mjs +0 -159
  125. package/dist/chunk-HUIX2XVR.mjs.map +0 -1
  126. package/dist/chunk-K36R6HWY.mjs +0 -51
  127. package/dist/chunk-MBTAI3WE.mjs +0 -78
  128. package/dist/chunk-MBTAI3WE.mjs.map +0 -1
  129. package/dist/chunk-MGT6RUVX.mjs +0 -423
  130. package/dist/chunk-PIFS7AIT.mjs +0 -446
  131. package/dist/chunk-WG66JYYW.mjs +0 -116
  132. package/dist/chunk-XD7WGXHZ.mjs +0 -82
  133. package/dist/chunk-YYDM6V2F.mjs +0 -113
  134. package/dist/chunk-ZK7P3TZN.mjs +0 -286
  135. package/dist/chunk-ZK7P3TZN.mjs.map +0 -1
  136. package/dist/{chunk-3IIFBJCD.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
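The substantive change in this diff is the build output, not the vision logic. The 0.1.7 bundles wrap tsc-compiled .js modules (note the src/catalogs/object-detection-models.js module names and the types_1/__importStar/__importDefault helpers below) in esbuild's __commonJS lazy-require helper; the 0.1.10 bundles are built from the .ts sources directly and inline each module at the top level. That is why the __commonJS helper disappears in the first hunk, every require_* indirection goes away, and each addon's index.js shrinks. A minimal sketch of the two output shapes, runnable as-is (require_catalog is a shortened stand-in for the generated names):

    // 0.1.7 shape: the module body is deferred inside esbuild's __commonJS
    // wrapper and only runs on the first require_catalog() call.
    var __getOwnPropNames = Object.getOwnPropertyNames;
    var __commonJS = (cb, mod) => function __require() {
      return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
    };
    var require_catalog = __commonJS({
      "src/catalogs/object-detection-models.js"(exports2) {
        exports2.OBJECT_DETECTION_MODELS = [/* entries */];
      }
    });
    var models = require_catalog().OBJECT_DETECTION_MODELS;

    // 0.1.10 shape: the transpiled TypeScript module is inlined at the top
    // level of the bundle, so the catalog is just a plain variable.
    var OBJECT_DETECTION_MODELS = [/* entries */];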
@@ -5,9 +5,6 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
- var __commonJS = (cb, mod) => function __require() {
- return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
- };
  var __export = (target, all) => {
  for (var name in all)
  __defProp(target, name, { get: all[name], enumerable: true });
@@ -30,1249 +27,990 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
  ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

- // src/catalogs/object-detection-models.js
- var require_object_detection_models = __commonJS({
- "src/catalogs/object-detection-models.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.OBJECT_DETECTION_MODELS = exports2.MLPACKAGE_FILES = void 0;
- var types_1 = require("@camstack/types");
- var HF_REPO = "camstack/camstack-models";
- exports2.MLPACKAGE_FILES = [
- "Manifest.json",
- "Data/com.apple.CoreML/model.mlmodel",
- "Data/com.apple.CoreML/weights/weight.bin"
- ];
- exports2.OBJECT_DETECTION_MODELS = [
- // ── YOLOv8 ──────────────────────────────────────────────────────
- {
- id: "yolov8n",
- name: "YOLOv8 Nano",
- description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
- sizeMB: 12
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
- sizeMB: 6,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
- sizeMB: 7,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
- sizeMB: 12,
- runtimes: ["python"]
- }
- }
+ // src/addons/face-detection/index.ts
+ var face_detection_exports = {};
+ __export(face_detection_exports, {
+ default: () => FaceDetectionAddon
+ });
+ module.exports = __toCommonJS(face_detection_exports);
+
+ // src/catalogs/face-detection-models.ts
+ var import_types2 = require("@camstack/types");
+
+ // src/catalogs/object-detection-models.ts
+ var import_types = require("@camstack/types");
+ var HF_REPO = "camstack/camstack-models";
+ var MLPACKAGE_FILES = [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ];
+ var OBJECT_DETECTION_MODELS = [
+ // ── YOLOv8 ──────────────────────────────────────────────────────
+ {
+ id: "yolov8n",
+ name: "YOLOv8 Nano",
+ description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+ sizeMB: 12
  },
- {
- id: "yolov8s",
- name: "YOLOv8 Small",
- description: "YOLOv8 Small \u2014 balanced speed and accuracy",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
- sizeMB: 43
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
- sizeMB: 21,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
- sizeMB: 22,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
- sizeMB: 43,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+ sizeMB: 6,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolov8s-relu",
- name: "YOLOv8 Small ReLU",
- description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
- sizeMB: 43
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+ sizeMB: 7,
+ runtimes: ["python"]
  },
- {
- id: "yolov8m",
- name: "YOLOv8 Medium",
- description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
- sizeMB: 99
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
- sizeMB: 49,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
- sizeMB: 50,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
- sizeMB: 99,
- runtimes: ["python"]
- }
- }
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+ sizeMB: 12,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8s",
+ name: "YOLOv8 Small",
+ description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+ sizeMB: 43
  },
- {
- id: "yolov8l",
- name: "YOLOv8 Large",
- description: "YOLOv8 Large \u2014 high-accuracy large model",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
- sizeMB: 167
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
- sizeMB: 83,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
- sizeMB: 84,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+ sizeMB: 21,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolov8x",
- name: "YOLOv8 Extra-Large",
- description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
- sizeMB: 260
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
- sizeMB: 130,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
- sizeMB: 131,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+ sizeMB: 22,
+ runtimes: ["python"]
  },
- // ── YOLOv9 ──────────────────────────────────────────────────────
- {
- id: "yolov9t",
- name: "YOLOv9 Tiny",
- description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
- sizeMB: 8
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
- sizeMB: 4,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
- sizeMB: 6,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
- sizeMB: 8,
- runtimes: ["python"]
- }
- }
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+ sizeMB: 43,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8s-relu",
+ name: "YOLOv8 Small ReLU",
+ description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+ sizeMB: 43
+ }
+ }
+ },
+ {
+ id: "yolov8m",
+ name: "YOLOv8 Medium",
+ description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+ sizeMB: 99
  },
- {
- id: "yolov9s",
- name: "YOLOv9 Small",
- description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
- sizeMB: 28
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
- sizeMB: 14,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
- sizeMB: 16,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
- sizeMB: 28,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolov9c",
- name: "YOLOv9 C",
- description: "YOLOv9 C \u2014 high-accuracy compact model",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
- sizeMB: 97
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
- sizeMB: 48,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
- sizeMB: 49,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
- sizeMB: 97,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+ sizeMB: 50,
+ runtimes: ["python"]
  },
- // ── YOLO11 ────────────────────────────────────────────────────
- {
- id: "yolo11n",
- name: "YOLO11 Nano",
- description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
- sizeMB: 10
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
- sizeMB: 5,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
- sizeMB: 5,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
- sizeMB: 10,
- runtimes: ["python"]
- }
- }
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+ sizeMB: 99,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8l",
+ name: "YOLOv8 Large",
+ description: "YOLOv8 Large \u2014 high-accuracy large model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+ sizeMB: 167
  },
- {
- id: "yolo11s",
- name: "YOLO11 Small",
- description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
- sizeMB: 36
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
- sizeMB: 18,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
- sizeMB: 18,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
- sizeMB: 36,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+ sizeMB: 83,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolo11m",
- name: "YOLO11 Medium",
- description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
- sizeMB: 77
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
- sizeMB: 39,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
- sizeMB: 39,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
- sizeMB: 77,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+ sizeMB: 84,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8x",
+ name: "YOLOv8 Extra-Large",
+ description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+ sizeMB: 260
  },
- {
- id: "yolo11l",
- name: "YOLO11 Large",
- description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
- sizeMB: 97
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
- sizeMB: 49,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
- sizeMB: 49,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
- sizeMB: 97,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+ sizeMB: 130,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "yolo11x",
- name: "YOLO11 Extra-Large",
- description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
- inputSize: { width: 640, height: 640 },
- labels: types_1.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
- sizeMB: 218
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
- sizeMB: 109,
- isDirectory: true,
- files: exports2.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
- sizeMB: 109,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
- sizeMB: 218,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+ sizeMB: 131,
+ runtimes: ["python"]
  }
- ];
- }
- });
-
- // src/catalogs/face-detection-models.js
- var require_face_detection_models = __commonJS({
- "src/catalogs/face-detection-models.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.FACE_DETECTION_MODELS = void 0;
- var types_1 = require("@camstack/types");
- var object_detection_models_js_1 = require_object_detection_models();
- var HF_REPO = "camstack/camstack-models";
- var FACE_LABELS2 = [
- { id: "face", name: "Face" }
- ];
- exports2.FACE_DETECTION_MODELS = [
- {
- id: "scrfd-500m",
- name: "SCRFD 500M",
- description: "SCRFD 500M \u2014 ultra-lightweight face detector",
- inputSize: { width: 640, height: 640 },
- labels: FACE_LABELS2,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/onnx/camstack-scrfd-500m.onnx"),
- sizeMB: 2.2
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
- sizeMB: 1.2,
- isDirectory: true,
- files: object_detection_models_js_1.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
- sizeMB: 1.3,
- runtimes: ["python"]
- }
- }
+ }
+ },
+ // ── YOLOv9 ──────────────────────────────────────────────────────
+ {
+ id: "yolov9t",
+ name: "YOLOv9 Tiny",
+ description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+ sizeMB: 8
  },
- {
- id: "scrfd-2.5g",
- name: "SCRFD 2.5G",
- description: "SCRFD 2.5G \u2014 balanced face detection model",
- inputSize: { width: 640, height: 640 },
- labels: FACE_LABELS2,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/onnx/camstack-scrfd-2.5g.onnx"),
- sizeMB: 3.1
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
- sizeMB: 1.7,
- isDirectory: true,
- files: object_detection_models_js_1.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
- sizeMB: 1.8,
- runtimes: ["python"]
- }
- }
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+ sizeMB: 4,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
- {
- id: "scrfd-10g",
- name: "SCRFD 10G",
- description: "SCRFD 10G \u2014 high-accuracy face detector",
- inputSize: { width: 640, height: 640 },
- labels: FACE_LABELS2,
- formats: {
- onnx: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/onnx/camstack-scrfd-10g.onnx"),
- sizeMB: 16
- },
- coreml: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
- sizeMB: 8.2,
- isDirectory: true,
- files: object_detection_models_js_1.MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
- sizeMB: 8.3,
- runtimes: ["python"]
- }
- }
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+ sizeMB: 6,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+ sizeMB: 8,
+ runtimes: ["python"]
  }
- ];
- }
- });
-
- // src/shared/image-utils.js
- var require_image_utils = __commonJS({
- "src/shared/image-utils.js"(exports2) {
- "use strict";
- var __importDefault = exports2 && exports2.__importDefault || function(mod) {
- return mod && mod.__esModule ? mod : { "default": mod };
- };
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.jpegToRgb = jpegToRgb;
- exports2.cropRegion = cropRegion2;
- exports2.letterbox = letterbox2;
- exports2.resizeAndNormalize = resizeAndNormalize;
- exports2.rgbToGrayscale = rgbToGrayscale;
- var sharp_1 = __importDefault(require("sharp"));
- async function jpegToRgb(jpeg) {
- const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- return { data, width: info.width, height: info.height };
  }
- async function cropRegion2(jpeg, roi) {
- return (0, sharp_1.default)(jpeg).extract({
- left: Math.round(roi.x),
- top: Math.round(roi.y),
- width: Math.round(roi.w),
- height: Math.round(roi.h)
- }).jpeg().toBuffer();
+ },
+ {
+ id: "yolov9s",
+ name: "YOLOv9 Small",
+ description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+ sizeMB: 28
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+ sizeMB: 14,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+ sizeMB: 16,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+ sizeMB: 28,
+ runtimes: ["python"]
+ }
  }
- async function letterbox2(jpeg, targetSize) {
- const meta = await (0, sharp_1.default)(jpeg).metadata();
- const originalWidth = meta.width ?? 0;
- const originalHeight = meta.height ?? 0;
- const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
- const scaledWidth = Math.round(originalWidth * scale);
- const scaledHeight = Math.round(originalHeight * scale);
- const padX = Math.floor((targetSize - scaledWidth) / 2);
- const padY = Math.floor((targetSize - scaledHeight) / 2);
- const { data } = await (0, sharp_1.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
- top: padY,
- bottom: targetSize - scaledHeight - padY,
- left: padX,
- right: targetSize - scaledWidth - padX,
- background: { r: 114, g: 114, b: 114 }
- }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- const numPixels = targetSize * targetSize;
- const float32 = new Float32Array(3 * numPixels);
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- float32[0 * numPixels + i] = data[srcBase] / 255;
- float32[1 * numPixels + i] = data[srcBase + 1] / 255;
- float32[2 * numPixels + i] = data[srcBase + 2] / 255;
+ },
+ {
+ id: "yolov9c",
+ name: "YOLOv9 C",
+ description: "YOLOv9 C \u2014 high-accuracy compact model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+ sizeMB: 97
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+ sizeMB: 48,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+ sizeMB: 49,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+ sizeMB: 97,
+ runtimes: ["python"]
  }
- return { data: float32, scale, padX, padY, originalWidth, originalHeight };
  }
- async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
- const { data } = await (0, sharp_1.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- const numPixels = targetWidth * targetHeight;
- const float32 = new Float32Array(3 * numPixels);
- const mean = [0.485, 0.456, 0.406];
- const std = [0.229, 0.224, 0.225];
- if (layout === "nchw") {
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- for (let c = 0; c < 3; c++) {
- const raw = data[srcBase + c] / 255;
- let val;
- if (normalization === "zero-one") {
- val = raw;
- } else if (normalization === "imagenet") {
- val = (raw - mean[c]) / std[c];
- } else {
- val = data[srcBase + c];
- }
- float32[c * numPixels + i] = val;
- }
- }
- } else {
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- for (let c = 0; c < 3; c++) {
- const raw = data[srcBase + c] / 255;
- let val;
- if (normalization === "zero-one") {
- val = raw;
- } else if (normalization === "imagenet") {
- val = (raw - mean[c]) / std[c];
- } else {
- val = data[srcBase + c];
- }
- float32[i * 3 + c] = val;
- }
- }
+ },
+ // ── YOLO11 ────────────────────────────────────────────────────
+ {
+ id: "yolo11n",
+ name: "YOLO11 Nano",
+ description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+ sizeMB: 10
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+ sizeMB: 5,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+ sizeMB: 5,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+ sizeMB: 10,
+ runtimes: ["python"]
  }
- return float32;
  }
- function rgbToGrayscale(rgb, width, height) {
- const numPixels = width * height;
- const gray = new Uint8Array(numPixels);
- for (let i = 0; i < numPixels; i++) {
- const r = rgb[i * 3];
- const g = rgb[i * 3 + 1];
- const b = rgb[i * 3 + 2];
- gray[i] = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
+ },
+ {
+ id: "yolo11s",
+ name: "YOLO11 Small",
+ description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+ sizeMB: 36
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+ sizeMB: 18,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+ sizeMB: 18,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+ sizeMB: 36,
+ runtimes: ["python"]
  }
- return gray;
  }
- }
- });
-
- // src/shared/postprocess/yolo.js
- var require_yolo = __commonJS({
- "src/shared/postprocess/yolo.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.iou = iou;
- exports2.nms = nms;
- exports2.yoloPostprocess = yoloPostprocess;
- function iou(a, b) {
- const ax1 = a.x;
- const ay1 = a.y;
- const ax2 = a.x + a.w;
- const ay2 = a.y + a.h;
- const bx1 = b.x;
- const by1 = b.y;
- const bx2 = b.x + b.w;
- const by2 = b.y + b.h;
- const interX1 = Math.max(ax1, bx1);
- const interY1 = Math.max(ay1, by1);
- const interX2 = Math.min(ax2, bx2);
- const interY2 = Math.min(ay2, by2);
- const interW = Math.max(0, interX2 - interX1);
- const interH = Math.max(0, interY2 - interY1);
- const interArea = interW * interH;
- if (interArea === 0)
- return 0;
- const areaA = a.w * a.h;
- const areaB = b.w * b.h;
- const unionArea = areaA + areaB - interArea;
- return unionArea === 0 ? 0 : interArea / unionArea;
+ },
+ {
+ id: "yolo11m",
+ name: "YOLO11 Medium",
+ description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+ sizeMB: 77
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+ sizeMB: 39,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+ sizeMB: 39,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+ sizeMB: 77,
+ runtimes: ["python"]
+ }
  }
- function nms(boxes, iouThreshold) {
- const indices = boxes.map((_, i) => i).sort((a, b) => boxes[b].score - boxes[a].score);
- const kept = [];
- const suppressed = /* @__PURE__ */ new Set();
- for (const idx of indices) {
- if (suppressed.has(idx))
- continue;
- kept.push(idx);
- for (const other of indices) {
- if (other === idx || suppressed.has(other))
- continue;
- if (iou(boxes[idx].bbox, boxes[other].bbox) > iouThreshold) {
- suppressed.add(other);
- }
- }
+ },
+ {
+ id: "yolo11l",
+ name: "YOLO11 Large",
+ description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+ sizeMB: 97
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+ sizeMB: 49,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+ sizeMB: 97,
+ runtimes: ["python"]
  }
- return kept;
  }
- function yoloPostprocess(output, numClasses, numBoxes, options) {
- const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight } = options;
- const candidates = [];
- for (let i = 0; i < numBoxes; i++) {
- const cx = output[0 * numBoxes + i];
- const cy = output[1 * numBoxes + i];
- const w = output[2 * numBoxes + i];
- const h = output[3 * numBoxes + i];
- let bestScore = -Infinity;
- let bestClass = 0;
- for (let j = 0; j < numClasses; j++) {
- const score = output[(4 + j) * numBoxes + i];
- if (score > bestScore) {
- bestScore = score;
- bestClass = j;
- }
- }
- if (bestScore < confidence)
- continue;
- const bbox = {
- x: cx - w / 2,
- y: cy - h / 2,
- w,
- h
- };
- candidates.push({ bbox, score: bestScore, classIdx: bestClass });
+ },
+ {
+ id: "yolo11x",
+ name: "YOLO11 Extra-Large",
+ description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+ sizeMB: 218
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+ sizeMB: 109,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+ sizeMB: 109,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+ sizeMB: 218,
+ runtimes: ["python"]
  }
- if (candidates.length === 0)
- return [];
- const keptIndices = nms(candidates, iouThreshold);
- return keptIndices.map((idx) => {
- const { bbox, score, classIdx } = candidates[idx];
- const label = labels[classIdx] ?? String(classIdx);
- const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
- const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
- const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
- const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
- const finalBbox = { x, y, w: x2 - x, h: y2 - y };
- return {
- class: label,
- originalClass: label,
- score,
- bbox: finalBbox
- };
- });
  }
  }
- });
+ ];

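Both catalogs in this bundle share one entry shape. A JSDoc sketch of that shape, inferred from the fields visible in this diff (the typedef itself is not part of the package):

    /**
     * One model-catalog entry, as the entries above are written.
     * @typedef {object} ModelEntry
     * @property {string} id                 e.g. "yolov8n", "scrfd-500m"
     * @property {string} name
     * @property {string} description
     * @property {{width: number, height: number}} inputSize
     * @property {Array<{id: string, name: string}>|string[]} labels
     * @property {Object<string, ModelFormat>} formats   keys: onnx | coreml | openvino | tflite
     *
     * @typedef {object} ModelFormat
     * @property {string} url            hfModelUrl(HF_REPO, path), a Hugging Face download URL
     * @property {number} sizeMB
     * @property {boolean} [isDirectory] CoreML .mlpackage bundles are directories
     * @property {string[]} [files]      files to fetch for directory models (MLPACKAGE_FILES)
     * @property {string[]} [runtimes]   e.g. ["python"]
     */

Only the onnx formats are left untagged while every coreml, openvino, and tflite format carries runtimes: ["python"], which suggests ONNX is the one format the in-process Node engine serves; the engine classes further down in this diff point the same way.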
- // src/shared/postprocess/scrfd.js
- var require_scrfd = __commonJS({
- "src/shared/postprocess/scrfd.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.scrfdPostprocess = scrfdPostprocess2;
- var yolo_js_1 = require_yolo();
- var STRIDES = [8, 16, 32];
- var NUM_ANCHORS_PER_STRIDE = 2;
- function generateAnchors(stride, inputSize) {
- const featureSize = Math.ceil(inputSize / stride);
- const anchors = [];
- for (let y = 0; y < featureSize; y++) {
- for (let x = 0; x < featureSize; x++) {
- for (let k = 0; k < NUM_ANCHORS_PER_STRIDE; k++) {
- anchors.push({
- cx: (x + 0.5) * stride,
- cy: (y + 0.5) * stride
- });
- }
- }
+ // src/catalogs/face-detection-models.ts
+ var HF_REPO2 = "camstack/camstack-models";
+ var FACE_LABELS = [
+ { id: "face", name: "Face" }
+ ];
+ var FACE_DETECTION_MODELS = [
+ {
+ id: "scrfd-500m",
+ name: "SCRFD 500M",
+ description: "SCRFD 500M \u2014 ultra-lightweight face detector",
+ inputSize: { width: 640, height: 640 },
+ labels: FACE_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-500m.onnx"),
+ sizeMB: 2.2
+ },
+ coreml: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
+ sizeMB: 1.2,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
+ sizeMB: 1.3,
+ runtimes: ["python"]
  }
- return anchors;
  }
- function scrfdPostprocess2(outputs, confidence, inputSize, originalWidth, originalHeight) {
- const scaleX = originalWidth / inputSize;
- const scaleY = originalHeight / inputSize;
- const candidates = [];
- for (const stride of STRIDES) {
- const scoreKey = Object.keys(outputs).find((k) => k.includes(`score_${stride}`) || k.includes(`_${stride}_score`));
- const bboxKey = Object.keys(outputs).find((k) => k.includes(`bbox_${stride}`) || k.includes(`_${stride}_bbox`));
- const kpsKey = Object.keys(outputs).find((k) => k.includes(`kps_${stride}`) || k.includes(`_${stride}_kps`));
- if (!scoreKey || !bboxKey)
- continue;
- const scores = outputs[scoreKey];
- const bboxes = outputs[bboxKey];
- const kps = kpsKey ? outputs[kpsKey] : void 0;
- const anchors = generateAnchors(stride, inputSize);
- const n = anchors.length;
- for (let i = 0; i < n; i++) {
- const score = scores[i];
- if (score < confidence)
- continue;
- const anchor = anchors[i];
- const x1 = anchor.cx - bboxes[i * 4] * stride;
- const y1 = anchor.cy - bboxes[i * 4 + 1] * stride;
- const x2 = anchor.cx + bboxes[i * 4 + 2] * stride;
- const y2 = anchor.cy + bboxes[i * 4 + 3] * stride;
- const bbox = {
- x: x1 * scaleX,
- y: y1 * scaleY,
- w: (x2 - x1) * scaleX,
- h: (y2 - y1) * scaleY
- };
- let landmarks;
- if (kps) {
- const pts = [];
- for (let p = 0; p < 5; p++) {
- pts.push({
- x: (anchor.cx + kps[i * 10 + p * 2] * stride) * scaleX,
- y: (anchor.cy + kps[i * 10 + p * 2 + 1] * stride) * scaleY
- });
- }
- landmarks = pts;
- }
- candidates.push({ bbox, score, landmarks });
- }
+ },
+ {
+ id: "scrfd-2.5g",
+ name: "SCRFD 2.5G",
+ description: "SCRFD 2.5G \u2014 balanced face detection model",
+ inputSize: { width: 640, height: 640 },
+ labels: FACE_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-2.5g.onnx"),
+ sizeMB: 3.1
+ },
+ coreml: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
+ sizeMB: 1.7,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
+ sizeMB: 1.8,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "scrfd-10g",
+ name: "SCRFD 10G",
+ description: "SCRFD 10G \u2014 high-accuracy face detector",
+ inputSize: { width: 640, height: 640 },
+ labels: FACE_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-10g.onnx"),
+ sizeMB: 16
+ },
+ coreml: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
+ sizeMB: 8.2,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
+ sizeMB: 8.3,
+ runtimes: ["python"]
  }
- if (candidates.length === 0)
- return [];
- const keptIndices = (0, yolo_js_1.nms)(candidates, 0.45);
- return keptIndices.map((idx) => {
- const { bbox, score, landmarks } = candidates[idx];
- return {
- class: "face",
- originalClass: "face",
- score,
- bbox,
- ...landmarks ? { landmarks } : {}
- };
- });
  }
  }
- });
+ ];

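The SCRFD post-processing removed above is the 0.1.7 tsc-compiled copy (the 0.1.10 bundle re-inlines the .ts version, as it does for image-utils and yolo later in this diff). It decodes distance-style boxes against a dense anchor grid: for each stride in [8, 16, 32], every feature-map cell contributes NUM_ANCHORS_PER_STRIDE = 2 anchor centers at ((x + 0.5) * stride, (y + 0.5) * stride), and the four bbox outputs are left/top/right/bottom distances in stride units. Worked numbers under those conventions (the [2, 1, 3, 2] head output is hypothetical):

    // One anchor at stride 16, cell (x = 10, y = 5):
    const stride = 16;
    const cx = (10 + 0.5) * stride; // 168
    const cy = (5 + 0.5) * stride;  // 88
    // Hypothetical bbox head output [left, top, right, bottom] = [2, 1, 3, 2]:
    const x1 = cx - 2 * stride;     // 136
    const y1 = cy - 1 * stride;     // 72
    const x2 = cx + 3 * stride;     // 216
    const y2 = cy + 2 * stride;     // 120
    // An 80 x 48 px face box in 640 x 640 input space; scrfdPostprocess2 then
    // scales by originalWidth / 640 and originalHeight / 640 and runs nms at 0.45.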
- // src/shared/node-engine.js
- var require_node_engine = __commonJS({
- "src/shared/node-engine.js"(exports2) {
- "use strict";
- var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() {
- return m[k];
- } };
- }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- o[k2] = m[k];
- }));
- var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
- Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
- o["default"] = v;
- });
- var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
- var ownKeys = function(o) {
- ownKeys = Object.getOwnPropertyNames || function(o2) {
- var ar = [];
- for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
- return ar;
- };
- return ownKeys(o);
- };
- return function(mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) {
- for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
- }
- __setModuleDefault(result, mod);
- return result;
- };
- })();
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.NodeInferenceEngine = void 0;
- var path = __importStar(require("path"));
- var BACKEND_TO_PROVIDER = {
- cpu: "cpu",
- coreml: "coreml",
- cuda: "cuda",
- tensorrt: "tensorrt",
- dml: "dml"
- };
- var BACKEND_TO_DEVICE = {
- cpu: "cpu",
- coreml: "gpu-mps",
- cuda: "gpu-cuda",
- tensorrt: "tensorrt"
- };
- var NodeInferenceEngine = class {
- modelPath;
- backend;
- runtime = "onnx";
- device;
- session = null;
- constructor(modelPath, backend) {
- this.modelPath = modelPath;
- this.backend = backend;
- this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
- }
- async initialize() {
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
- const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
- const sessionOptions = {
- executionProviders: [provider]
- };
- this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
- }
- async run(input, inputShape) {
- if (!this.session) {
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
- }
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const sess = this.session;
- const inputName = sess.inputNames[0];
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
- const feeds = { [inputName]: tensor };
- const results = await sess.run(feeds);
- const outputName = sess.outputNames[0];
- const outputTensor = results[outputName];
- return outputTensor.data;
- }
- async runMultiOutput(input, inputShape) {
- if (!this.session) {
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
- }
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const sess = this.session;
- const inputName = sess.inputNames[0];
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
- const feeds = { [inputName]: tensor };
- const results = await sess.run(feeds);
- const out = {};
- for (const name of sess.outputNames) {
- out[name] = results[name].data;
- }
- return out;
- }
- async dispose() {
- this.session = null;
- }
- };
- exports2.NodeInferenceEngine = NodeInferenceEngine;
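The NodeInferenceEngine removed here is a thin wrapper over onnxruntime-node, and its whole flow is visible in the deleted lines: create a session with one execution provider, feed a float32 tensor keyed by the model's first input name, and read back the first output (run) or all outputs (runMultiOutput). A condensed sketch of that same flow against the onnxruntime-node API; the model path and input shape are placeholders:

    const ort = require("onnxruntime-node");

    async function runOnce(modelPath, float32Data) {
      // BACKEND_TO_PROVIDER above maps cpu/coreml/cuda/tensorrt/dml onto provider names.
      const session = await ort.InferenceSession.create(modelPath, {
        executionProviders: ["cpu"]
      });
      // NCHW float32 input, e.g. one 640 x 640 letterboxed frame.
      const tensor = new ort.Tensor("float32", float32Data, [1, 3, 640, 640]);
      const results = await session.run({ [session.inputNames[0]]: tensor });
      return results[session.outputNames[0]].data;
    }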
+ // src/shared/image-utils.ts
+ var import_sharp = __toESM(require("sharp"));
+ async function cropRegion(jpeg, roi) {
+ return (0, import_sharp.default)(jpeg).extract({
+ left: Math.round(roi.x),
+ top: Math.round(roi.y),
+ width: Math.round(roi.w),
+ height: Math.round(roi.h)
+ }).jpeg().toBuffer();
+ }
+ async function letterbox(jpeg, targetSize) {
+ const meta = await (0, import_sharp.default)(jpeg).metadata();
+ const originalWidth = meta.width ?? 0;
+ const originalHeight = meta.height ?? 0;
+ const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
+ const scaledWidth = Math.round(originalWidth * scale);
+ const scaledHeight = Math.round(originalHeight * scale);
+ const padX = Math.floor((targetSize - scaledWidth) / 2);
+ const padY = Math.floor((targetSize - scaledHeight) / 2);
+ const { data } = await (0, import_sharp.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
+ top: padY,
+ bottom: targetSize - scaledHeight - padY,
+ left: padX,
+ right: targetSize - scaledWidth - padX,
+ background: { r: 114, g: 114, b: 114 }
+ }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+ const numPixels = targetSize * targetSize;
+ const float32 = new Float32Array(3 * numPixels);
+ for (let i = 0; i < numPixels; i++) {
+ const srcBase = i * 3;
+ float32[0 * numPixels + i] = data[srcBase] / 255;
+ float32[1 * numPixels + i] = data[srcBase + 1] / 255;
+ float32[2 * numPixels + i] = data[srcBase + 2] / 255;
  }
- });
+ return { data: float32, scale, padX, padY, originalWidth, originalHeight };
+ }

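A note on the new image pipeline above: letterbox() scales the JPEG to fit a square, pads the borders with the conventional neutral gray (114, 114, 114), and re-packs sharp's interleaved RGB bytes into a planar CHW Float32Array normalized to [0, 1]. The sketch below shows the usual inverse transform a caller would apply to map boxes from letterboxed coordinates back to the source image; the helper name and box shape are illustrative only and not part of this package.

    // Hypothetical helper, assuming the { scale, padX, padY } fields returned
    // by letterbox() above: subtract the padding, then divide out the scale.
    function unletterbox(
      box: { x: number; y: number; w: number; h: number },
      lb: { scale: number; padX: number; padY: number }
    ) {
      return {
        x: (box.x - lb.padX) / lb.scale,
        y: (box.y - lb.padY) / lb.scale,
        w: box.w / lb.scale,
        h: box.h / lb.scale
      };
    }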
- // src/shared/python-engine.js
- var require_python_engine = __commonJS({
- "src/shared/python-engine.js"(exports2) {
- "use strict";
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.PythonInferenceEngine = void 0;
- exports2.resolvePythonBinary = resolvePythonBinary;
- var node_child_process_1 = require("child_process");
- var PythonInferenceEngine = class {
- pythonPath;
- scriptPath;
- modelPath;
- extraArgs;
- runtime;
- device;
- process = null;
- receiveBuffer = Buffer.alloc(0);
- pendingResolve = null;
- pendingReject = null;
- constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
- this.pythonPath = pythonPath;
- this.scriptPath = scriptPath;
- this.modelPath = modelPath;
- this.extraArgs = extraArgs;
- this.runtime = runtime;
- const runtimeDeviceMap = {
- onnx: "cpu",
- coreml: "gpu-mps",
- pytorch: "cpu",
- openvino: "cpu",
- tflite: "cpu"
- };
- this.device = runtimeDeviceMap[runtime];
+ // src/shared/postprocess/yolo.ts
+ function iou(a, b) {
+ const ax1 = a.x;
+ const ay1 = a.y;
+ const ax2 = a.x + a.w;
+ const ay2 = a.y + a.h;
+ const bx1 = b.x;
+ const by1 = b.y;
+ const bx2 = b.x + b.w;
+ const by2 = b.y + b.h;
+ const interX1 = Math.max(ax1, bx1);
+ const interY1 = Math.max(ay1, by1);
+ const interX2 = Math.min(ax2, bx2);
+ const interY2 = Math.min(ay2, by2);
+ const interW = Math.max(0, interX2 - interX1);
+ const interH = Math.max(0, interY2 - interY1);
+ const interArea = interW * interH;
+ if (interArea === 0) return 0;
+ const areaA = a.w * a.h;
+ const areaB = b.w * b.h;
+ const unionArea = areaA + areaB - interArea;
+ return unionArea === 0 ? 0 : interArea / unionArea;
+ }
+ function nms(boxes, iouThreshold) {
+ const indices = boxes.map((_, i) => i).sort((a, b) => boxes[b].score - boxes[a].score);
+ const kept = [];
+ const suppressed = /* @__PURE__ */ new Set();
+ for (const idx of indices) {
+ if (suppressed.has(idx)) continue;
+ kept.push(idx);
+ for (const other of indices) {
+ if (other === idx || suppressed.has(other)) continue;
+ if (iou(boxes[idx].bbox, boxes[other].bbox) > iouThreshold) {
+ suppressed.add(other);
  }
- async initialize() {
- const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
- this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
- stdio: ["pipe", "pipe", "pipe"]
- });
- if (!this.process.stdout || !this.process.stdin) {
- throw new Error("PythonInferenceEngine: failed to create process pipes");
- }
- this.process.stderr?.on("data", (chunk) => {
- process.stderr.write(`[python-engine] ${chunk.toString()}`);
- });
- this.process.on("error", (err) => {
- this.pendingReject?.(err);
- this.pendingReject = null;
- this.pendingResolve = null;
- });
- this.process.on("exit", (code) => {
- if (code !== 0) {
- const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
- this.pendingReject?.(err);
- this.pendingReject = null;
- this.pendingResolve = null;
- }
- });
- this.process.stdout.on("data", (chunk) => {
- this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
- this._tryReceive();
- });
- await new Promise((resolve, reject) => {
- const timeout = setTimeout(() => resolve(), 2e3);
- this.process?.on("error", (err) => {
- clearTimeout(timeout);
- reject(err);
- });
- this.process?.on("exit", (code) => {
- clearTimeout(timeout);
- if (code !== 0) {
- reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
- }
- });
- });
- }
- _tryReceive() {
- if (this.receiveBuffer.length < 4)
- return;
- const length = this.receiveBuffer.readUInt32LE(0);
- if (this.receiveBuffer.length < 4 + length)
- return;
- const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
- this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
- const resolve = this.pendingResolve;
- const reject = this.pendingReject;
- this.pendingResolve = null;
- this.pendingReject = null;
- if (!resolve)
- return;
- try {
- const parsed = JSON.parse(jsonBytes.toString("utf8"));
- resolve(parsed);
- } catch (err) {
- reject?.(err instanceof Error ? err : new Error(String(err)));
- }
- }
- /** Send JPEG buffer, receive JSON detection results */
- async runJpeg(jpeg) {
- if (!this.process?.stdin) {
- throw new Error("PythonInferenceEngine: process not initialized");
- }
- return new Promise((resolve, reject) => {
- this.pendingResolve = resolve;
- this.pendingReject = reject;
- const lengthBuf = Buffer.allocUnsafe(4);
- lengthBuf.writeUInt32LE(jpeg.length, 0);
- this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
+ }
+ }
+ return kept;
+ }
+
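The iou()/nms() pair above is standard greedy non-maximum suppression: candidates are visited in descending score order, and any lower-scoring box whose IoU with a kept box exceeds the threshold is suppressed. A worked example with made-up numbers (the 0.45 threshold matches the face postprocessor later in this file):

    // Two heavily overlapping boxes; values chosen only for illustration.
    const a = { x: 0, y: 0, w: 10, h: 10 }; // score 0.9
    const b = { x: 1, y: 1, w: 10, h: 10 }; // score 0.8
    const interW = Math.min(a.x + a.w, b.x + b.w) - Math.max(a.x, b.x); // 9
    const interH = Math.min(a.y + a.h, b.y + b.h) - Math.max(a.y, b.y); // 9
    const inter = interW * interH;                                      // 81
    const iouAB = inter / (a.w * a.h + b.w * b.h - inter);              // 81 / 119
    console.log(iouAB.toFixed(3)); // "0.681" > 0.45, so the lower-scoring box is suppressed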
+ // src/shared/postprocess/scrfd.ts
+ var STRIDES = [8, 16, 32];
+ var NUM_ANCHORS_PER_STRIDE = 2;
+ function generateAnchors(stride, inputSize) {
+ const featureSize = Math.ceil(inputSize / stride);
+ const anchors = [];
+ for (let y = 0; y < featureSize; y++) {
+ for (let x = 0; x < featureSize; x++) {
+ for (let k = 0; k < NUM_ANCHORS_PER_STRIDE; k++) {
+ anchors.push({
+ cx: (x + 0.5) * stride,
+ cy: (y + 0.5) * stride
  });
  }
- /** IInferenceEngine.run — wraps runJpeg for compatibility */
- async run(_input, _inputShape) {
- throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
- }
- /** IInferenceEngine.runMultiOutput not supported by Python engine (operates on JPEG input) */
- async runMultiOutput(_input, _inputShape) {
- throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
- }
- async dispose() {
- if (this.process) {
- this.process.stdin?.end();
- this.process.kill("SIGTERM");
- this.process = null;
+ }
+ }
+ return anchors;
+ }
+ function scrfdPostprocess(outputs, confidence, inputSize, originalWidth, originalHeight) {
+ const scaleX = originalWidth / inputSize;
+ const scaleY = originalHeight / inputSize;
+ const candidates = [];
+ for (const stride of STRIDES) {
+ const scoreKey = Object.keys(outputs).find((k) => k.includes(`score_${stride}`) || k.includes(`_${stride}_score`));
+ const bboxKey = Object.keys(outputs).find((k) => k.includes(`bbox_${stride}`) || k.includes(`_${stride}_bbox`));
+ const kpsKey = Object.keys(outputs).find((k) => k.includes(`kps_${stride}`) || k.includes(`_${stride}_kps`));
+ if (!scoreKey || !bboxKey) continue;
+ const scores = outputs[scoreKey];
+ const bboxes = outputs[bboxKey];
+ const kps = kpsKey ? outputs[kpsKey] : void 0;
+ const anchors = generateAnchors(stride, inputSize);
+ const n = anchors.length;
+ for (let i = 0; i < n; i++) {
+ const score = scores[i];
+ if (score < confidence) continue;
+ const anchor = anchors[i];
+ const x1 = anchor.cx - bboxes[i * 4] * stride;
+ const y1 = anchor.cy - bboxes[i * 4 + 1] * stride;
+ const x2 = anchor.cx + bboxes[i * 4 + 2] * stride;
+ const y2 = anchor.cy + bboxes[i * 4 + 3] * stride;
+ const bbox = {
+ x: x1 * scaleX,
+ y: y1 * scaleY,
+ w: (x2 - x1) * scaleX,
+ h: (y2 - y1) * scaleY
+ };
+ let landmarks;
+ if (kps) {
+ const pts = [];
+ for (let p = 0; p < 5; p++) {
+ pts.push({
+ x: (anchor.cx + kps[i * 10 + p * 2] * stride) * scaleX,
+ y: (anchor.cy + kps[i * 10 + p * 2 + 1] * stride) * scaleY
+ });
  }
+ landmarks = pts;
  }
+ candidates.push({ bbox, score, landmarks });
+ }
+ }
+ if (candidates.length === 0) return [];
+ const keptIndices = nms(candidates, 0.45);
+ return keptIndices.map((idx) => {
+ const { bbox, score, landmarks } = candidates[idx];
+ return {
+ class: "face",
+ originalClass: "face",
+ score,
+ bbox,
+ ...landmarks ? { landmarks } : {}
+ };
+ });
+ }
+
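scrfdPostprocess() above decodes SCRFD-style distance regression: each anchor center predicts four distances (left, top, right, bottom) in units of the feature-map stride, and the five facial landmarks are offsets from the same center. A worked decode with illustrative values:

    // Illustrative numbers only. A stride-8 anchor centered at (100, 52) with
    // raw bbox output [2, 1, 3, 2] decodes to a 40x24 box in model coordinates.
    const stride = 8;
    const anchor = { cx: 100, cy: 52 };
    const d = [2, 1, 3, 2]; // distances: left, top, right, bottom
    const x1 = anchor.cx - d[0] * stride; // 84
    const y1 = anchor.cy - d[1] * stride; // 44
    const x2 = anchor.cx + d[2] * stride; // 124
    const y2 = anchor.cy + d[3] * stride; // 68
    console.log({ x: x1, y: y1, w: x2 - x1, h: y2 - y1 });
    // { x: 84, y: 44, w: 40, h: 24 }, then scaled by scaleX/scaleY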
+ // src/shared/engine-resolver.ts
+ var fs = __toESM(require("fs"));
+ var path2 = __toESM(require("path"));
+
+ // src/shared/node-engine.ts
+ var path = __toESM(require("path"));
+ var BACKEND_TO_PROVIDER = {
+ cpu: "cpu",
+ coreml: "coreml",
+ cuda: "cuda",
+ tensorrt: "tensorrt",
+ dml: "dml"
+ };
+ var BACKEND_TO_DEVICE = {
+ cpu: "cpu",
+ coreml: "gpu-mps",
+ cuda: "gpu-cuda",
+ tensorrt: "tensorrt"
+ };
+ var NodeInferenceEngine = class {
+ constructor(modelPath, backend) {
+ this.modelPath = modelPath;
+ this.backend = backend;
+ this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
+ }
+ runtime = "onnx";
+ device;
+ session = null;
+ async initialize() {
+ const ort = await import("onnxruntime-node");
+ const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
+ const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
+ const sessionOptions = {
+ executionProviders: [provider]
  };
- exports2.PythonInferenceEngine = PythonInferenceEngine;
- async function resolvePythonBinary(configPath, deps) {
- if (configPath)
- return configPath;
- return deps.ensurePython();
+ this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
+ }
+ async run(input, inputShape) {
+ if (!this.session) {
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
  }
+ const ort = await import("onnxruntime-node");
+ const sess = this.session;
+ const inputName = sess.inputNames[0];
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
+ const feeds = { [inputName]: tensor };
+ const results = await sess.run(feeds);
+ const outputName = sess.outputNames[0];
+ const outputTensor = results[outputName];
+ return outputTensor.data;
  }
- });
+ async runMultiOutput(input, inputShape) {
+ if (!this.session) {
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
+ }
+ const ort = await import("onnxruntime-node");
+ const sess = this.session;
+ const inputName = sess.inputNames[0];
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
+ const feeds = { [inputName]: tensor };
+ const results = await sess.run(feeds);
+ const out = {};
+ for (const name of sess.outputNames) {
+ out[name] = results[name].data;
+ }
+ return out;
+ }
+ async dispose() {
+ this.session = null;
+ }
+ };

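The rewritten NodeInferenceEngine above dynamically imports onnxruntime-node, maps the configured backend to an execution provider, and offers both a single-output and a multi-output run path. A hypothetical caller is sketched below; the model path and the 640x640 input shape are assumptions for illustration, and module-level await is assumed:

    // Sketch only; assumes an ONNX face-detection model exists at this path.
    const engine = new NodeInferenceEngine("./models/scrfd_500m.onnx", "cpu");
    await engine.initialize(); // creates the onnxruntime InferenceSession
    const input = new Float32Array(1 * 3 * 640 * 640); // CHW data from letterbox()
    const first = await engine.run(input, [1, 3, 640, 640]);            // first output's data
    const named = await engine.runMultiOutput(input, [1, 3, 640, 640]); // all outputs, keyed by name
    await engine.dispose();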
- // src/shared/engine-resolver.js
- var require_engine_resolver = __commonJS({
- "src/shared/engine-resolver.js"(exports2) {
- "use strict";
- var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() {
- return m[k];
- } };
+ // src/shared/python-engine.ts
+ var import_node_child_process = require("child_process");
+ var PythonInferenceEngine = class {
+ constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
+ this.pythonPath = pythonPath;
+ this.scriptPath = scriptPath;
+ this.modelPath = modelPath;
+ this.extraArgs = extraArgs;
+ this.runtime = runtime;
+ const runtimeDeviceMap = {
+ onnx: "cpu",
+ coreml: "gpu-mps",
+ pytorch: "cpu",
+ openvino: "cpu",
+ tflite: "cpu"
+ };
+ this.device = runtimeDeviceMap[runtime];
+ }
+ runtime;
+ device;
+ process = null;
+ receiveBuffer = Buffer.alloc(0);
+ pendingResolve = null;
+ pendingReject = null;
+ async initialize() {
+ const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
+ this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
+ stdio: ["pipe", "pipe", "pipe"]
+ });
+ if (!this.process.stdout || !this.process.stdin) {
+ throw new Error("PythonInferenceEngine: failed to create process pipes");
+ }
+ this.process.stderr?.on("data", (chunk) => {
+ process.stderr.write(`[python-engine] ${chunk.toString()}`);
+ });
+ this.process.on("error", (err) => {
+ this.pendingReject?.(err);
+ this.pendingReject = null;
+ this.pendingResolve = null;
+ });
+ this.process.on("exit", (code) => {
+ if (code !== 0) {
+ const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
+ this.pendingReject?.(err);
+ this.pendingReject = null;
+ this.pendingResolve = null;
  }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === void 0) k2 = k;
- o[k2] = m[k];
- }));
- var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
- Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
- o["default"] = v;
  });
- var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
- var ownKeys = function(o) {
- ownKeys = Object.getOwnPropertyNames || function(o2) {
- var ar = [];
- for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
- return ar;
- };
- return ownKeys(o);
- };
- return function(mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) {
- for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ this.process.stdout.on("data", (chunk) => {
+ this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
+ this._tryReceive();
+ });
+ await new Promise((resolve2, reject) => {
+ const timeout = setTimeout(() => resolve2(), 2e3);
+ this.process?.on("error", (err) => {
+ clearTimeout(timeout);
+ reject(err);
+ });
+ this.process?.on("exit", (code) => {
+ clearTimeout(timeout);
+ if (code !== 0) {
+ reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
  }
- __setModuleDefault(result, mod);
- return result;
- };
- })();
- Object.defineProperty(exports2, "__esModule", { value: true });
- exports2.resolveEngine = resolveEngine2;
- exports2.probeOnnxBackends = probeOnnxBackends;
- var fs = __importStar(require("fs"));
- var path = __importStar(require("path"));
- var node_engine_js_1 = require_node_engine();
- var python_engine_js_1 = require_python_engine();
- var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
- var BACKEND_TO_FORMAT = {
- cpu: "onnx",
- coreml: "onnx",
- cuda: "onnx",
- tensorrt: "onnx"
- };
- var RUNTIME_TO_FORMAT = {
- onnx: "onnx",
- coreml: "coreml",
- openvino: "openvino",
- tflite: "tflite",
- pytorch: "pt"
- };
- function modelFilePath(modelsDir, modelEntry, format) {
- const formatEntry = modelEntry.formats[format];
- if (!formatEntry) {
- throw new Error(`Model ${modelEntry.id} has no ${format} format`);
- }
- const urlParts = formatEntry.url.split("/");
- const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
- return path.join(modelsDir, filename);
+ });
+ });
+ }
+ _tryReceive() {
+ if (this.receiveBuffer.length < 4) return;
+ const length = this.receiveBuffer.readUInt32LE(0);
+ if (this.receiveBuffer.length < 4 + length) return;
+ const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
+ this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
+ const resolve2 = this.pendingResolve;
+ const reject = this.pendingReject;
+ this.pendingResolve = null;
+ this.pendingReject = null;
+ if (!resolve2) return;
+ try {
+ const parsed = JSON.parse(jsonBytes.toString("utf8"));
+ resolve2(parsed);
+ } catch (err) {
+ reject?.(err instanceof Error ? err : new Error(String(err)));
  }
- function modelExists(filePath) {
- try {
- return fs.existsSync(filePath);
- } catch {
- return false;
- }
+ }
+ /** Send JPEG buffer, receive JSON detection results */
+ async runJpeg(jpeg) {
+ if (!this.process?.stdin) {
+ throw new Error("PythonInferenceEngine: process not initialized");
  }
- async function resolveEngine2(options) {
- const { runtime, backend, modelEntry, modelsDir, models } = options;
- let selectedFormat;
- let selectedBackend;
- if (runtime === "auto") {
- const available = await probeOnnxBackends();
- let chosen = null;
- for (const b of AUTO_BACKEND_PRIORITY) {
- if (!available.includes(b))
- continue;
- const fmt = BACKEND_TO_FORMAT[b];
- if (!fmt)
- continue;
- if (!modelEntry.formats[fmt])
- continue;
- chosen = { backend: b, format: fmt };
- break;
- }
- if (!chosen) {
- throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
- }
- selectedFormat = chosen.format;
- selectedBackend = chosen.backend;
- } else {
- const fmt = RUNTIME_TO_FORMAT[runtime];
- if (!fmt) {
- throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
- }
- if (!modelEntry.formats[fmt]) {
- throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
- }
- selectedFormat = fmt;
- selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
- }
- let modelPath;
- if (models) {
- modelPath = await models.ensure(modelEntry.id, selectedFormat);
- } else {
- modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
- if (!modelExists(modelPath)) {
- throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
- }
- }
- if (selectedFormat === "onnx") {
- const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
- await engine.initialize();
- return { engine, format: selectedFormat, modelPath };
- }
- const { pythonPath } = options;
- const PYTHON_SCRIPT_MAP = {
- coreml: "coreml_inference.py",
- pytorch: "pytorch_inference.py",
- openvino: "openvino_inference.py"
- };
- const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
- const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
- if (scriptName && pythonPath) {
- const candidates = [
- path.join(__dirname, "../../python", scriptName),
- path.join(__dirname, "../python", scriptName),
- path.join(__dirname, "../../../python", scriptName)
- ];
- const scriptPath = candidates.find((p) => fs.existsSync(p));
- if (!scriptPath) {
- throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
- ${candidates.join("\n")}`);
- }
- const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
- const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
- `--input-size=${inputSize}`,
- `--confidence=0.25`
- ]);
- await engine.initialize();
- return { engine, format: selectedFormat, modelPath };
- }
- const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
- if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
- const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
- await engine.initialize();
- return { engine, format: "onnx", modelPath: fallbackPath };
- }
- throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
+ return new Promise((resolve2, reject) => {
+ this.pendingResolve = resolve2;
+ this.pendingReject = reject;
+ const lengthBuf = Buffer.allocUnsafe(4);
+ lengthBuf.writeUInt32LE(jpeg.length, 0);
+ this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
+ });
+ }
+ /** IInferenceEngine.run — wraps runJpeg for compatibility */
+ async run(_input, _inputShape) {
+ throw new Error(
+ "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
+ );
+ }
+ /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
+ async runMultiOutput(_input, _inputShape) {
+ throw new Error(
+ "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
+ );
+ }
+ async dispose() {
+ if (this.process) {
+ this.process.stdin?.end();
+ this.process.kill("SIGTERM");
+ this.process = null;
  }
- async function probeOnnxBackends() {
- const available = ["cpu"];
- try {
- const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
- const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
- for (const p of providers) {
- const normalized = p.toLowerCase().replace("executionprovider", "");
- if (normalized === "coreml")
- available.push("coreml");
- else if (normalized === "cuda")
- available.push("cuda");
- else if (normalized === "tensorrt")
- available.push("tensorrt");
- }
- } catch {
- }
- if (process.platform === "darwin" && !available.includes("coreml")) {
- available.push("coreml");
- }
- return [...new Set(available)];
+ }
+ };
+
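The Python bridge above frames every stdin/stdout message as a 4-byte little-endian length followed by the payload (a JPEG buffer on the way out, JSON detection results on the way back). A minimal, self-contained round-trip of that framing in Node.js:

    // Encode: the same layout runJpeg() writes to the child's stdin. JSON is
    // used here so the example is self-contained; the real request is a JPEG.
    const payload = Buffer.from(JSON.stringify({ detections: [] }), "utf8");
    const header = Buffer.allocUnsafe(4);
    header.writeUInt32LE(payload.length, 0);
    const wire = Buffer.concat([header, payload]);

    // Decode: what _tryReceive() does once enough bytes have accumulated.
    const len = wire.readUInt32LE(0);
    const body = wire.subarray(4, 4 + len);
    console.log(JSON.parse(body.toString("utf8"))); // { detections: [] }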
+ // src/shared/engine-resolver.ts
+ var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
+ var BACKEND_TO_FORMAT = {
+ cpu: "onnx",
+ coreml: "onnx",
+ cuda: "onnx",
+ tensorrt: "onnx"
+ };
+ var RUNTIME_TO_FORMAT = {
+ onnx: "onnx",
+ coreml: "coreml",
+ openvino: "openvino",
+ tflite: "tflite",
+ pytorch: "pt"
+ };
+ function modelFilePath(modelsDir, modelEntry, format) {
+ const formatEntry = modelEntry.formats[format];
+ if (!formatEntry) {
+ throw new Error(`Model ${modelEntry.id} has no ${format} format`);
+ }
+ const urlParts = formatEntry.url.split("/");
+ const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
+ return path2.join(modelsDir, filename);
+ }
+ function modelExists(filePath) {
+ try {
+ return fs.existsSync(filePath);
+ } catch {
+ return false;
+ }
+ }
+ async function resolveEngine(options) {
+ const { runtime, backend, modelEntry, modelsDir, models } = options;
+ let selectedFormat;
+ let selectedBackend;
+ if (runtime === "auto") {
+ const available = await probeOnnxBackends();
+ let chosen = null;
+ for (const b of AUTO_BACKEND_PRIORITY) {
+ if (!available.includes(b)) continue;
+ const fmt = BACKEND_TO_FORMAT[b];
+ if (!fmt) continue;
+ if (!modelEntry.formats[fmt]) continue;
+ chosen = { backend: b, format: fmt };
+ break;
+ }
+ if (!chosen) {
+ throw new Error(
+ `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
+ );
+ }
+ selectedFormat = chosen.format;
+ selectedBackend = chosen.backend;
+ } else {
+ const fmt = RUNTIME_TO_FORMAT[runtime];
+ if (!fmt) {
+ throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
+ }
+ if (!modelEntry.formats[fmt]) {
+ throw new Error(
+ `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
+ );
  }
+ selectedFormat = fmt;
+ selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
- });
+ let modelPath;
+ if (models) {
+ modelPath = await models.ensure(modelEntry.id, selectedFormat);
+ } else {
+ modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+ if (!modelExists(modelPath)) {
+ throw new Error(
+ `resolveEngine: model file not found at ${modelPath} and no model service provided`
+ );
+ }
+ }
+ if (selectedFormat === "onnx") {
+ const engine = new NodeInferenceEngine(modelPath, selectedBackend);
+ await engine.initialize();
+ return { engine, format: selectedFormat, modelPath };
+ }
+ const { pythonPath } = options;
+ const PYTHON_SCRIPT_MAP = {
+ coreml: "coreml_inference.py",
+ pytorch: "pytorch_inference.py",
+ openvino: "openvino_inference.py"
+ };
+ const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
+ const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
+ if (scriptName && pythonPath) {
+ const candidates = [
+ path2.join(__dirname, "../../python", scriptName),
+ path2.join(__dirname, "../python", scriptName),
+ path2.join(__dirname, "../../../python", scriptName)
+ ];
+ const scriptPath = candidates.find((p) => fs.existsSync(p));
+ if (!scriptPath) {
+ throw new Error(
+ `resolveEngine: Python script "${scriptName}" not found. Searched:
+ ${candidates.join("\n")}`
+ );
+ }
+ const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
+ const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
+ `--input-size=${inputSize}`,
+ `--confidence=0.25`
+ ]);
+ await engine.initialize();
+ return { engine, format: selectedFormat, modelPath };
+ }
+ const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
+ if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
+ const engine = new NodeInferenceEngine(fallbackPath, "cpu");
+ await engine.initialize();
+ return { engine, format: "onnx", modelPath: fallbackPath };
+ }
+ throw new Error(
+ `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
+ );
+ }
+ async function probeOnnxBackends() {
+ const available = ["cpu"];
+ try {
+ const ort = await import("onnxruntime-node");
+ const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
+ for (const p of providers) {
+ const normalized = p.toLowerCase().replace("executionprovider", "");
+ if (normalized === "coreml") available.push("coreml");
+ else if (normalized === "cuda") available.push("cuda");
+ else if (normalized === "tensorrt") available.push("tensorrt");
+ }
+ } catch {
+ }
+ if (process.platform === "darwin" && !available.includes("coreml")) {
+ available.push("coreml");
+ }
+ return [...new Set(available)];
+ }

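resolveEngine() above selects a model format and backend (probing CoreML, CUDA, TensorRT, then CPU when runtime is "auto"), locates or downloads the model file, and degrades from the in-process ONNX engine to the Python bridge and finally to a CPU ONNX fallback. A hypothetical invocation is sketched below; the model entry is reduced to the fields the resolver actually reads, the URL is a placeholder, and module-level await is assumed:

    // Illustrative only; the package's real model entries come from its catalog.
    const modelEntry = {
      id: "scrfd-500m",
      inputSize: { width: 640, height: 640 },
      formats: { onnx: { url: "https://example.com/scrfd_500m.onnx" } } // placeholder URL
    };
    const { engine, format, modelPath } = await resolveEngine({
      runtime: "auto",       // probe backends in priority order
      backend: "cpu",        // consulted only when runtime === "onnx"
      modelEntry,
      modelsDir: "./models", // used when no model service is provided
      models: undefined,     // optional model service with ensure(id, format)
      pythonPath: undefined  // set to enable the Python fallback engines
    });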
  // src/addons/face-detection/index.ts
- var face_detection_exports = {};
- __export(face_detection_exports, {
- default: () => FaceDetectionAddon
- });
- module.exports = __toCommonJS(face_detection_exports);
- var import_face_detection_models = __toESM(require_face_detection_models());
- var import_image_utils = __toESM(require_image_utils());
- var import_scrfd = __toESM(require_scrfd());
- var import_engine_resolver = __toESM(require_engine_resolver());
  var FACE_LABEL = { id: "face", name: "Face" };
- var FACE_LABELS = [FACE_LABEL];
+ var FACE_LABELS2 = [FACE_LABEL];
  var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
  var RAM_ESTIMATES = {
  "scrfd-500m": 50,
@@ -1313,7 +1051,7 @@ var FaceDetectionAddon = class {
  resolvedConfig = null;
  ctx = null;
  getModelRequirements() {
- return import_face_detection_models.FACE_DETECTION_MODELS.map((m) => ({
+ return FACE_DETECTION_MODELS.map((m) => ({
  modelId: m.id,
  name: m.name,
  minRAM_MB: RAM_ESTIMATES[m.id] ?? 50,
@@ -1329,7 +1067,7 @@ var FaceDetectionAddon = class {
  const cfg = ctx.addonConfig;
  const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "scrfd-500m";
  this.confidence = cfg["confidence"] ?? 0.5;
- const entry = import_face_detection_models.FACE_DETECTION_MODELS.find((m) => m.id === modelId);
+ const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId);
  if (!entry) {
  throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
  }
@@ -1340,8 +1078,8 @@ var FaceDetectionAddon = class {
  const start = Date.now();
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
  const targetSize = Math.max(inputW, inputH);
- const personCrop = await (0, import_image_utils.cropRegion)(input.frame.data, input.roi);
- const lb = await (0, import_image_utils.letterbox)(personCrop, targetSize);
+ const personCrop = await cropRegion(input.frame.data, input.roi);
+ const lb = await letterbox(personCrop, targetSize);
  const engineWithMulti = this.engine;
  let outputs;
  if (typeof engineWithMulti.runMultiOutput === "function") {
@@ -1350,7 +1088,7 @@ var FaceDetectionAddon = class {
  const single = await this.engine.run(lb.data, [1, 3, targetSize, targetSize]);
  outputs = { output0: single };
  }
- const crops = (0, import_scrfd.scrfdPostprocess)(
+ const crops = scrfdPostprocess(
  outputs,
  this.confidence,
  targetSize,
@@ -1369,13 +1107,13 @@ var FaceDetectionAddon = class {
  const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
  const backend = config?.backend ?? "cpu";
  const format = config?.format ?? "onnx";
- const entry = import_face_detection_models.FACE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+ const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
  this.modelEntry = entry;
  const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
  if (this.ctx.models) {
  await this.ctx.models.ensure(modelId, format);
  }
- const resolved = await (0, import_engine_resolver.resolveEngine)({
+ const resolved = await resolveEngine({
  runtime,
  backend,
  modelEntry: entry,
@@ -1399,7 +1137,7 @@ var FaceDetectionAddon = class {
  key: "modelId",
  label: "Model",
  type: "model-selector",
- catalog: [...import_face_detection_models.FACE_DETECTION_MODELS],
+ catalog: [...FACE_DETECTION_MODELS],
  allowCustom: false,
  allowConversion: false,
  acceptFormats: ["onnx", "coreml", "openvino"],
@@ -1461,13 +1199,13 @@ var FaceDetectionAddon = class {
  return FACE_CLASS_MAP;
  }
  getModelCatalog() {
- return [...import_face_detection_models.FACE_DETECTION_MODELS];
+ return [...FACE_DETECTION_MODELS];
  }
  getAvailableModels() {
  return [];
  }
  getActiveLabels() {
- return FACE_LABELS;
+ return FACE_LABELS2;
  }
  async probe() {
  return {