@camstack/addon-vision 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dist/addons/animal-classifier/index.d.mts +6 -1
  2. package/dist/addons/animal-classifier/index.d.ts +6 -1
  3. package/dist/addons/animal-classifier/index.js +514 -49
  4. package/dist/addons/animal-classifier/index.js.map +1 -1
  5. package/dist/addons/animal-classifier/index.mjs +6 -4
  6. package/dist/addons/audio-classification/index.d.mts +6 -1
  7. package/dist/addons/audio-classification/index.d.ts +6 -1
  8. package/dist/addons/audio-classification/index.js +87 -26
  9. package/dist/addons/audio-classification/index.js.map +1 -1
  10. package/dist/addons/audio-classification/index.mjs +3 -2
  11. package/dist/addons/bird-global-classifier/index.d.mts +6 -1
  12. package/dist/addons/bird-global-classifier/index.d.ts +6 -1
  13. package/dist/addons/bird-global-classifier/index.js +515 -50
  14. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  15. package/dist/addons/bird-global-classifier/index.mjs +6 -4
  16. package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
  17. package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
  18. package/dist/addons/bird-nabirds-classifier/index.js +524 -60
  19. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  20. package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
  21. package/dist/addons/face-detection/index.d.mts +6 -1
  22. package/dist/addons/face-detection/index.d.ts +6 -1
  23. package/dist/addons/face-detection/index.js +539 -39
  24. package/dist/addons/face-detection/index.js.map +1 -1
  25. package/dist/addons/face-detection/index.mjs +5 -3
  26. package/dist/addons/face-recognition/index.d.mts +6 -1
  27. package/dist/addons/face-recognition/index.d.ts +6 -1
  28. package/dist/addons/face-recognition/index.js +488 -33
  29. package/dist/addons/face-recognition/index.js.map +1 -1
  30. package/dist/addons/face-recognition/index.mjs +5 -3
  31. package/dist/addons/motion-detection/index.d.mts +3 -1
  32. package/dist/addons/motion-detection/index.d.ts +3 -1
  33. package/dist/addons/motion-detection/index.js +11 -3
  34. package/dist/addons/motion-detection/index.js.map +1 -1
  35. package/dist/addons/motion-detection/index.mjs +140 -3
  36. package/dist/addons/motion-detection/index.mjs.map +1 -1
  37. package/dist/addons/object-detection/index.d.mts +6 -1
  38. package/dist/addons/object-detection/index.d.ts +6 -1
  39. package/dist/addons/object-detection/index.js +370 -72
  40. package/dist/addons/object-detection/index.js.map +1 -1
  41. package/dist/addons/object-detection/index.mjs +5 -3
  42. package/dist/addons/plate-detection/index.d.mts +6 -1
  43. package/dist/addons/plate-detection/index.d.ts +6 -1
  44. package/dist/addons/plate-detection/index.js +532 -31
  45. package/dist/addons/plate-detection/index.js.map +1 -1
  46. package/dist/addons/plate-detection/index.mjs +5 -3
  47. package/dist/addons/plate-recognition/index.d.mts +7 -1
  48. package/dist/addons/plate-recognition/index.d.ts +7 -1
  49. package/dist/addons/plate-recognition/index.js +177 -44
  50. package/dist/addons/plate-recognition/index.js.map +1 -1
  51. package/dist/addons/plate-recognition/index.mjs +4 -3
  52. package/dist/addons/segmentation-refiner/index.d.mts +30 -0
  53. package/dist/addons/segmentation-refiner/index.d.ts +30 -0
  54. package/dist/addons/segmentation-refiner/index.js +1049 -0
  55. package/dist/addons/segmentation-refiner/index.js.map +1 -0
  56. package/dist/addons/segmentation-refiner/index.mjs +209 -0
  57. package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
  58. package/dist/addons/vehicle-classifier/index.d.mts +31 -0
  59. package/dist/addons/vehicle-classifier/index.d.ts +31 -0
  60. package/dist/addons/vehicle-classifier/index.js +689 -0
  61. package/dist/addons/vehicle-classifier/index.js.map +1 -0
  62. package/dist/addons/vehicle-classifier/index.mjs +250 -0
  63. package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
  64. package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
  65. package/dist/chunk-22BHCDT5.mjs.map +1 -0
  66. package/dist/{chunk-LPI42WL6.mjs → chunk-6DJZZR64.mjs} +24 -12
  67. package/dist/chunk-6DJZZR64.mjs.map +1 -0
  68. package/dist/chunk-7DYHXUPZ.mjs +36 -0
  69. package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
  70. package/dist/chunk-BJTO5JO5.mjs +11 -0
  71. package/dist/chunk-BP7H4NFS.mjs +412 -0
  72. package/dist/chunk-BP7H4NFS.mjs.map +1 -0
  73. package/dist/chunk-BR2FPGOX.mjs +98 -0
  74. package/dist/chunk-BR2FPGOX.mjs.map +1 -0
  75. package/dist/{chunk-B3R66MPF.mjs → chunk-DNQNGDR4.mjs} +58 -21
  76. package/dist/chunk-DNQNGDR4.mjs.map +1 -0
  77. package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
  78. package/dist/chunk-DUN6XU3N.mjs.map +1 -0
  79. package/dist/{chunk-MEVASN3P.mjs → chunk-EPNWLSCG.mjs} +104 -22
  80. package/dist/chunk-EPNWLSCG.mjs.map +1 -0
  81. package/dist/{chunk-AYBFB7ID.mjs → chunk-G32RCIUI.mjs} +200 -318
  82. package/dist/chunk-G32RCIUI.mjs.map +1 -0
  83. package/dist/{chunk-3MQFUDRU.mjs → chunk-GR65KM6X.mjs} +76 -47
  84. package/dist/chunk-GR65KM6X.mjs.map +1 -0
  85. package/dist/{chunk-5AIQSN32.mjs → chunk-H7LMBTS5.mjs} +66 -17
  86. package/dist/chunk-H7LMBTS5.mjs.map +1 -0
  87. package/dist/{chunk-J4WRYHHY.mjs → chunk-IK4XIQPC.mjs} +66 -36
  88. package/dist/chunk-IK4XIQPC.mjs.map +1 -0
  89. package/dist/{chunk-5JJZGKL7.mjs → chunk-J6VNIIYX.mjs} +102 -19
  90. package/dist/chunk-J6VNIIYX.mjs.map +1 -0
  91. package/dist/{chunk-Q3SQOYG6.mjs → chunk-ML2JX43J.mjs} +67 -37
  92. package/dist/chunk-ML2JX43J.mjs.map +1 -0
  93. package/dist/{chunk-PDSHDDPV.mjs → chunk-WUMV524J.mjs} +159 -35
  94. package/dist/chunk-WUMV524J.mjs.map +1 -0
  95. package/dist/chunk-XZ6ZMXXU.mjs +39 -0
  96. package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
  97. package/dist/index.d.mts +17 -5
  98. package/dist/index.d.ts +17 -5
  99. package/dist/index.js +1344 -550
  100. package/dist/index.js.map +1 -1
  101. package/dist/index.mjs +191 -20
  102. package/dist/index.mjs.map +1 -1
  103. package/package.json +95 -18
  104. package/python/coreml_inference.py +61 -18
  105. package/python/openvino_inference.py +12 -4
  106. package/python/pytorch_inference.py +12 -4
  107. package/dist/addons/camera-native-detection/index.d.mts +0 -32
  108. package/dist/addons/camera-native-detection/index.d.ts +0 -32
  109. package/dist/addons/camera-native-detection/index.js +0 -99
  110. package/dist/addons/camera-native-detection/index.js.map +0 -1
  111. package/dist/addons/camera-native-detection/index.mjs +0 -7
  112. package/dist/chunk-3MQFUDRU.mjs.map +0 -1
  113. package/dist/chunk-5AIQSN32.mjs.map +0 -1
  114. package/dist/chunk-5JJZGKL7.mjs.map +0 -1
  115. package/dist/chunk-6OR5TE7A.mjs.map +0 -1
  116. package/dist/chunk-AYBFB7ID.mjs.map +0 -1
  117. package/dist/chunk-B3R66MPF.mjs.map +0 -1
  118. package/dist/chunk-DTOAB2CE.mjs +0 -79
  119. package/dist/chunk-DTOAB2CE.mjs.map +0 -1
  120. package/dist/chunk-ISOIDU4U.mjs.map +0 -1
  121. package/dist/chunk-J4WRYHHY.mjs.map +0 -1
  122. package/dist/chunk-LPI42WL6.mjs.map +0 -1
  123. package/dist/chunk-MEVASN3P.mjs.map +0 -1
  124. package/dist/chunk-PDSHDDPV.mjs.map +0 -1
  125. package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
  126. package/dist/chunk-QIMDG34B.mjs +0 -229
  127. package/dist/chunk-QIMDG34B.mjs.map +0 -1
  128. package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
  129. package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
  130. package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
  131. /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
@@ -0,0 +1,689 @@
1
+ "use strict";
2
+ var __create = Object.create;
3
+ var __defProp = Object.defineProperty;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
7
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
8
+ var __export = (target, all) => {
9
+ for (var name in all)
10
+ __defProp(target, name, { get: all[name], enumerable: true });
11
+ };
12
+ var __copyProps = (to, from, except, desc) => {
13
+ if (from && typeof from === "object" || typeof from === "function") {
14
+ for (let key of __getOwnPropNames(from))
15
+ if (!__hasOwnProp.call(to, key) && key !== except)
16
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
17
+ }
18
+ return to;
19
+ };
20
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
+ // If the importer is in node compatibility mode or this is not an ESM
22
+ // file that has been converted to a CommonJS file using a Babel-
23
+ // compatible transform (i.e. "__esModule" has not been set), then set
24
+ // "default" to the CommonJS "module.exports" for node compatibility.
25
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
+ mod
27
+ ));
28
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
+
30
+ // src/addons/vehicle-classifier/index.ts
31
+ var vehicle_classifier_exports = {};
32
+ __export(vehicle_classifier_exports, {
33
+ default: () => VehicleClassifierAddon
34
+ });
35
+ module.exports = __toCommonJS(vehicle_classifier_exports);
36
+
37
+ // src/catalogs/vehicle-classification-models.ts
38
+ var import_types = require("@camstack/types");
39
+ var HF_REPO = "camstack/camstack-models";
40
+ var hf = (path4) => (0, import_types.hfModelUrl)(HF_REPO, path4);
41
+ var VEHICLE_LABELS = [
42
+ { id: "vehicle-type", name: "Vehicle Type" }
43
+ ];
44
+ var VEHICLE_TYPE_MODELS = [
45
+ {
46
+ id: "vehicle-type-efficientnet",
47
+ name: "Vehicle Type (EfficientNet)",
48
+ description: "EfficientNet-B4 vehicle make/model/year classifier \u2014 8,949 classes from VMMRdb",
49
+ inputSize: { width: 380, height: 380 },
50
+ inputNormalization: "imagenet",
51
+ labels: VEHICLE_LABELS,
52
+ formats: {
53
+ onnx: { url: hf("vehicleClassification/efficientnet/onnx/camstack-vehicle-type-efficientnet.onnx"), sizeMB: 135 },
54
+ coreml: {
55
+ url: hf("vehicleClassification/efficientnet/coreml/camstack-vehicle-type-efficientnet.mlpackage"),
56
+ sizeMB: 10,
57
+ isDirectory: true,
58
+ files: ["Manifest.json", "Data/com.apple.CoreML/model.mlmodel", "Data/com.apple.CoreML/weights/weight.bin"],
59
+ runtimes: ["python"]
60
+ }
61
+ },
62
+ extraFiles: [
63
+ {
64
+ url: hf("vehicleClassification/efficientnet/camstack-vehicle-type-labels.json"),
65
+ filename: "camstack-vehicle-type-labels.json",
66
+ sizeMB: 0.2
67
+ }
68
+ ]
69
+ }
70
+ ];
71
+
72
+ // src/shared/image-utils.ts
73
+ var import_sharp = __toESM(require("sharp"));
74
+ async function cropRegion(jpeg, roi) {
75
+ return (0, import_sharp.default)(jpeg).extract({
76
+ left: Math.round(roi.x),
77
+ top: Math.round(roi.y),
78
+ width: Math.round(roi.w),
79
+ height: Math.round(roi.h)
80
+ }).jpeg().toBuffer();
81
+ }
82
+ async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
83
+ const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
84
+ const numPixels = targetWidth * targetHeight;
85
+ const float32 = new Float32Array(3 * numPixels);
86
+ const mean = [0.485, 0.456, 0.406];
87
+ const std = [0.229, 0.224, 0.225];
88
+ if (layout === "nchw") {
89
+ for (let i = 0; i < numPixels; i++) {
90
+ const srcBase = i * 3;
91
+ for (let c = 0; c < 3; c++) {
92
+ const raw = data[srcBase + c] / 255;
93
+ let val;
94
+ if (normalization === "zero-one") {
95
+ val = raw;
96
+ } else if (normalization === "imagenet") {
97
+ val = (raw - mean[c]) / std[c];
98
+ } else {
99
+ val = data[srcBase + c];
100
+ }
101
+ float32[c * numPixels + i] = val;
102
+ }
103
+ }
104
+ } else {
105
+ for (let i = 0; i < numPixels; i++) {
106
+ const srcBase = i * 3;
107
+ for (let c = 0; c < 3; c++) {
108
+ const raw = data[srcBase + c] / 255;
109
+ let val;
110
+ if (normalization === "zero-one") {
111
+ val = raw;
112
+ } else if (normalization === "imagenet") {
113
+ val = (raw - mean[c]) / std[c];
114
+ } else {
115
+ val = data[srcBase + c];
116
+ }
117
+ float32[i * 3 + c] = val;
118
+ }
119
+ }
120
+ }
121
+ return float32;
122
+ }
123
+
124
+ // src/shared/engine-resolver.ts
125
+ var fs = __toESM(require("fs"));
126
+ var path2 = __toESM(require("path"));
127
+
128
+ // src/shared/node-engine.ts
129
+ var path = __toESM(require("path"));
130
+ var BACKEND_TO_PROVIDER = {
131
+ cpu: "cpu",
132
+ coreml: "coreml",
133
+ cuda: "cuda",
134
+ tensorrt: "tensorrt",
135
+ dml: "dml"
136
+ };
137
+ var BACKEND_TO_DEVICE = {
138
+ cpu: "cpu",
139
+ coreml: "gpu-mps",
140
+ cuda: "gpu-cuda",
141
+ tensorrt: "tensorrt"
142
+ };
143
+ var NodeInferenceEngine = class {
144
+ constructor(modelPath, backend) {
145
+ this.modelPath = modelPath;
146
+ this.backend = backend;
147
+ this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
148
+ }
149
+ runtime = "onnx";
150
+ device;
151
+ session = null;
152
+ async initialize() {
153
+ const ort = await import("onnxruntime-node");
154
+ const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
155
+ const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
156
+ const sessionOptions = {
157
+ executionProviders: [provider]
158
+ };
159
+ this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
160
+ }
161
+ async run(input, inputShape) {
162
+ if (!this.session) {
163
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
164
+ }
165
+ const ort = await import("onnxruntime-node");
166
+ const sess = this.session;
167
+ const inputName = sess.inputNames[0];
168
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
169
+ const feeds = { [inputName]: tensor };
170
+ const results = await sess.run(feeds);
171
+ const outputName = sess.outputNames[0];
172
+ const outputTensor = results[outputName];
173
+ return outputTensor.data;
174
+ }
175
+ async runMultiOutput(input, inputShape) {
176
+ if (!this.session) {
177
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
178
+ }
179
+ const ort = await import("onnxruntime-node");
180
+ const sess = this.session;
181
+ const inputName = sess.inputNames[0];
182
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
183
+ const feeds = { [inputName]: tensor };
184
+ const results = await sess.run(feeds);
185
+ const out = {};
186
+ for (const name of sess.outputNames) {
187
+ out[name] = results[name].data;
188
+ }
189
+ return out;
190
+ }
191
+ async dispose() {
192
+ this.session = null;
193
+ }
194
+ };
195
+
196
+ // src/shared/python-engine.ts
197
+ var import_node_child_process = require("child_process");
198
+ var import_core = require("@camstack/core");
199
+ var PythonInferenceEngine = class {
200
+ constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
201
+ this.pythonPath = pythonPath;
202
+ this.scriptPath = scriptPath;
203
+ this.modelPath = modelPath;
204
+ this.extraArgs = extraArgs;
205
+ this.runtime = runtime;
206
+ const runtimeDeviceMap = {
207
+ onnx: "cpu",
208
+ coreml: "gpu-mps",
209
+ pytorch: "cpu",
210
+ openvino: "cpu",
211
+ tflite: "cpu"
212
+ };
213
+ this.device = runtimeDeviceMap[runtime];
214
+ }
215
+ runtime;
216
+ device;
217
+ process = null;
218
+ receiveBuffer = Buffer.alloc(0);
219
+ pendingResolve = null;
220
+ pendingReject = null;
221
+ async initialize() {
222
+ const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
223
+ this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
224
+ stdio: ["pipe", "pipe", "pipe"]
225
+ });
226
+ if (!this.process.stdout || !this.process.stdin) {
227
+ throw new Error("PythonInferenceEngine: failed to create process pipes");
228
+ }
229
+ this.process.stderr?.on("data", (chunk) => {
230
+ process.stderr.write(`[python-engine] ${chunk.toString()}`);
231
+ });
232
+ this.process.on("error", (err) => {
233
+ this.pendingReject?.(err);
234
+ this.pendingReject = null;
235
+ this.pendingResolve = null;
236
+ });
237
+ this.process.on("exit", (code) => {
238
+ if (code !== 0) {
239
+ const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
240
+ this.pendingReject?.(err);
241
+ this.pendingReject = null;
242
+ this.pendingResolve = null;
243
+ }
244
+ });
245
+ this.process.stdout.on("data", (chunk) => {
246
+ this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
247
+ this._tryReceive();
248
+ });
249
+ await new Promise((resolve2, reject) => {
250
+ const timeout = setTimeout(() => resolve2(), 2e3);
251
+ this.process?.on("error", (err) => {
252
+ clearTimeout(timeout);
253
+ reject(err);
254
+ });
255
+ this.process?.on("exit", (code) => {
256
+ clearTimeout(timeout);
257
+ if (code !== 0) {
258
+ reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
259
+ }
260
+ });
261
+ });
262
+ }
263
+ _tryReceive() {
264
+ if (this.receiveBuffer.length < 4) return;
265
+ const length = this.receiveBuffer.readUInt32LE(0);
266
+ if (this.receiveBuffer.length < 4 + length) return;
267
+ const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
268
+ this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
269
+ const resolve2 = this.pendingResolve;
270
+ const reject = this.pendingReject;
271
+ this.pendingResolve = null;
272
+ this.pendingReject = null;
273
+ if (!resolve2) return;
274
+ try {
275
+ const parsed = JSON.parse(jsonBytes.toString("utf8"));
276
+ resolve2(parsed);
277
+ } catch (err) {
278
+ reject?.(err instanceof Error ? err : new Error(String(err)));
279
+ }
280
+ }
281
+ /** Send JPEG buffer, receive JSON detection results */
282
+ async runJpeg(jpeg) {
283
+ if (!this.process?.stdin) {
284
+ throw new Error("PythonInferenceEngine: process not initialized");
285
+ }
286
+ return new Promise((resolve2, reject) => {
287
+ this.pendingResolve = resolve2;
288
+ this.pendingReject = reject;
289
+ const lengthBuf = Buffer.allocUnsafe(4);
290
+ lengthBuf.writeUInt32LE(jpeg.length, 0);
291
+ this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
292
+ });
293
+ }
294
+ /** IInferenceEngine.run — wraps runJpeg for compatibility */
295
+ async run(_input, _inputShape) {
296
+ throw new Error(
297
+ "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
298
+ );
299
+ }
300
+ /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
301
+ async runMultiOutput(_input, _inputShape) {
302
+ throw new Error(
303
+ "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
304
+ );
305
+ }
306
+ async dispose() {
307
+ if (this.process) {
308
+ this.process.stdin?.end();
309
+ this.process.kill("SIGTERM");
310
+ this.process = null;
311
+ }
312
+ }
313
+ };
314
+
315
+ // src/shared/engine-resolver.ts
316
+ var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
317
+ var BACKEND_TO_FORMAT = {
318
+ cpu: "onnx",
319
+ coreml: "onnx",
320
+ cuda: "onnx",
321
+ tensorrt: "onnx"
322
+ };
323
+ var RUNTIME_TO_FORMAT = {
324
+ onnx: "onnx",
325
+ coreml: "coreml",
326
+ openvino: "openvino",
327
+ tflite: "tflite",
328
+ pytorch: "pt"
329
+ };
330
+ function modelFilePath(modelsDir, modelEntry, format) {
331
+ const formatEntry = modelEntry.formats[format];
332
+ if (!formatEntry) {
333
+ throw new Error(`Model ${modelEntry.id} has no ${format} format`);
334
+ }
335
+ const urlParts = formatEntry.url.split("/");
336
+ const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
337
+ return path2.join(modelsDir, filename);
338
+ }
339
+ function modelExists(filePath) {
340
+ try {
341
+ return fs.existsSync(filePath);
342
+ } catch {
343
+ return false;
344
+ }
345
+ }
346
+ async function resolveEngine(options) {
347
+ const { runtime, backend, modelEntry, modelsDir, models } = options;
348
+ let selectedFormat;
349
+ let selectedBackend;
350
+ if (runtime === "auto") {
351
+ const available = await probeOnnxBackends();
352
+ let chosen = null;
353
+ for (const b of AUTO_BACKEND_PRIORITY) {
354
+ if (!available.includes(b)) continue;
355
+ const fmt = BACKEND_TO_FORMAT[b];
356
+ if (!fmt) continue;
357
+ if (!modelEntry.formats[fmt]) continue;
358
+ chosen = { backend: b, format: fmt };
359
+ break;
360
+ }
361
+ if (!chosen) {
362
+ throw new Error(
363
+ `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
364
+ );
365
+ }
366
+ selectedFormat = chosen.format;
367
+ selectedBackend = chosen.backend;
368
+ } else {
369
+ const fmt = RUNTIME_TO_FORMAT[runtime];
370
+ if (!fmt) {
371
+ throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
372
+ }
373
+ if (!modelEntry.formats[fmt]) {
374
+ throw new Error(
375
+ `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
376
+ );
377
+ }
378
+ selectedFormat = fmt;
379
+ selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
380
+ }
381
+ let modelPath;
382
+ if (models) {
383
+ modelPath = await models.ensure(modelEntry.id, selectedFormat);
384
+ } else {
385
+ modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
386
+ if (!modelExists(modelPath)) {
387
+ throw new Error(
388
+ `resolveEngine: model file not found at ${modelPath} and no model service provided`
389
+ );
390
+ }
391
+ }
392
+ if (selectedFormat === "onnx") {
393
+ const engine = new NodeInferenceEngine(modelPath, selectedBackend);
394
+ await engine.initialize();
395
+ return { engine, format: selectedFormat, modelPath };
396
+ }
397
+ const { pythonPath } = options;
398
+ const PYTHON_SCRIPT_MAP = {
399
+ coreml: "coreml_inference.py",
400
+ pytorch: "pytorch_inference.py",
401
+ openvino: "openvino_inference.py"
402
+ };
403
+ const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
404
+ const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
405
+ if (scriptName && pythonPath) {
406
+ const candidates = [
407
+ path2.join(__dirname, "../../python", scriptName),
408
+ path2.join(__dirname, "../python", scriptName),
409
+ path2.join(__dirname, "../../../python", scriptName)
410
+ ];
411
+ const scriptPath = candidates.find((p) => fs.existsSync(p));
412
+ if (!scriptPath) {
413
+ throw new Error(
414
+ `resolveEngine: Python script "${scriptName}" not found. Searched:
415
+ ${candidates.join("\n")}`
416
+ );
417
+ }
418
+ const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
419
+ const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
420
+ `--input-size=${inputSize}`,
421
+ `--confidence=0.25`
422
+ ]);
423
+ await engine.initialize();
424
+ return { engine, format: selectedFormat, modelPath };
425
+ }
426
+ const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
427
+ if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
428
+ const engine = new NodeInferenceEngine(fallbackPath, "cpu");
429
+ await engine.initialize();
430
+ return { engine, format: "onnx", modelPath: fallbackPath };
431
+ }
432
+ throw new Error(
433
+ `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
434
+ );
435
+ }
436
+ async function probeOnnxBackends() {
437
+ const available = ["cpu"];
438
+ try {
439
+ const ort = await import("onnxruntime-node");
440
+ const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
441
+ for (const p of providers) {
442
+ const normalized = p.toLowerCase().replace("executionprovider", "");
443
+ if (normalized === "coreml") available.push("coreml");
444
+ else if (normalized === "cuda") available.push("cuda");
445
+ else if (normalized === "tensorrt") available.push("tensorrt");
446
+ }
447
+ } catch {
448
+ }
449
+ if (process.platform === "darwin" && !available.includes("coreml")) {
450
+ available.push("coreml");
451
+ }
452
+ return [...new Set(available)];
453
+ }
454
+
455
+ // src/addons/vehicle-classifier/index.ts
456
+ var fs2 = __toESM(require("fs"));
457
+ var path3 = __toESM(require("path"));
458
+ var VEHICLE_TYPE_LABEL = { id: "vehicle-type", name: "Vehicle Type" };
459
+ var VEHICLE_TYPE_LABELS = [VEHICLE_TYPE_LABEL];
460
+ var VEHICLE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
461
+ function loadLabels(modelsDir, modelId) {
462
+ const labelNames = [
463
+ `camstack-${modelId}-labels.json`,
464
+ `camstack-vehicle-type-labels.json`
465
+ ];
466
+ for (const name of labelNames) {
467
+ const labelPath = path3.join(modelsDir, name);
468
+ if (fs2.existsSync(labelPath)) {
469
+ const raw = fs2.readFileSync(labelPath, "utf-8");
470
+ return JSON.parse(raw);
471
+ }
472
+ }
473
+ throw new Error(`VehicleClassifierAddon: labels JSON not found in ${modelsDir}`);
474
+ }
475
+ function softmax(logits) {
476
+ const max = logits.reduce((a, b) => Math.max(a, b), -Infinity);
477
+ const exps = logits.map((v) => Math.exp(v - max));
478
+ const sum = exps.reduce((a, b) => a + b, 0);
479
+ return exps.map((v) => v / sum);
480
+ }
481
+ var VehicleClassifierAddon = class {
482
+ id = "vehicle-classifier";
483
+ slot = "classifier";
484
+ inputClasses = ["vehicle"];
485
+ outputClasses = ["vehicle-type:*"];
486
+ slotPriority = 0;
487
+ requiredSteps = [];
488
+ manifest = {
489
+ id: "vehicle-classifier",
490
+ name: "Vehicle Classifier",
491
+ version: "0.1.0",
492
+ description: "EfficientNet-B4 \u2014 8,949 vehicle make/model/year classes from VMMRdb",
493
+ slot: "classifier",
494
+ labelOutputType: "classification",
495
+ inputClasses: ["vehicle"],
496
+ outputClasses: ["vehicle-type:*"],
497
+ supportsCustomModels: false,
498
+ mayRequirePython: false,
499
+ defaultConfig: {
500
+ modelId: "vehicle-type-efficientnet",
501
+ runtime: "node",
502
+ backend: "cpu",
503
+ confidence: 0.05,
504
+ // Low pipeline threshold for 8949-class model
505
+ minConfidence: 0.05
506
+ }
507
+ };
508
+ engine = null;
509
+ modelEntry;
510
+ labels = [];
511
+ minConfidence = 0.05;
512
+ resolvedConfig = null;
513
+ ctx = null;
514
+ getModelRequirements() {
515
+ return VEHICLE_TYPE_MODELS.map((m) => ({
516
+ modelId: m.id,
517
+ name: m.name,
518
+ minRAM_MB: 120,
519
+ accuracyScore: 80,
520
+ formats: Object.keys(m.formats)
521
+ }));
522
+ }
523
+ configure(config) {
524
+ this.resolvedConfig = config;
525
+ }
526
+ async initialize(ctx) {
527
+ this.ctx = ctx;
528
+ const cfg = ctx.addonConfig;
529
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "vehicle-type-efficientnet";
530
+ this.minConfidence = cfg["minConfidence"] ?? 0.05;
531
+ const entry = VEHICLE_TYPE_MODELS.find((m) => m.id === modelId);
532
+ if (!entry) {
533
+ throw new Error(`VehicleClassifierAddon: unknown modelId "${modelId}"`);
534
+ }
535
+ this.modelEntry = entry;
536
+ }
537
+ async classify(input) {
538
+ if (!this.engine) await this.ensureEngine();
539
+ const start = Date.now();
540
+ const { width: inputW, height: inputH } = this.modelEntry.inputSize;
541
+ const vehicleCrop = await cropRegion(input.frame.data, input.roi);
542
+ const normalized = await resizeAndNormalize(vehicleCrop, inputW, inputH, "imagenet", "nchw");
543
+ const rawOutput = await this.engine.run(normalized, [1, 3, inputH, inputW]);
544
+ console.log(`[VehicleClassifier] Output length: ${rawOutput.length}, labels: ${this.labels.length}, first 5 raw: [${Array.from(rawOutput.slice(0, 5)).map((v) => v.toFixed(3)).join(", ")}]`);
545
+ const probs = softmax(rawOutput);
546
+ const indexed = Array.from(probs).map((v, i) => ({ score: v, idx: i }));
547
+ indexed.sort((a, b) => b.score - a.score);
548
+ const top3 = indexed.slice(0, 3);
549
+ console.log(`[VehicleClassifier] Top-3: ${top3.map((t) => `${this.labels[t.idx] ?? t.idx}=${(t.score * 100).toFixed(1)}%`).join(", ")}`);
550
+ const maxIdx = top3[0]?.idx ?? 0;
551
+ const maxScore = top3[0]?.score ?? 0;
552
+ if (maxScore < this.minConfidence) {
553
+ console.log(`[VehicleClassifier] Below confidence threshold: ${(maxScore * 100).toFixed(1)}% < ${(this.minConfidence * 100).toFixed(0)}%`);
554
+ return {
555
+ classifications: [],
556
+ inferenceMs: Date.now() - start,
557
+ modelId: this.modelEntry.id
558
+ };
559
+ }
560
+ const label = this.labels[maxIdx] ?? `vehicle_type_${maxIdx}`;
561
+ console.log(`[VehicleClassifier] Result: ${label} (${(maxScore * 100).toFixed(1)}%)`);
562
+ return {
563
+ classifications: [
564
+ {
565
+ class: label,
566
+ score: maxScore
567
+ }
568
+ ],
569
+ inferenceMs: Date.now() - start,
570
+ modelId: this.modelEntry.id
571
+ };
572
+ }
573
+ async ensureEngine() {
574
+ const config = this.resolvedConfig;
575
+ const modelId = config?.modelId ?? this.modelEntry.id;
576
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
577
+ const backend = config?.backend ?? "cpu";
578
+ const format = config?.format ?? "onnx";
579
+ const entry = VEHICLE_TYPE_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
580
+ this.modelEntry = entry;
581
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
582
+ if (this.ctx.models) {
583
+ await this.ctx.models.ensure(modelId, format);
584
+ }
585
+ this.labels = loadLabels(modelsDir, modelId);
586
+ const resolved = await resolveEngine({
587
+ runtime,
588
+ backend,
589
+ modelEntry: entry,
590
+ modelsDir,
591
+ models: this.ctx.models
592
+ });
593
+ this.engine = resolved.engine;
594
+ }
595
+ async shutdown() {
596
+ await this.engine?.dispose();
597
+ }
598
+ getConfigSchema() {
599
+ return {
600
+ sections: [
601
+ {
602
+ id: "model",
603
+ title: "Model",
604
+ columns: 1,
605
+ fields: [
606
+ {
607
+ key: "modelId",
608
+ label: "Model",
609
+ type: "model-selector",
610
+ catalog: [...VEHICLE_TYPE_MODELS],
611
+ allowCustom: false,
612
+ allowConversion: false,
613
+ acceptFormats: ["onnx", "coreml", "openvino"],
614
+ requiredMetadata: ["inputSize", "labels"],
615
+ outputFormatHint: "classification"
616
+ }
617
+ ]
618
+ },
619
+ {
620
+ id: "runtime",
621
+ title: "Runtime",
622
+ columns: 2,
623
+ fields: [
624
+ {
625
+ key: "runtime",
626
+ label: "Runtime",
627
+ type: "select",
628
+ options: [
629
+ { value: "auto", label: "Auto" },
630
+ { value: "onnx", label: "ONNX Runtime" },
631
+ { value: "coreml", label: "CoreML (Apple)" },
632
+ { value: "openvino", label: "OpenVINO (Intel)" }
633
+ ]
634
+ },
635
+ {
636
+ key: "backend",
637
+ label: "Backend",
638
+ type: "select",
639
+ showWhen: { field: "runtime", equals: "onnx" },
640
+ options: [
641
+ { value: "auto", label: "Auto" },
642
+ { value: "cpu", label: "CPU" },
643
+ { value: "coreml", label: "CoreML" },
644
+ { value: "cuda", label: "CUDA (NVIDIA)" }
645
+ ]
646
+ }
647
+ ]
648
+ },
649
+ {
650
+ id: "thresholds",
651
+ title: "Classification Settings",
652
+ columns: 1,
653
+ fields: [
654
+ {
655
+ key: "minConfidence",
656
+ label: "Minimum Confidence",
657
+ type: "slider",
658
+ min: 0.05,
659
+ max: 1,
660
+ step: 0.05,
661
+ default: 0.3
662
+ }
663
+ ]
664
+ }
665
+ ]
666
+ };
667
+ }
668
+ getClassMap() {
669
+ return VEHICLE_CLASS_MAP;
670
+ }
671
+ getModelCatalog() {
672
+ return [...VEHICLE_TYPE_MODELS];
673
+ }
674
+ getAvailableModels() {
675
+ return [];
676
+ }
677
+ getActiveLabels() {
678
+ return VEHICLE_TYPE_LABELS;
679
+ }
680
+ async probe() {
681
+ return {
682
+ available: true,
683
+ runtime: this.engine?.runtime ?? "onnx",
684
+ device: this.engine?.device ?? "cpu",
685
+ capabilities: ["fp32"]
686
+ };
687
+ }
688
+ };
689
+ //# sourceMappingURL=index.js.map