@camstack/addon-vision 0.1.2 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/dist/addons/animal-classifier/index.js +999 -822
  2. package/dist/addons/animal-classifier/index.js.map +1 -1
  3. package/dist/addons/animal-classifier/index.mjs +242 -7
  4. package/dist/addons/animal-classifier/index.mjs.map +1 -1
  5. package/dist/addons/audio-classification/index.js +501 -378
  6. package/dist/addons/audio-classification/index.js.map +1 -1
  7. package/dist/addons/audio-classification/index.mjs +224 -4
  8. package/dist/addons/audio-classification/index.mjs.map +1 -1
  9. package/dist/addons/bird-global-classifier/index.js +1002 -825
  10. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  11. package/dist/addons/bird-global-classifier/index.mjs +248 -7
  12. package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
  13. package/dist/addons/bird-nabirds-classifier/index.js +1002 -825
  14. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  15. package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
  16. package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
  17. package/dist/addons/face-detection/index.js +1196 -934
  18. package/dist/addons/face-detection/index.js.map +1 -1
  19. package/dist/addons/face-detection/index.mjs +227 -7
  20. package/dist/addons/face-detection/index.mjs.map +1 -1
  21. package/dist/addons/face-recognition/index.js +1003 -807
  22. package/dist/addons/face-recognition/index.js.map +1 -1
  23. package/dist/addons/face-recognition/index.mjs +197 -6
  24. package/dist/addons/face-recognition/index.mjs.map +1 -1
  25. package/dist/addons/motion-detection/index.js +214 -111
  26. package/dist/addons/motion-detection/index.js.map +1 -1
  27. package/dist/addons/motion-detection/index.mjs +12 -9
  28. package/dist/addons/motion-detection/index.mjs.map +1 -1
  29. package/dist/addons/object-detection/index.js +1287 -1082
  30. package/dist/addons/object-detection/index.js.map +1 -1
  31. package/dist/addons/object-detection/index.mjs +373 -7
  32. package/dist/addons/object-detection/index.mjs.map +1 -1
  33. package/dist/addons/plate-detection/index.js +1075 -868
  34. package/dist/addons/plate-detection/index.js.map +1 -1
  35. package/dist/addons/plate-detection/index.mjs +230 -7
  36. package/dist/addons/plate-detection/index.mjs.map +1 -1
  37. package/dist/addons/plate-recognition/index.js +684 -505
  38. package/dist/addons/plate-recognition/index.js.map +1 -1
  39. package/dist/addons/plate-recognition/index.mjs +244 -5
  40. package/dist/addons/plate-recognition/index.mjs.map +1 -1
  41. package/dist/addons/segmentation-refiner/index.js +967 -790
  42. package/dist/addons/segmentation-refiner/index.js.map +1 -1
  43. package/dist/addons/segmentation-refiner/index.mjs +21 -17
  44. package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
  45. package/dist/addons/vehicle-classifier/index.js +581 -410
  46. package/dist/addons/vehicle-classifier/index.js.map +1 -1
  47. package/dist/addons/vehicle-classifier/index.mjs +20 -16
  48. package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
  49. package/dist/chunk-2YMA6QOV.mjs +193 -0
  50. package/dist/chunk-2YMA6QOV.mjs.map +1 -0
  51. package/dist/chunk-3IIFBJCD.mjs +45 -0
  52. package/dist/chunk-BS4DKYGN.mjs +48 -0
  53. package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
  54. package/dist/chunk-DE7I3VHO.mjs +106 -0
  55. package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
  56. package/dist/chunk-F6D2OZ36.mjs +89 -0
  57. package/dist/chunk-F6D2OZ36.mjs.map +1 -0
  58. package/dist/chunk-GAOIFQDX.mjs +59 -0
  59. package/dist/chunk-GAOIFQDX.mjs.map +1 -0
  60. package/dist/chunk-HUIX2XVR.mjs +159 -0
  61. package/dist/chunk-HUIX2XVR.mjs.map +1 -0
  62. package/dist/chunk-K36R6HWY.mjs +51 -0
  63. package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
  64. package/dist/chunk-MBTAI3WE.mjs +78 -0
  65. package/dist/chunk-MBTAI3WE.mjs.map +1 -0
  66. package/dist/chunk-MGT6RUVX.mjs +423 -0
  67. package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
  68. package/dist/chunk-PIFS7AIT.mjs +446 -0
  69. package/dist/{chunk-2IOKI4ES.mjs.map → chunk-PIFS7AIT.mjs.map} +1 -1
  70. package/dist/chunk-WG66JYYW.mjs +116 -0
  71. package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
  72. package/dist/chunk-XD7WGXHZ.mjs +82 -0
  73. package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
  74. package/dist/chunk-YYDM6V2F.mjs +113 -0
  75. package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
  76. package/dist/chunk-ZK7P3TZN.mjs +286 -0
  77. package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
  78. package/dist/index.js +4443 -3924
  79. package/dist/index.js.map +1 -1
  80. package/dist/index.mjs +2698 -250
  81. package/dist/index.mjs.map +1 -1
  82. package/package.json +1 -1
  83. package/dist/chunk-22BHCDT5.mjs +0 -101
  84. package/dist/chunk-2IOKI4ES.mjs +0 -335
  85. package/dist/chunk-7DYHXUPZ.mjs +0 -36
  86. package/dist/chunk-BJTO5JO5.mjs +0 -11
  87. package/dist/chunk-BP7H4NFS.mjs +0 -412
  88. package/dist/chunk-BR2FPGOX.mjs +0 -98
  89. package/dist/chunk-D6WEHN33.mjs +0 -276
  90. package/dist/chunk-D6WEHN33.mjs.map +0 -1
  91. package/dist/chunk-DRYFGARD.mjs +0 -289
  92. package/dist/chunk-DRYFGARD.mjs.map +0 -1
  93. package/dist/chunk-DUN6XU3N.mjs +0 -72
  94. package/dist/chunk-ESLHNWWE.mjs +0 -387
  95. package/dist/chunk-ESLHNWWE.mjs.map +0 -1
  96. package/dist/chunk-JUQEW6ON.mjs +0 -256
  97. package/dist/chunk-JUQEW6ON.mjs.map +0 -1
  98. package/dist/chunk-KUO2BVFY.mjs +0 -90
  99. package/dist/chunk-R5J3WAUI.mjs +0 -645
  100. package/dist/chunk-R5J3WAUI.mjs.map +0 -1
  101. package/dist/chunk-XZ6ZMXXU.mjs +0 -39
  102. package/dist/chunk-YPU4WTXZ.mjs +0 -269
  103. package/dist/chunk-YPU4WTXZ.mjs.map +0 -1
  104. package/dist/chunk-YUCD2TFH.mjs +0 -242
  105. package/dist/chunk-YUCD2TFH.mjs.map +0 -1
  106. package/dist/chunk-ZTJENCFC.mjs +0 -379
  107. package/dist/chunk-ZTJENCFC.mjs.map +0 -1
  108. package/dist/chunk-ZWYXXCXP.mjs +0 -248
  109. package/dist/chunk-ZWYXXCXP.mjs.map +0 -1
  110. /package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
@@ -1,335 +0,0 @@
1
- // src/shared/node-engine.ts
2
- import * as path from "path";
3
- var BACKEND_TO_PROVIDER = {
4
- cpu: "cpu",
5
- coreml: "coreml",
6
- cuda: "cuda",
7
- tensorrt: "tensorrt",
8
- dml: "dml"
9
- };
10
- var BACKEND_TO_DEVICE = {
11
- cpu: "cpu",
12
- coreml: "gpu-mps",
13
- cuda: "gpu-cuda",
14
- tensorrt: "tensorrt"
15
- };
16
- var NodeInferenceEngine = class {
17
- constructor(modelPath, backend) {
18
- this.modelPath = modelPath;
19
- this.backend = backend;
20
- this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
21
- }
22
- runtime = "onnx";
23
- device;
24
- session = null;
25
- async initialize() {
26
- const ort = await import("onnxruntime-node");
27
- const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
28
- const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
29
- const sessionOptions = {
30
- executionProviders: [provider]
31
- };
32
- this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
33
- }
34
- async run(input, inputShape) {
35
- if (!this.session) {
36
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
37
- }
38
- const ort = await import("onnxruntime-node");
39
- const sess = this.session;
40
- const inputName = sess.inputNames[0];
41
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
42
- const feeds = { [inputName]: tensor };
43
- const results = await sess.run(feeds);
44
- const outputName = sess.outputNames[0];
45
- const outputTensor = results[outputName];
46
- return outputTensor.data;
47
- }
48
- async runMultiOutput(input, inputShape) {
49
- if (!this.session) {
50
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
51
- }
52
- const ort = await import("onnxruntime-node");
53
- const sess = this.session;
54
- const inputName = sess.inputNames[0];
55
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
56
- const feeds = { [inputName]: tensor };
57
- const results = await sess.run(feeds);
58
- const out = {};
59
- for (const name of sess.outputNames) {
60
- out[name] = results[name].data;
61
- }
62
- return out;
63
- }
64
- async dispose() {
65
- this.session = null;
66
- }
67
- };
68
-
69
- // src/shared/python-engine.ts
70
- import { spawn } from "child_process";
71
- var PythonInferenceEngine = class {
72
- constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
73
- this.pythonPath = pythonPath;
74
- this.scriptPath = scriptPath;
75
- this.modelPath = modelPath;
76
- this.extraArgs = extraArgs;
77
- this.runtime = runtime;
78
- const runtimeDeviceMap = {
79
- onnx: "cpu",
80
- coreml: "gpu-mps",
81
- pytorch: "cpu",
82
- openvino: "cpu",
83
- tflite: "cpu"
84
- };
85
- this.device = runtimeDeviceMap[runtime];
86
- }
87
- runtime;
88
- device;
89
- process = null;
90
- receiveBuffer = Buffer.alloc(0);
91
- pendingResolve = null;
92
- pendingReject = null;
93
- async initialize() {
94
- const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
95
- this.process = spawn(this.pythonPath, args, {
96
- stdio: ["pipe", "pipe", "pipe"]
97
- });
98
- if (!this.process.stdout || !this.process.stdin) {
99
- throw new Error("PythonInferenceEngine: failed to create process pipes");
100
- }
101
- this.process.stderr?.on("data", (chunk) => {
102
- process.stderr.write(`[python-engine] ${chunk.toString()}`);
103
- });
104
- this.process.on("error", (err) => {
105
- this.pendingReject?.(err);
106
- this.pendingReject = null;
107
- this.pendingResolve = null;
108
- });
109
- this.process.on("exit", (code) => {
110
- if (code !== 0) {
111
- const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
112
- this.pendingReject?.(err);
113
- this.pendingReject = null;
114
- this.pendingResolve = null;
115
- }
116
- });
117
- this.process.stdout.on("data", (chunk) => {
118
- this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
119
- this._tryReceive();
120
- });
121
- await new Promise((resolve2, reject) => {
122
- const timeout = setTimeout(() => resolve2(), 2e3);
123
- this.process?.on("error", (err) => {
124
- clearTimeout(timeout);
125
- reject(err);
126
- });
127
- this.process?.on("exit", (code) => {
128
- clearTimeout(timeout);
129
- if (code !== 0) {
130
- reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
131
- }
132
- });
133
- });
134
- }
135
- _tryReceive() {
136
- if (this.receiveBuffer.length < 4) return;
137
- const length = this.receiveBuffer.readUInt32LE(0);
138
- if (this.receiveBuffer.length < 4 + length) return;
139
- const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
140
- this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
141
- const resolve2 = this.pendingResolve;
142
- const reject = this.pendingReject;
143
- this.pendingResolve = null;
144
- this.pendingReject = null;
145
- if (!resolve2) return;
146
- try {
147
- const parsed = JSON.parse(jsonBytes.toString("utf8"));
148
- resolve2(parsed);
149
- } catch (err) {
150
- reject?.(err instanceof Error ? err : new Error(String(err)));
151
- }
152
- }
153
- /** Send JPEG buffer, receive JSON detection results */
154
- async runJpeg(jpeg) {
155
- if (!this.process?.stdin) {
156
- throw new Error("PythonInferenceEngine: process not initialized");
157
- }
158
- return new Promise((resolve2, reject) => {
159
- this.pendingResolve = resolve2;
160
- this.pendingReject = reject;
161
- const lengthBuf = Buffer.allocUnsafe(4);
162
- lengthBuf.writeUInt32LE(jpeg.length, 0);
163
- this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
164
- });
165
- }
166
- /** IInferenceEngine.run — wraps runJpeg for compatibility */
167
- async run(_input, _inputShape) {
168
- throw new Error(
169
- "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
170
- );
171
- }
172
- /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
173
- async runMultiOutput(_input, _inputShape) {
174
- throw new Error(
175
- "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
176
- );
177
- }
178
- async dispose() {
179
- if (this.process) {
180
- this.process.stdin?.end();
181
- this.process.kill("SIGTERM");
182
- this.process = null;
183
- }
184
- }
185
- };
186
-
187
- // src/shared/engine-resolver.ts
188
- import * as fs from "fs";
189
- import * as path2 from "path";
190
- var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
191
- var BACKEND_TO_FORMAT = {
192
- cpu: "onnx",
193
- coreml: "onnx",
194
- cuda: "onnx",
195
- tensorrt: "onnx"
196
- };
197
- var RUNTIME_TO_FORMAT = {
198
- onnx: "onnx",
199
- coreml: "coreml",
200
- openvino: "openvino",
201
- tflite: "tflite",
202
- pytorch: "pt"
203
- };
204
- function modelFilePath(modelsDir, modelEntry, format) {
205
- const formatEntry = modelEntry.formats[format];
206
- if (!formatEntry) {
207
- throw new Error(`Model ${modelEntry.id} has no ${format} format`);
208
- }
209
- const urlParts = formatEntry.url.split("/");
210
- const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
211
- return path2.join(modelsDir, filename);
212
- }
213
- function modelExists(filePath) {
214
- try {
215
- return fs.existsSync(filePath);
216
- } catch {
217
- return false;
218
- }
219
- }
220
- async function resolveEngine(options) {
221
- const { runtime, backend, modelEntry, modelsDir, models } = options;
222
- let selectedFormat;
223
- let selectedBackend;
224
- if (runtime === "auto") {
225
- const available = await probeOnnxBackends();
226
- let chosen = null;
227
- for (const b of AUTO_BACKEND_PRIORITY) {
228
- if (!available.includes(b)) continue;
229
- const fmt = BACKEND_TO_FORMAT[b];
230
- if (!fmt) continue;
231
- if (!modelEntry.formats[fmt]) continue;
232
- chosen = { backend: b, format: fmt };
233
- break;
234
- }
235
- if (!chosen) {
236
- throw new Error(
237
- `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
238
- );
239
- }
240
- selectedFormat = chosen.format;
241
- selectedBackend = chosen.backend;
242
- } else {
243
- const fmt = RUNTIME_TO_FORMAT[runtime];
244
- if (!fmt) {
245
- throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
246
- }
247
- if (!modelEntry.formats[fmt]) {
248
- throw new Error(
249
- `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
250
- );
251
- }
252
- selectedFormat = fmt;
253
- selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
254
- }
255
- let modelPath;
256
- if (models) {
257
- modelPath = await models.ensure(modelEntry.id, selectedFormat);
258
- } else {
259
- modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
260
- if (!modelExists(modelPath)) {
261
- throw new Error(
262
- `resolveEngine: model file not found at ${modelPath} and no model service provided`
263
- );
264
- }
265
- }
266
- if (selectedFormat === "onnx") {
267
- const engine = new NodeInferenceEngine(modelPath, selectedBackend);
268
- await engine.initialize();
269
- return { engine, format: selectedFormat, modelPath };
270
- }
271
- const { pythonPath } = options;
272
- const PYTHON_SCRIPT_MAP = {
273
- coreml: "coreml_inference.py",
274
- pytorch: "pytorch_inference.py",
275
- openvino: "openvino_inference.py"
276
- };
277
- const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
278
- const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
279
- if (scriptName && pythonPath) {
280
- const candidates = [
281
- path2.join(__dirname, "../../python", scriptName),
282
- path2.join(__dirname, "../python", scriptName),
283
- path2.join(__dirname, "../../../python", scriptName)
284
- ];
285
- const scriptPath = candidates.find((p) => fs.existsSync(p));
286
- if (!scriptPath) {
287
- throw new Error(
288
- `resolveEngine: Python script "${scriptName}" not found. Searched:
289
- ${candidates.join("\n")}`
290
- );
291
- }
292
- const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
293
- const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
294
- `--input-size=${inputSize}`,
295
- `--confidence=0.25`
296
- ]);
297
- await engine.initialize();
298
- return { engine, format: selectedFormat, modelPath };
299
- }
300
- const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
301
- if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
302
- const engine = new NodeInferenceEngine(fallbackPath, "cpu");
303
- await engine.initialize();
304
- return { engine, format: "onnx", modelPath: fallbackPath };
305
- }
306
- throw new Error(
307
- `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
308
- );
309
- }
310
- async function probeOnnxBackends() {
311
- const available = ["cpu"];
312
- try {
313
- const ort = await import("onnxruntime-node");
314
- const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
315
- for (const p of providers) {
316
- const normalized = p.toLowerCase().replace("executionprovider", "");
317
- if (normalized === "coreml") available.push("coreml");
318
- else if (normalized === "cuda") available.push("cuda");
319
- else if (normalized === "tensorrt") available.push("tensorrt");
320
- }
321
- } catch {
322
- }
323
- if (process.platform === "darwin" && !available.includes("coreml")) {
324
- available.push("coreml");
325
- }
326
- return [...new Set(available)];
327
- }
328
-
329
- export {
330
- NodeInferenceEngine,
331
- PythonInferenceEngine,
332
- resolveEngine,
333
- probeOnnxBackends
334
- };
335
- //# sourceMappingURL=chunk-2IOKI4ES.mjs.map
@@ -1,36 +0,0 @@
1
- import {
2
- MLPACKAGE_FILES
3
- } from "./chunk-BP7H4NFS.mjs";
4
-
5
- // src/catalogs/segmentation-refiner-models.ts
6
- import { hfModelUrl } from "@camstack/types";
7
- var HF_REPO = "camstack/camstack-models";
8
- var hf = (path) => hfModelUrl(HF_REPO, path);
9
- var MASK_LABELS = [
10
- { id: "mask", name: "Segmentation Mask" }
11
- ];
12
- var SEGMENTATION_REFINER_MODELS = [
13
- {
14
- id: "u2netp",
15
- name: "U2-Net Portable",
16
- description: "U2-Net-P \u2014 ultra-lightweight salient object segmentation (4.7 MB), no prompt needed",
17
- inputSize: { width: 320, height: 320 },
18
- labels: MASK_LABELS,
19
- formats: {
20
- onnx: { url: hf("segmentationRefiner/u2netp/onnx/camstack-u2netp.onnx"), sizeMB: 5 },
21
- coreml: {
22
- url: hf("segmentationRefiner/u2netp/coreml/camstack-u2netp.mlpackage"),
23
- sizeMB: 3,
24
- isDirectory: true,
25
- files: MLPACKAGE_FILES,
26
- runtimes: ["python"]
27
- }
28
- // OpenVINO: not yet converted
29
- }
30
- }
31
- ];
32
-
33
- export {
34
- SEGMENTATION_REFINER_MODELS
35
- };
36
- //# sourceMappingURL=chunk-7DYHXUPZ.mjs.map
@@ -1,11 +0,0 @@
1
- var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
2
- get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
3
- }) : x)(function(x) {
4
- if (typeof require !== "undefined") return require.apply(this, arguments);
5
- throw Error('Dynamic require of "' + x + '" is not supported');
6
- });
7
-
8
- export {
9
- __require
10
- };
11
- //# sourceMappingURL=chunk-BJTO5JO5.mjs.map