@camstack/addon-vision 0.1.7 → 0.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +30 -0
- package/dist/addons/animal-classifier/index.d.ts +30 -0
- package/dist/addons/animal-classifier/index.js +822 -999
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +7 -242
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.d.mts +36 -0
- package/dist/addons/audio-classification/index.d.ts +36 -0
- package/dist/addons/audio-classification/index.js +378 -501
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +4 -224
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.d.mts +31 -0
- package/dist/addons/bird-global-classifier/index.d.ts +31 -0
- package/dist/addons/bird-global-classifier/index.js +825 -1002
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +7 -248
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.d.mts +33 -0
- package/dist/addons/bird-nabirds-classifier/index.d.ts +33 -0
- package/dist/addons/bird-nabirds-classifier/index.js +825 -1002
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +7 -289
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.d.mts +29 -0
- package/dist/addons/face-detection/index.d.ts +29 -0
- package/dist/addons/face-detection/index.js +934 -1196
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +7 -227
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.d.mts +29 -0
- package/dist/addons/face-recognition/index.d.ts +29 -0
- package/dist/addons/face-recognition/index.js +807 -1003
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +6 -197
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.d.mts +28 -0
- package/dist/addons/motion-detection/index.d.ts +28 -0
- package/dist/addons/motion-detection/index.js +111 -214
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +9 -12
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +31 -0
- package/dist/addons/object-detection/index.d.ts +31 -0
- package/dist/addons/object-detection/index.js +1082 -1287
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +7 -373
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.d.mts +30 -0
- package/dist/addons/plate-detection/index.d.ts +30 -0
- package/dist/addons/plate-detection/index.js +868 -1075
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +7 -230
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.d.mts +31 -0
- package/dist/addons/plate-recognition/index.d.ts +31 -0
- package/dist/addons/plate-recognition/index.js +505 -684
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +5 -244
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +790 -967
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +17 -21
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +410 -581
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +16 -20
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-22BHCDT5.mjs +101 -0
- package/dist/{chunk-WG66JYYW.mjs.map → chunk-22BHCDT5.mjs.map} +1 -1
- package/dist/chunk-2IOKI4ES.mjs +335 -0
- package/dist/{chunk-PIFS7AIT.mjs.map → chunk-2IOKI4ES.mjs.map} +1 -1
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/{chunk-BS4DKYGN.mjs.map → chunk-7DYHXUPZ.mjs.map} +1 -1
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/{chunk-MGT6RUVX.mjs.map → chunk-BP7H4NFS.mjs.map} +1 -1
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/{chunk-YYDM6V2F.mjs.map → chunk-BR2FPGOX.mjs.map} +1 -1
- package/dist/chunk-D6WEHN33.mjs +276 -0
- package/dist/chunk-D6WEHN33.mjs.map +1 -0
- package/dist/chunk-DRYFGARD.mjs +289 -0
- package/dist/chunk-DRYFGARD.mjs.map +1 -0
- package/dist/chunk-DUN6XU3N.mjs +72 -0
- package/dist/{chunk-XD7WGXHZ.mjs.map → chunk-DUN6XU3N.mjs.map} +1 -1
- package/dist/chunk-ESLHNWWE.mjs +387 -0
- package/dist/chunk-ESLHNWWE.mjs.map +1 -0
- package/dist/chunk-JUQEW6ON.mjs +256 -0
- package/dist/chunk-JUQEW6ON.mjs.map +1 -0
- package/dist/chunk-KUO2BVFY.mjs +90 -0
- package/dist/{chunk-DE7I3VHO.mjs.map → chunk-KUO2BVFY.mjs.map} +1 -1
- package/dist/chunk-R5J3WAUI.mjs +645 -0
- package/dist/chunk-R5J3WAUI.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/{chunk-K36R6HWY.mjs.map → chunk-XZ6ZMXXU.mjs.map} +1 -1
- package/dist/chunk-YPU4WTXZ.mjs +269 -0
- package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
- package/dist/chunk-YUCD2TFH.mjs +242 -0
- package/dist/chunk-YUCD2TFH.mjs.map +1 -0
- package/dist/chunk-ZTJENCFC.mjs +379 -0
- package/dist/chunk-ZTJENCFC.mjs.map +1 -0
- package/dist/chunk-ZWYXXCXP.mjs +248 -0
- package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
- package/dist/index.d.mts +183 -0
- package/dist/index.d.ts +183 -0
- package/dist/index.js +3930 -4449
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +250 -2698
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -5
- package/dist/chunk-2YMA6QOV.mjs +0 -193
- package/dist/chunk-2YMA6QOV.mjs.map +0 -1
- package/dist/chunk-3IIFBJCD.mjs +0 -45
- package/dist/chunk-BS4DKYGN.mjs +0 -48
- package/dist/chunk-DE7I3VHO.mjs +0 -106
- package/dist/chunk-F6D2OZ36.mjs +0 -89
- package/dist/chunk-F6D2OZ36.mjs.map +0 -1
- package/dist/chunk-GAOIFQDX.mjs +0 -59
- package/dist/chunk-GAOIFQDX.mjs.map +0 -1
- package/dist/chunk-HUIX2XVR.mjs +0 -159
- package/dist/chunk-HUIX2XVR.mjs.map +0 -1
- package/dist/chunk-K36R6HWY.mjs +0 -51
- package/dist/chunk-MBTAI3WE.mjs +0 -78
- package/dist/chunk-MBTAI3WE.mjs.map +0 -1
- package/dist/chunk-MGT6RUVX.mjs +0 -423
- package/dist/chunk-PIFS7AIT.mjs +0 -446
- package/dist/chunk-WG66JYYW.mjs +0 -116
- package/dist/chunk-XD7WGXHZ.mjs +0 -82
- package/dist/chunk-YYDM6V2F.mjs +0 -113
- package/dist/chunk-ZK7P3TZN.mjs +0 -286
- package/dist/chunk-ZK7P3TZN.mjs.map +0 -1
- /package/dist/{chunk-3IIFBJCD.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
|
@@ -5,9 +5,6 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
-
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
-
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
-
};
|
|
11
8
|
var __export = (target, all) => {
|
|
12
9
|
for (var name in all)
|
|
13
10
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -30,521 +27,401 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
30
27
|
));
|
|
31
28
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
32
29
|
|
|
33
|
-
// src/
|
|
34
|
-
var
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
38
|
-
exports2.AUDIO_CLASSIFICATION_MODELS = void 0;
|
|
39
|
-
var types_1 = require("@camstack/types");
|
|
40
|
-
var HF_REPO = "camstack/camstack-models";
|
|
41
|
-
var AUDIO_LABELS2 = [
|
|
42
|
-
{ id: "audio", name: "Audio Event" }
|
|
43
|
-
];
|
|
44
|
-
exports2.AUDIO_CLASSIFICATION_MODELS = [
|
|
45
|
-
{
|
|
46
|
-
id: "yamnet",
|
|
47
|
-
name: "YAMNet",
|
|
48
|
-
description: "YAMNet \u2014 audio event classification from raw waveform",
|
|
49
|
-
inputSize: { width: 1, height: 16e3 },
|
|
50
|
-
labels: AUDIO_LABELS2,
|
|
51
|
-
formats: {
|
|
52
|
-
onnx: {
|
|
53
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "audioClassification/yamnet/onnx/camstack-yamnet.onnx"),
|
|
54
|
-
sizeMB: 15
|
|
55
|
-
},
|
|
56
|
-
openvino: {
|
|
57
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "audioClassification/yamnet/openvino/camstack-yamnet.xml"),
|
|
58
|
-
sizeMB: 8
|
|
59
|
-
}
|
|
60
|
-
}
|
|
61
|
-
}
|
|
62
|
-
];
|
|
63
|
-
}
|
|
30
|
+
// src/addons/audio-classification/index.ts
|
|
31
|
+
var audio_classification_exports = {};
|
|
32
|
+
__export(audio_classification_exports, {
|
|
33
|
+
default: () => AudioClassificationAddon
|
|
64
34
|
});
|
|
35
|
+
module.exports = __toCommonJS(audio_classification_exports);
|
|
65
36
|
|
|
66
|
-
// src/
|
|
67
|
-
var
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
for (let c = 0; c < numClasses; c++) {
|
|
88
|
-
const score = avgScores[c];
|
|
89
|
-
if (score >= minScore) {
|
|
90
|
-
results.push({
|
|
91
|
-
className: classNames[c] ?? String(c),
|
|
92
|
-
score
|
|
93
|
-
});
|
|
94
|
-
}
|
|
37
|
+
// src/catalogs/audio-classification-models.ts
|
|
38
|
+
var import_types = require("@camstack/types");
|
|
39
|
+
var HF_REPO = "camstack/camstack-models";
|
|
40
|
+
var AUDIO_LABELS = [
|
|
41
|
+
{ id: "audio", name: "Audio Event" }
|
|
42
|
+
];
|
|
43
|
+
var AUDIO_CLASSIFICATION_MODELS = [
|
|
44
|
+
{
|
|
45
|
+
id: "yamnet",
|
|
46
|
+
name: "YAMNet",
|
|
47
|
+
description: "YAMNet \u2014 audio event classification from raw waveform",
|
|
48
|
+
inputSize: { width: 1, height: 16e3 },
|
|
49
|
+
labels: AUDIO_LABELS,
|
|
50
|
+
formats: {
|
|
51
|
+
onnx: {
|
|
52
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "audioClassification/yamnet/onnx/camstack-yamnet.onnx"),
|
|
53
|
+
sizeMB: 15
|
|
54
|
+
},
|
|
55
|
+
openvino: {
|
|
56
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "audioClassification/yamnet/openvino/camstack-yamnet.xml"),
|
|
57
|
+
sizeMB: 8
|
|
95
58
|
}
|
|
96
|
-
return results.sort((a, b) => b.score - a.score);
|
|
97
59
|
}
|
|
98
60
|
}
|
|
99
|
-
|
|
61
|
+
];
|
|
100
62
|
|
|
101
|
-
// src/shared/
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
63
|
+
// src/shared/postprocess/yamnet.ts
|
|
64
|
+
function yamnetPostprocess(output, numFrames, numClasses, classNames, minScore) {
|
|
65
|
+
const avgScores = new Float32Array(numClasses);
|
|
66
|
+
for (let f = 0; f < numFrames; f++) {
|
|
67
|
+
for (let c = 0; c < numClasses; c++) {
|
|
68
|
+
const prev = avgScores[c] ?? 0;
|
|
69
|
+
avgScores[c] = prev + (output[f * numClasses + c] ?? 0);
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
if (numFrames > 0) {
|
|
73
|
+
for (let c = 0; c < numClasses; c++) {
|
|
74
|
+
const val = avgScores[c] ?? 0;
|
|
75
|
+
avgScores[c] = val / numFrames;
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
const results = [];
|
|
79
|
+
for (let c = 0; c < numClasses; c++) {
|
|
80
|
+
const score = avgScores[c];
|
|
81
|
+
if (score >= minScore) {
|
|
82
|
+
results.push({
|
|
83
|
+
className: classNames[c] ?? String(c),
|
|
84
|
+
score
|
|
85
|
+
});
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
return results.sort((a, b) => b.score - a.score);
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
// src/shared/engine-resolver.ts
|
|
92
|
+
var fs = __toESM(require("fs"));
|
|
93
|
+
var path2 = __toESM(require("path"));
|
|
94
|
+
|
|
95
|
+
// src/shared/node-engine.ts
|
|
96
|
+
var path = __toESM(require("path"));
|
|
97
|
+
var BACKEND_TO_PROVIDER = {
|
|
98
|
+
cpu: "cpu",
|
|
99
|
+
coreml: "coreml",
|
|
100
|
+
cuda: "cuda",
|
|
101
|
+
tensorrt: "tensorrt",
|
|
102
|
+
dml: "dml"
|
|
103
|
+
};
|
|
104
|
+
var BACKEND_TO_DEVICE = {
|
|
105
|
+
cpu: "cpu",
|
|
106
|
+
coreml: "gpu-mps",
|
|
107
|
+
cuda: "gpu-cuda",
|
|
108
|
+
tensorrt: "tensorrt"
|
|
109
|
+
};
|
|
110
|
+
var NodeInferenceEngine = class {
|
|
111
|
+
constructor(modelPath, backend) {
|
|
112
|
+
this.modelPath = modelPath;
|
|
113
|
+
this.backend = backend;
|
|
114
|
+
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
115
|
+
}
|
|
116
|
+
runtime = "onnx";
|
|
117
|
+
device;
|
|
118
|
+
session = null;
|
|
119
|
+
async initialize() {
|
|
120
|
+
const ort = await import("onnxruntime-node");
|
|
121
|
+
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
122
|
+
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
123
|
+
const sessionOptions = {
|
|
124
|
+
executionProviders: [provider]
|
|
151
125
|
};
|
|
152
|
-
|
|
153
|
-
|
|
126
|
+
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
127
|
+
}
|
|
128
|
+
async run(input, inputShape) {
|
|
129
|
+
if (!this.session) {
|
|
130
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
131
|
+
}
|
|
132
|
+
const ort = await import("onnxruntime-node");
|
|
133
|
+
const sess = this.session;
|
|
134
|
+
const inputName = sess.inputNames[0];
|
|
135
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
136
|
+
const feeds = { [inputName]: tensor };
|
|
137
|
+
const results = await sess.run(feeds);
|
|
138
|
+
const outputName = sess.outputNames[0];
|
|
139
|
+
const outputTensor = results[outputName];
|
|
140
|
+
return outputTensor.data;
|
|
141
|
+
}
|
|
142
|
+
async runMultiOutput(input, inputShape) {
|
|
143
|
+
if (!this.session) {
|
|
144
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
145
|
+
}
|
|
146
|
+
const ort = await import("onnxruntime-node");
|
|
147
|
+
const sess = this.session;
|
|
148
|
+
const inputName = sess.inputNames[0];
|
|
149
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
150
|
+
const feeds = { [inputName]: tensor };
|
|
151
|
+
const results = await sess.run(feeds);
|
|
152
|
+
const out = {};
|
|
153
|
+
for (const name of sess.outputNames) {
|
|
154
|
+
out[name] = results[name].data;
|
|
155
|
+
}
|
|
156
|
+
return out;
|
|
157
|
+
}
|
|
158
|
+
async dispose() {
|
|
159
|
+
this.session = null;
|
|
160
|
+
}
|
|
161
|
+
};
|
|
162
|
+
|
|
163
|
+
// src/shared/python-engine.ts
|
|
164
|
+
var import_node_child_process = require("child_process");
|
|
165
|
+
var PythonInferenceEngine = class {
|
|
166
|
+
constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
|
|
167
|
+
this.pythonPath = pythonPath;
|
|
168
|
+
this.scriptPath = scriptPath;
|
|
169
|
+
this.modelPath = modelPath;
|
|
170
|
+
this.extraArgs = extraArgs;
|
|
171
|
+
this.runtime = runtime;
|
|
172
|
+
const runtimeDeviceMap = {
|
|
173
|
+
onnx: "cpu",
|
|
154
174
|
coreml: "gpu-mps",
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
var NodeInferenceEngine = class {
|
|
159
|
-
modelPath;
|
|
160
|
-
backend;
|
|
161
|
-
runtime = "onnx";
|
|
162
|
-
device;
|
|
163
|
-
session = null;
|
|
164
|
-
constructor(modelPath, backend) {
|
|
165
|
-
this.modelPath = modelPath;
|
|
166
|
-
this.backend = backend;
|
|
167
|
-
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
168
|
-
}
|
|
169
|
-
async initialize() {
|
|
170
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
171
|
-
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
172
|
-
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
173
|
-
const sessionOptions = {
|
|
174
|
-
executionProviders: [provider]
|
|
175
|
-
};
|
|
176
|
-
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
177
|
-
}
|
|
178
|
-
async run(input, inputShape) {
|
|
179
|
-
if (!this.session) {
|
|
180
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
181
|
-
}
|
|
182
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
183
|
-
const sess = this.session;
|
|
184
|
-
const inputName = sess.inputNames[0];
|
|
185
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
186
|
-
const feeds = { [inputName]: tensor };
|
|
187
|
-
const results = await sess.run(feeds);
|
|
188
|
-
const outputName = sess.outputNames[0];
|
|
189
|
-
const outputTensor = results[outputName];
|
|
190
|
-
return outputTensor.data;
|
|
191
|
-
}
|
|
192
|
-
async runMultiOutput(input, inputShape) {
|
|
193
|
-
if (!this.session) {
|
|
194
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
195
|
-
}
|
|
196
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
197
|
-
const sess = this.session;
|
|
198
|
-
const inputName = sess.inputNames[0];
|
|
199
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
200
|
-
const feeds = { [inputName]: tensor };
|
|
201
|
-
const results = await sess.run(feeds);
|
|
202
|
-
const out = {};
|
|
203
|
-
for (const name of sess.outputNames) {
|
|
204
|
-
out[name] = results[name].data;
|
|
205
|
-
}
|
|
206
|
-
return out;
|
|
207
|
-
}
|
|
208
|
-
async dispose() {
|
|
209
|
-
this.session = null;
|
|
210
|
-
}
|
|
175
|
+
pytorch: "cpu",
|
|
176
|
+
openvino: "cpu",
|
|
177
|
+
tflite: "cpu"
|
|
211
178
|
};
|
|
212
|
-
|
|
179
|
+
this.device = runtimeDeviceMap[runtime];
|
|
213
180
|
}
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
this.
|
|
240
|
-
this.runtime = runtime;
|
|
241
|
-
const runtimeDeviceMap = {
|
|
242
|
-
onnx: "cpu",
|
|
243
|
-
coreml: "gpu-mps",
|
|
244
|
-
pytorch: "cpu",
|
|
245
|
-
openvino: "cpu",
|
|
246
|
-
tflite: "cpu"
|
|
247
|
-
};
|
|
248
|
-
this.device = runtimeDeviceMap[runtime];
|
|
249
|
-
}
|
|
250
|
-
async initialize() {
|
|
251
|
-
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
252
|
-
this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
|
|
253
|
-
stdio: ["pipe", "pipe", "pipe"]
|
|
254
|
-
});
|
|
255
|
-
if (!this.process.stdout || !this.process.stdin) {
|
|
256
|
-
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
257
|
-
}
|
|
258
|
-
this.process.stderr?.on("data", (chunk) => {
|
|
259
|
-
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
260
|
-
});
|
|
261
|
-
this.process.on("error", (err) => {
|
|
262
|
-
this.pendingReject?.(err);
|
|
263
|
-
this.pendingReject = null;
|
|
264
|
-
this.pendingResolve = null;
|
|
265
|
-
});
|
|
266
|
-
this.process.on("exit", (code) => {
|
|
267
|
-
if (code !== 0) {
|
|
268
|
-
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
269
|
-
this.pendingReject?.(err);
|
|
270
|
-
this.pendingReject = null;
|
|
271
|
-
this.pendingResolve = null;
|
|
272
|
-
}
|
|
273
|
-
});
|
|
274
|
-
this.process.stdout.on("data", (chunk) => {
|
|
275
|
-
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
276
|
-
this._tryReceive();
|
|
277
|
-
});
|
|
278
|
-
await new Promise((resolve, reject) => {
|
|
279
|
-
const timeout = setTimeout(() => resolve(), 2e3);
|
|
280
|
-
this.process?.on("error", (err) => {
|
|
281
|
-
clearTimeout(timeout);
|
|
282
|
-
reject(err);
|
|
283
|
-
});
|
|
284
|
-
this.process?.on("exit", (code) => {
|
|
285
|
-
clearTimeout(timeout);
|
|
286
|
-
if (code !== 0) {
|
|
287
|
-
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
288
|
-
}
|
|
289
|
-
});
|
|
290
|
-
});
|
|
291
|
-
}
|
|
292
|
-
_tryReceive() {
|
|
293
|
-
if (this.receiveBuffer.length < 4)
|
|
294
|
-
return;
|
|
295
|
-
const length = this.receiveBuffer.readUInt32LE(0);
|
|
296
|
-
if (this.receiveBuffer.length < 4 + length)
|
|
297
|
-
return;
|
|
298
|
-
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
299
|
-
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
300
|
-
const resolve = this.pendingResolve;
|
|
301
|
-
const reject = this.pendingReject;
|
|
302
|
-
this.pendingResolve = null;
|
|
181
|
+
runtime;
|
|
182
|
+
device;
|
|
183
|
+
process = null;
|
|
184
|
+
receiveBuffer = Buffer.alloc(0);
|
|
185
|
+
pendingResolve = null;
|
|
186
|
+
pendingReject = null;
|
|
187
|
+
async initialize() {
|
|
188
|
+
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
189
|
+
this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
|
|
190
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
191
|
+
});
|
|
192
|
+
if (!this.process.stdout || !this.process.stdin) {
|
|
193
|
+
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
194
|
+
}
|
|
195
|
+
this.process.stderr?.on("data", (chunk) => {
|
|
196
|
+
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
197
|
+
});
|
|
198
|
+
this.process.on("error", (err) => {
|
|
199
|
+
this.pendingReject?.(err);
|
|
200
|
+
this.pendingReject = null;
|
|
201
|
+
this.pendingResolve = null;
|
|
202
|
+
});
|
|
203
|
+
this.process.on("exit", (code) => {
|
|
204
|
+
if (code !== 0) {
|
|
205
|
+
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
206
|
+
this.pendingReject?.(err);
|
|
303
207
|
this.pendingReject = null;
|
|
304
|
-
|
|
305
|
-
return;
|
|
306
|
-
try {
|
|
307
|
-
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
308
|
-
resolve(parsed);
|
|
309
|
-
} catch (err) {
|
|
310
|
-
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
311
|
-
}
|
|
312
|
-
}
|
|
313
|
-
/** Send JPEG buffer, receive JSON detection results */
|
|
314
|
-
async runJpeg(jpeg) {
|
|
315
|
-
if (!this.process?.stdin) {
|
|
316
|
-
throw new Error("PythonInferenceEngine: process not initialized");
|
|
317
|
-
}
|
|
318
|
-
return new Promise((resolve, reject) => {
|
|
319
|
-
this.pendingResolve = resolve;
|
|
320
|
-
this.pendingReject = reject;
|
|
321
|
-
const lengthBuf = Buffer.allocUnsafe(4);
|
|
322
|
-
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
323
|
-
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
324
|
-
});
|
|
325
|
-
}
|
|
326
|
-
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
327
|
-
async run(_input, _inputShape) {
|
|
328
|
-
throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
|
|
329
|
-
}
|
|
330
|
-
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
331
|
-
async runMultiOutput(_input, _inputShape) {
|
|
332
|
-
throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
|
|
208
|
+
this.pendingResolve = null;
|
|
333
209
|
}
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
210
|
+
});
|
|
211
|
+
this.process.stdout.on("data", (chunk) => {
|
|
212
|
+
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
213
|
+
this._tryReceive();
|
|
214
|
+
});
|
|
215
|
+
await new Promise((resolve2, reject) => {
|
|
216
|
+
const timeout = setTimeout(() => resolve2(), 2e3);
|
|
217
|
+
this.process?.on("error", (err) => {
|
|
218
|
+
clearTimeout(timeout);
|
|
219
|
+
reject(err);
|
|
220
|
+
});
|
|
221
|
+
this.process?.on("exit", (code) => {
|
|
222
|
+
clearTimeout(timeout);
|
|
223
|
+
if (code !== 0) {
|
|
224
|
+
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
339
225
|
}
|
|
340
|
-
}
|
|
341
|
-
};
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
226
|
+
});
|
|
227
|
+
});
|
|
228
|
+
}
|
|
229
|
+
_tryReceive() {
|
|
230
|
+
if (this.receiveBuffer.length < 4) return;
|
|
231
|
+
const length = this.receiveBuffer.readUInt32LE(0);
|
|
232
|
+
if (this.receiveBuffer.length < 4 + length) return;
|
|
233
|
+
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
234
|
+
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
235
|
+
const resolve2 = this.pendingResolve;
|
|
236
|
+
const reject = this.pendingReject;
|
|
237
|
+
this.pendingResolve = null;
|
|
238
|
+
this.pendingReject = null;
|
|
239
|
+
if (!resolve2) return;
|
|
240
|
+
try {
|
|
241
|
+
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
242
|
+
resolve2(parsed);
|
|
243
|
+
} catch (err) {
|
|
244
|
+
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
347
245
|
}
|
|
348
246
|
}
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
return m[k];
|
|
361
|
-
} };
|
|
362
|
-
}
|
|
363
|
-
Object.defineProperty(o, k2, desc);
|
|
364
|
-
}) : (function(o, m, k, k2) {
|
|
365
|
-
if (k2 === void 0) k2 = k;
|
|
366
|
-
o[k2] = m[k];
|
|
367
|
-
}));
|
|
368
|
-
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
369
|
-
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
370
|
-
}) : function(o, v) {
|
|
371
|
-
o["default"] = v;
|
|
247
|
+
/** Send JPEG buffer, receive JSON detection results */
|
|
248
|
+
async runJpeg(jpeg) {
|
|
249
|
+
if (!this.process?.stdin) {
|
|
250
|
+
throw new Error("PythonInferenceEngine: process not initialized");
|
|
251
|
+
}
|
|
252
|
+
return new Promise((resolve2, reject) => {
|
|
253
|
+
this.pendingResolve = resolve2;
|
|
254
|
+
this.pendingReject = reject;
|
|
255
|
+
const lengthBuf = Buffer.allocUnsafe(4);
|
|
256
|
+
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
257
|
+
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
372
258
|
});
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
})();
|
|
392
|
-
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
393
|
-
exports2.resolveEngine = resolveEngine2;
|
|
394
|
-
exports2.probeOnnxBackends = probeOnnxBackends;
|
|
395
|
-
var fs = __importStar(require("fs"));
|
|
396
|
-
var path = __importStar(require("path"));
|
|
397
|
-
var node_engine_js_1 = require_node_engine();
|
|
398
|
-
var python_engine_js_1 = require_python_engine();
|
|
399
|
-
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
|
|
400
|
-
var BACKEND_TO_FORMAT = {
|
|
401
|
-
cpu: "onnx",
|
|
402
|
-
coreml: "onnx",
|
|
403
|
-
cuda: "onnx",
|
|
404
|
-
tensorrt: "onnx"
|
|
405
|
-
};
|
|
406
|
-
var RUNTIME_TO_FORMAT = {
|
|
407
|
-
onnx: "onnx",
|
|
408
|
-
coreml: "coreml",
|
|
409
|
-
openvino: "openvino",
|
|
410
|
-
tflite: "tflite",
|
|
411
|
-
pytorch: "pt"
|
|
412
|
-
};
|
|
413
|
-
function modelFilePath(modelsDir, modelEntry, format) {
|
|
414
|
-
const formatEntry = modelEntry.formats[format];
|
|
415
|
-
if (!formatEntry) {
|
|
416
|
-
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
417
|
-
}
|
|
418
|
-
const urlParts = formatEntry.url.split("/");
|
|
419
|
-
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
420
|
-
return path.join(modelsDir, filename);
|
|
259
|
+
}
|
|
260
|
+
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
261
|
+
async run(_input, _inputShape) {
|
|
262
|
+
throw new Error(
|
|
263
|
+
"PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
|
|
264
|
+
);
|
|
265
|
+
}
|
|
266
|
+
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
267
|
+
async runMultiOutput(_input, _inputShape) {
|
|
268
|
+
throw new Error(
|
|
269
|
+
"PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
|
|
270
|
+
);
|
|
271
|
+
}
|
|
272
|
+
async dispose() {
|
|
273
|
+
if (this.process) {
|
|
274
|
+
this.process.stdin?.end();
|
|
275
|
+
this.process.kill("SIGTERM");
|
|
276
|
+
this.process = null;
|
|
421
277
|
}
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
278
|
+
}
|
|
279
|
+
};
|
|
280
|
+
|
|
281
|
+
// src/shared/engine-resolver.ts
|
|
282
|
+
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
|
|
283
|
+
var BACKEND_TO_FORMAT = {
|
|
284
|
+
cpu: "onnx",
|
|
285
|
+
coreml: "onnx",
|
|
286
|
+
cuda: "onnx",
|
|
287
|
+
tensorrt: "onnx"
|
|
288
|
+
};
|
|
289
|
+
var RUNTIME_TO_FORMAT = {
|
|
290
|
+
onnx: "onnx",
|
|
291
|
+
coreml: "coreml",
|
|
292
|
+
openvino: "openvino",
|
|
293
|
+
tflite: "tflite",
|
|
294
|
+
pytorch: "pt"
|
|
295
|
+
};
|
|
296
|
+
function modelFilePath(modelsDir, modelEntry, format) {
|
|
297
|
+
const formatEntry = modelEntry.formats[format];
|
|
298
|
+
if (!formatEntry) {
|
|
299
|
+
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
300
|
+
}
|
|
301
|
+
const urlParts = formatEntry.url.split("/");
|
|
302
|
+
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
303
|
+
return path2.join(modelsDir, filename);
|
|
304
|
+
}
|
|
305
|
+
function modelExists(filePath) {
|
|
306
|
+
try {
|
|
307
|
+
return fs.existsSync(filePath);
|
|
308
|
+
} catch {
|
|
309
|
+
return false;
|
|
310
|
+
}
|
|
311
|
+
}
|
|
312
|
+
/**
 * Resolves and initializes an inference engine for a model.
 *
 * Resolution proceeds in stages:
 *  1. Pick a (backend, format) pair: when runtime is "auto", probe the
 *     available ONNX backends and take the first entry of
 *     AUTO_BACKEND_PRIORITY whose format the model provides; otherwise map
 *     the explicit runtime through RUNTIME_TO_FORMAT and validate the model
 *     offers that format.
 *  2. Locate the model file: via the model service (`models.ensure`) when
 *     one is provided, else via modelFilePath + an existence check.
 *  3. Construct the engine: ONNX formats use NodeInferenceEngine; other
 *     formats use PythonInferenceEngine when a matching Python script and
 *     pythonPath are available; as a last resort, fall back to an on-disk
 *     ONNX model on the CPU backend.
 *
 * @param {object} options - { runtime, backend, modelEntry, modelsDir,
 *   models, pythonPath }.
 * @returns {Promise<{engine: object, format: string, modelPath: string}>}
 * @throws {Error} When no compatible backend/format/file/engine can be found.
 */
async function resolveEngine(options) {
  const { runtime, backend, modelEntry, modelsDir, models } = options;
  let selectedFormat;
  let selectedBackend;
  if (runtime === "auto") {
    // Auto mode: first priority backend that is both available and has a
    // model format wins.
    const available = await probeOnnxBackends();
    let chosen = null;
    for (const b of AUTO_BACKEND_PRIORITY) {
      if (!available.includes(b)) continue;
      const fmt = BACKEND_TO_FORMAT[b];
      if (!fmt) continue;
      if (!modelEntry.formats[fmt]) continue;
      chosen = { backend: b, format: fmt };
      break;
    }
    if (!chosen) {
      throw new Error(
        `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
      );
    }
    selectedFormat = chosen.format;
    selectedBackend = chosen.backend;
  } else {
    // Explicit runtime: validate it maps to a format the model provides.
    const fmt = RUNTIME_TO_FORMAT[runtime];
    if (!fmt) {
      throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
    }
    if (!modelEntry.formats[fmt]) {
      throw new Error(
        `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
      );
    }
    selectedFormat = fmt;
    // Only the onnx runtime takes a caller-chosen backend; other runtimes
    // are their own backend.
    selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
  let modelPath;
  if (models) {
    // Model service handles download/caching.
    modelPath = await models.ensure(modelEntry.id, selectedFormat);
  } else {
    // No service: the file must already exist locally.
    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
    if (!modelExists(modelPath)) {
      throw new Error(
        `resolveEngine: model file not found at ${modelPath} and no model service provided`
      );
    }
  }
  if (selectedFormat === "onnx") {
    // ONNX runs natively in-process.
    const engine = new NodeInferenceEngine(modelPath, selectedBackend);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Non-ONNX formats are delegated to a Python sidecar process, one script
  // per runtime.
  const { pythonPath } = options;
  const PYTHON_SCRIPT_MAP = {
    coreml: "coreml_inference.py",
    pytorch: "pytorch_inference.py",
    openvino: "openvino_inference.py"
  };
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
    // Script location differs between dist layouts; probe the known spots.
    const candidates = [
      path2.join(__dirname, "../../python", scriptName),
      path2.join(__dirname, "../python", scriptName),
      path2.join(__dirname, "../../../python", scriptName)
    ];
    const scriptPath = candidates.find((p) => fs.existsSync(p));
    if (!scriptPath) {
      throw new Error(
        `resolveEngine: Python script "${scriptName}" not found. Searched:
${candidates.join("\n")}`
      );
    }
    const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
    const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
      `--input-size=${inputSize}`,
      `--confidence=0.25`
    ]);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Last resort: a local ONNX copy of the model on the CPU backend.
  const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
  if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
    const engine = new NodeInferenceEngine(fallbackPath, "cpu");
    await engine.initialize();
    return { engine, format: "onnx", modelPath: fallbackPath };
  }
  throw new Error(
    `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
  );
}
|
|
402
|
+
/**
 * Best-effort probe of the ONNX execution backends available on this host.
 *
 * Always includes "cpu". Tries to query onnxruntime-node for additional
 * providers; any failure (module missing, API absent) silently falls back
 * to the CPU-only baseline. On macOS, "coreml" is assumed available even
 * when the probe cannot confirm it.
 *
 * @returns {Promise<string[]>} Deduplicated backend names, "cpu" first.
 */
async function probeOnnxBackends() {
  const detected = ["cpu"];
  try {
    const ort = await import("onnxruntime-node");
    // Guard on env.webgl.disabled being defined before trusting the
    // provider-query API; otherwise treat the provider list as empty.
    const canQuery = ort.env?.webgl?.disabled !== void 0;
    const providerList = canQuery ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
    const recognized = ["coreml", "cuda", "tensorrt"];
    for (const provider of providerList) {
      // Providers are reported like "CoreMLExecutionProvider".
      const key = provider.toLowerCase().replace("executionprovider", "");
      if (recognized.includes(key)) {
        detected.push(key);
      }
    }
  } catch {
    // onnxruntime-node unavailable: keep the CPU-only baseline.
  }
  if (process.platform === "darwin" && !detected.includes("coreml")) {
    detected.push("coreml");
  }
  return [...new Set(detected)];
}
|
|
535
420
|
|
|
536
421
|
// src/addons/audio-classification/index.ts
|
|
537
|
-
var audio_classification_exports = {};
|
|
538
|
-
__export(audio_classification_exports, {
|
|
539
|
-
default: () => AudioClassificationAddon
|
|
540
|
-
});
|
|
541
|
-
module.exports = __toCommonJS(audio_classification_exports);
|
|
542
|
-
var import_audio_classification_models = __toESM(require_audio_classification_models());
|
|
543
|
-
var import_yamnet = __toESM(require_yamnet());
|
|
544
|
-
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
545
422
|
// Expected number of YAMNet output classes; the class-name list is padded
// with "class_<i>" placeholders up to this length before postprocessing.
var YAMNET_NUM_CLASSES = 521;
// Single generic label under which audio detections are reported.
var AUDIO_EVENT_LABEL = { id: "audio-event", name: "Audio Event" };
var AUDIO_LABELS2 = [AUDIO_EVENT_LABEL];
// Identity class map: no remapping, original class names are preserved.
var AUDIO_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
549
426
|
var AudioClassificationAddon = class {
|
|
550
427
|
id = "audio-classification";
|
|
@@ -576,7 +453,7 @@ var AudioClassificationAddon = class {
|
|
|
576
453
|
resolvedConfig = null;
|
|
577
454
|
ctx = null;
|
|
578
455
|
getModelRequirements() {
|
|
579
|
-
return
|
|
456
|
+
return AUDIO_CLASSIFICATION_MODELS.map((m) => ({
|
|
580
457
|
modelId: m.id,
|
|
581
458
|
name: m.name,
|
|
582
459
|
minRAM_MB: 100,
|
|
@@ -592,7 +469,7 @@ var AudioClassificationAddon = class {
|
|
|
592
469
|
const cfg = ctx.addonConfig;
|
|
593
470
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "yamnet";
|
|
594
471
|
this.minScore = cfg["minScore"] ?? 0.3;
|
|
595
|
-
const entry =
|
|
472
|
+
const entry = AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId);
|
|
596
473
|
if (!entry) {
|
|
597
474
|
throw new Error(`AudioClassificationAddon: unknown modelId "${modelId}"`);
|
|
598
475
|
}
|
|
@@ -620,7 +497,7 @@ var AudioClassificationAddon = class {
|
|
|
620
497
|
while (classNames.length < YAMNET_NUM_CLASSES) {
|
|
621
498
|
classNames.push(`class_${classNames.length}`);
|
|
622
499
|
}
|
|
623
|
-
const results =
|
|
500
|
+
const results = yamnetPostprocess(
|
|
624
501
|
output,
|
|
625
502
|
Math.round(numFrames),
|
|
626
503
|
YAMNET_NUM_CLASSES,
|
|
@@ -643,13 +520,13 @@ var AudioClassificationAddon = class {
|
|
|
643
520
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
644
521
|
const backend = config?.backend ?? "cpu";
|
|
645
522
|
const format = config?.format ?? "onnx";
|
|
646
|
-
const entry =
|
|
523
|
+
const entry = AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
647
524
|
this.modelEntry = entry;
|
|
648
525
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
649
526
|
if (this.ctx.models) {
|
|
650
527
|
await this.ctx.models.ensure(modelId, format);
|
|
651
528
|
}
|
|
652
|
-
const resolved = await
|
|
529
|
+
const resolved = await resolveEngine({
|
|
653
530
|
runtime,
|
|
654
531
|
backend,
|
|
655
532
|
modelEntry: entry,
|
|
@@ -673,7 +550,7 @@ var AudioClassificationAddon = class {
|
|
|
673
550
|
key: "modelId",
|
|
674
551
|
label: "Model",
|
|
675
552
|
type: "model-selector",
|
|
676
|
-
catalog: [...
|
|
553
|
+
catalog: [...AUDIO_CLASSIFICATION_MODELS],
|
|
677
554
|
allowCustom: false,
|
|
678
555
|
allowConversion: false,
|
|
679
556
|
acceptFormats: ["onnx", "openvino"],
|
|
@@ -735,13 +612,13 @@ var AudioClassificationAddon = class {
|
|
|
735
612
|
return AUDIO_CLASS_MAP;
|
|
736
613
|
}
|
|
737
614
|
getModelCatalog() {
|
|
738
|
-
return [...
|
|
615
|
+
return [...AUDIO_CLASSIFICATION_MODELS];
|
|
739
616
|
}
|
|
740
617
|
getAvailableModels() {
|
|
741
618
|
return [];
|
|
742
619
|
}
|
|
743
620
|
getActiveLabels() {
|
|
744
|
-
return
|
|
621
|
+
return AUDIO_LABELS2;
|
|
745
622
|
}
|
|
746
623
|
async probe() {
|
|
747
624
|
return {
|