@camstack/addon-vision 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.js +999 -823
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +242 -7
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.js +501 -379
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +224 -4
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.js +1002 -826
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +248 -7
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.js +1002 -826
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.js +1196 -935
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +227 -7
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.js +1003 -808
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +197 -6
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.js +214 -111
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +12 -9
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.js +1287 -1083
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +373 -7
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.js +1075 -869
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +230 -7
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.js +684 -506
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +244 -5
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.js +967 -791
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +21 -17
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.js +581 -411
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +20 -16
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-2YMA6QOV.mjs +193 -0
- package/dist/chunk-2YMA6QOV.mjs.map +1 -0
- package/dist/chunk-3IIFBJCD.mjs +45 -0
- package/dist/chunk-BS4DKYGN.mjs +48 -0
- package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
- package/dist/chunk-DE7I3VHO.mjs +106 -0
- package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
- package/dist/chunk-F6D2OZ36.mjs +89 -0
- package/dist/chunk-F6D2OZ36.mjs.map +1 -0
- package/dist/chunk-GAOIFQDX.mjs +59 -0
- package/dist/chunk-GAOIFQDX.mjs.map +1 -0
- package/dist/chunk-HUIX2XVR.mjs +159 -0
- package/dist/chunk-HUIX2XVR.mjs.map +1 -0
- package/dist/chunk-K36R6HWY.mjs +51 -0
- package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
- package/dist/chunk-MBTAI3WE.mjs +78 -0
- package/dist/chunk-MBTAI3WE.mjs.map +1 -0
- package/dist/chunk-MGT6RUVX.mjs +423 -0
- package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
- package/dist/chunk-PIFS7AIT.mjs +446 -0
- package/dist/chunk-PIFS7AIT.mjs.map +1 -0
- package/dist/chunk-WG66JYYW.mjs +116 -0
- package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
- package/dist/chunk-XD7WGXHZ.mjs +82 -0
- package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
- package/dist/chunk-YYDM6V2F.mjs +113 -0
- package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
- package/dist/chunk-ZK7P3TZN.mjs +286 -0
- package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
- package/dist/index.js +4443 -3925
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2698 -250
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -3
- package/dist/chunk-22BHCDT5.mjs +0 -101
- package/dist/chunk-6DJZZR64.mjs +0 -336
- package/dist/chunk-6DJZZR64.mjs.map +0 -1
- package/dist/chunk-7DYHXUPZ.mjs +0 -36
- package/dist/chunk-BJTO5JO5.mjs +0 -11
- package/dist/chunk-BP7H4NFS.mjs +0 -412
- package/dist/chunk-BR2FPGOX.mjs +0 -98
- package/dist/chunk-DNQNGDR4.mjs +0 -256
- package/dist/chunk-DNQNGDR4.mjs.map +0 -1
- package/dist/chunk-DUN6XU3N.mjs +0 -72
- package/dist/chunk-EPNWLSCG.mjs +0 -387
- package/dist/chunk-EPNWLSCG.mjs.map +0 -1
- package/dist/chunk-G32RCIUI.mjs +0 -645
- package/dist/chunk-G32RCIUI.mjs.map +0 -1
- package/dist/chunk-GR65KM6X.mjs +0 -289
- package/dist/chunk-GR65KM6X.mjs.map +0 -1
- package/dist/chunk-H7LMBTS5.mjs +0 -276
- package/dist/chunk-H7LMBTS5.mjs.map +0 -1
- package/dist/chunk-IK4XIQPC.mjs +0 -242
- package/dist/chunk-IK4XIQPC.mjs.map +0 -1
- package/dist/chunk-J6VNIIYX.mjs +0 -269
- package/dist/chunk-J6VNIIYX.mjs.map +0 -1
- package/dist/chunk-KUO2BVFY.mjs +0 -90
- package/dist/chunk-ML2JX43J.mjs +0 -248
- package/dist/chunk-ML2JX43J.mjs.map +0 -1
- package/dist/chunk-WUMV524J.mjs +0 -379
- package/dist/chunk-WUMV524J.mjs.map +0 -1
- package/dist/chunk-XZ6ZMXXU.mjs +0 -39
- /package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
|
@@ -5,6 +5,9 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
+
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
+
};
|
|
8
11
|
var __export = (target, all) => {
|
|
9
12
|
for (var name in all)
|
|
10
13
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -27,402 +30,521 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
27
30
|
));
|
|
28
31
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
32
|
|
|
30
|
-
// src/
|
|
31
|
-
var
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
});
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
var
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
33
|
+
// src/catalogs/audio-classification-models.js
|
|
34
|
+
var require_audio_classification_models = __commonJS({
|
|
35
|
+
"src/catalogs/audio-classification-models.js"(exports2) {
|
|
36
|
+
"use strict";
|
|
37
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
38
|
+
exports2.AUDIO_CLASSIFICATION_MODELS = void 0;
|
|
39
|
+
var types_1 = require("@camstack/types");
|
|
40
|
+
var HF_REPO = "camstack/camstack-models";
|
|
41
|
+
var AUDIO_LABELS2 = [
|
|
42
|
+
{ id: "audio", name: "Audio Event" }
|
|
43
|
+
];
|
|
44
|
+
exports2.AUDIO_CLASSIFICATION_MODELS = [
|
|
45
|
+
{
|
|
46
|
+
id: "yamnet",
|
|
47
|
+
name: "YAMNet",
|
|
48
|
+
description: "YAMNet \u2014 audio event classification from raw waveform",
|
|
49
|
+
inputSize: { width: 1, height: 16e3 },
|
|
50
|
+
labels: AUDIO_LABELS2,
|
|
51
|
+
formats: {
|
|
52
|
+
onnx: {
|
|
53
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "audioClassification/yamnet/onnx/camstack-yamnet.onnx"),
|
|
54
|
+
sizeMB: 15
|
|
55
|
+
},
|
|
56
|
+
openvino: {
|
|
57
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "audioClassification/yamnet/openvino/camstack-yamnet.xml"),
|
|
58
|
+
sizeMB: 8
|
|
59
|
+
}
|
|
60
|
+
}
|
|
58
61
|
}
|
|
59
|
-
|
|
62
|
+
];
|
|
60
63
|
}
|
|
61
|
-
|
|
64
|
+
});
|
|
62
65
|
|
|
63
|
-
// src/shared/postprocess/yamnet.
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
66
|
+
// src/shared/postprocess/yamnet.js
|
|
67
|
+
var require_yamnet = __commonJS({
|
|
68
|
+
"src/shared/postprocess/yamnet.js"(exports2) {
|
|
69
|
+
"use strict";
|
|
70
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
71
|
+
exports2.yamnetPostprocess = yamnetPostprocess2;
|
|
72
|
+
function yamnetPostprocess2(output, numFrames, numClasses, classNames, minScore) {
|
|
73
|
+
const avgScores = new Float32Array(numClasses);
|
|
74
|
+
for (let f = 0; f < numFrames; f++) {
|
|
75
|
+
for (let c = 0; c < numClasses; c++) {
|
|
76
|
+
const prev = avgScores[c] ?? 0;
|
|
77
|
+
avgScores[c] = prev + (output[f * numClasses + c] ?? 0);
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
if (numFrames > 0) {
|
|
81
|
+
for (let c = 0; c < numClasses; c++) {
|
|
82
|
+
const val = avgScores[c] ?? 0;
|
|
83
|
+
avgScores[c] = val / numFrames;
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
const results = [];
|
|
87
|
+
for (let c = 0; c < numClasses; c++) {
|
|
88
|
+
const score = avgScores[c];
|
|
89
|
+
if (score >= minScore) {
|
|
90
|
+
results.push({
|
|
91
|
+
className: classNames[c] ?? String(c),
|
|
92
|
+
score
|
|
93
|
+
});
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
return results.sort((a, b) => b.score - a.score);
|
|
86
97
|
}
|
|
87
98
|
}
|
|
88
|
-
|
|
89
|
-
}
|
|
90
|
-
|
|
91
|
-
// src/shared/engine-resolver.ts
|
|
92
|
-
var fs = __toESM(require("fs"));
|
|
93
|
-
var path2 = __toESM(require("path"));
|
|
99
|
+
});
|
|
94
100
|
|
|
95
|
-
// src/shared/node-engine.
|
|
96
|
-
var
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
101
|
+
// src/shared/node-engine.js
|
|
102
|
+
var require_node_engine = __commonJS({
|
|
103
|
+
"src/shared/node-engine.js"(exports2) {
|
|
104
|
+
"use strict";
|
|
105
|
+
var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
|
|
106
|
+
if (k2 === void 0) k2 = k;
|
|
107
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
108
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
109
|
+
desc = { enumerable: true, get: function() {
|
|
110
|
+
return m[k];
|
|
111
|
+
} };
|
|
112
|
+
}
|
|
113
|
+
Object.defineProperty(o, k2, desc);
|
|
114
|
+
}) : (function(o, m, k, k2) {
|
|
115
|
+
if (k2 === void 0) k2 = k;
|
|
116
|
+
o[k2] = m[k];
|
|
117
|
+
}));
|
|
118
|
+
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
119
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
120
|
+
}) : function(o, v) {
|
|
121
|
+
o["default"] = v;
|
|
122
|
+
});
|
|
123
|
+
var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
|
|
124
|
+
var ownKeys = function(o) {
|
|
125
|
+
ownKeys = Object.getOwnPropertyNames || function(o2) {
|
|
126
|
+
var ar = [];
|
|
127
|
+
for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
|
|
128
|
+
return ar;
|
|
129
|
+
};
|
|
130
|
+
return ownKeys(o);
|
|
131
|
+
};
|
|
132
|
+
return function(mod) {
|
|
133
|
+
if (mod && mod.__esModule) return mod;
|
|
134
|
+
var result = {};
|
|
135
|
+
if (mod != null) {
|
|
136
|
+
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
137
|
+
}
|
|
138
|
+
__setModuleDefault(result, mod);
|
|
139
|
+
return result;
|
|
140
|
+
};
|
|
141
|
+
})();
|
|
142
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
143
|
+
exports2.NodeInferenceEngine = void 0;
|
|
144
|
+
var path = __importStar(require("path"));
|
|
145
|
+
var BACKEND_TO_PROVIDER = {
|
|
146
|
+
cpu: "cpu",
|
|
147
|
+
coreml: "coreml",
|
|
148
|
+
cuda: "cuda",
|
|
149
|
+
tensorrt: "tensorrt",
|
|
150
|
+
dml: "dml"
|
|
125
151
|
};
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
async run(input, inputShape) {
|
|
129
|
-
if (!this.session) {
|
|
130
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
131
|
-
}
|
|
132
|
-
const ort = await import("onnxruntime-node");
|
|
133
|
-
const sess = this.session;
|
|
134
|
-
const inputName = sess.inputNames[0];
|
|
135
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
136
|
-
const feeds = { [inputName]: tensor };
|
|
137
|
-
const results = await sess.run(feeds);
|
|
138
|
-
const outputName = sess.outputNames[0];
|
|
139
|
-
const outputTensor = results[outputName];
|
|
140
|
-
return outputTensor.data;
|
|
141
|
-
}
|
|
142
|
-
async runMultiOutput(input, inputShape) {
|
|
143
|
-
if (!this.session) {
|
|
144
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
145
|
-
}
|
|
146
|
-
const ort = await import("onnxruntime-node");
|
|
147
|
-
const sess = this.session;
|
|
148
|
-
const inputName = sess.inputNames[0];
|
|
149
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
150
|
-
const feeds = { [inputName]: tensor };
|
|
151
|
-
const results = await sess.run(feeds);
|
|
152
|
-
const out = {};
|
|
153
|
-
for (const name of sess.outputNames) {
|
|
154
|
-
out[name] = results[name].data;
|
|
155
|
-
}
|
|
156
|
-
return out;
|
|
157
|
-
}
|
|
158
|
-
async dispose() {
|
|
159
|
-
this.session = null;
|
|
160
|
-
}
|
|
161
|
-
};
|
|
162
|
-
|
|
163
|
-
// src/shared/python-engine.ts
|
|
164
|
-
var import_node_child_process = require("child_process");
|
|
165
|
-
var import_core = require("@camstack/core");
|
|
166
|
-
var PythonInferenceEngine = class {
|
|
167
|
-
constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
|
|
168
|
-
this.pythonPath = pythonPath;
|
|
169
|
-
this.scriptPath = scriptPath;
|
|
170
|
-
this.modelPath = modelPath;
|
|
171
|
-
this.extraArgs = extraArgs;
|
|
172
|
-
this.runtime = runtime;
|
|
173
|
-
const runtimeDeviceMap = {
|
|
174
|
-
onnx: "cpu",
|
|
152
|
+
var BACKEND_TO_DEVICE = {
|
|
153
|
+
cpu: "cpu",
|
|
175
154
|
coreml: "gpu-mps",
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
155
|
+
cuda: "gpu-cuda",
|
|
156
|
+
tensorrt: "tensorrt"
|
|
157
|
+
};
|
|
158
|
+
var NodeInferenceEngine = class {
|
|
159
|
+
modelPath;
|
|
160
|
+
backend;
|
|
161
|
+
runtime = "onnx";
|
|
162
|
+
device;
|
|
163
|
+
session = null;
|
|
164
|
+
constructor(modelPath, backend) {
|
|
165
|
+
this.modelPath = modelPath;
|
|
166
|
+
this.backend = backend;
|
|
167
|
+
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
168
|
+
}
|
|
169
|
+
async initialize() {
|
|
170
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
171
|
+
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
172
|
+
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
173
|
+
const sessionOptions = {
|
|
174
|
+
executionProviders: [provider]
|
|
175
|
+
};
|
|
176
|
+
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
177
|
+
}
|
|
178
|
+
async run(input, inputShape) {
|
|
179
|
+
if (!this.session) {
|
|
180
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
181
|
+
}
|
|
182
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
183
|
+
const sess = this.session;
|
|
184
|
+
const inputName = sess.inputNames[0];
|
|
185
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
186
|
+
const feeds = { [inputName]: tensor };
|
|
187
|
+
const results = await sess.run(feeds);
|
|
188
|
+
const outputName = sess.outputNames[0];
|
|
189
|
+
const outputTensor = results[outputName];
|
|
190
|
+
return outputTensor.data;
|
|
191
|
+
}
|
|
192
|
+
async runMultiOutput(input, inputShape) {
|
|
193
|
+
if (!this.session) {
|
|
194
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
195
|
+
}
|
|
196
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
197
|
+
const sess = this.session;
|
|
198
|
+
const inputName = sess.inputNames[0];
|
|
199
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
200
|
+
const feeds = { [inputName]: tensor };
|
|
201
|
+
const results = await sess.run(feeds);
|
|
202
|
+
const out = {};
|
|
203
|
+
for (const name of sess.outputNames) {
|
|
204
|
+
out[name] = results[name].data;
|
|
205
|
+
}
|
|
206
|
+
return out;
|
|
207
|
+
}
|
|
208
|
+
async dispose() {
|
|
209
|
+
this.session = null;
|
|
210
|
+
}
|
|
179
211
|
};
|
|
180
|
-
|
|
212
|
+
exports2.NodeInferenceEngine = NodeInferenceEngine;
|
|
181
213
|
}
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
this.
|
|
208
|
-
this.
|
|
214
|
+
});
|
|
215
|
+
|
|
216
|
+
// src/shared/python-engine.js
|
|
217
|
+
var require_python_engine = __commonJS({
|
|
218
|
+
"src/shared/python-engine.js"(exports2) {
|
|
219
|
+
"use strict";
|
|
220
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
221
|
+
exports2.PythonInferenceEngine = void 0;
|
|
222
|
+
exports2.resolvePythonBinary = resolvePythonBinary;
|
|
223
|
+
var node_child_process_1 = require("child_process");
|
|
224
|
+
var PythonInferenceEngine = class {
|
|
225
|
+
pythonPath;
|
|
226
|
+
scriptPath;
|
|
227
|
+
modelPath;
|
|
228
|
+
extraArgs;
|
|
229
|
+
runtime;
|
|
230
|
+
device;
|
|
231
|
+
process = null;
|
|
232
|
+
receiveBuffer = Buffer.alloc(0);
|
|
233
|
+
pendingResolve = null;
|
|
234
|
+
pendingReject = null;
|
|
235
|
+
constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
|
|
236
|
+
this.pythonPath = pythonPath;
|
|
237
|
+
this.scriptPath = scriptPath;
|
|
238
|
+
this.modelPath = modelPath;
|
|
239
|
+
this.extraArgs = extraArgs;
|
|
240
|
+
this.runtime = runtime;
|
|
241
|
+
const runtimeDeviceMap = {
|
|
242
|
+
onnx: "cpu",
|
|
243
|
+
coreml: "gpu-mps",
|
|
244
|
+
pytorch: "cpu",
|
|
245
|
+
openvino: "cpu",
|
|
246
|
+
tflite: "cpu"
|
|
247
|
+
};
|
|
248
|
+
this.device = runtimeDeviceMap[runtime];
|
|
249
|
+
}
|
|
250
|
+
async initialize() {
|
|
251
|
+
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
252
|
+
this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
|
|
253
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
254
|
+
});
|
|
255
|
+
if (!this.process.stdout || !this.process.stdin) {
|
|
256
|
+
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
257
|
+
}
|
|
258
|
+
this.process.stderr?.on("data", (chunk) => {
|
|
259
|
+
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
260
|
+
});
|
|
261
|
+
this.process.on("error", (err) => {
|
|
262
|
+
this.pendingReject?.(err);
|
|
263
|
+
this.pendingReject = null;
|
|
264
|
+
this.pendingResolve = null;
|
|
265
|
+
});
|
|
266
|
+
this.process.on("exit", (code) => {
|
|
267
|
+
if (code !== 0) {
|
|
268
|
+
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
269
|
+
this.pendingReject?.(err);
|
|
270
|
+
this.pendingReject = null;
|
|
271
|
+
this.pendingResolve = null;
|
|
272
|
+
}
|
|
273
|
+
});
|
|
274
|
+
this.process.stdout.on("data", (chunk) => {
|
|
275
|
+
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
276
|
+
this._tryReceive();
|
|
277
|
+
});
|
|
278
|
+
await new Promise((resolve, reject) => {
|
|
279
|
+
const timeout = setTimeout(() => resolve(), 2e3);
|
|
280
|
+
this.process?.on("error", (err) => {
|
|
281
|
+
clearTimeout(timeout);
|
|
282
|
+
reject(err);
|
|
283
|
+
});
|
|
284
|
+
this.process?.on("exit", (code) => {
|
|
285
|
+
clearTimeout(timeout);
|
|
286
|
+
if (code !== 0) {
|
|
287
|
+
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
288
|
+
}
|
|
289
|
+
});
|
|
290
|
+
});
|
|
291
|
+
}
|
|
292
|
+
_tryReceive() {
|
|
293
|
+
if (this.receiveBuffer.length < 4)
|
|
294
|
+
return;
|
|
295
|
+
const length = this.receiveBuffer.readUInt32LE(0);
|
|
296
|
+
if (this.receiveBuffer.length < 4 + length)
|
|
297
|
+
return;
|
|
298
|
+
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
299
|
+
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
300
|
+
const resolve = this.pendingResolve;
|
|
301
|
+
const reject = this.pendingReject;
|
|
209
302
|
this.pendingResolve = null;
|
|
303
|
+
this.pendingReject = null;
|
|
304
|
+
if (!resolve)
|
|
305
|
+
return;
|
|
306
|
+
try {
|
|
307
|
+
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
308
|
+
resolve(parsed);
|
|
309
|
+
} catch (err) {
|
|
310
|
+
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
311
|
+
}
|
|
210
312
|
}
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
});
|
|
216
|
-
await new Promise((resolve2, reject) => {
|
|
217
|
-
const timeout = setTimeout(() => resolve2(), 2e3);
|
|
218
|
-
this.process?.on("error", (err) => {
|
|
219
|
-
clearTimeout(timeout);
|
|
220
|
-
reject(err);
|
|
221
|
-
});
|
|
222
|
-
this.process?.on("exit", (code) => {
|
|
223
|
-
clearTimeout(timeout);
|
|
224
|
-
if (code !== 0) {
|
|
225
|
-
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
313
|
+
/** Send JPEG buffer, receive JSON detection results */
|
|
314
|
+
async runJpeg(jpeg) {
|
|
315
|
+
if (!this.process?.stdin) {
|
|
316
|
+
throw new Error("PythonInferenceEngine: process not initialized");
|
|
226
317
|
}
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
const lengthBuf = Buffer.allocUnsafe(4);
|
|
257
|
-
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
258
|
-
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
259
|
-
});
|
|
260
|
-
}
|
|
261
|
-
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
262
|
-
async run(_input, _inputShape) {
|
|
263
|
-
throw new Error(
|
|
264
|
-
"PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
|
|
265
|
-
);
|
|
266
|
-
}
|
|
267
|
-
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
268
|
-
async runMultiOutput(_input, _inputShape) {
|
|
269
|
-
throw new Error(
|
|
270
|
-
"PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
|
|
271
|
-
);
|
|
272
|
-
}
|
|
273
|
-
async dispose() {
|
|
274
|
-
if (this.process) {
|
|
275
|
-
this.process.stdin?.end();
|
|
276
|
-
this.process.kill("SIGTERM");
|
|
277
|
-
this.process = null;
|
|
318
|
+
return new Promise((resolve, reject) => {
|
|
319
|
+
this.pendingResolve = resolve;
|
|
320
|
+
this.pendingReject = reject;
|
|
321
|
+
const lengthBuf = Buffer.allocUnsafe(4);
|
|
322
|
+
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
323
|
+
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
324
|
+
});
|
|
325
|
+
}
|
|
326
|
+
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
327
|
+
async run(_input, _inputShape) {
|
|
328
|
+
throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
|
|
329
|
+
}
|
|
330
|
+
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
331
|
+
async runMultiOutput(_input, _inputShape) {
|
|
332
|
+
throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
|
|
333
|
+
}
|
|
334
|
+
async dispose() {
|
|
335
|
+
if (this.process) {
|
|
336
|
+
this.process.stdin?.end();
|
|
337
|
+
this.process.kill("SIGTERM");
|
|
338
|
+
this.process = null;
|
|
339
|
+
}
|
|
340
|
+
}
|
|
341
|
+
};
|
|
342
|
+
exports2.PythonInferenceEngine = PythonInferenceEngine;
|
|
343
|
+
async function resolvePythonBinary(configPath, deps) {
|
|
344
|
+
if (configPath)
|
|
345
|
+
return configPath;
|
|
346
|
+
return deps.ensurePython();
|
|
278
347
|
}
|
|
279
348
|
}
|
|
280
|
-
};
|
|
349
|
+
});
|
|
281
350
|
|
|
282
|
-
// src/shared/engine-resolver.
|
|
283
|
-
var
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
function
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
351
|
+
// src/shared/engine-resolver.js
|
|
352
|
+
var require_engine_resolver = __commonJS({
|
|
353
|
+
"src/shared/engine-resolver.js"(exports2) {
|
|
354
|
+
"use strict";
|
|
355
|
+
var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
|
|
356
|
+
if (k2 === void 0) k2 = k;
|
|
357
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
358
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
359
|
+
desc = { enumerable: true, get: function() {
|
|
360
|
+
return m[k];
|
|
361
|
+
} };
|
|
362
|
+
}
|
|
363
|
+
Object.defineProperty(o, k2, desc);
|
|
364
|
+
}) : (function(o, m, k, k2) {
|
|
365
|
+
if (k2 === void 0) k2 = k;
|
|
366
|
+
o[k2] = m[k];
|
|
367
|
+
}));
|
|
368
|
+
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
369
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
370
|
+
}) : function(o, v) {
|
|
371
|
+
o["default"] = v;
|
|
372
|
+
});
|
|
373
|
+
var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
|
|
374
|
+
var ownKeys = function(o) {
|
|
375
|
+
ownKeys = Object.getOwnPropertyNames || function(o2) {
|
|
376
|
+
var ar = [];
|
|
377
|
+
for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
|
|
378
|
+
return ar;
|
|
379
|
+
};
|
|
380
|
+
return ownKeys(o);
|
|
381
|
+
};
|
|
382
|
+
return function(mod) {
|
|
383
|
+
if (mod && mod.__esModule) return mod;
|
|
384
|
+
var result = {};
|
|
385
|
+
if (mod != null) {
|
|
386
|
+
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
387
|
+
}
|
|
388
|
+
__setModuleDefault(result, mod);
|
|
389
|
+
return result;
|
|
390
|
+
};
|
|
391
|
+
})();
|
|
392
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
393
|
+
exports2.resolveEngine = resolveEngine2;
|
|
394
|
+
exports2.probeOnnxBackends = probeOnnxBackends;
|
|
395
|
+
var fs = __importStar(require("fs"));
|
|
396
|
+
var path = __importStar(require("path"));
|
|
397
|
+
var node_engine_js_1 = require_node_engine();
|
|
398
|
+
var python_engine_js_1 = require_python_engine();
|
|
399
|
+
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
|
|
400
|
+
var BACKEND_TO_FORMAT = {
|
|
401
|
+
cpu: "onnx",
|
|
402
|
+
coreml: "onnx",
|
|
403
|
+
cuda: "onnx",
|
|
404
|
+
tensorrt: "onnx"
|
|
405
|
+
};
|
|
406
|
+
var RUNTIME_TO_FORMAT = {
|
|
407
|
+
onnx: "onnx",
|
|
408
|
+
coreml: "coreml",
|
|
409
|
+
openvino: "openvino",
|
|
410
|
+
tflite: "tflite",
|
|
411
|
+
pytorch: "pt"
|
|
412
|
+
};
|
|
413
|
+
function modelFilePath(modelsDir, modelEntry, format) {
|
|
414
|
+
const formatEntry = modelEntry.formats[format];
|
|
415
|
+
if (!formatEntry) {
|
|
416
|
+
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
417
|
+
}
|
|
418
|
+
const urlParts = formatEntry.url.split("/");
|
|
419
|
+
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
420
|
+
return path.join(modelsDir, filename);
|
|
344
421
|
}
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
} else {
|
|
352
|
-
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
353
|
-
if (!modelExists(modelPath)) {
|
|
354
|
-
throw new Error(
|
|
355
|
-
`resolveEngine: model file not found at ${modelPath} and no model service provided`
|
|
356
|
-
);
|
|
422
|
+
function modelExists(filePath) {
|
|
423
|
+
try {
|
|
424
|
+
return fs.existsSync(filePath);
|
|
425
|
+
} catch {
|
|
426
|
+
return false;
|
|
427
|
+
}
|
|
357
428
|
}
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
429
|
+
/**
 * Resolve an inference engine for a model entry.
 *
 * Selection order:
 *   1. Pick a (backend, model format) pair — either automatically by probing
 *      available ONNX backends, or directly from the requested runtime.
 *   2. Locate the model file — via the model service (`models.ensure`) when
 *      provided, otherwise on disk under `modelsDir`.
 *   3. Instantiate the engine — a NodeInferenceEngine for ONNX, else a
 *      PythonInferenceEngine when a matching script and `pythonPath` exist,
 *      else fall back to an on-disk ONNX model on the CPU backend.
 *
 * @param {object} options - { runtime, backend, modelEntry, modelsDir, models, pythonPath }
 * @returns {Promise<{engine, format, modelPath}>} initialized engine plus the
 *   format and model path actually used (fallback may differ from requested).
 * @throws {Error} when no compatible backend/format exists, the runtime is
 *   unknown, the model file is missing, the Python script cannot be found,
 *   or no fallback is possible.
 */
async function resolveEngine2(options) {
  const { runtime, backend, modelEntry, modelsDir, models } = options;
  let selectedFormat;
  let selectedBackend;
  if (runtime === "auto") {
    // Auto mode: walk the priority list and take the first backend that is
    // both available on this host and has a matching model format shipped.
    const available = await probeOnnxBackends();
    let chosen = null;
    for (const b of AUTO_BACKEND_PRIORITY) {
      if (!available.includes(b))
        continue;
      const fmt = BACKEND_TO_FORMAT[b];
      if (!fmt)
        continue;
      if (!modelEntry.formats[fmt])
        continue;
      chosen = { backend: b, format: fmt };
      break;
    }
    if (!chosen) {
      throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
    }
    selectedFormat = chosen.format;
    selectedBackend = chosen.backend;
  } else {
    // Explicit runtime: validate it maps to a format the model ships with.
    const fmt = RUNTIME_TO_FORMAT[runtime];
    if (!fmt) {
      throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
    }
    if (!modelEntry.formats[fmt]) {
      throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
    }
    selectedFormat = fmt;
    // For ONNX the caller may pin a backend (default cpu); for other
    // runtimes the runtime name doubles as the backend identifier.
    selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
  // Locate the model file: prefer the model service (may download/convert),
  // otherwise require the file to already exist under modelsDir.
  let modelPath;
  if (models) {
    modelPath = await models.ensure(modelEntry.id, selectedFormat);
  } else {
    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
    if (!modelExists(modelPath)) {
      throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
    }
  }
  // ONNX runs in-process via the Node engine — no Python needed.
  if (selectedFormat === "onnx") {
    const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Non-ONNX formats are served by an external Python process, keyed by the
  // effective runtime (in auto mode the chosen backend IS the runtime name).
  const { pythonPath } = options;
  const PYTHON_SCRIPT_MAP = {
    coreml: "coreml_inference.py",
    pytorch: "pytorch_inference.py",
    openvino: "openvino_inference.py"
  };
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
    // The script may live at different relative depths depending on whether
    // we run from src, dist, or a per-addon dist bundle — probe each.
    const candidates = [
      path.join(__dirname, "../../python", scriptName),
      path.join(__dirname, "../python", scriptName),
      path.join(__dirname, "../../../python", scriptName)
    ];
    const scriptPath = candidates.find((p) => fs.existsSync(p));
    if (!scriptPath) {
      throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
${candidates.join("\n")}`);
    }
    // Square input size: the larger of width/height is passed to the script.
    const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
    const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
      `--input-size=${inputSize}`,
      `--confidence=0.25`
    ]);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Last resort: if the model also ships ONNX and the file is already on
  // disk, run it on the CPU backend instead of failing outright.
  // NOTE(review): fallback checks the local path even when a model service
  // exists — presumably the ONNX variant must already be downloaded; verify.
  const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
  if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
    const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
    await engine.initialize();
    return { engine, format: "onnx", modelPath: fallbackPath };
  }
  throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
}
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
try {
|
|
406
|
-
const ort = await import("onnxruntime-node");
|
|
407
|
-
const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
|
|
408
|
-
for (const p of providers) {
|
|
409
|
-
const normalized = p.toLowerCase().replace("executionprovider", "");
|
|
410
|
-
if (normalized === "coreml") available.push("coreml");
|
|
411
|
-
else if (normalized === "cuda") available.push("cuda");
|
|
412
|
-
else if (normalized === "tensorrt") available.push("tensorrt");
|
|
512
|
+
// Detect which ONNX execution backends this host can offer.
// "cpu" is always assumed; hardware providers are discovered by loading
// onnxruntime-node (failure to load simply means CPU-only). On macOS,
// CoreML is assumed available even when the probe cannot confirm it.
async function probeOnnxBackends() {
  const found = ["cpu"];
  try {
    const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
    for (const provider of providers) {
      // Provider names look like "CoreMLExecutionProvider" — strip the suffix.
      const key = provider.toLowerCase().replace("executionprovider", "");
      if (key === "coreml" || key === "cuda" || key === "tensorrt") {
        found.push(key);
      }
    }
  } catch {
    // onnxruntime-node missing or failed to load — stay CPU-only.
  }
  if (process.platform === "darwin" && !found.includes("coreml")) {
    found.push("coreml");
  }
  return [...new Set(found)];
}
|
|
414
|
-
} catch {
|
|
415
533
|
}
|
|
416
|
-
|
|
417
|
-
available.push("coreml");
|
|
418
|
-
}
|
|
419
|
-
return [...new Set(available)];
|
|
420
|
-
}
|
|
534
|
+
});
|
|
421
535
|
|
|
422
536
|
// src/addons/audio-classification/index.ts
|
|
537
|
+
var audio_classification_exports = {};
|
|
538
|
+
__export(audio_classification_exports, {
|
|
539
|
+
default: () => AudioClassificationAddon
|
|
540
|
+
});
|
|
541
|
+
module.exports = __toCommonJS(audio_classification_exports);
|
|
542
|
+
var import_audio_classification_models = __toESM(require_audio_classification_models());
|
|
543
|
+
var import_yamnet = __toESM(require_yamnet());
|
|
544
|
+
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
423
545
|
var YAMNET_NUM_CLASSES = 521;
|
|
424
546
|
var AUDIO_EVENT_LABEL = { id: "audio-event", name: "Audio Event" };
|
|
425
|
-
var
|
|
547
|
+
var AUDIO_LABELS = [AUDIO_EVENT_LABEL];
|
|
426
548
|
var AUDIO_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
427
549
|
var AudioClassificationAddon = class {
|
|
428
550
|
id = "audio-classification";
|
|
@@ -454,7 +576,7 @@ var AudioClassificationAddon = class {
|
|
|
454
576
|
resolvedConfig = null;
|
|
455
577
|
ctx = null;
|
|
456
578
|
getModelRequirements() {
|
|
457
|
-
return AUDIO_CLASSIFICATION_MODELS.map((m) => ({
|
|
579
|
+
return import_audio_classification_models.AUDIO_CLASSIFICATION_MODELS.map((m) => ({
|
|
458
580
|
modelId: m.id,
|
|
459
581
|
name: m.name,
|
|
460
582
|
minRAM_MB: 100,
|
|
@@ -470,7 +592,7 @@ var AudioClassificationAddon = class {
|
|
|
470
592
|
const cfg = ctx.addonConfig;
|
|
471
593
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "yamnet";
|
|
472
594
|
this.minScore = cfg["minScore"] ?? 0.3;
|
|
473
|
-
const entry = AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId);
|
|
595
|
+
const entry = import_audio_classification_models.AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId);
|
|
474
596
|
if (!entry) {
|
|
475
597
|
throw new Error(`AudioClassificationAddon: unknown modelId "${modelId}"`);
|
|
476
598
|
}
|
|
@@ -498,7 +620,7 @@ var AudioClassificationAddon = class {
|
|
|
498
620
|
while (classNames.length < YAMNET_NUM_CLASSES) {
|
|
499
621
|
classNames.push(`class_${classNames.length}`);
|
|
500
622
|
}
|
|
501
|
-
const results = yamnetPostprocess(
|
|
623
|
+
const results = (0, import_yamnet.yamnetPostprocess)(
|
|
502
624
|
output,
|
|
503
625
|
Math.round(numFrames),
|
|
504
626
|
YAMNET_NUM_CLASSES,
|
|
@@ -521,13 +643,13 @@ var AudioClassificationAddon = class {
|
|
|
521
643
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
522
644
|
const backend = config?.backend ?? "cpu";
|
|
523
645
|
const format = config?.format ?? "onnx";
|
|
524
|
-
const entry = AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
646
|
+
const entry = import_audio_classification_models.AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
525
647
|
this.modelEntry = entry;
|
|
526
648
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
527
649
|
if (this.ctx.models) {
|
|
528
650
|
await this.ctx.models.ensure(modelId, format);
|
|
529
651
|
}
|
|
530
|
-
const resolved = await resolveEngine({
|
|
652
|
+
const resolved = await (0, import_engine_resolver.resolveEngine)({
|
|
531
653
|
runtime,
|
|
532
654
|
backend,
|
|
533
655
|
modelEntry: entry,
|
|
@@ -551,7 +673,7 @@ var AudioClassificationAddon = class {
|
|
|
551
673
|
key: "modelId",
|
|
552
674
|
label: "Model",
|
|
553
675
|
type: "model-selector",
|
|
554
|
-
catalog: [...AUDIO_CLASSIFICATION_MODELS],
|
|
676
|
+
catalog: [...import_audio_classification_models.AUDIO_CLASSIFICATION_MODELS],
|
|
555
677
|
allowCustom: false,
|
|
556
678
|
allowConversion: false,
|
|
557
679
|
acceptFormats: ["onnx", "openvino"],
|
|
@@ -613,13 +735,13 @@ var AudioClassificationAddon = class {
|
|
|
613
735
|
return AUDIO_CLASS_MAP;
|
|
614
736
|
}
|
|
615
737
|
getModelCatalog() {
|
|
616
|
-
return [...AUDIO_CLASSIFICATION_MODELS];
|
|
738
|
+
return [...import_audio_classification_models.AUDIO_CLASSIFICATION_MODELS];
|
|
617
739
|
}
|
|
618
740
|
getAvailableModels() {
|
|
619
741
|
return [];
|
|
620
742
|
}
|
|
621
743
|
getActiveLabels() {
|
|
622
|
-
return
|
|
744
|
+
return AUDIO_LABELS;
|
|
623
745
|
}
|
|
624
746
|
async probe() {
|
|
625
747
|
return {
|