@camstack/addon-vision 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/dist/addons/animal-classifier/index.d.mts +25 -0
  2. package/dist/addons/animal-classifier/index.d.ts +25 -0
  3. package/dist/addons/animal-classifier/index.js +652 -0
  4. package/dist/addons/animal-classifier/index.js.map +1 -0
  5. package/dist/addons/animal-classifier/index.mjs +10 -0
  6. package/dist/addons/animal-classifier/index.mjs.map +1 -0
  7. package/dist/addons/audio-classification/index.d.mts +31 -0
  8. package/dist/addons/audio-classification/index.d.ts +31 -0
  9. package/dist/addons/audio-classification/index.js +572 -0
  10. package/dist/addons/audio-classification/index.js.map +1 -0
  11. package/dist/addons/audio-classification/index.mjs +8 -0
  12. package/dist/addons/audio-classification/index.mjs.map +1 -0
  13. package/dist/addons/bird-global-classifier/index.d.mts +26 -0
  14. package/dist/addons/bird-global-classifier/index.d.ts +26 -0
  15. package/dist/addons/bird-global-classifier/index.js +658 -0
  16. package/dist/addons/bird-global-classifier/index.js.map +1 -0
  17. package/dist/addons/bird-global-classifier/index.mjs +10 -0
  18. package/dist/addons/bird-global-classifier/index.mjs.map +1 -0
  19. package/dist/addons/bird-nabirds-classifier/index.d.mts +28 -0
  20. package/dist/addons/bird-nabirds-classifier/index.d.ts +28 -0
  21. package/dist/addons/bird-nabirds-classifier/index.js +700 -0
  22. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -0
  23. package/dist/addons/bird-nabirds-classifier/index.mjs +10 -0
  24. package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -0
  25. package/dist/addons/camera-native-detection/index.d.mts +32 -0
  26. package/dist/addons/camera-native-detection/index.d.ts +32 -0
  27. package/dist/addons/camera-native-detection/index.js +99 -0
  28. package/dist/addons/camera-native-detection/index.js.map +1 -0
  29. package/dist/addons/camera-native-detection/index.mjs +7 -0
  30. package/dist/addons/camera-native-detection/index.mjs.map +1 -0
  31. package/dist/addons/face-detection/index.d.mts +24 -0
  32. package/dist/addons/face-detection/index.d.ts +24 -0
  33. package/dist/addons/face-detection/index.js +720 -0
  34. package/dist/addons/face-detection/index.js.map +1 -0
  35. package/dist/addons/face-detection/index.mjs +10 -0
  36. package/dist/addons/face-detection/index.mjs.map +1 -0
  37. package/dist/addons/face-recognition/index.d.mts +24 -0
  38. package/dist/addons/face-recognition/index.d.ts +24 -0
  39. package/dist/addons/face-recognition/index.js +603 -0
  40. package/dist/addons/face-recognition/index.js.map +1 -0
  41. package/dist/addons/face-recognition/index.mjs +9 -0
  42. package/dist/addons/face-recognition/index.mjs.map +1 -0
  43. package/dist/addons/motion-detection/index.d.mts +26 -0
  44. package/dist/addons/motion-detection/index.d.ts +26 -0
  45. package/dist/addons/motion-detection/index.js +273 -0
  46. package/dist/addons/motion-detection/index.js.map +1 -0
  47. package/dist/addons/motion-detection/index.mjs +8 -0
  48. package/dist/addons/motion-detection/index.mjs.map +1 -0
  49. package/dist/addons/object-detection/index.d.mts +26 -0
  50. package/dist/addons/object-detection/index.d.ts +26 -0
  51. package/dist/addons/object-detection/index.js +1214 -0
  52. package/dist/addons/object-detection/index.js.map +1 -0
  53. package/dist/addons/object-detection/index.mjs +10 -0
  54. package/dist/addons/object-detection/index.mjs.map +1 -0
  55. package/dist/addons/plate-detection/index.d.mts +25 -0
  56. package/dist/addons/plate-detection/index.d.ts +25 -0
  57. package/dist/addons/plate-detection/index.js +646 -0
  58. package/dist/addons/plate-detection/index.js.map +1 -0
  59. package/dist/addons/plate-detection/index.mjs +10 -0
  60. package/dist/addons/plate-detection/index.mjs.map +1 -0
  61. package/dist/addons/plate-recognition/index.d.mts +25 -0
  62. package/dist/addons/plate-recognition/index.d.ts +25 -0
  63. package/dist/addons/plate-recognition/index.js +648 -0
  64. package/dist/addons/plate-recognition/index.js.map +1 -0
  65. package/dist/addons/plate-recognition/index.mjs +9 -0
  66. package/dist/addons/plate-recognition/index.mjs.map +1 -0
  67. package/dist/chunk-3MQFUDRU.mjs +260 -0
  68. package/dist/chunk-3MQFUDRU.mjs.map +1 -0
  69. package/dist/chunk-5AIQSN32.mjs +227 -0
  70. package/dist/chunk-5AIQSN32.mjs.map +1 -0
  71. package/dist/chunk-5JJZGKL7.mjs +186 -0
  72. package/dist/chunk-5JJZGKL7.mjs.map +1 -0
  73. package/dist/chunk-6OR5TE7A.mjs +101 -0
  74. package/dist/chunk-6OR5TE7A.mjs.map +1 -0
  75. package/dist/chunk-AYBFB7ID.mjs +763 -0
  76. package/dist/chunk-AYBFB7ID.mjs.map +1 -0
  77. package/dist/chunk-B3R66MPF.mjs +219 -0
  78. package/dist/chunk-B3R66MPF.mjs.map +1 -0
  79. package/dist/chunk-DTOAB2CE.mjs +79 -0
  80. package/dist/chunk-DTOAB2CE.mjs.map +1 -0
  81. package/dist/chunk-ISOIDU4U.mjs +54 -0
  82. package/dist/chunk-ISOIDU4U.mjs.map +1 -0
  83. package/dist/chunk-J4WRYHHY.mjs +212 -0
  84. package/dist/chunk-J4WRYHHY.mjs.map +1 -0
  85. package/dist/chunk-KUO2BVFY.mjs +90 -0
  86. package/dist/chunk-KUO2BVFY.mjs.map +1 -0
  87. package/dist/chunk-LPI42WL6.mjs +324 -0
  88. package/dist/chunk-LPI42WL6.mjs.map +1 -0
  89. package/dist/chunk-MEVASN3P.mjs +305 -0
  90. package/dist/chunk-MEVASN3P.mjs.map +1 -0
  91. package/dist/chunk-PDSHDDPV.mjs +255 -0
  92. package/dist/chunk-PDSHDDPV.mjs.map +1 -0
  93. package/dist/chunk-Q3SQOYG6.mjs +218 -0
  94. package/dist/chunk-Q3SQOYG6.mjs.map +1 -0
  95. package/dist/chunk-QIMDG34B.mjs +229 -0
  96. package/dist/chunk-QIMDG34B.mjs.map +1 -0
  97. package/dist/index.d.mts +171 -0
  98. package/dist/index.d.ts +171 -0
  99. package/dist/index.js +3463 -0
  100. package/dist/index.js.map +1 -0
  101. package/dist/index.mjs +111 -0
  102. package/dist/index.mjs.map +1 -0
  103. package/package.json +49 -0
  104. package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
  105. package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
  106. package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
  107. package/python/coreml_inference.py +319 -0
  108. package/python/openvino_inference.py +247 -0
  109. package/python/pytorch_inference.py +255 -0
@@ -0,0 +1,212 @@
1
+ import {
2
+ ANIMAL_TYPE_MODELS
3
+ } from "./chunk-ISOIDU4U.mjs";
4
+ import {
5
+ cropRegion,
6
+ resizeAndNormalize
7
+ } from "./chunk-6OR5TE7A.mjs";
8
+ import {
9
+ resolveEngine
10
+ } from "./chunk-LPI42WL6.mjs";
11
+
12
// src/addons/animal-classifier/index.ts

// Label metadata this addon advertises to the pipeline.
var ANIMAL_TYPE_LABEL = { id: "animal-type", name: "Animal Type" };
var ANIMAL_TYPE_LABELS = [ANIMAL_TYPE_LABEL];

// Identity class map: classifier outputs keep their original class names.
var ANIMAL_CLASS_MAP = { mapping: {}, preserveOriginal: true };

// Output labels of the 10-way animal classifier; array index == model output slot.
var ANIMAL_10_CLASSES = [
  "cat",
  "cow",
  "dog",
  "dolphin",
  "eagle",
  "giant panda",
  "horse",
  "monkey",
  "sheep",
  "spider"
];
28
/**
 * Numerically stable softmax over raw logits.
 * The running maximum is subtracted before exponentiation so large logits
 * cannot overflow Math.exp.
 * @param {Float32Array} logits - raw model outputs
 * @returns probabilities of the same array type as the input, summing to 1
 */
function softmax(logits) {
  let peak = -Infinity;
  for (const v of logits) peak = Math.max(peak, v);
  const exps = logits.map((v) => Math.exp(v - peak));
  let total = 0;
  for (const e of exps) total += e;
  return exps.map((e) => e / total);
}
34
/**
 * Animal type classifier addon (ViT model, 10 common species).
 *
 * Fills the "classifier" slot of the addon pipeline: consumes crops of
 * detections labelled "animal" and emits classifications under
 * "animal-type:*". Classifications below the configured confidence
 * threshold are dropped.
 */
class AnimalClassifierAddon {
  id = "animal-classifier";
  slot = "classifier";
  inputClasses = ["animal"];
  outputClasses = ["animal-type:*"];
  slotPriority = 0;
  requiredSteps = [];
  manifest = {
    id: "animal-classifier",
    name: "Animal Classifier",
    version: "0.1.0",
    description: "ViT-based animal type classifier \u2014 10 common species",
    packageName: "@camstack/addon-vision",
    slot: "classifier",
    inputClasses: ["animal"],
    outputClasses: ["animal-type:*"],
    supportsCustomModels: false,
    mayRequirePython: false,
    defaultConfig: {
      modelId: "animals-10",
      runtime: "auto",
      backend: "cpu",
      minConfidence: 0.3
    }
  };
  // Inference engine, resolved in initialize().
  engine;
  // Catalog entry of the configured model, resolved in initialize().
  modelEntry;
  // Classifications scoring below this threshold are discarded.
  minConfidence = 0.3;

  /**
   * Resolve configuration, look the model up in the catalog and build the
   * inference engine.
   * @throws {Error} when the configured modelId is not in ANIMAL_TYPE_MODELS
   */
  async initialize(ctx) {
    const cfg = ctx.addonConfig;
    const modelId = cfg.modelId ?? "animals-10";
    const runtime = cfg.runtime ?? "auto";
    const backend = cfg.backend ?? "cpu";
    this.minConfidence = cfg.minConfidence ?? 0.3;

    const entry = ANIMAL_TYPE_MODELS.find((m) => m.id === modelId);
    if (!entry) {
      throw new Error(`AnimalClassifierAddon: unknown modelId "${modelId}"`);
    }
    this.modelEntry = entry;

    const { engine } = await resolveEngine({
      runtime,
      backend,
      modelEntry: entry,
      modelsDir: ctx.locationPaths.models
    });
    this.engine = engine;
  }

  /**
   * Classify one animal crop.
   * Returns an empty classification list when the best class falls below
   * the configured confidence threshold.
   */
  async classify(input) {
    const startedAt = Date.now();
    const { width: inputW, height: inputH } = this.modelEntry.inputSize;

    // Crop the detected region, then resize + ImageNet-normalize into NCHW.
    const crop = await cropRegion(input.frame.data, input.roi);
    const tensor = await resizeAndNormalize(crop, inputW, inputH, "imagenet", "nchw");

    // Single forward pass, then logits -> probabilities.
    const logits = await this.engine.run(tensor, [1, 3, inputH, inputW]);
    const probs = softmax(logits);

    // Argmax over class probabilities.
    let bestIdx = 0;
    let bestScore = probs[0] ?? 0;
    for (let i = 1; i < probs.length; i++) {
      const s = probs[i] ?? 0;
      if (s > bestScore) {
        bestScore = s;
        bestIdx = i;
      }
    }

    if (bestScore < this.minConfidence) {
      return {
        classifications: [],
        inferenceMs: Date.now() - startedAt,
        modelId: this.modelEntry.id
      };
    }

    return {
      classifications: [
        {
          class: ANIMAL_10_CLASSES[bestIdx] ?? `animal_${bestIdx}`,
          score: bestScore
        }
      ],
      inferenceMs: Date.now() - startedAt,
      modelId: this.modelEntry.id
    };
  }

  /** Release the inference engine, if one was created. */
  async shutdown() {
    await this.engine?.dispose();
  }

  /** UI schema describing the addon's configurable fields. */
  getConfigSchema() {
    const modelSection = {
      id: "model",
      title: "Model",
      columns: 1,
      fields: [
        {
          key: "modelId",
          label: "Model",
          type: "model-selector",
          catalog: [...ANIMAL_TYPE_MODELS],
          allowCustom: false,
          allowConversion: false,
          acceptFormats: ["onnx", "coreml", "openvino"],
          requiredMetadata: ["inputSize", "labels"],
          outputFormatHint: "classification"
        }
      ]
    };
    const thresholdSection = {
      id: "thresholds",
      title: "Classification Settings",
      columns: 1,
      fields: [
        {
          key: "minConfidence",
          label: "Minimum Confidence",
          type: "slider",
          min: 0.05,
          max: 1,
          step: 0.05,
          default: 0.3
        }
      ]
    };
    const runtimeSection = {
      id: "runtime",
      title: "Runtime",
      columns: 2,
      fields: [
        {
          key: "runtime",
          label: "Runtime",
          type: "select",
          options: [
            { value: "auto", label: "Auto (recommended)" },
            { value: "onnx", label: "ONNX Runtime" },
            { value: "coreml", label: "CoreML (Apple)" }
          ]
        },
        {
          key: "backend",
          label: "Backend",
          type: "select",
          // Backend choice only applies to the ONNX runtime.
          dependsOn: { runtime: "onnx" },
          options: [
            { value: "cpu", label: "CPU" },
            { value: "coreml", label: "CoreML" },
            { value: "cuda", label: "CUDA (NVIDIA)" }
          ]
        }
      ]
    };
    return { sections: [modelSection, thresholdSection, runtimeSection] };
  }

  /** Identity class map: output class names are preserved as-is. */
  getClassMap() {
    return ANIMAL_CLASS_MAP;
  }

  /** Built-in model catalog (copied so callers cannot mutate it). */
  getModelCatalog() {
    return [...ANIMAL_TYPE_MODELS];
  }

  /** This addon ships no separately-listed detection models. */
  getAvailableModels() {
    return [];
  }

  /** Labels this addon can attach to detections. */
  getActiveLabels() {
    return ANIMAL_TYPE_LABELS;
  }

  /**
   * Report availability; falls back to onnx/cpu defaults when the engine
   * has not been initialized yet.
   */
  async probe() {
    return {
      available: true,
      runtime: this.engine?.runtime ?? "onnx",
      device: this.engine?.device ?? "cpu",
      capabilities: ["fp32"]
    };
  }
}
208
+
209
+ export {
210
+ AnimalClassifierAddon
211
+ };
212
+ //# sourceMappingURL=chunk-J4WRYHHY.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/addons/animal-classifier/index.ts"],"sourcesContent":["import type {\n IClassifierProvider,\n IDetectionAddon,\n AddonManifest,\n AddonContext,\n CropInput,\n ClassifierOutput,\n ConfigUISchema,\n ClassMapDefinition,\n ProbeResult,\n ModelCatalogEntry,\n DetectionModel,\n LabelDefinition,\n IInferenceEngine,\n} from '@camstack/types'\nimport { ANIMAL_TYPE_MODELS } from '../../catalogs/animal-classification-models.js'\nimport { cropRegion, resizeAndNormalize } from '../../shared/image-utils.js'\nimport { resolveEngine } from '../../shared/engine-resolver.js'\n\nconst ANIMAL_TYPE_LABEL: LabelDefinition = { id: 'animal-type', name: 'Animal Type' }\nconst ANIMAL_TYPE_LABELS: readonly LabelDefinition[] = [ANIMAL_TYPE_LABEL]\nconst ANIMAL_CLASS_MAP: ClassMapDefinition = { mapping: {}, preserveOriginal: true }\n\nconst ANIMAL_10_CLASSES = [\n 'cat',\n 'cow',\n 'dog',\n 'dolphin',\n 'eagle',\n 'giant panda',\n 'horse',\n 'monkey',\n 'sheep',\n 'spider',\n] as const\n\nfunction softmax(logits: Float32Array): Float32Array {\n const max = logits.reduce((a, b) => Math.max(a, b), -Infinity)\n const exps = logits.map((v) => Math.exp(v - max))\n const sum = exps.reduce((a, b) => a + b, 0)\n return exps.map((v) => v / sum) as unknown as Float32Array\n}\n\nexport default class AnimalClassifierAddon implements IClassifierProvider, IDetectionAddon {\n readonly id = 'animal-classifier'\n readonly slot = 'classifier' as const\n readonly inputClasses = ['animal'] as const\n readonly outputClasses = ['animal-type:*'] as const\n readonly slotPriority = 0\n readonly requiredSteps = [] as const\n readonly manifest: AddonManifest = {\n id: 'animal-classifier',\n name: 'Animal Classifier',\n version: '0.1.0',\n description: 'ViT-based animal type classifier — 10 common species',\n packageName: '@camstack/addon-vision',\n slot: 'classifier',\n inputClasses: ['animal'],\n outputClasses: ['animal-type:*'],\n supportsCustomModels: false,\n mayRequirePython: 
false,\n defaultConfig: {\n modelId: 'animals-10',\n runtime: 'auto',\n backend: 'cpu',\n minConfidence: 0.3,\n },\n }\n\n private engine!: IInferenceEngine\n private modelEntry!: ModelCatalogEntry\n private minConfidence = 0.3\n\n async initialize(ctx: AddonContext): Promise<void> {\n const cfg = ctx.addonConfig\n const modelId = (cfg['modelId'] as string | undefined) ?? 'animals-10'\n const runtime = (cfg['runtime'] as string | undefined) ?? 'auto'\n const backend = (cfg['backend'] as string | undefined) ?? 'cpu'\n this.minConfidence = (cfg['minConfidence'] as number | undefined) ?? 0.3\n\n const entry = ANIMAL_TYPE_MODELS.find((m) => m.id === modelId)\n if (!entry) {\n throw new Error(`AnimalClassifierAddon: unknown modelId \"${modelId}\"`)\n }\n this.modelEntry = entry\n\n const resolved = await resolveEngine({\n runtime: runtime as 'auto',\n backend,\n modelEntry: entry,\n modelsDir: ctx.locationPaths.models,\n })\n this.engine = resolved.engine\n }\n\n async classify(input: CropInput): Promise<ClassifierOutput> {\n const start = Date.now()\n const { width: inputW, height: inputH } = this.modelEntry.inputSize\n\n // Crop the animal region\n const animalCrop = await cropRegion(input.frame.data, input.roi)\n\n // Resize to 224x224, ImageNet normalization, NCHW\n const normalized = await resizeAndNormalize(animalCrop, inputW, inputH, 'imagenet', 'nchw')\n\n // Run inference — output shape: [1, 10]\n const rawOutput = await this.engine.run(normalized, [1, 3, inputH, inputW])\n\n // Softmax to get probabilities\n const probs = softmax(rawOutput)\n\n // Find argmax\n let maxIdx = 0\n let maxScore = probs[0] ?? 0\n for (let i = 1; i < probs.length; i++) {\n const score = probs[i] ?? 0\n if (score > maxScore) {\n maxScore = score\n maxIdx = i\n }\n }\n\n if (maxScore < this.minConfidence) {\n return {\n classifications: [],\n inferenceMs: Date.now() - start,\n modelId: this.modelEntry.id,\n }\n }\n\n const label = ANIMAL_10_CLASSES[maxIdx] ?? 
`animal_${maxIdx}`\n\n return {\n classifications: [\n {\n class: label,\n score: maxScore,\n },\n ],\n inferenceMs: Date.now() - start,\n modelId: this.modelEntry.id,\n }\n }\n\n async shutdown(): Promise<void> {\n await this.engine?.dispose()\n }\n\n getConfigSchema(): ConfigUISchema {\n return {\n sections: [\n {\n id: 'model',\n title: 'Model',\n columns: 1,\n fields: [\n {\n key: 'modelId',\n label: 'Model',\n type: 'model-selector',\n catalog: [...ANIMAL_TYPE_MODELS],\n allowCustom: false,\n allowConversion: false,\n acceptFormats: ['onnx', 'coreml', 'openvino'],\n requiredMetadata: ['inputSize', 'labels'],\n outputFormatHint: 'classification',\n },\n ],\n },\n {\n id: 'thresholds',\n title: 'Classification Settings',\n columns: 1,\n fields: [\n {\n key: 'minConfidence',\n label: 'Minimum Confidence',\n type: 'slider',\n min: 0.05,\n max: 1.0,\n step: 0.05,\n default: 0.3,\n },\n ],\n },\n {\n id: 'runtime',\n title: 'Runtime',\n columns: 2,\n fields: [\n {\n key: 'runtime',\n label: 'Runtime',\n type: 'select',\n options: [\n { value: 'auto', label: 'Auto (recommended)' },\n { value: 'onnx', label: 'ONNX Runtime' },\n { value: 'coreml', label: 'CoreML (Apple)' },\n ],\n },\n {\n key: 'backend',\n label: 'Backend',\n type: 'select',\n dependsOn: { runtime: 'onnx' },\n options: [\n { value: 'cpu', label: 'CPU' },\n { value: 'coreml', label: 'CoreML' },\n { value: 'cuda', label: 'CUDA (NVIDIA)' },\n ],\n },\n ],\n },\n ],\n }\n }\n\n getClassMap(): ClassMapDefinition {\n return ANIMAL_CLASS_MAP\n }\n\n getModelCatalog(): ModelCatalogEntry[] {\n return [...ANIMAL_TYPE_MODELS]\n }\n\n getAvailableModels(): DetectionModel[] {\n return []\n }\n\n getActiveLabels(): readonly LabelDefinition[] {\n return ANIMAL_TYPE_LABELS\n }\n\n async probe(): Promise<ProbeResult> {\n return {\n available: true,\n runtime: this.engine?.runtime ?? 'onnx',\n device: this.engine?.device ?? 
'cpu',\n capabilities: ['fp32'],\n }\n }\n}\n"],"mappings":";;;;;;;;;;;;AAmBA,IAAM,oBAAqC,EAAE,IAAI,eAAe,MAAM,cAAc;AACpF,IAAM,qBAAiD,CAAC,iBAAiB;AACzE,IAAM,mBAAuC,EAAE,SAAS,CAAC,GAAG,kBAAkB,KAAK;AAEnF,IAAM,oBAAoB;AAAA,EACxB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,SAAS,QAAQ,QAAoC;AACnD,QAAM,MAAM,OAAO,OAAO,CAAC,GAAG,MAAM,KAAK,IAAI,GAAG,CAAC,GAAG,SAAS;AAC7D,QAAM,OAAO,OAAO,IAAI,CAAC,MAAM,KAAK,IAAI,IAAI,GAAG,CAAC;AAChD,QAAM,MAAM,KAAK,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC;AAC1C,SAAO,KAAK,IAAI,CAAC,MAAM,IAAI,GAAG;AAChC;AAEA,IAAqB,wBAArB,MAA2F;AAAA,EAChF,KAAK;AAAA,EACL,OAAO;AAAA,EACP,eAAe,CAAC,QAAQ;AAAA,EACxB,gBAAgB,CAAC,eAAe;AAAA,EAChC,eAAe;AAAA,EACf,gBAAgB,CAAC;AAAA,EACjB,WAA0B;AAAA,IACjC,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,IACb,aAAa;AAAA,IACb,MAAM;AAAA,IACN,cAAc,CAAC,QAAQ;AAAA,IACvB,eAAe,CAAC,eAAe;AAAA,IAC/B,sBAAsB;AAAA,IACtB,kBAAkB;AAAA,IAClB,eAAe;AAAA,MACb,SAAS;AAAA,MACT,SAAS;AAAA,MACT,SAAS;AAAA,MACT,eAAe;AAAA,IACjB;AAAA,EACF;AAAA,EAEQ;AAAA,EACA;AAAA,EACA,gBAAgB;AAAA,EAExB,MAAM,WAAW,KAAkC;AACjD,UAAM,MAAM,IAAI;AAChB,UAAM,UAAW,IAAI,SAAS,KAA4B;AAC1D,UAAM,UAAW,IAAI,SAAS,KAA4B;AAC1D,UAAM,UAAW,IAAI,SAAS,KAA4B;AAC1D,SAAK,gBAAiB,IAAI,eAAe,KAA4B;AAErE,UAAM,QAAQ,mBAAmB,KAAK,CAAC,MAAM,EAAE,OAAO,OAAO;AAC7D,QAAI,CAAC,OAAO;AACV,YAAM,IAAI,MAAM,2CAA2C,OAAO,GAAG;AAAA,IACvE;AACA,SAAK,aAAa;AAElB,UAAM,WAAW,MAAM,cAAc;AAAA,MACnC;AAAA,MACA;AAAA,MACA,YAAY;AAAA,MACZ,WAAW,IAAI,cAAc;AAAA,IAC/B,CAAC;AACD,SAAK,SAAS,SAAS;AAAA,EACzB;AAAA,EAEA,MAAM,SAAS,OAA6C;AAC1D,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,EAAE,OAAO,QAAQ,QAAQ,OAAO,IAAI,KAAK,WAAW;AAG1D,UAAM,aAAa,MAAM,WAAW,MAAM,MAAM,MAAM,MAAM,GAAG;AAG/D,UAAM,aAAa,MAAM,mBAAmB,YAAY,QAAQ,QAAQ,YAAY,MAAM;AAG1F,UAAM,YAAY,MAAM,KAAK,OAAO,IAAI,YAAY,CAAC,GAAG,GAAG,QAAQ,MAAM,CAAC;AAG1E,UAAM,QAAQ,QAAQ,SAAS;AAG/B,QAAI,SAAS;AACb,QAAI,WAAW,MAAM,CAAC,KAAK;AAC3B,aAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,YAAM,QAAQ,MAAM,CAAC,KAAK;AAC1B,UAAI,QAAQ,UAAU;AACpB,mBAAW;AACX,iBAAS;AAAA,MACX;AAAA,IACF;AAEA,QAAI,WAAW,
KAAK,eAAe;AACjC,aAAO;AAAA,QACL,iBAAiB,CAAC;AAAA,QAClB,aAAa,KAAK,IAAI,IAAI;AAAA,QAC1B,SAAS,KAAK,WAAW;AAAA,MAC3B;AAAA,IACF;AAEA,UAAM,QAAQ,kBAAkB,MAAM,KAAK,UAAU,MAAM;AAE3D,WAAO;AAAA,MACL,iBAAiB;AAAA,QACf;AAAA,UACE,OAAO;AAAA,UACP,OAAO;AAAA,QACT;AAAA,MACF;AAAA,MACA,aAAa,KAAK,IAAI,IAAI;AAAA,MAC1B,SAAS,KAAK,WAAW;AAAA,IAC3B;AAAA,EACF;AAAA,EAEA,MAAM,WAA0B;AAC9B,UAAM,KAAK,QAAQ,QAAQ;AAAA,EAC7B;AAAA,EAEA,kBAAkC;AAChC,WAAO;AAAA,MACL,UAAU;AAAA,QACR;AAAA,UACE,IAAI;AAAA,UACJ,OAAO;AAAA,UACP,SAAS;AAAA,UACT,QAAQ;AAAA,YACN;AAAA,cACE,KAAK;AAAA,cACL,OAAO;AAAA,cACP,MAAM;AAAA,cACN,SAAS,CAAC,GAAG,kBAAkB;AAAA,cAC/B,aAAa;AAAA,cACb,iBAAiB;AAAA,cACjB,eAAe,CAAC,QAAQ,UAAU,UAAU;AAAA,cAC5C,kBAAkB,CAAC,aAAa,QAAQ;AAAA,cACxC,kBAAkB;AAAA,YACpB;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,IAAI;AAAA,UACJ,OAAO;AAAA,UACP,SAAS;AAAA,UACT,QAAQ;AAAA,YACN;AAAA,cACE,KAAK;AAAA,cACL,OAAO;AAAA,cACP,MAAM;AAAA,cACN,KAAK;AAAA,cACL,KAAK;AAAA,cACL,MAAM;AAAA,cACN,SAAS;AAAA,YACX;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,IAAI;AAAA,UACJ,OAAO;AAAA,UACP,SAAS;AAAA,UACT,QAAQ;AAAA,YACN;AAAA,cACE,KAAK;AAAA,cACL,OAAO;AAAA,cACP,MAAM;AAAA,cACN,SAAS;AAAA,gBACP,EAAE,OAAO,QAAQ,OAAO,qBAAqB;AAAA,gBAC7C,EAAE,OAAO,QAAQ,OAAO,eAAe;AAAA,gBACvC,EAAE,OAAO,UAAU,OAAO,iBAAiB;AAAA,cAC7C;AAAA,YACF;AAAA,YACA;AAAA,cACE,KAAK;AAAA,cACL,OAAO;AAAA,cACP,MAAM;AAAA,cACN,WAAW,EAAE,SAAS,OAAO;AAAA,cAC7B,SAAS;AAAA,gBACP,EAAE,OAAO,OAAO,OAAO,MAAM;AAAA,gBAC7B,EAAE,OAAO,UAAU,OAAO,SAAS;AAAA,gBACnC,EAAE,OAAO,QAAQ,OAAO,gBAAgB;AAAA,cAC1C;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA,EAEA,cAAkC;AAChC,WAAO;AAAA,EACT;AAAA,EAEA,kBAAuC;AACrC,WAAO,CAAC,GAAG,kBAAkB;AAAA,EAC/B;AAAA,EAEA,qBAAuC;AACrC,WAAO,CAAC;AAAA,EACV;AAAA,EAEA,kBAA8C;AAC5C,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAA8B;AAClC,WAAO;AAAA,MACL,WAAW;AAAA,MACX,SAAS,KAAK,QAAQ,WAAW;AAAA,MACjC,QAAQ,KAAK,QAAQ,UAAU;AAAA,MAC/B,cAAc,CAAC,MAAM;AAAA,IACvB;AAAA,EACF;AACF;","names":[]}
@@ -0,0 +1,90 @@
1
// src/shared/postprocess/yolo.ts

/**
 * Intersection-over-Union of two axis-aligned boxes in {x, y, w, h} form.
 * Returns 0 when the boxes do not overlap or when the union area is 0.
 */
function iou(a, b) {
  const left = Math.max(a.x, b.x);
  const top = Math.max(a.y, b.y);
  const right = Math.min(a.x + a.w, b.x + b.w);
  const bottom = Math.min(a.y + a.h, b.y + b.h);
  const overlap = Math.max(0, right - left) * Math.max(0, bottom - top);
  if (overlap === 0) return 0;
  const union = a.w * a.h + b.w * b.h - overlap;
  return union === 0 ? 0 : overlap / union;
}

/**
 * Greedy non-maximum suppression.
 * @param boxes - array of { bbox, score } candidates
 * @param iouThreshold - overlap above which a lower-scored box is dropped
 * @returns indices of kept boxes, ordered by descending score
 */
function nms(boxes, iouThreshold) {
  const order = boxes.map((_, i) => i);
  order.sort((i, j) => boxes[j].score - boxes[i].score);

  const suppressed = new Set();
  const kept = [];
  for (const winner of order) {
    if (suppressed.has(winner)) continue;
    kept.push(winner);
    // Drop every remaining candidate that overlaps the winner too much.
    for (const other of order) {
      if (other === winner || suppressed.has(other)) continue;
      if (iou(boxes[winner].bbox, boxes[other].bbox) > iouThreshold) {
        suppressed.add(other);
      }
    }
  }
  return kept;
}

/**
 * Decode a YOLO v8/v9 detection head: confidence filter -> NMS -> transform
 * letterbox coordinates back to the original image, clamped to its bounds.
 *
 * @param output - flat tensor laid out channel-major as [1, 4+numClasses, numBoxes]
 * @param numClasses - number of class channels after the 4 box channels
 * @param numBoxes - number of candidate boxes
 * @param options - thresholds, labels and letterbox geometry
 */
function yoloPostprocess(output, numClasses, numBoxes, options) {
  const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight } = options;

  const candidates = [];
  for (let i = 0; i < numBoxes; i++) {
    // Channel c for box i lives at output[c * numBoxes + i].
    const cx = output[i];
    const cy = output[numBoxes + i];
    const w = output[2 * numBoxes + i];
    const h = output[3 * numBoxes + i];

    let classIdx = 0;
    let score = -Infinity;
    for (let c = 0; c < numClasses; c++) {
      const s = output[(4 + c) * numBoxes + i];
      if (s > score) {
        score = s;
        classIdx = c;
      }
    }
    if (score < confidence) continue;

    // Center-based cx,cy,w,h -> top-left-based x,y,w,h.
    candidates.push({
      bbox: { x: cx - w / 2, y: cy - h / 2, w, h },
      score,
      classIdx
    });
  }

  if (candidates.length === 0) return [];

  // Undo the letterbox transform and clamp to the original image.
  const toOriginalX = (v) => Math.max(0, Math.min(originalWidth, (v - padX) / scale));
  const toOriginalY = (v) => Math.max(0, Math.min(originalHeight, (v - padY) / scale));

  return nms(candidates, iouThreshold).map((idx) => {
    const { bbox, score, classIdx } = candidates[idx];
    const label = labels[classIdx] ?? String(classIdx);
    const x1 = toOriginalX(bbox.x);
    const y1 = toOriginalY(bbox.y);
    const x2 = toOriginalX(bbox.x + bbox.w);
    const y2 = toOriginalY(bbox.y + bbox.h);
    return {
      class: label,
      originalClass: label,
      score,
      bbox: { x: x1, y: y1, w: x2 - x1, h: y2 - y1 }
    };
  });
}
84
+
85
+ export {
86
+ iou,
87
+ nms,
88
+ yoloPostprocess
89
+ };
90
+ //# sourceMappingURL=chunk-KUO2BVFY.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/shared/postprocess/yolo.ts"],"sourcesContent":["import type { SpatialDetection, BoundingBox } from '@camstack/types'\n\nexport interface YoloPostprocessOptions {\n readonly confidence: number\n readonly iouThreshold: number\n readonly labels: readonly string[]\n readonly scale: number\n readonly padX: number\n readonly padY: number\n readonly originalWidth: number\n readonly originalHeight: number\n}\n\n/** Calculate IoU between two bounding boxes */\nexport function iou(a: BoundingBox, b: BoundingBox): number {\n const ax1 = a.x\n const ay1 = a.y\n const ax2 = a.x + a.w\n const ay2 = a.y + a.h\n\n const bx1 = b.x\n const by1 = b.y\n const bx2 = b.x + b.w\n const by2 = b.y + b.h\n\n const interX1 = Math.max(ax1, bx1)\n const interY1 = Math.max(ay1, by1)\n const interX2 = Math.min(ax2, bx2)\n const interY2 = Math.min(ay2, by2)\n\n const interW = Math.max(0, interX2 - interX1)\n const interH = Math.max(0, interY2 - interY1)\n const interArea = interW * interH\n\n if (interArea === 0) return 0\n\n const areaA = a.w * a.h\n const areaB = b.w * b.h\n const unionArea = areaA + areaB - interArea\n\n return unionArea === 0 ? 
0 : interArea / unionArea\n}\n\n/** Non-maximum suppression — returns indices of kept boxes (sorted by score desc) */\nexport function nms(\n boxes: ReadonlyArray<{ readonly bbox: BoundingBox; readonly score: number }>,\n iouThreshold: number,\n): number[] {\n const indices = boxes\n .map((_, i) => i)\n .sort((a, b) => (boxes[b]!.score) - (boxes[a]!.score))\n\n const kept: number[] = []\n const suppressed = new Set<number>()\n\n for (const idx of indices) {\n if (suppressed.has(idx)) continue\n kept.push(idx)\n for (const other of indices) {\n if (other === idx || suppressed.has(other)) continue\n if (iou(boxes[idx]!.bbox, boxes[other]!.bbox) > iouThreshold) {\n suppressed.add(other)\n }\n }\n }\n\n return kept\n}\n\n/** Full YOLO v8/v9 postprocessing: filter → NMS → scale back to original coords */\nexport function yoloPostprocess(\n output: Float32Array,\n numClasses: number,\n numBoxes: number,\n options: YoloPostprocessOptions,\n): SpatialDetection[] {\n const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight } = options\n\n interface Candidate {\n readonly bbox: BoundingBox\n readonly score: number\n readonly classIdx: number\n }\n\n const candidates: Candidate[] = []\n\n for (let i = 0; i < numBoxes; i++) {\n // YOLO v8/v9 output layout: [1, 4+numClasses, numBoxes] stored row-major\n const cx = output[0 * numBoxes + i]!\n const cy = output[1 * numBoxes + i]!\n const w = output[2 * numBoxes + i]!\n const h = output[3 * numBoxes + i]!\n\n let bestScore = -Infinity\n let bestClass = 0\n\n for (let j = 0; j < numClasses; j++) {\n const score = output[(4 + j) * numBoxes + i]!\n if (score > bestScore) {\n bestScore = score\n bestClass = j\n }\n }\n\n if (bestScore < confidence) continue\n\n // Convert cx,cy,w,h to x,y,w,h (top-left origin)\n const bbox: BoundingBox = {\n x: cx - w / 2,\n y: cy - h / 2,\n w,\n h,\n }\n\n candidates.push({ bbox, score: bestScore, classIdx: bestClass })\n }\n\n if (candidates.length === 0) return 
[]\n\n const keptIndices = nms(candidates, iouThreshold)\n\n return keptIndices.map((idx) => {\n const { bbox, score, classIdx } = candidates[idx]!\n const label = labels[classIdx] ?? String(classIdx)\n\n // Transform from letterbox coords back to original image coords\n const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale))\n const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale))\n const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale))\n const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale))\n\n const finalBbox: BoundingBox = { x, y, w: x2 - x, h: y2 - y }\n\n return {\n class: label,\n originalClass: label,\n score,\n bbox: finalBbox,\n } satisfies SpatialDetection\n })\n}\n"],"mappings":";AAcO,SAAS,IAAI,GAAgB,GAAwB;AAC1D,QAAM,MAAM,EAAE;AACd,QAAM,MAAM,EAAE;AACd,QAAM,MAAM,EAAE,IAAI,EAAE;AACpB,QAAM,MAAM,EAAE,IAAI,EAAE;AAEpB,QAAM,MAAM,EAAE;AACd,QAAM,MAAM,EAAE;AACd,QAAM,MAAM,EAAE,IAAI,EAAE;AACpB,QAAM,MAAM,EAAE,IAAI,EAAE;AAEpB,QAAM,UAAU,KAAK,IAAI,KAAK,GAAG;AACjC,QAAM,UAAU,KAAK,IAAI,KAAK,GAAG;AACjC,QAAM,UAAU,KAAK,IAAI,KAAK,GAAG;AACjC,QAAM,UAAU,KAAK,IAAI,KAAK,GAAG;AAEjC,QAAM,SAAS,KAAK,IAAI,GAAG,UAAU,OAAO;AAC5C,QAAM,SAAS,KAAK,IAAI,GAAG,UAAU,OAAO;AAC5C,QAAM,YAAY,SAAS;AAE3B,MAAI,cAAc,EAAG,QAAO;AAE5B,QAAM,QAAQ,EAAE,IAAI,EAAE;AACtB,QAAM,QAAQ,EAAE,IAAI,EAAE;AACtB,QAAM,YAAY,QAAQ,QAAQ;AAElC,SAAO,cAAc,IAAI,IAAI,YAAY;AAC3C;AAGO,SAAS,IACd,OACA,cACU;AACV,QAAM,UAAU,MACb,IAAI,CAAC,GAAG,MAAM,CAAC,EACf,KAAK,CAAC,GAAG,MAAO,MAAM,CAAC,EAAG,QAAU,MAAM,CAAC,EAAG,KAAM;AAEvD,QAAM,OAAiB,CAAC;AACxB,QAAM,aAAa,oBAAI,IAAY;AAEnC,aAAW,OAAO,SAAS;AACzB,QAAI,WAAW,IAAI,GAAG,EAAG;AACzB,SAAK,KAAK,GAAG;AACb,eAAW,SAAS,SAAS;AAC3B,UAAI,UAAU,OAAO,WAAW,IAAI,KAAK,EAAG;AAC5C,UAAI,IAAI,MAAM,GAAG,EAAG,MAAM,MAAM,KAAK,EAAG,IAAI,IAAI,cAAc;AAC5D,mBAAW,IAAI,KAAK;AAAA,MACtB;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAGO,SAAS,gBACd,QACA,YACA,UACA,SACoB;AACpB,QAAM,EAAE,YAAY,cAAc,QAAQ,OAAO,MAAM,MAAM,eAAe,eAAe,IAAI;AAQ/F,QAAM,aAA0B,CAAC;AAEjC,WA
AS,IAAI,GAAG,IAAI,UAAU,KAAK;AAEjC,UAAM,KAAK,OAAO,IAAI,WAAW,CAAC;AAClC,UAAM,KAAK,OAAO,IAAI,WAAW,CAAC;AAClC,UAAM,IAAI,OAAO,IAAI,WAAW,CAAC;AACjC,UAAM,IAAI,OAAO,IAAI,WAAW,CAAC;AAEjC,QAAI,YAAY;AAChB,QAAI,YAAY;AAEhB,aAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,YAAM,QAAQ,QAAQ,IAAI,KAAK,WAAW,CAAC;AAC3C,UAAI,QAAQ,WAAW;AACrB,oBAAY;AACZ,oBAAY;AAAA,MACd;AAAA,IACF;AAEA,QAAI,YAAY,WAAY;AAG5B,UAAM,OAAoB;AAAA,MACxB,GAAG,KAAK,IAAI;AAAA,MACZ,GAAG,KAAK,IAAI;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAEA,eAAW,KAAK,EAAE,MAAM,OAAO,WAAW,UAAU,UAAU,CAAC;AAAA,EACjE;AAEA,MAAI,WAAW,WAAW,EAAG,QAAO,CAAC;AAErC,QAAM,cAAc,IAAI,YAAY,YAAY;AAEhD,SAAO,YAAY,IAAI,CAAC,QAAQ;AAC9B,UAAM,EAAE,MAAM,OAAO,SAAS,IAAI,WAAW,GAAG;AAChD,UAAM,QAAQ,OAAO,QAAQ,KAAK,OAAO,QAAQ;AAGjD,UAAM,IAAI,KAAK,IAAI,GAAG,KAAK,IAAI,gBAAgB,KAAK,IAAI,QAAQ,KAAK,CAAC;AACtE,UAAM,IAAI,KAAK,IAAI,GAAG,KAAK,IAAI,iBAAiB,KAAK,IAAI,QAAQ,KAAK,CAAC;AACvE,UAAM,KAAK,KAAK,IAAI,GAAG,KAAK,IAAI,gBAAgB,KAAK,IAAI,KAAK,IAAI,QAAQ,KAAK,CAAC;AAChF,UAAM,KAAK,KAAK,IAAI,GAAG,KAAK,IAAI,iBAAiB,KAAK,IAAI,KAAK,IAAI,QAAQ,KAAK,CAAC;AAEjF,UAAM,YAAyB,EAAE,GAAG,GAAG,GAAG,KAAK,GAAG,GAAG,KAAK,EAAE;AAE5D,WAAO;AAAA,MACL,OAAO;AAAA,MACP,eAAe;AAAA,MACf;AAAA,MACA,MAAM;AAAA,IACR;AAAA,EACF,CAAC;AACH;","names":[]}
@@ -0,0 +1,324 @@
1
// src/shared/node-engine.ts
import * as path from "path";

// ONNX Runtime execution-provider name for each supported backend.
var BACKEND_TO_PROVIDER = {
  cpu: "cpu",
  coreml: "coreml",
  cuda: "cuda",
  tensorrt: "tensorrt",
  dml: "dml"
};

// Logical device label reported for each backend. "dml" is intentionally absent
// here and falls back to "cpu" in the constructor — TODO(review): confirm that
// is intended rather than an omission of a "gpu-dml" label.
var BACKEND_TO_DEVICE = {
  cpu: "cpu",
  coreml: "gpu-mps",
  cuda: "gpu-cuda",
  tensorrt: "tensorrt"
};

/**
 * In-process inference engine backed by onnxruntime-node.
 *
 * Lifecycle: construct -> initialize() -> run()/runMultiOutput() -> dispose().
 * The onnxruntime-node module is loaded lazily so constructing the class never
 * forces the native dependency.
 */
var NodeInferenceEngine = class {
  /**
   * @param {string} modelPath - Path to the .onnx (or CoreML) model file; may be relative.
   * @param {string} backend - One of the BACKEND_TO_PROVIDER keys.
   */
  constructor(modelPath, backend) {
    this.modelPath = modelPath;
    this.backend = backend;
    this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
  }
  runtime = "onnx";
  device;
  session = null;
  // Cached onnxruntime-node module, populated by initialize(); saves the
  // repeated dynamic import() on every run() call.
  ort = null;
  /** Load onnxruntime-node and create the inference session. */
  async initialize() {
    const ort = await import("onnxruntime-node");
    this.ort = ort;
    const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
    // ORT needs a concrete file path; resolve relative paths against the CWD.
    const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
    const sessionOptions = {
      executionProviders: [provider]
    };
    this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
  }
  /**
   * Run inference and return the FIRST output tensor's raw data.
   * @param {Float32Array} input - Flattened float32 input.
   * @param {number[]} inputShape - Tensor dims matching input.length.
   * @throws {Error} if initialize() has not completed.
   */
  async run(input, inputShape) {
    if (!this.session) {
      throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
    }
    const ort = this.ort ?? await import("onnxruntime-node");
    const sess = this.session;
    const inputName = sess.inputNames[0];
    const tensor = new ort.Tensor("float32", input, [...inputShape]);
    const feeds = { [inputName]: tensor };
    const results = await sess.run(feeds);
    const outputName = sess.outputNames[0];
    return results[outputName].data;
  }
  /**
   * Run inference and return EVERY output tensor's data, keyed by output name.
   * Same input contract as run().
   */
  async runMultiOutput(input, inputShape) {
    if (!this.session) {
      throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
    }
    const ort = this.ort ?? await import("onnxruntime-node");
    const sess = this.session;
    const inputName = sess.inputNames[0];
    const tensor = new ort.Tensor("float32", input, [...inputShape]);
    const feeds = { [inputName]: tensor };
    const results = await sess.run(feeds);
    const out = {};
    for (const name of sess.outputNames) {
      out[name] = results[name].data;
    }
    return out;
  }
  /**
   * Release the session. Fix: the original only dropped the JS reference,
   * leaking the native ORT session until GC; explicitly call release() when
   * the API provides it. Safe to call when never initialized.
   */
  async dispose() {
    const sess = this.session;
    this.session = null;
    this.ort = null;
    if (sess) {
      // release() frees native resources (onnxruntime-common InferenceSession API);
      // optional-call guards older runtime versions.
      await sess.release?.();
    }
  }
};
68
+
69
// src/shared/python-engine.ts
import { spawn } from "child_process";

/**
 * Out-of-process inference engine that talks to a long-lived Python worker
 * over stdin/stdout using a length-prefixed framing protocol: each message is
 * a uint32 (little-endian) byte length followed by that many bytes (JPEG on
 * the way in, UTF-8 JSON on the way out).
 *
 * The protocol is strictly one request in flight at a time (single
 * pendingResolve/pendingReject slot).
 */
var PythonInferenceEngine = class {
  constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
    this.pythonPath = pythonPath;
    this.scriptPath = scriptPath;
    this.modelPath = modelPath;
    this.extraArgs = extraArgs;
    this.runtime = runtime;
    // Logical device per runtime; only the CoreML runtime is GPU-backed here.
    const runtimeDeviceMap = {
      onnx: "cpu",
      coreml: "gpu-mps",
      pytorch: "cpu",
      openvino: "cpu",
      tflite: "cpu"
    };
    this.device = runtimeDeviceMap[runtime];
  }
  runtime;
  device;
  process = null;
  receiveBuffer = Buffer.alloc(0);
  // Single-slot pending request (resolve/reject of the in-flight runJpeg promise).
  pendingResolve = null;
  pendingReject = null;
  /**
   * Spawn the Python worker and wire up framing + failure handlers.
   * Waits up to 2s as a startup grace period so an immediate crash of the
   * worker (bad interpreter path, import error) is surfaced as a rejection.
   */
  async initialize() {
    const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
    this.process = spawn(this.pythonPath, args, {
      stdio: ["pipe", "pipe", "pipe"]
    });
    if (!this.process.stdout || !this.process.stdin) {
      throw new Error("PythonInferenceEngine: failed to create process pipes");
    }
    // Mirror the worker's stderr for diagnostics.
    this.process.stderr?.on("data", (chunk) => {
      process.stderr.write(`[python-engine] ${chunk.toString()}`);
    });
    // Any spawn/pipe error fails the in-flight request (if any).
    this.process.on("error", (err) => {
      this.pendingReject?.(err);
      this.pendingReject = null;
      this.pendingResolve = null;
    });
    this.process.on("exit", (code) => {
      if (code !== 0) {
        const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
        this.pendingReject?.(err);
        this.pendingReject = null;
        this.pendingResolve = null;
      }
    });
    // stdout chunks are arbitrary byte boundaries, not message boundaries:
    // accumulate and let _tryReceive re-frame.
    this.process.stdout.on("data", (chunk) => {
      this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
      this._tryReceive();
    });
    await new Promise((resolve2, reject) => {
      const timeout = setTimeout(() => resolve2(), 2e3);
      this.process?.on("error", (err) => {
        clearTimeout(timeout);
        reject(err);
      });
      this.process?.on("exit", (code) => {
        clearTimeout(timeout);
        if (code !== 0) {
          reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
        }
      });
    });
  }
  /**
   * Consume complete frames from receiveBuffer and settle the pending request.
   * Fix: the original handled at most ONE frame per 'data' event, so a second
   * complete frame already in the buffer was stranded until more bytes
   * arrived; drain in a loop instead.
   */
  _tryReceive() {
    while (this.receiveBuffer.length >= 4) {
      const length = this.receiveBuffer.readUInt32LE(0);
      if (this.receiveBuffer.length < 4 + length) return; // partial frame — wait for more bytes
      const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
      this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
      const resolve2 = this.pendingResolve;
      const reject = this.pendingReject;
      this.pendingResolve = null;
      this.pendingReject = null;
      // Unsolicited frame (no waiter): discard it but keep draining, matching
      // the original's consume-and-drop behavior for this case.
      if (!resolve2) continue;
      try {
        const parsed = JSON.parse(jsonBytes.toString("utf8"));
        resolve2(parsed);
      } catch (err) {
        reject?.(err instanceof Error ? err : new Error(String(err)));
      }
    }
  }
  /** Send JPEG buffer, receive JSON detection results */
  async runJpeg(jpeg) {
    if (!this.process?.stdin) {
      throw new Error("PythonInferenceEngine: process not initialized");
    }
    // Fix: a concurrent call used to overwrite the pending slot, leaving the
    // earlier caller's promise permanently unsettled; fail loudly instead.
    if (this.pendingResolve || this.pendingReject) {
      throw new Error("PythonInferenceEngine: a request is already in flight");
    }
    return new Promise((resolve2, reject) => {
      this.pendingResolve = resolve2;
      this.pendingReject = reject;
      const lengthBuf = Buffer.allocUnsafe(4);
      lengthBuf.writeUInt32LE(jpeg.length, 0);
      this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
    });
  }
  /** IInferenceEngine.run — wraps runJpeg for compatibility */
  async run(_input, _inputShape) {
    throw new Error(
      "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
    );
  }
  /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
  async runMultiOutput(_input, _inputShape) {
    throw new Error(
      "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
    );
  }
  /**
   * Stop the worker. Fix: settle any in-flight request so its caller does not
   * hang forever after disposal.
   */
  async dispose() {
    const reject = this.pendingReject;
    this.pendingResolve = null;
    this.pendingReject = null;
    reject?.(new Error("PythonInferenceEngine: disposed while a request was in flight"));
    if (this.process) {
      this.process.stdin?.end();
      this.process.kill("SIGTERM");
      this.process = null;
    }
  }
};
186
+
187
// src/shared/engine-resolver.ts
import * as fs from "fs";
import * as path2 from "path";

// Backend probe order for runtime "auto": hardware accelerators first, CPU as
// the guaranteed last resort (probeOnnxBackends always reports "cpu").
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
// Model file format each ONNX-Runtime backend consumes; CUDA/TensorRT run the
// plain .onnx file, CoreML uses the dedicated coreml export.
var BACKEND_TO_FORMAT = {
  cpu: "onnx",
  coreml: "coreml",
  cuda: "onnx",
  tensorrt: "onnx"
};
// Model file format expected when a runtime is requested explicitly.
var RUNTIME_TO_FORMAT = {
  onnx: "onnx",
  coreml: "coreml",
  openvino: "openvino",
  tflite: "tflite",
  pytorch: "pt"
};
204
/**
 * Compute the on-disk path for a model's file in the local models directory.
 * The filename is taken from the last segment of the format's download URL,
 * falling back to "<id>.<format>" when the URL has no usable last segment.
 *
 * @param {string} modelsDir - Directory where model files are cached.
 * @param {{id: string, formats: Object}} modelEntry - Registry entry for the model.
 * @param {string} format - Format key into modelEntry.formats.
 * @returns {string} Joined path under modelsDir.
 * @throws {Error} when the model has no entry for the requested format.
 */
function modelFilePath(modelsDir, modelEntry, format) {
  const formatEntry = modelEntry.formats[format];
  if (!formatEntry) {
    throw new Error(`Model ${modelEntry.id} has no ${format} format`);
  }
  const urlParts = formatEntry.url.split("/");
  // Fix: use `||`, not `??` — a URL ending in "/" splits to an empty last
  // segment (""), which is NOT nullish, so `??` returned join(modelsDir, "").
  const filename = urlParts[urlParts.length - 1] || `${modelEntry.id}.${format}`;
  return path2.join(modelsDir, filename);
}
213
/**
 * Report whether a model file is already present on disk.
 * Defensive: any error from the filesystem probe is treated as "absent"
 * rather than propagated.
 *
 * @param {string} filePath - Candidate model file path.
 * @returns {boolean} true when the path exists.
 */
function modelExists(filePath) {
  let present = false;
  try {
    present = fs.existsSync(filePath);
  } catch {
    present = false;
  }
  return present;
}
220
/**
 * Pick a model format + backend for the requested runtime, ensure the model
 * file exists locally (downloading if a downloadModel callback is provided),
 * and return an initialized engine.
 *
 * Resolution order:
 *  1. runtime "auto": probe ONNX backends, walk AUTO_BACKEND_PRIORITY, pick
 *     the first backend whose format the model actually ships.
 *  2. explicit runtime: map it through RUNTIME_TO_FORMAT and validate.
 *  3. onnx/coreml formats run in-process (NodeInferenceEngine); other formats
 *     go to a Python worker when a script + pythonPath are available;
 *     otherwise fall back to a locally present ONNX file on CPU.
 *
 * @param {Object} options - { runtime, backend, modelEntry, modelsDir, downloadModel?, pythonPath? }
 * @returns {Promise<{engine, format, modelPath}>}
 * @throws {Error} when no backend/format/engine combination can be satisfied.
 */
async function resolveEngine(options) {
  const { runtime, backend, modelEntry, modelsDir, downloadModel } = options;
  let selectedFormat;
  let selectedBackend;
  if (runtime === "auto") {
    const available = await probeOnnxBackends();
    let chosen = null;
    for (const b of AUTO_BACKEND_PRIORITY) {
      if (!available.includes(b)) continue;
      const fmt = BACKEND_TO_FORMAT[b];
      if (!fmt) continue;
      if (!modelEntry.formats[fmt]) continue;
      chosen = { backend: b, format: fmt };
      break;
    }
    if (!chosen) {
      throw new Error(
        `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
      );
    }
    selectedFormat = chosen.format;
    selectedBackend = chosen.backend;
  } else {
    const fmt = RUNTIME_TO_FORMAT[runtime];
    if (!fmt) {
      throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
    }
    if (!modelEntry.formats[fmt]) {
      throw new Error(
        `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
      );
    }
    selectedFormat = fmt;
    // Only the onnx runtime takes a separate backend; other runtimes ARE the backend.
    selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
  let modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
  if (!modelExists(modelPath)) {
    if (downloadModel) {
      const formatEntry = modelEntry.formats[selectedFormat];
      modelPath = await downloadModel(formatEntry.url, modelsDir);
    } else {
      throw new Error(
        `resolveEngine: model file not found at ${modelPath} and no downloadModel function provided`
      );
    }
  }
  // onnx and coreml run in-process via onnxruntime-node.
  if (selectedFormat === "onnx" || selectedFormat === "coreml") {
    const engine = new NodeInferenceEngine(modelPath, selectedBackend);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  const { pythonPath } = options;
  // NOTE(review): the "coreml" entry is unreachable here (coreml returned via
  // the in-process branch above), and "tflite" has no script — tflite requests
  // always take the ONNX fallback below.
  const PYTHON_SCRIPT_MAP = {
    coreml: "coreml_inference.py",
    pytorch: "pytorch_inference.py",
    openvino: "openvino_inference.py"
  };
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
    // Fix: __dirname does not exist in ES modules, so this .mjs chunk threw a
    // ReferenceError here; derive it from import.meta.url while keeping a CJS
    // build (where __dirname IS defined) working unchanged.
    const { fileURLToPath } = await import("url");
    const baseDir = typeof __dirname !== "undefined"
      ? __dirname
      : path2.dirname(fileURLToPath(import.meta.url));
    const scriptPath = path2.join(baseDir, "../../python", scriptName);
    const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
    const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
      `--input-size=${inputSize}`,
      `--confidence=0.25`
    ]);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Last resort: a locally present ONNX export on CPU.
  const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
  if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
    const engine = new NodeInferenceEngine(fallbackPath, "cpu");
    await engine.initialize();
    return { engine, format: "onnx", modelPath: fallbackPath };
  }
  throw new Error(
    `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
  );
}
299
/**
 * Best-effort probe of the ONNX Runtime backends usable on this machine.
 * "cpu" is always reported; accelerator providers are added when
 * onnxruntime-node can be loaded and exposes a provider list, and CoreML is
 * assumed available on macOS even when the probe finds nothing.
 *
 * @returns {Promise<string[]>} deduplicated backend names, "cpu" first.
 */
async function probeOnnxBackends() {
  const found = ["cpu"];
  try {
    const ort = await import("onnxruntime-node");
    // Only consult the provider list when the ort.env surface looks populated.
    const canProbe = ort.env?.webgl?.disabled !== void 0;
    const providerList = canProbe ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
    const accelerators = new Set(["coreml", "cuda", "tensorrt"]);
    for (const raw of providerList) {
      const name = raw.toLowerCase().replace("executionprovider", "");
      if (accelerators.has(name)) {
        found.push(name);
      }
    }
  } catch {
    // onnxruntime-node unavailable — report CPU (plus the macOS default below).
  }
  if (process.platform === "darwin" && !found.includes("coreml")) {
    found.push("coreml");
  }
  return [...new Set(found)];
}
317
+
318
// Public surface of this shared chunk, consumed by the per-addon bundles.
export {
  NodeInferenceEngine,
  PythonInferenceEngine,
  resolveEngine,
  probeOnnxBackends
};
//# sourceMappingURL=chunk-LPI42WL6.mjs.map