@camstack/vision 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +25 -0
- package/dist/addons/animal-classifier/index.d.ts +25 -0
- package/dist/addons/animal-classifier/index.js +469 -0
- package/dist/addons/animal-classifier/index.js.map +1 -0
- package/dist/addons/animal-classifier/index.mjs +9 -0
- package/dist/addons/animal-classifier/index.mjs.map +1 -0
- package/dist/addons/audio-classification/index.d.mts +31 -0
- package/dist/addons/audio-classification/index.d.ts +31 -0
- package/dist/addons/audio-classification/index.js +411 -0
- package/dist/addons/audio-classification/index.js.map +1 -0
- package/dist/addons/audio-classification/index.mjs +8 -0
- package/dist/addons/audio-classification/index.mjs.map +1 -0
- package/dist/addons/bird-global-classifier/index.d.mts +26 -0
- package/dist/addons/bird-global-classifier/index.d.ts +26 -0
- package/dist/addons/bird-global-classifier/index.js +475 -0
- package/dist/addons/bird-global-classifier/index.js.map +1 -0
- package/dist/addons/bird-global-classifier/index.mjs +9 -0
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -0
- package/dist/addons/bird-nabirds-classifier/index.d.mts +28 -0
- package/dist/addons/bird-nabirds-classifier/index.d.ts +28 -0
- package/dist/addons/bird-nabirds-classifier/index.js +517 -0
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -0
- package/dist/addons/bird-nabirds-classifier/index.mjs +9 -0
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -0
- package/dist/addons/camera-native-detection/index.d.mts +32 -0
- package/dist/addons/camera-native-detection/index.d.ts +32 -0
- package/dist/addons/camera-native-detection/index.js +99 -0
- package/dist/addons/camera-native-detection/index.js.map +1 -0
- package/dist/addons/camera-native-detection/index.mjs +7 -0
- package/dist/addons/camera-native-detection/index.mjs.map +1 -0
- package/dist/addons/face-detection/index.d.mts +24 -0
- package/dist/addons/face-detection/index.d.ts +24 -0
- package/dist/addons/face-detection/index.js +513 -0
- package/dist/addons/face-detection/index.js.map +1 -0
- package/dist/addons/face-detection/index.mjs +10 -0
- package/dist/addons/face-detection/index.mjs.map +1 -0
- package/dist/addons/face-recognition/index.d.mts +24 -0
- package/dist/addons/face-recognition/index.d.ts +24 -0
- package/dist/addons/face-recognition/index.js +437 -0
- package/dist/addons/face-recognition/index.js.map +1 -0
- package/dist/addons/face-recognition/index.mjs +9 -0
- package/dist/addons/face-recognition/index.mjs.map +1 -0
- package/dist/addons/motion-detection/index.d.mts +26 -0
- package/dist/addons/motion-detection/index.d.ts +26 -0
- package/dist/addons/motion-detection/index.js +273 -0
- package/dist/addons/motion-detection/index.js.map +1 -0
- package/dist/addons/motion-detection/index.mjs +8 -0
- package/dist/addons/motion-detection/index.mjs.map +1 -0
- package/dist/addons/object-detection/index.d.mts +25 -0
- package/dist/addons/object-detection/index.d.ts +25 -0
- package/dist/addons/object-detection/index.js +673 -0
- package/dist/addons/object-detection/index.js.map +1 -0
- package/dist/addons/object-detection/index.mjs +10 -0
- package/dist/addons/object-detection/index.mjs.map +1 -0
- package/dist/addons/plate-detection/index.d.mts +25 -0
- package/dist/addons/plate-detection/index.d.ts +25 -0
- package/dist/addons/plate-detection/index.js +477 -0
- package/dist/addons/plate-detection/index.js.map +1 -0
- package/dist/addons/plate-detection/index.mjs +10 -0
- package/dist/addons/plate-detection/index.mjs.map +1 -0
- package/dist/addons/plate-recognition/index.d.mts +25 -0
- package/dist/addons/plate-recognition/index.d.ts +25 -0
- package/dist/addons/plate-recognition/index.js +470 -0
- package/dist/addons/plate-recognition/index.js.map +1 -0
- package/dist/addons/plate-recognition/index.mjs +9 -0
- package/dist/addons/plate-recognition/index.mjs.map +1 -0
- package/dist/chunk-3BKYLBBH.mjs +229 -0
- package/dist/chunk-3BKYLBBH.mjs.map +1 -0
- package/dist/chunk-4PC262GU.mjs +203 -0
- package/dist/chunk-4PC262GU.mjs.map +1 -0
- package/dist/chunk-6OR5TE7A.mjs +101 -0
- package/dist/chunk-6OR5TE7A.mjs.map +1 -0
- package/dist/chunk-7SZAISGP.mjs +210 -0
- package/dist/chunk-7SZAISGP.mjs.map +1 -0
- package/dist/chunk-AD2TFYZA.mjs +235 -0
- package/dist/chunk-AD2TFYZA.mjs.map +1 -0
- package/dist/chunk-CGYSSHHM.mjs +363 -0
- package/dist/chunk-CGYSSHHM.mjs.map +1 -0
- package/dist/chunk-IYHMGYGP.mjs +79 -0
- package/dist/chunk-IYHMGYGP.mjs.map +1 -0
- package/dist/chunk-J3IUBPRE.mjs +187 -0
- package/dist/chunk-J3IUBPRE.mjs.map +1 -0
- package/dist/chunk-KFZDJPYL.mjs +190 -0
- package/dist/chunk-KFZDJPYL.mjs.map +1 -0
- package/dist/chunk-KUO2BVFY.mjs +90 -0
- package/dist/chunk-KUO2BVFY.mjs.map +1 -0
- package/dist/chunk-PXBY3QOA.mjs +152 -0
- package/dist/chunk-PXBY3QOA.mjs.map +1 -0
- package/dist/chunk-XUKDL23Y.mjs +216 -0
- package/dist/chunk-XUKDL23Y.mjs.map +1 -0
- package/dist/chunk-Z26BVC7S.mjs +214 -0
- package/dist/chunk-Z26BVC7S.mjs.map +1 -0
- package/dist/chunk-Z5AHZQEZ.mjs +258 -0
- package/dist/chunk-Z5AHZQEZ.mjs.map +1 -0
- package/dist/index.d.mts +152 -0
- package/dist/index.d.ts +152 -0
- package/dist/index.js +2775 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +205 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +43 -0
- package/python/coreml_inference.py +67 -0
- package/python/openvino_inference.py +76 -0
- package/python/pytorch_inference.py +74 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/shared/python-engine.ts"],"sourcesContent":["import type { IInferenceEngine, DetectionRuntime, DetectionDevice } from '@camstack/types'\nimport { spawn, type ChildProcess } from 'node:child_process'\n\nexport class PythonInferenceEngine implements IInferenceEngine {\n readonly runtime: DetectionRuntime\n readonly device: DetectionDevice\n private process: ChildProcess | null = null\n private receiveBuffer: Buffer = Buffer.alloc(0)\n private pendingResolve: ((value: Record<string, unknown>) => void) | null = null\n private pendingReject: ((reason: Error) => void) | null = null\n\n constructor(\n private readonly pythonPath: string,\n runtime: DetectionRuntime,\n private readonly modelPath: string,\n private readonly extraArgs: readonly string[] = [],\n ) {\n this.runtime = runtime\n // Determine device from runtime\n const runtimeDeviceMap: Readonly<Record<DetectionRuntime, DetectionDevice>> = {\n onnx: 'cpu',\n coreml: 'gpu-mps',\n pytorch: 'cpu',\n openvino: 'cpu',\n tflite: 'cpu',\n }\n this.device = runtimeDeviceMap[runtime]\n }\n\n async initialize(): Promise<void> {\n const args = [this.modelPath, ...this.extraArgs]\n this.process = spawn(this.pythonPath, args, {\n stdio: ['pipe', 'pipe', 'pipe'],\n })\n\n if (!this.process.stdout || !this.process.stdin) {\n throw new Error('PythonInferenceEngine: failed to create process pipes')\n }\n\n this.process.stderr?.on('data', (chunk: Buffer) => {\n // Log stderr from python process for debugging\n process.stderr.write(`[python-engine] ${chunk.toString()}`)\n })\n\n this.process.on('error', (err) => {\n this.pendingReject?.(err)\n this.pendingReject = null\n this.pendingResolve = null\n })\n\n this.process.on('exit', (code) => {\n if (code !== 0) {\n const err = new Error(`PythonInferenceEngine: process exited with code ${code}`)\n this.pendingReject?.(err)\n this.pendingReject = null\n this.pendingResolve = null\n }\n })\n\n this.process.stdout.on('data', (chunk: Buffer) => {\n 
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk])\n this._tryReceive()\n })\n\n // Give the process a moment to start up and load the model\n await new Promise<void>((resolve, reject) => {\n const timeout = setTimeout(() => resolve(), 2000)\n this.process?.on('error', (err) => {\n clearTimeout(timeout)\n reject(err)\n })\n this.process?.on('exit', (code) => {\n clearTimeout(timeout)\n if (code !== 0) {\n reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`))\n }\n })\n })\n }\n\n private _tryReceive(): void {\n // Binary IPC: [4 bytes LE uint32 length][JSON bytes]\n if (this.receiveBuffer.length < 4) return\n\n const length = this.receiveBuffer.readUInt32LE(0)\n if (this.receiveBuffer.length < 4 + length) return\n\n const jsonBytes = this.receiveBuffer.subarray(4, 4 + length)\n this.receiveBuffer = this.receiveBuffer.subarray(4 + length)\n\n const resolve = this.pendingResolve\n const reject = this.pendingReject\n this.pendingResolve = null\n this.pendingReject = null\n\n if (!resolve) return\n\n try {\n const parsed = JSON.parse(jsonBytes.toString('utf8')) as Record<string, unknown>\n resolve(parsed)\n } catch (err) {\n reject?.(err instanceof Error ? 
err : new Error(String(err)))\n }\n }\n\n /** Send JPEG buffer, receive JSON detection results */\n async runJpeg(jpeg: Buffer): Promise<Record<string, unknown>> {\n if (!this.process?.stdin) {\n throw new Error('PythonInferenceEngine: process not initialized')\n }\n\n return new Promise<Record<string, unknown>>((resolve, reject) => {\n this.pendingResolve = resolve\n this.pendingReject = reject\n\n // Binary IPC: [4 bytes LE uint32 length][JPEG bytes]\n const lengthBuf = Buffer.allocUnsafe(4)\n lengthBuf.writeUInt32LE(jpeg.length, 0)\n this.process!.stdin!.write(Buffer.concat([lengthBuf, jpeg]))\n })\n }\n\n /** IInferenceEngine.run — wraps runJpeg for compatibility */\n async run(_input: Float32Array, _inputShape: readonly number[]): Promise<Float32Array> {\n throw new Error(\n 'PythonInferenceEngine: use runJpeg() directly — this engine operates on JPEG input',\n )\n }\n\n /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */\n async runMultiOutput(\n _input: Float32Array,\n _inputShape: readonly number[],\n ): Promise<Record<string, Float32Array>> {\n throw new Error(\n 'PythonInferenceEngine: runMultiOutput() is not supported — this engine operates on JPEG input',\n )\n }\n\n async dispose(): Promise<void> {\n if (this.process) {\n this.process.stdin?.end()\n this.process.kill('SIGTERM')\n this.process = null\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA,SAAS,aAAgC;AAElC,IAAM,wBAAN,MAAwD;AAAA,EAQ7D,YACmB,YACjB,SACiB,WACA,YAA+B,CAAC,GACjD;AAJiB;AAEA;AACA;AAEjB,SAAK,UAAU;AAEf,UAAM,mBAAwE;AAAA,MAC5E,MAAM;AAAA,MACN,QAAQ;AAAA,MACR,SAAS;AAAA,MACT,UAAU;AAAA,MACV,QAAQ;AAAA,IACV;AACA,SAAK,SAAS,iBAAiB,OAAO;AAAA,EACxC;AAAA,EAvBS;AAAA,EACA;AAAA,EACD,UAA+B;AAAA,EAC/B,gBAAwB,OAAO,MAAM,CAAC;AAAA,EACtC,iBAAoE;AAAA,EACpE,gBAAkD;AAAA,EAoB1D,MAAM,aAA4B;AAChC,UAAM,OAAO,CAAC,KAAK,WAAW,GAAG,KAAK,SAAS;AAC/C,SAAK,UAAU,MAAM,KAAK,YAAY,MAAM;AAAA,MAC1C,OAAO,CAAC,QAAQ,QAAQ,MAAM;AAAA,IAChC,CAAC;AAED,QAAI,CAAC,KAAK,QAAQ,UAAU,CAAC,KAAK,QAAQ,OAAO;AAC/C,YAAM,IAAI,MAAM,uDAAuD;AAAA,IACzE;AAEA,SAAK,QAAQ,QAAQ,GAAG,QAAQ,CAAC,UAAkB;AAEjD,cAAQ,OAAO,MAAM,mBAAmB,MAAM,SAAS,CAAC,EAAE;AAAA,IAC5D,CAAC;AAED,SAAK,QAAQ,GAAG,SAAS,CAAC,QAAQ;AAChC,WAAK,gBAAgB,GAAG;AACxB,WAAK,gBAAgB;AACrB,WAAK,iBAAiB;AAAA,IACxB,CAAC;AAED,SAAK,QAAQ,GAAG,QAAQ,CAAC,SAAS;AAChC,UAAI,SAAS,GAAG;AACd,cAAM,MAAM,IAAI,MAAM,mDAAmD,IAAI,EAAE;AAC/E,aAAK,gBAAgB,GAAG;AACxB,aAAK,gBAAgB;AACrB,aAAK,iBAAiB;AAAA,MACxB;AAAA,IACF,CAAC;AAED,SAAK,QAAQ,OAAO,GAAG,QAAQ,CAAC,UAAkB;AAChD,WAAK,gBAAgB,OAAO,OAAO,CAAC,KAAK,eAAe,KAAK,CAAC;AAC9D,WAAK,YAAY;AAAA,IACnB,CAAC;AAGD,UAAM,IAAI,QAAc,CAAC,SAAS,WAAW;AAC3C,YAAM,UAAU,WAAW,MAAM,QAAQ,GAAG,GAAI;AAChD,WAAK,SAAS,GAAG,SAAS,CAAC,QAAQ;AACjC,qBAAa,OAAO;AACpB,eAAO,GAAG;AAAA,MACZ,CAAC;AACD,WAAK,SAAS,GAAG,QAAQ,CAAC,SAAS;AACjC,qBAAa,OAAO;AACpB,YAAI,SAAS,GAAG;AACd,iBAAO,IAAI,MAAM,yDAAyD,IAAI,EAAE,CAAC;AAAA,QACnF;AAAA,MACF,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAAA,EAEQ,cAAoB;AAE1B,QAAI,KAAK,cAAc,SAAS,EAAG;AAEnC,UAAM,SAAS,KAAK,cAAc,aAAa,CAAC;AAChD,QAAI,KAAK,cAAc,SAAS,IAAI,OAAQ;AAE5C,UAAM,YAAY,KAAK,cAAc,SAAS,GAAG,IAAI,MAAM;AAC3D,SAAK,gBAAgB,KAAK,cAAc,SAAS,IAAI,MAAM;AAE3D,UAAM,UAAU,KAAK;AACrB,UAAM,SAAS,KAAK;AACpB,SAAK,iBAAiB;AACtB,SAAK,gBAAgB;AAErB,QAAI,CAAC,QAAS;AAEd,QAAI;AACF,YAAM,SAAS,KAAK,MAAM,UAAU,SAAS,MAAM,CAAC;AACpD,cAAQ,MAAM;AAAA,IAChB,SAAS,KAAK;AACZ,eAAS,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC,CAAC;A
AAA,IAC9D;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,QAAQ,MAAgD;AAC5D,QAAI,CAAC,KAAK,SAAS,OAAO;AACxB,YAAM,IAAI,MAAM,gDAAgD;AAAA,IAClE;AAEA,WAAO,IAAI,QAAiC,CAAC,SAAS,WAAW;AAC/D,WAAK,iBAAiB;AACtB,WAAK,gBAAgB;AAGrB,YAAM,YAAY,OAAO,YAAY,CAAC;AACtC,gBAAU,cAAc,KAAK,QAAQ,CAAC;AACtC,WAAK,QAAS,MAAO,MAAM,OAAO,OAAO,CAAC,WAAW,IAAI,CAAC,CAAC;AAAA,IAC7D,CAAC;AAAA,EACH;AAAA;AAAA,EAGA,MAAM,IAAI,QAAsB,aAAuD;AACrF,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,eACJ,QACA,aACuC;AACvC,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,UAAyB;AAC7B,QAAI,KAAK,SAAS;AAChB,WAAK,QAAQ,OAAO,IAAI;AACxB,WAAK,QAAQ,KAAK,SAAS;AAC3B,WAAK,UAAU;AAAA,IACjB;AAAA,EACF;AACF;","names":[]}
|
package/package.json
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@camstack/vision",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Builtin detection addons for CamStack — object detection, face, plate, audio, motion",
|
|
5
|
+
"main": "./dist/index.js",
|
|
6
|
+
"module": "./dist/index.mjs",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": { "import": "./dist/index.mjs", "require": "./dist/index.js", "types": "./dist/index.d.ts" },
|
|
10
|
+
"./addons/*": { "import": "./dist/addons/*/index.mjs", "require": "./dist/addons/*/index.js", "types": "./dist/addons/*/index.d.ts" }
|
|
11
|
+
},
|
|
12
|
+
"camstack": {
|
|
13
|
+
"addons": [
|
|
14
|
+
{ "id": "motion-detection", "entry": "./dist/addons/motion-detection/index.js", "slot": "detector" },
|
|
15
|
+
{ "id": "object-detection", "entry": "./dist/addons/object-detection/index.js", "slot": "detector" },
|
|
16
|
+
{ "id": "face-detection", "entry": "./dist/addons/face-detection/index.js", "slot": "cropper" },
|
|
17
|
+
{ "id": "face-recognition", "entry": "./dist/addons/face-recognition/index.js", "slot": "classifier" },
|
|
18
|
+
{ "id": "plate-detection", "entry": "./dist/addons/plate-detection/index.js", "slot": "cropper" },
|
|
19
|
+
{ "id": "plate-recognition", "entry": "./dist/addons/plate-recognition/index.js", "slot": "classifier" },
|
|
20
|
+
{ "id": "audio-classification", "entry": "./dist/addons/audio-classification/index.js", "slot": "classifier" },
|
|
21
|
+
{ "id": "camera-native-detection", "entry": "./dist/addons/camera-native-detection/index.js", "slot": "detector" },
|
|
22
|
+
{ "id": "bird-global-classifier", "entry": "./dist/addons/bird-global-classifier/index.js", "slot": "classifier" },
|
|
23
|
+
{ "id": "bird-nabirds-classifier", "entry": "./dist/addons/bird-nabirds-classifier/index.js", "slot": "classifier" },
|
|
24
|
+
{ "id": "animal-classifier", "entry": "./dist/addons/animal-classifier/index.js", "slot": "classifier" }
|
|
25
|
+
]
|
|
26
|
+
},
|
|
27
|
+
"files": ["dist", "python"],
|
|
28
|
+
"scripts": {
|
|
29
|
+
"build": "tsup",
|
|
30
|
+
"dev": "tsup --watch",
|
|
31
|
+
"typecheck": "tsc --noEmit",
|
|
32
|
+
"test": "vitest run",
|
|
33
|
+
"test:watch": "vitest"
|
|
34
|
+
},
|
|
35
|
+
"peerDependencies": { "@camstack/types": "^0.1.0" },
|
|
36
|
+
"dependencies": { "onnxruntime-node": "^1.24.3", "sharp": "^0.34.0" },
|
|
37
|
+
"devDependencies": {
|
|
38
|
+
"@camstack/types": "*",
|
|
39
|
+
"tsup": "^8.0.0",
|
|
40
|
+
"typescript": "~5.9.0",
|
|
41
|
+
"vitest": "^3.0.0"
|
|
42
|
+
}
|
|
43
|
+
}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""CoreML inference script. Binary IPC protocol over stdin/stdout.
|
|
3
|
+
|
|
4
|
+
Protocol:
|
|
5
|
+
Send: [4 bytes LE uint32 length][JPEG bytes]
|
|
6
|
+
Receive: [4 bytes LE uint32 length][JSON bytes]
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python coreml_inference.py <model_path> [--device cpu|ane|gpu]
|
|
10
|
+
"""
|
|
11
|
+
import sys
|
|
12
|
+
import struct
|
|
13
|
+
import json
|
|
14
|
+
import argparse
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def read_frame(stream) -> bytes:
    """Read one length-prefixed JPEG frame from the binary IPC stream.

    Frame layout: [4 bytes LE uint32 length][JPEG bytes].

    Returns:
        The JPEG payload, or b"" on EOF or a truncated frame so the
        caller's loop can terminate cleanly instead of processing a
        partial (and therefore undecodable) image.
    """
    header = stream.read(4)
    if len(header) < 4:
        return b""
    length = struct.unpack("<I", header)[0]
    data = stream.read(length)
    # A short read here means the peer closed mid-frame; a partial JPEG
    # is useless, so signal end-of-stream rather than return garbage.
    if len(data) < length:
        return b""
    return data
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def write_result(stream, result: dict) -> None:
    """Serialize *result* as JSON and write it length-prefixed to the IPC stream.

    Wire format: [4 bytes LE uint32 length][UTF-8 JSON bytes].
    """
    body = json.dumps(result).encode("utf-8")
    stream.write(struct.pack("<I", len(body)) + body)
    stream.flush()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def main() -> None:
    """Parse CLI options, then serve CoreML inference requests over stdin/stdout."""
    arg_parser = argparse.ArgumentParser(description="CoreML inference via binary IPC")
    arg_parser.add_argument("model_path", help="Path to .mlpackage or .mlmodel")
    arg_parser.add_argument(
        "--device",
        default="ane",
        choices=["cpu", "ane", "gpu"],
        help="Compute unit: cpu, ane (Apple Neural Engine), or gpu",
    )
    options = arg_parser.parse_args()

    # TODO: load the CoreML model once at startup:
    #   import coremltools as ct
    #   model = ct.models.MLModel(options.model_path)

    in_stream = sys.stdin.buffer
    out_stream = sys.stdout.buffer

    # Serve one JPEG frame per iteration until the parent closes stdin.
    while frame := read_frame(in_stream):
        # TODO: run CoreML inference on `frame`:
        #   import PIL.Image, io
        #   img = PIL.Image.open(io.BytesIO(frame))
        #   predictions = model.predict({"image": img})
        #   detections = parse_predictions(predictions)

        # Stub: reply with an empty detection set so the host protocol keeps moving.
        write_result(out_stream, {"detections": [], "error": None})


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""OpenVINO inference script. Binary IPC protocol over stdin/stdout.
|
|
3
|
+
|
|
4
|
+
Protocol:
|
|
5
|
+
Send: [4 bytes LE uint32 length][JPEG bytes]
|
|
6
|
+
Receive: [4 bytes LE uint32 length][JSON bytes]
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python openvino_inference.py <model_path> [--device CPU|GPU|AUTO] [--input-size 640]
|
|
10
|
+
"""
|
|
11
|
+
import sys
|
|
12
|
+
import struct
|
|
13
|
+
import json
|
|
14
|
+
import argparse
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def read_frame(stream) -> bytes:
    """Read one length-prefixed JPEG frame from the binary IPC stream.

    Frame layout: [4 bytes LE uint32 length][JPEG bytes].

    Returns:
        The JPEG payload, or b"" on EOF or a truncated frame so the
        caller's loop can terminate cleanly instead of processing a
        partial (and therefore undecodable) image.
    """
    header = stream.read(4)
    if len(header) < 4:
        return b""
    length = struct.unpack("<I", header)[0]
    data = stream.read(length)
    # A short read here means the peer closed mid-frame; a partial JPEG
    # is useless, so signal end-of-stream rather than return garbage.
    if len(data) < length:
        return b""
    return data
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def write_result(stream, result: dict) -> None:
    """Serialize *result* as JSON and write it length-prefixed to the IPC stream.

    Wire format: [4 bytes LE uint32 length][UTF-8 JSON bytes].
    """
    body = json.dumps(result).encode("utf-8")
    stream.write(struct.pack("<I", len(body)) + body)
    stream.flush()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def main() -> None:
    """Parse CLI options, then serve OpenVINO inference requests over stdin/stdout."""
    arg_parser = argparse.ArgumentParser(description="OpenVINO inference via binary IPC")
    arg_parser.add_argument("model_path", help="Path to .xml model (IR format)")
    arg_parser.add_argument(
        "--device",
        default="AUTO",
        choices=["CPU", "GPU", "AUTO"],
        help="OpenVINO compute device",
    )
    arg_parser.add_argument(
        "--input-size",
        type=int,
        default=640,
        help="Model input size (square)",
    )
    options = arg_parser.parse_args()

    # TODO: load and compile the OpenVINO model once at startup:
    #   from openvino.runtime import Core
    #   ie = Core()
    #   model = ie.read_model(options.model_path)
    #   compiled = ie.compile_model(model, options.device)
    #   infer_request = compiled.create_infer_request()

    in_stream = sys.stdin.buffer
    out_stream = sys.stdout.buffer

    # Serve one JPEG frame per iteration until the parent closes stdin.
    while frame := read_frame(in_stream):
        # TODO: preprocess `frame` and run inference:
        #   import io, PIL.Image
        #   import numpy as np
        #   img = PIL.Image.open(io.BytesIO(frame)).convert("RGB")
        #   resized = img.resize((options.input_size, options.input_size))
        #   tensor = np.array(resized).transpose(2, 0, 1)[None].astype(np.float32) / 255.0
        #   infer_request.infer({0: tensor})
        #   output = infer_request.get_output_tensor(0).data
        #   detections = parse_output(output)

        # Stub: reply with an empty detection set so the host protocol keeps moving.
        write_result(out_stream, {"detections": [], "error": None})


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""PyTorch inference script. Binary IPC protocol over stdin/stdout.
|
|
3
|
+
|
|
4
|
+
Protocol:
|
|
5
|
+
Send: [4 bytes LE uint32 length][JPEG bytes]
|
|
6
|
+
Receive: [4 bytes LE uint32 length][JSON bytes]
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python pytorch_inference.py <model_path> [--device cpu|cuda|mps] [--input-size 640]
|
|
10
|
+
"""
|
|
11
|
+
import sys
|
|
12
|
+
import struct
|
|
13
|
+
import json
|
|
14
|
+
import argparse
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def read_frame(stream) -> bytes:
    """Read one length-prefixed JPEG frame from the binary IPC stream.

    Frame layout: [4 bytes LE uint32 length][JPEG bytes].

    Returns:
        The JPEG payload, or b"" on EOF or a truncated frame so the
        caller's loop can terminate cleanly instead of processing a
        partial (and therefore undecodable) image.
    """
    header = stream.read(4)
    if len(header) < 4:
        return b""
    length = struct.unpack("<I", header)[0]
    data = stream.read(length)
    # A short read here means the peer closed mid-frame; a partial JPEG
    # is useless, so signal end-of-stream rather than return garbage.
    if len(data) < length:
        return b""
    return data
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def write_result(stream, result: dict) -> None:
    """Serialize *result* as JSON and write it length-prefixed to the IPC stream.

    Wire format: [4 bytes LE uint32 length][UTF-8 JSON bytes].
    """
    body = json.dumps(result).encode("utf-8")
    stream.write(struct.pack("<I", len(body)) + body)
    stream.flush()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def main() -> None:
    """Parse CLI options, then serve PyTorch inference requests over stdin/stdout."""
    arg_parser = argparse.ArgumentParser(description="PyTorch inference via binary IPC")
    arg_parser.add_argument("model_path", help="Path to .pt or .torchscript model")
    arg_parser.add_argument(
        "--device",
        default="cpu",
        choices=["cpu", "cuda", "mps"],
        help="Compute device",
    )
    arg_parser.add_argument(
        "--input-size",
        type=int,
        default=640,
        help="Model input size (square)",
    )
    options = arg_parser.parse_args()

    # TODO: load the TorchScript model once at startup:
    #   import torch
    #   device = torch.device(options.device)
    #   model = torch.jit.load(options.model_path, map_location=device)
    #   model.eval()

    in_stream = sys.stdin.buffer
    out_stream = sys.stdout.buffer

    # Serve one JPEG frame per iteration until the parent closes stdin.
    while frame := read_frame(in_stream):
        # TODO: preprocess `frame` and run inference:
        #   import io, PIL.Image, torchvision.transforms as T
        #   img = PIL.Image.open(io.BytesIO(frame)).convert("RGB")
        #   transform = T.Compose([T.Resize((options.input_size, options.input_size)), T.ToTensor()])
        #   tensor = transform(img).unsqueeze(0).to(device)
        #   with torch.no_grad():
        #       output = model(tensor)
        #   detections = parse_output(output)

        # Stub: reply with an empty detection set so the host protocol keeps moving.
        write_result(out_stream, {"detections": [], "error": None})


if __name__ == "__main__":
    main()
|