@camstack/vision 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +25 -0
- package/dist/addons/animal-classifier/index.d.ts +25 -0
- package/dist/addons/animal-classifier/index.js +469 -0
- package/dist/addons/animal-classifier/index.js.map +1 -0
- package/dist/addons/animal-classifier/index.mjs +9 -0
- package/dist/addons/animal-classifier/index.mjs.map +1 -0
- package/dist/addons/audio-classification/index.d.mts +31 -0
- package/dist/addons/audio-classification/index.d.ts +31 -0
- package/dist/addons/audio-classification/index.js +411 -0
- package/dist/addons/audio-classification/index.js.map +1 -0
- package/dist/addons/audio-classification/index.mjs +8 -0
- package/dist/addons/audio-classification/index.mjs.map +1 -0
- package/dist/addons/bird-global-classifier/index.d.mts +26 -0
- package/dist/addons/bird-global-classifier/index.d.ts +26 -0
- package/dist/addons/bird-global-classifier/index.js +475 -0
- package/dist/addons/bird-global-classifier/index.js.map +1 -0
- package/dist/addons/bird-global-classifier/index.mjs +9 -0
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -0
- package/dist/addons/bird-nabirds-classifier/index.d.mts +28 -0
- package/dist/addons/bird-nabirds-classifier/index.d.ts +28 -0
- package/dist/addons/bird-nabirds-classifier/index.js +517 -0
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -0
- package/dist/addons/bird-nabirds-classifier/index.mjs +9 -0
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -0
- package/dist/addons/camera-native-detection/index.d.mts +32 -0
- package/dist/addons/camera-native-detection/index.d.ts +32 -0
- package/dist/addons/camera-native-detection/index.js +99 -0
- package/dist/addons/camera-native-detection/index.js.map +1 -0
- package/dist/addons/camera-native-detection/index.mjs +7 -0
- package/dist/addons/camera-native-detection/index.mjs.map +1 -0
- package/dist/addons/face-detection/index.d.mts +24 -0
- package/dist/addons/face-detection/index.d.ts +24 -0
- package/dist/addons/face-detection/index.js +513 -0
- package/dist/addons/face-detection/index.js.map +1 -0
- package/dist/addons/face-detection/index.mjs +10 -0
- package/dist/addons/face-detection/index.mjs.map +1 -0
- package/dist/addons/face-recognition/index.d.mts +24 -0
- package/dist/addons/face-recognition/index.d.ts +24 -0
- package/dist/addons/face-recognition/index.js +437 -0
- package/dist/addons/face-recognition/index.js.map +1 -0
- package/dist/addons/face-recognition/index.mjs +9 -0
- package/dist/addons/face-recognition/index.mjs.map +1 -0
- package/dist/addons/motion-detection/index.d.mts +26 -0
- package/dist/addons/motion-detection/index.d.ts +26 -0
- package/dist/addons/motion-detection/index.js +273 -0
- package/dist/addons/motion-detection/index.js.map +1 -0
- package/dist/addons/motion-detection/index.mjs +8 -0
- package/dist/addons/motion-detection/index.mjs.map +1 -0
- package/dist/addons/object-detection/index.d.mts +25 -0
- package/dist/addons/object-detection/index.d.ts +25 -0
- package/dist/addons/object-detection/index.js +673 -0
- package/dist/addons/object-detection/index.js.map +1 -0
- package/dist/addons/object-detection/index.mjs +10 -0
- package/dist/addons/object-detection/index.mjs.map +1 -0
- package/dist/addons/plate-detection/index.d.mts +25 -0
- package/dist/addons/plate-detection/index.d.ts +25 -0
- package/dist/addons/plate-detection/index.js +477 -0
- package/dist/addons/plate-detection/index.js.map +1 -0
- package/dist/addons/plate-detection/index.mjs +10 -0
- package/dist/addons/plate-detection/index.mjs.map +1 -0
- package/dist/addons/plate-recognition/index.d.mts +25 -0
- package/dist/addons/plate-recognition/index.d.ts +25 -0
- package/dist/addons/plate-recognition/index.js +470 -0
- package/dist/addons/plate-recognition/index.js.map +1 -0
- package/dist/addons/plate-recognition/index.mjs +9 -0
- package/dist/addons/plate-recognition/index.mjs.map +1 -0
- package/dist/chunk-3BKYLBBH.mjs +229 -0
- package/dist/chunk-3BKYLBBH.mjs.map +1 -0
- package/dist/chunk-4PC262GU.mjs +203 -0
- package/dist/chunk-4PC262GU.mjs.map +1 -0
- package/dist/chunk-6OR5TE7A.mjs +101 -0
- package/dist/chunk-6OR5TE7A.mjs.map +1 -0
- package/dist/chunk-7SZAISGP.mjs +210 -0
- package/dist/chunk-7SZAISGP.mjs.map +1 -0
- package/dist/chunk-AD2TFYZA.mjs +235 -0
- package/dist/chunk-AD2TFYZA.mjs.map +1 -0
- package/dist/chunk-CGYSSHHM.mjs +363 -0
- package/dist/chunk-CGYSSHHM.mjs.map +1 -0
- package/dist/chunk-IYHMGYGP.mjs +79 -0
- package/dist/chunk-IYHMGYGP.mjs.map +1 -0
- package/dist/chunk-J3IUBPRE.mjs +187 -0
- package/dist/chunk-J3IUBPRE.mjs.map +1 -0
- package/dist/chunk-KFZDJPYL.mjs +190 -0
- package/dist/chunk-KFZDJPYL.mjs.map +1 -0
- package/dist/chunk-KUO2BVFY.mjs +90 -0
- package/dist/chunk-KUO2BVFY.mjs.map +1 -0
- package/dist/chunk-PXBY3QOA.mjs +152 -0
- package/dist/chunk-PXBY3QOA.mjs.map +1 -0
- package/dist/chunk-XUKDL23Y.mjs +216 -0
- package/dist/chunk-XUKDL23Y.mjs.map +1 -0
- package/dist/chunk-Z26BVC7S.mjs +214 -0
- package/dist/chunk-Z26BVC7S.mjs.map +1 -0
- package/dist/chunk-Z5AHZQEZ.mjs +258 -0
- package/dist/chunk-Z5AHZQEZ.mjs.map +1 -0
- package/dist/index.d.mts +152 -0
- package/dist/index.d.ts +152 -0
- package/dist/index.js +2775 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +205 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +43 -0
- package/python/coreml_inference.py +67 -0
- package/python/openvino_inference.py +76 -0
- package/python/pytorch_inference.py +74 -0
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/addons/camera-native-detection/index.ts
|
|
21
|
+
var camera_native_detection_exports = {};
|
|
22
|
+
__export(camera_native_detection_exports, {
|
|
23
|
+
default: () => CameraNativeDetectionAddon
|
|
24
|
+
});
|
|
25
|
+
module.exports = __toCommonJS(camera_native_detection_exports);
|
|
26
|
+
var NATIVE_LABELS = [
|
|
27
|
+
{ id: "person", name: "Person" },
|
|
28
|
+
{ id: "vehicle", name: "Vehicle" },
|
|
29
|
+
{ id: "motion", name: "Motion" },
|
|
30
|
+
{ id: "face", name: "Face" }
|
|
31
|
+
];
|
|
32
|
+
var NATIVE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
33
|
+
var CameraNativeDetectionAddon = class {
|
|
34
|
+
id = "camera-native-detection";
|
|
35
|
+
slot = "detector";
|
|
36
|
+
inputClasses = null;
|
|
37
|
+
outputClasses = ["person", "vehicle", "motion", "face"];
|
|
38
|
+
slotPriority = 5;
|
|
39
|
+
manifest = {
|
|
40
|
+
id: "camera-native-detection",
|
|
41
|
+
name: "Camera Native Detection",
|
|
42
|
+
version: "0.1.0",
|
|
43
|
+
description: "Passthrough adapter for camera-native events (Frigate, Scrypted, ONVIF) \u2014 no inference engine",
|
|
44
|
+
packageName: "@camstack/vision",
|
|
45
|
+
slot: "detector",
|
|
46
|
+
inputClasses: void 0,
|
|
47
|
+
outputClasses: ["person", "vehicle", "motion", "face"],
|
|
48
|
+
supportsCustomModels: false,
|
|
49
|
+
mayRequirePython: false,
|
|
50
|
+
defaultConfig: {}
|
|
51
|
+
};
|
|
52
|
+
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
|
53
|
+
async initialize(_ctx) {
|
|
54
|
+
}
|
|
55
|
+
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
|
56
|
+
async detect(_frame) {
|
|
57
|
+
return {
|
|
58
|
+
detections: [],
|
|
59
|
+
inferenceMs: 0,
|
|
60
|
+
modelId: "camera-native"
|
|
61
|
+
};
|
|
62
|
+
}
|
|
63
|
+
async shutdown() {
|
|
64
|
+
}
|
|
65
|
+
getConfigSchema() {
|
|
66
|
+
return {
|
|
67
|
+
sections: [
|
|
68
|
+
{
|
|
69
|
+
id: "info",
|
|
70
|
+
title: "Camera Native Detection",
|
|
71
|
+
description: "This addon forwards detections from native camera events (Frigate webhooks, Scrypted push notifications, ONVIF events). No configuration required.",
|
|
72
|
+
fields: []
|
|
73
|
+
}
|
|
74
|
+
]
|
|
75
|
+
};
|
|
76
|
+
}
|
|
77
|
+
getClassMap() {
|
|
78
|
+
return NATIVE_CLASS_MAP;
|
|
79
|
+
}
|
|
80
|
+
getModelCatalog() {
|
|
81
|
+
return [];
|
|
82
|
+
}
|
|
83
|
+
getAvailableModels() {
|
|
84
|
+
return [];
|
|
85
|
+
}
|
|
86
|
+
getActiveLabels() {
|
|
87
|
+
return NATIVE_LABELS;
|
|
88
|
+
}
|
|
89
|
+
async probe() {
|
|
90
|
+
return {
|
|
91
|
+
available: true,
|
|
92
|
+
runtime: "onnx",
|
|
93
|
+
// no runtime used; satisfies the type
|
|
94
|
+
device: "cpu",
|
|
95
|
+
capabilities: ["fp32"]
|
|
96
|
+
};
|
|
97
|
+
}
|
|
98
|
+
};
|
|
99
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../../src/addons/camera-native-detection/index.ts"],"sourcesContent":["import type {\n IDetectorProvider,\n IDetectionAddon,\n AddonManifest,\n AddonContext,\n FrameInput,\n DetectorOutput,\n ConfigUISchema,\n ClassMapDefinition,\n ProbeResult,\n ModelCatalogEntry,\n DetectionModel,\n LabelDefinition,\n} from '@camstack/types'\n\n// Labels reported by cameras natively (a superset across manufacturers)\nconst NATIVE_LABELS: readonly LabelDefinition[] = [\n { id: 'person', name: 'Person' },\n { id: 'vehicle', name: 'Vehicle' },\n { id: 'motion', name: 'Motion' },\n { id: 'face', name: 'Face' },\n] as const\n\nconst NATIVE_CLASS_MAP: ClassMapDefinition = { mapping: {}, preserveOriginal: true }\n\n/**\n * CameraNativeDetectionAddon\n *\n * A stub detector that wraps native camera event subscriptions (e.g. Frigate / Scrypted\n * webhooks). The detect() method always returns empty — real detections are delivered\n * asynchronously via external event subscriptions which should populate the pipeline\n * from outside this addon's detect() call.\n *\n * This addon exists so the pipeline can declare a 'detector' slot backed by camera events\n * without requiring any inference model.\n */\nexport default class CameraNativeDetectionAddon implements IDetectorProvider, IDetectionAddon {\n readonly id = 'camera-native-detection'\n readonly slot = 'detector' as const\n readonly inputClasses: readonly string[] | null = null\n readonly outputClasses = ['person', 'vehicle', 'motion', 'face'] as const\n readonly slotPriority = 5\n readonly manifest: AddonManifest = {\n id: 'camera-native-detection',\n name: 'Camera Native Detection',\n version: '0.1.0',\n description:\n 'Passthrough adapter for camera-native events (Frigate, Scrypted, ONVIF) — no inference engine',\n packageName: '@camstack/vision',\n slot: 'detector',\n inputClasses: undefined,\n outputClasses: ['person', 'vehicle', 'motion', 'face'],\n supportsCustomModels: false,\n mayRequirePython: 
false,\n defaultConfig: {},\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n async initialize(_ctx: AddonContext): Promise<void> {\n // No engine to initialize — events come from external subscriptions\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n async detect(_frame: FrameInput): Promise<DetectorOutput> {\n // Real detections are delivered via event subscriptions, not through detect().\n // This method intentionally returns empty.\n return {\n detections: [],\n inferenceMs: 0,\n modelId: 'camera-native',\n }\n }\n\n async shutdown(): Promise<void> {\n // Nothing to tear down\n }\n\n getConfigSchema(): ConfigUISchema {\n return {\n sections: [\n {\n id: 'info',\n title: 'Camera Native Detection',\n description:\n 'This addon forwards detections from native camera events (Frigate webhooks, ' +\n 'Scrypted push notifications, ONVIF events). No configuration required.',\n fields: [],\n },\n ],\n }\n }\n\n getClassMap(): ClassMapDefinition {\n return NATIVE_CLASS_MAP\n }\n\n getModelCatalog(): ModelCatalogEntry[] {\n return []\n }\n\n getAvailableModels(): DetectionModel[] {\n return []\n }\n\n getActiveLabels(): readonly LabelDefinition[] {\n return NATIVE_LABELS\n }\n\n async probe(): Promise<ProbeResult> {\n return {\n available: true,\n runtime: 'onnx', // no runtime used; satisfies the type\n device: 'cpu',\n capabilities: ['fp32'],\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,IAAM,gBAA4C;AAAA,EAChD,EAAE,IAAI,UAAU,MAAM,SAAS;AAAA,EAC/B,EAAE,IAAI,WAAW,MAAM,UAAU;AAAA,EACjC,EAAE,IAAI,UAAU,MAAM,SAAS;AAAA,EAC/B,EAAE,IAAI,QAAQ,MAAM,OAAO;AAC7B;AAEA,IAAM,mBAAuC,EAAE,SAAS,CAAC,GAAG,kBAAkB,KAAK;AAanF,IAAqB,6BAArB,MAA8F;AAAA,EACnF,KAAK;AAAA,EACL,OAAO;AAAA,EACP,eAAyC;AAAA,EACzC,gBAAgB,CAAC,UAAU,WAAW,UAAU,MAAM;AAAA,EACtD,eAAe;AAAA,EACf,WAA0B;AAAA,IACjC,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aACE;AAAA,IACF,aAAa;AAAA,IACb,MAAM;AAAA,IACN,cAAc;AAAA,IACd,eAAe,CAAC,UAAU,WAAW,UAAU,MAAM;AAAA,IACrD,sBAAsB;AAAA,IACtB,kBAAkB;AAAA,IAClB,eAAe,CAAC;AAAA,EAClB;AAAA;AAAA,EAGA,MAAM,WAAW,MAAmC;AAAA,EAEpD;AAAA;AAAA,EAGA,MAAM,OAAO,QAA6C;AAGxD,WAAO;AAAA,MACL,YAAY,CAAC;AAAA,MACb,aAAa;AAAA,MACb,SAAS;AAAA,IACX;AAAA,EACF;AAAA,EAEA,MAAM,WAA0B;AAAA,EAEhC;AAAA,EAEA,kBAAkC;AAChC,WAAO;AAAA,MACL,UAAU;AAAA,QACR;AAAA,UACE,IAAI;AAAA,UACJ,OAAO;AAAA,UACP,aACE;AAAA,UAEF,QAAQ,CAAC;AAAA,QACX;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA,EAEA,cAAkC;AAChC,WAAO;AAAA,EACT;AAAA,EAEA,kBAAuC;AACrC,WAAO,CAAC;AAAA,EACV;AAAA,EAEA,qBAAuC;AACrC,WAAO,CAAC;AAAA,EACV;AAAA,EAEA,kBAA8C;AAC5C,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAA8B;AAClC,WAAO;AAAA,MACL,WAAW;AAAA,MACX,SAAS;AAAA;AAAA,MACT,QAAQ;AAAA,MACR,cAAc,CAAC,MAAM;AAAA,IACvB;AAAA,EACF;AACF;","names":[]}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { ICropperProvider, IDetectionAddon, AddonManifest, AddonContext, CropInput, CropperOutput, ConfigUISchema, ClassMapDefinition, ModelCatalogEntry, DetectionModel, LabelDefinition, ProbeResult } from '@camstack/types';
|
|
2
|
+
|
|
3
|
+
declare class FaceDetectionAddon implements ICropperProvider, IDetectionAddon {
|
|
4
|
+
readonly id = "face-detection";
|
|
5
|
+
readonly slot: "cropper";
|
|
6
|
+
readonly inputClasses: readonly ["person"];
|
|
7
|
+
readonly outputClasses: readonly ["face"];
|
|
8
|
+
readonly slotPriority = 0;
|
|
9
|
+
readonly manifest: AddonManifest;
|
|
10
|
+
private engine;
|
|
11
|
+
private modelEntry;
|
|
12
|
+
private confidence;
|
|
13
|
+
initialize(ctx: AddonContext): Promise<void>;
|
|
14
|
+
crop(input: CropInput): Promise<CropperOutput>;
|
|
15
|
+
shutdown(): Promise<void>;
|
|
16
|
+
getConfigSchema(): ConfigUISchema;
|
|
17
|
+
getClassMap(): ClassMapDefinition;
|
|
18
|
+
getModelCatalog(): ModelCatalogEntry[];
|
|
19
|
+
getAvailableModels(): DetectionModel[];
|
|
20
|
+
getActiveLabels(): readonly LabelDefinition[];
|
|
21
|
+
probe(): Promise<ProbeResult>;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
export { FaceDetectionAddon as default };
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { ICropperProvider, IDetectionAddon, AddonManifest, AddonContext, CropInput, CropperOutput, ConfigUISchema, ClassMapDefinition, ModelCatalogEntry, DetectionModel, LabelDefinition, ProbeResult } from '@camstack/types';
|
|
2
|
+
|
|
3
|
+
declare class FaceDetectionAddon implements ICropperProvider, IDetectionAddon {
|
|
4
|
+
readonly id = "face-detection";
|
|
5
|
+
readonly slot: "cropper";
|
|
6
|
+
readonly inputClasses: readonly ["person"];
|
|
7
|
+
readonly outputClasses: readonly ["face"];
|
|
8
|
+
readonly slotPriority = 0;
|
|
9
|
+
readonly manifest: AddonManifest;
|
|
10
|
+
private engine;
|
|
11
|
+
private modelEntry;
|
|
12
|
+
private confidence;
|
|
13
|
+
initialize(ctx: AddonContext): Promise<void>;
|
|
14
|
+
crop(input: CropInput): Promise<CropperOutput>;
|
|
15
|
+
shutdown(): Promise<void>;
|
|
16
|
+
getConfigSchema(): ConfigUISchema;
|
|
17
|
+
getClassMap(): ClassMapDefinition;
|
|
18
|
+
getModelCatalog(): ModelCatalogEntry[];
|
|
19
|
+
getAvailableModels(): DetectionModel[];
|
|
20
|
+
getActiveLabels(): readonly LabelDefinition[];
|
|
21
|
+
probe(): Promise<ProbeResult>;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
export { FaceDetectionAddon as default };
|
|
@@ -0,0 +1,513 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
3
|
+
var __defProp = Object.defineProperty;
|
|
4
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
5
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
7
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __export = (target, all) => {
|
|
9
|
+
for (var name in all)
|
|
10
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
11
|
+
};
|
|
12
|
+
var __copyProps = (to, from, except, desc) => {
|
|
13
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
14
|
+
for (let key of __getOwnPropNames(from))
|
|
15
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
16
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
17
|
+
}
|
|
18
|
+
return to;
|
|
19
|
+
};
|
|
20
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
21
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
22
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
23
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
24
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
25
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
26
|
+
mod
|
|
27
|
+
));
|
|
28
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
|
+
|
|
30
|
+
// src/addons/face-detection/index.ts
|
|
31
|
+
var face_detection_exports = {};
|
|
32
|
+
__export(face_detection_exports, {
|
|
33
|
+
default: () => FaceDetectionAddon
|
|
34
|
+
});
|
|
35
|
+
module.exports = __toCommonJS(face_detection_exports);
|
|
36
|
+
var import_types = require("@camstack/types");
|
|
37
|
+
|
|
38
|
+
// src/shared/image-utils.ts
|
|
39
|
+
var import_sharp = __toESM(require("sharp"));
|
|
40
|
+
async function cropRegion(jpeg, roi) {
|
|
41
|
+
return (0, import_sharp.default)(jpeg).extract({
|
|
42
|
+
left: Math.round(roi.x),
|
|
43
|
+
top: Math.round(roi.y),
|
|
44
|
+
width: Math.round(roi.w),
|
|
45
|
+
height: Math.round(roi.h)
|
|
46
|
+
}).jpeg().toBuffer();
|
|
47
|
+
}
|
|
48
|
+
async function letterbox(jpeg, targetSize) {
|
|
49
|
+
const meta = await (0, import_sharp.default)(jpeg).metadata();
|
|
50
|
+
const originalWidth = meta.width ?? 0;
|
|
51
|
+
const originalHeight = meta.height ?? 0;
|
|
52
|
+
const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
|
|
53
|
+
const scaledWidth = Math.round(originalWidth * scale);
|
|
54
|
+
const scaledHeight = Math.round(originalHeight * scale);
|
|
55
|
+
const padX = Math.floor((targetSize - scaledWidth) / 2);
|
|
56
|
+
const padY = Math.floor((targetSize - scaledHeight) / 2);
|
|
57
|
+
const { data } = await (0, import_sharp.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
|
|
58
|
+
top: padY,
|
|
59
|
+
bottom: targetSize - scaledHeight - padY,
|
|
60
|
+
left: padX,
|
|
61
|
+
right: targetSize - scaledWidth - padX,
|
|
62
|
+
background: { r: 114, g: 114, b: 114 }
|
|
63
|
+
}).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
64
|
+
const numPixels = targetSize * targetSize;
|
|
65
|
+
const float32 = new Float32Array(3 * numPixels);
|
|
66
|
+
for (let i = 0; i < numPixels; i++) {
|
|
67
|
+
const srcBase = i * 3;
|
|
68
|
+
float32[0 * numPixels + i] = data[srcBase] / 255;
|
|
69
|
+
float32[1 * numPixels + i] = data[srcBase + 1] / 255;
|
|
70
|
+
float32[2 * numPixels + i] = data[srcBase + 2] / 255;
|
|
71
|
+
}
|
|
72
|
+
return { data: float32, scale, padX, padY, originalWidth, originalHeight };
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
// src/shared/postprocess/yolo.ts
|
|
76
|
+
function iou(a, b) {
|
|
77
|
+
const ax1 = a.x;
|
|
78
|
+
const ay1 = a.y;
|
|
79
|
+
const ax2 = a.x + a.w;
|
|
80
|
+
const ay2 = a.y + a.h;
|
|
81
|
+
const bx1 = b.x;
|
|
82
|
+
const by1 = b.y;
|
|
83
|
+
const bx2 = b.x + b.w;
|
|
84
|
+
const by2 = b.y + b.h;
|
|
85
|
+
const interX1 = Math.max(ax1, bx1);
|
|
86
|
+
const interY1 = Math.max(ay1, by1);
|
|
87
|
+
const interX2 = Math.min(ax2, bx2);
|
|
88
|
+
const interY2 = Math.min(ay2, by2);
|
|
89
|
+
const interW = Math.max(0, interX2 - interX1);
|
|
90
|
+
const interH = Math.max(0, interY2 - interY1);
|
|
91
|
+
const interArea = interW * interH;
|
|
92
|
+
if (interArea === 0) return 0;
|
|
93
|
+
const areaA = a.w * a.h;
|
|
94
|
+
const areaB = b.w * b.h;
|
|
95
|
+
const unionArea = areaA + areaB - interArea;
|
|
96
|
+
return unionArea === 0 ? 0 : interArea / unionArea;
|
|
97
|
+
}
|
|
98
|
+
function nms(boxes, iouThreshold) {
|
|
99
|
+
const indices = boxes.map((_, i) => i).sort((a, b) => boxes[b].score - boxes[a].score);
|
|
100
|
+
const kept = [];
|
|
101
|
+
const suppressed = /* @__PURE__ */ new Set();
|
|
102
|
+
for (const idx of indices) {
|
|
103
|
+
if (suppressed.has(idx)) continue;
|
|
104
|
+
kept.push(idx);
|
|
105
|
+
for (const other of indices) {
|
|
106
|
+
if (other === idx || suppressed.has(other)) continue;
|
|
107
|
+
if (iou(boxes[idx].bbox, boxes[other].bbox) > iouThreshold) {
|
|
108
|
+
suppressed.add(other);
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
return kept;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
// src/shared/postprocess/scrfd.ts
|
|
116
|
+
var STRIDES = [8, 16, 32];
|
|
117
|
+
var NUM_ANCHORS_PER_STRIDE = 2;
|
|
118
|
+
function generateAnchors(stride, inputSize) {
|
|
119
|
+
const featureSize = Math.ceil(inputSize / stride);
|
|
120
|
+
const anchors = [];
|
|
121
|
+
for (let y = 0; y < featureSize; y++) {
|
|
122
|
+
for (let x = 0; x < featureSize; x++) {
|
|
123
|
+
for (let k = 0; k < NUM_ANCHORS_PER_STRIDE; k++) {
|
|
124
|
+
anchors.push({
|
|
125
|
+
cx: (x + 0.5) * stride,
|
|
126
|
+
cy: (y + 0.5) * stride
|
|
127
|
+
});
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
return anchors;
|
|
132
|
+
}
|
|
133
|
+
function scrfdPostprocess(outputs, confidence, inputSize, originalWidth, originalHeight) {
|
|
134
|
+
const scaleX = originalWidth / inputSize;
|
|
135
|
+
const scaleY = originalHeight / inputSize;
|
|
136
|
+
const candidates = [];
|
|
137
|
+
for (const stride of STRIDES) {
|
|
138
|
+
const scoreKey = Object.keys(outputs).find((k) => k.includes(`score_${stride}`) || k.includes(`_${stride}_score`));
|
|
139
|
+
const bboxKey = Object.keys(outputs).find((k) => k.includes(`bbox_${stride}`) || k.includes(`_${stride}_bbox`));
|
|
140
|
+
const kpsKey = Object.keys(outputs).find((k) => k.includes(`kps_${stride}`) || k.includes(`_${stride}_kps`));
|
|
141
|
+
if (!scoreKey || !bboxKey) continue;
|
|
142
|
+
const scores = outputs[scoreKey];
|
|
143
|
+
const bboxes = outputs[bboxKey];
|
|
144
|
+
const kps = kpsKey ? outputs[kpsKey] : void 0;
|
|
145
|
+
const anchors = generateAnchors(stride, inputSize);
|
|
146
|
+
const n = anchors.length;
|
|
147
|
+
for (let i = 0; i < n; i++) {
|
|
148
|
+
const score = scores[i];
|
|
149
|
+
if (score < confidence) continue;
|
|
150
|
+
const anchor = anchors[i];
|
|
151
|
+
const x1 = anchor.cx - bboxes[i * 4] * stride;
|
|
152
|
+
const y1 = anchor.cy - bboxes[i * 4 + 1] * stride;
|
|
153
|
+
const x2 = anchor.cx + bboxes[i * 4 + 2] * stride;
|
|
154
|
+
const y2 = anchor.cy + bboxes[i * 4 + 3] * stride;
|
|
155
|
+
const bbox = {
|
|
156
|
+
x: x1 * scaleX,
|
|
157
|
+
y: y1 * scaleY,
|
|
158
|
+
w: (x2 - x1) * scaleX,
|
|
159
|
+
h: (y2 - y1) * scaleY
|
|
160
|
+
};
|
|
161
|
+
let landmarks;
|
|
162
|
+
if (kps) {
|
|
163
|
+
const pts = [];
|
|
164
|
+
for (let p = 0; p < 5; p++) {
|
|
165
|
+
pts.push({
|
|
166
|
+
x: (anchor.cx + kps[i * 10 + p * 2] * stride) * scaleX,
|
|
167
|
+
y: (anchor.cy + kps[i * 10 + p * 2 + 1] * stride) * scaleY
|
|
168
|
+
});
|
|
169
|
+
}
|
|
170
|
+
landmarks = pts;
|
|
171
|
+
}
|
|
172
|
+
candidates.push({ bbox, score, landmarks });
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
if (candidates.length === 0) return [];
|
|
176
|
+
const keptIndices = nms(candidates, 0.45);
|
|
177
|
+
return keptIndices.map((idx) => {
|
|
178
|
+
const { bbox, score, landmarks } = candidates[idx];
|
|
179
|
+
return {
|
|
180
|
+
class: "face",
|
|
181
|
+
originalClass: "face",
|
|
182
|
+
score,
|
|
183
|
+
bbox,
|
|
184
|
+
...landmarks ? { landmarks } : {}
|
|
185
|
+
};
|
|
186
|
+
});
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
// src/shared/engine-resolver.ts
|
|
190
|
+
var fs = __toESM(require("fs"));
|
|
191
|
+
var path2 = __toESM(require("path"));
|
|
192
|
+
|
|
193
|
+
// src/shared/node-engine.ts
|
|
194
|
+
var path = __toESM(require("path"));
|
|
195
|
+
var BACKEND_TO_PROVIDER = {
|
|
196
|
+
cpu: "cpu",
|
|
197
|
+
coreml: "coreml",
|
|
198
|
+
cuda: "cuda",
|
|
199
|
+
tensorrt: "tensorrt",
|
|
200
|
+
dml: "dml"
|
|
201
|
+
};
|
|
202
|
+
var BACKEND_TO_DEVICE = {
|
|
203
|
+
cpu: "cpu",
|
|
204
|
+
coreml: "gpu-mps",
|
|
205
|
+
cuda: "gpu-cuda",
|
|
206
|
+
tensorrt: "tensorrt"
|
|
207
|
+
};
|
|
208
|
+
var NodeInferenceEngine = class {
|
|
209
|
+
constructor(modelPath, backend) {
|
|
210
|
+
this.modelPath = modelPath;
|
|
211
|
+
this.backend = backend;
|
|
212
|
+
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
213
|
+
}
|
|
214
|
+
runtime = "onnx";
|
|
215
|
+
device;
|
|
216
|
+
session = null;
|
|
217
|
+
async initialize() {
|
|
218
|
+
const ort = await import("onnxruntime-node");
|
|
219
|
+
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
220
|
+
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
221
|
+
const sessionOptions = {
|
|
222
|
+
executionProviders: [provider]
|
|
223
|
+
};
|
|
224
|
+
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
225
|
+
}
|
|
226
|
+
async run(input, inputShape) {
|
|
227
|
+
if (!this.session) {
|
|
228
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
229
|
+
}
|
|
230
|
+
const ort = await import("onnxruntime-node");
|
|
231
|
+
const sess = this.session;
|
|
232
|
+
const inputName = sess.inputNames[0];
|
|
233
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
234
|
+
const feeds = { [inputName]: tensor };
|
|
235
|
+
const results = await sess.run(feeds);
|
|
236
|
+
const outputName = sess.outputNames[0];
|
|
237
|
+
const outputTensor = results[outputName];
|
|
238
|
+
return outputTensor.data;
|
|
239
|
+
}
|
|
240
|
+
async runMultiOutput(input, inputShape) {
|
|
241
|
+
if (!this.session) {
|
|
242
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
243
|
+
}
|
|
244
|
+
const ort = await import("onnxruntime-node");
|
|
245
|
+
const sess = this.session;
|
|
246
|
+
const inputName = sess.inputNames[0];
|
|
247
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
248
|
+
const feeds = { [inputName]: tensor };
|
|
249
|
+
const results = await sess.run(feeds);
|
|
250
|
+
const out = {};
|
|
251
|
+
for (const name of sess.outputNames) {
|
|
252
|
+
out[name] = results[name].data;
|
|
253
|
+
}
|
|
254
|
+
return out;
|
|
255
|
+
}
|
|
256
|
+
async dispose() {
|
|
257
|
+
this.session = null;
|
|
258
|
+
}
|
|
259
|
+
};
|
|
260
|
+
|
|
261
|
+
// src/shared/engine-resolver.ts
|
|
262
|
+
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
|
|
263
|
+
var BACKEND_TO_FORMAT = {
|
|
264
|
+
cpu: "onnx",
|
|
265
|
+
coreml: "coreml",
|
|
266
|
+
cuda: "onnx",
|
|
267
|
+
tensorrt: "onnx"
|
|
268
|
+
};
|
|
269
|
+
var RUNTIME_TO_FORMAT = {
|
|
270
|
+
onnx: "onnx",
|
|
271
|
+
coreml: "coreml",
|
|
272
|
+
openvino: "openvino",
|
|
273
|
+
tflite: "tflite",
|
|
274
|
+
pytorch: "pt"
|
|
275
|
+
};
|
|
276
|
+
function modelFilePath(modelsDir, modelEntry, format) {
|
|
277
|
+
const formatEntry = modelEntry.formats[format];
|
|
278
|
+
if (!formatEntry) {
|
|
279
|
+
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
280
|
+
}
|
|
281
|
+
const urlParts = formatEntry.url.split("/");
|
|
282
|
+
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
283
|
+
return path2.join(modelsDir, filename);
|
|
284
|
+
}
|
|
285
|
+
function modelExists(filePath) {
|
|
286
|
+
try {
|
|
287
|
+
return fs.existsSync(filePath);
|
|
288
|
+
} catch {
|
|
289
|
+
return false;
|
|
290
|
+
}
|
|
291
|
+
}
|
|
292
|
+
async function resolveEngine(options) {
|
|
293
|
+
const { runtime, backend, modelEntry, modelsDir, downloadModel } = options;
|
|
294
|
+
let selectedFormat;
|
|
295
|
+
let selectedBackend;
|
|
296
|
+
if (runtime === "auto") {
|
|
297
|
+
const available = await probeOnnxBackends();
|
|
298
|
+
let chosen = null;
|
|
299
|
+
for (const b of AUTO_BACKEND_PRIORITY) {
|
|
300
|
+
if (!available.includes(b)) continue;
|
|
301
|
+
const fmt = BACKEND_TO_FORMAT[b];
|
|
302
|
+
if (!fmt) continue;
|
|
303
|
+
if (!modelEntry.formats[fmt]) continue;
|
|
304
|
+
chosen = { backend: b, format: fmt };
|
|
305
|
+
break;
|
|
306
|
+
}
|
|
307
|
+
if (!chosen) {
|
|
308
|
+
throw new Error(
|
|
309
|
+
`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
|
|
310
|
+
);
|
|
311
|
+
}
|
|
312
|
+
selectedFormat = chosen.format;
|
|
313
|
+
selectedBackend = chosen.backend;
|
|
314
|
+
} else {
|
|
315
|
+
const fmt = RUNTIME_TO_FORMAT[runtime];
|
|
316
|
+
if (!fmt) {
|
|
317
|
+
throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
|
|
318
|
+
}
|
|
319
|
+
if (!modelEntry.formats[fmt]) {
|
|
320
|
+
throw new Error(
|
|
321
|
+
`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
|
|
322
|
+
);
|
|
323
|
+
}
|
|
324
|
+
selectedFormat = fmt;
|
|
325
|
+
selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
|
|
326
|
+
}
|
|
327
|
+
let modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
328
|
+
if (!modelExists(modelPath)) {
|
|
329
|
+
if (downloadModel) {
|
|
330
|
+
const formatEntry = modelEntry.formats[selectedFormat];
|
|
331
|
+
modelPath = await downloadModel(formatEntry.url, modelsDir);
|
|
332
|
+
} else {
|
|
333
|
+
throw new Error(
|
|
334
|
+
`resolveEngine: model file not found at ${modelPath} and no downloadModel function provided`
|
|
335
|
+
);
|
|
336
|
+
}
|
|
337
|
+
}
|
|
338
|
+
if (selectedFormat === "onnx" || selectedFormat === "coreml") {
|
|
339
|
+
const engine = new NodeInferenceEngine(modelPath, selectedBackend);
|
|
340
|
+
await engine.initialize();
|
|
341
|
+
return { engine, format: selectedFormat, modelPath };
|
|
342
|
+
}
|
|
343
|
+
const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
|
|
344
|
+
if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
|
|
345
|
+
const engine = new NodeInferenceEngine(fallbackPath, "cpu");
|
|
346
|
+
await engine.initialize();
|
|
347
|
+
return { engine, format: "onnx", modelPath: fallbackPath };
|
|
348
|
+
}
|
|
349
|
+
throw new Error(
|
|
350
|
+
`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine and no ONNX fallback is available`
|
|
351
|
+
);
|
|
352
|
+
}
|
|
353
|
+
// Detect which ONNX Runtime execution backends are usable on this host.
// Always reports "cpu"; adds "coreml", "cuda", or "tensorrt" when the
// onnxruntime-node module advertises the matching execution provider.
// On macOS, "coreml" is always included even when probing fails.
// Returns a de-duplicated array of backend identifiers.
async function probeOnnxBackends() {
  const detected = new Set(["cpu"]);
  const recognized = new Set(["coreml", "cuda", "tensorrt"]);
  try {
    // Best-effort probe: a missing/broken onnxruntime-node install is
    // swallowed and results in a CPU-only (or CPU+CoreML on macOS) answer.
    const ort = await import("onnxruntime-node");
    let providerNames = [];
    if (ort.env?.webgl?.disabled !== void 0) {
      providerNames = ort.InferenceSession?.getAvailableProviders?.() ?? [];
    }
    for (const providerName of providerNames) {
      // e.g. "CUDAExecutionProvider" -> "cuda"
      const key = providerName.toLowerCase().replace("executionprovider", "");
      if (recognized.has(key)) {
        detected.add(key);
      }
    }
  } catch {
    // onnxruntime-node unavailable; fall through with defaults only.
  }
  if (process.platform === "darwin") {
    detected.add("coreml");
  }
  return [...detected];
}
|
|
371
|
+
|
|
372
|
+
// src/addons/face-detection/index.ts
|
|
373
|
+
// The face detector emits exactly one synthetic class: "face".
var FACE_LABEL = { id: "face", name: "Face" };
var FACE_LABELS = [FACE_LABEL];
// Empty mapping with preserveOriginal set — presumably labels pass through
// unchanged; exact semantics are defined by the class-map consumer. TODO confirm.
var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
376
|
+
// Cropper-slot addon: runs an SCRFD face detector over "person" ROIs and
// emits "face" crops. Inference is delegated to an engine resolved by
// resolveEngine() from the configured model/runtime/backend.
var FaceDetectionAddon = class {
  id = "face-detection";
  slot = "cropper";
  // Consumes "person" boxes from an upstream detector; produces "face" boxes.
  inputClasses = ["person"];
  outputClasses = ["face"];
  slotPriority = 0;
  // Static manifest describing this addon (and its default config) to the host.
  manifest = {
    id: "face-detection",
    name: "Face Detection",
    version: "0.1.0",
    description: "SCRFD-based face detector \u2014 crops face regions from person detections",
    packageName: "@camstack/vision",
    slot: "cropper",
    inputClasses: ["person"],
    outputClasses: ["face"],
    supportsCustomModels: false,
    mayRequirePython: false,
    defaultConfig: {
      modelId: "scrfd-500m",
      runtime: "auto",
      backend: "cpu",
      confidence: 0.5
    }
  };
  // Inference engine; assigned in initialize(), disposed in shutdown().
  engine;
  // Catalog entry for the active model (provides inputSize, formats, id).
  modelEntry;
  // Minimum detection score; overridden from addon config in initialize().
  confidence = 0.5;
  // Resolve configured model + runtime/backend into a ready engine.
  // Throws if modelId is not found in FACE_DETECTION_MODELS or if
  // resolveEngine() cannot produce a compatible engine.
  async initialize(ctx) {
    const cfg = ctx.addonConfig;
    const modelId = cfg["modelId"] ?? "scrfd-500m";
    const runtime = cfg["runtime"] ?? "auto";
    const backend = cfg["backend"] ?? "cpu";
    this.confidence = cfg["confidence"] ?? 0.5;
    const entry = import_types.FACE_DETECTION_MODELS.find((m) => m.id === modelId);
    if (!entry) {
      throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
    }
    this.modelEntry = entry;
    const resolved = await resolveEngine({
      runtime,
      backend,
      modelEntry: entry,
      modelsDir: ctx.locationPaths.models
    });
    this.engine = resolved.engine;
  }
  // Detect faces within one person ROI: crop the region, letterbox it to the
  // model's square input, run inference (multi-output when the engine
  // supports it), then post-process SCRFD outputs back to crop coordinates.
  // Returns { crops, inferenceMs, modelId }.
  async crop(input) {
    const start = Date.now();
    const { width: inputW, height: inputH } = this.modelEntry.inputSize;
    // SCRFD input is square here; use the larger dimension as the side.
    const targetSize = Math.max(inputW, inputH);
    const personCrop = await cropRegion(input.frame.data, input.roi);
    const lb = await letterbox(personCrop, targetSize);
    // Prefer runMultiOutput (SCRFD has multiple heads); fall back to a
    // single-output run wrapped under the "output0" key.
    const engineWithMulti = this.engine;
    let outputs;
    if (typeof engineWithMulti.runMultiOutput === "function") {
      outputs = await engineWithMulti.runMultiOutput(lb.data, [1, 3, targetSize, targetSize]);
    } else {
      const single = await this.engine.run(lb.data, [1, 3, targetSize, targetSize]);
      outputs = { output0: single };
    }
    const crops = scrfdPostprocess(
      outputs,
      this.confidence,
      targetSize,
      lb.originalWidth,
      lb.originalHeight
    );
    return {
      crops,
      inferenceMs: Date.now() - start,
      modelId: this.modelEntry.id
    };
  }
  // Release the engine; safe to call before initialize() (engine undefined).
  async shutdown() {
    await this.engine?.dispose();
  }
  // UI configuration schema: model selector plus a confidence slider.
  getConfigSchema() {
    return {
      sections: [
        {
          id: "model",
          title: "Model",
          columns: 1,
          fields: [
            {
              key: "modelId",
              label: "Model",
              type: "model-selector",
              catalog: [...import_types.FACE_DETECTION_MODELS],
              allowCustom: false,
              allowConversion: false,
              acceptFormats: ["onnx", "coreml", "openvino"],
              requiredMetadata: ["inputSize", "labels", "outputFormat"],
              outputFormatHint: "ssd"
            }
          ]
        },
        {
          id: "thresholds",
          title: "Detection Thresholds",
          columns: 1,
          fields: [
            {
              key: "confidence",
              label: "Confidence Threshold",
              type: "slider",
              min: 0.1,
              max: 1,
              step: 0.05,
              default: 0.5
            }
          ]
        }
      ]
    };
  }
  getClassMap() {
    return FACE_CLASS_MAP;
  }
  // Copy of the built-in catalog (custom models are not supported).
  getModelCatalog() {
    return [...import_types.FACE_DETECTION_MODELS];
  }
  // No locally-installed models are tracked by this addon.
  getAvailableModels() {
    return [];
  }
  getActiveLabels() {
    return FACE_LABELS;
  }
  // Report availability; falls back to "onnx"/"cpu" when called before
  // initialize() has assigned the engine.
  async probe() {
    return {
      available: true,
      runtime: this.engine?.runtime ?? "onnx",
      device: this.engine?.device ?? "cpu",
      capabilities: ["fp32"]
    };
  }
};
|
|
513
|
+
//# sourceMappingURL=index.js.map
|