@camstack/addon-scene-intelligence 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,23 @@
+ import { ICamstackAddon, ISceneIntelligence, AddonManifest, AddonContext, CapabilityProviderMap, CropInput, ClassifierOutput, EmbeddingMetadata, EmbeddingFilter, VectorSearchResult, SceneStateResult } from '@camstack/types';
+
+ declare class SceneIntelligenceAddon implements ICamstackAddon, ISceneIntelligence {
+   readonly manifest: AddonManifest;
+   private logger;
+   private imageEncoder;
+   private textEncoder;
+   private sceneStateMachine;
+   private referenceStore;
+   private searchService;
+   private ctx;
+   initialize(context: AddonContext): Promise<void>;
+   shutdown(): Promise<void>;
+   getCapabilityProvider<K extends keyof CapabilityProviderMap>(name: K): CapabilityProviderMap[K] | null;
+   classify(input: CropInput): Promise<ClassifierOutput>;
+   embed(deviceId: string, crop: Buffer, metadata: EmbeddingMetadata): Promise<string>;
+   search(query: string, topK: number, filter?: EmbeddingFilter): Promise<VectorSearchResult[]>;
+   searchByImage(image: Buffer, topK: number, filter?: EmbeddingFilter): Promise<VectorSearchResult[]>;
+   evaluateSceneState(deviceId: string, crop: Buffer): Promise<SceneStateResult | null>;
+   private ensureTextEncoder;
+ }
+
+ export { SceneIntelligenceAddon };
package/dist/addon.js ADDED
@@ -0,0 +1,447 @@
+ "use strict";
+ var __create = Object.create;
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+   // If the importer is in node compatibility mode or this is not an ESM
+   // file that has been converted to a CommonJS file using a Babel-
+   // compatible transform (i.e. "__esModule" has not been set), then set
+   // "default" to the CommonJS "module.exports" for node compatibility.
+   isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+   mod
+ ));
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/addon.ts
+ var addon_exports = {};
+ __export(addon_exports, {
+   SceneIntelligenceAddon: () => SceneIntelligenceAddon
+ });
+ module.exports = __toCommonJS(addon_exports);
+
+ // src/clip/preprocessing.ts
+ var CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073];
+ var CLIP_STD = [0.26862954, 0.26130258, 0.27577711];
+ function preprocessForClip(rgb, srcWidth, srcHeight, targetWidth, targetHeight) {
+   const pixels = targetWidth * targetHeight;
+   const result = new Float32Array(3 * pixels);
+   for (let y = 0; y < targetHeight; y++) {
+     for (let x = 0; x < targetWidth; x++) {
+       const srcX = Math.min(Math.floor(x / targetWidth * srcWidth), srcWidth - 1);
+       const srcY = Math.min(Math.floor(y / targetHeight * srcHeight), srcHeight - 1);
+       const srcIdx = (srcY * srcWidth + srcX) * 3;
+       const dstIdx = y * targetWidth + x;
+       for (let c = 0; c < 3; c++) {
+         const val = (rgb[srcIdx + c] ?? 0) / 255;
+         const mean = CLIP_MEAN[c];
+         const std = CLIP_STD[c];
+         result[c * pixels + dstIdx] = (val - mean) / std;
+       }
+     }
+   }
+   return result;
+ }
+ function l2Normalize(vec) {
+   let norm = 0;
+   for (let i = 0; i < vec.length; i++) norm += vec[i] * vec[i];
+   norm = Math.sqrt(norm);
+   if (norm > 0) {
+     for (let i = 0; i < vec.length; i++) vec[i] /= norm;
+   }
+   return vec;
+ }
+
+ // src/clip/models.ts
+ var DEFAULT_CLIP_MODEL = "mobileclip-s0";
+ var CLIP_EMBEDDING_DIM = 512;
+ var MOBILECLIP_INPUT_SIZE = 256;
+ var VITB32_INPUT_SIZE = 224;
+ function getInputSize(modelId) {
+   if (modelId.startsWith("mobileclip")) return MOBILECLIP_INPUT_SIZE;
+   return VITB32_INPUT_SIZE;
+ }
+ function getTextModelId(imageModelId) {
+   return `${imageModelId}-text`;
+ }
+
+ // src/clip/image-encoder.ts
+ var ClipImageEncoder = class {
+   session = null;
+   // onnxruntime InferenceSession
+   modelId;
+   inputSize;
+   logger;
+   constructor(modelId, logger) {
+     this.modelId = modelId;
+     this.inputSize = getInputSize(modelId);
+     this.logger = logger;
+   }
+   async load(modelPath) {
+     const ort = await import("onnxruntime-node");
+     this.session = await ort.InferenceSession.create(modelPath, {
+       executionProviders: ["cpu"]
+     });
+     this.logger.info(`CLIP image encoder loaded: ${this.modelId} (${this.inputSize}\xD7${this.inputSize})`);
+   }
+   /**
+    * Encode a raw RGB buffer into a CLIP embedding.
+    * Caller must provide RGB buffer (use sharp to decode JPEG first).
+    */
+   async encode(rgb, width, height) {
+     if (!this.session) throw new Error("Image encoder not loaded");
+     const ort = await import("onnxruntime-node");
+     const input = preprocessForClip(rgb, width, height, this.inputSize, this.inputSize);
+     const tensor = new ort.Tensor("float32", input, [1, 3, this.inputSize, this.inputSize]);
+     const feeds = {};
+     const inputName = this.session.inputNames[0];
+     feeds[inputName] = tensor;
+     const results = await this.session.run(feeds);
+     const outputName = this.session.outputNames[0];
+     const output = results[outputName].data;
+     const embedding = new Float32Array(output.slice(0, CLIP_EMBEDDING_DIM));
+     return l2Normalize(embedding);
+   }
+   async dispose() {
+     if (this.session) {
+       await this.session.release?.();
+       this.session = null;
+     }
+   }
+ };
+
+ // src/clip/tokenizer.ts
+ var SOT_TOKEN = 49406;
+ var EOT_TOKEN = 49407;
+ var MAX_LENGTH = 77;
+ function tokenize(text) {
+   const tokens = [SOT_TOKEN];
+   const cleaned = text.toLowerCase().trim();
+   for (const char of cleaned) {
+     const code = char.codePointAt(0) ?? 0;
+     if (code < 256) {
+       tokens.push(code + 1);
+     }
+   }
+   tokens.push(EOT_TOKEN);
+   while (tokens.length < MAX_LENGTH) tokens.push(0);
+   if (tokens.length > MAX_LENGTH) tokens.length = MAX_LENGTH;
+   return BigInt64Array.from(tokens.map((t) => BigInt(t)));
+ }
+
+ // src/clip/text-encoder.ts
+ var ClipTextEncoder = class {
+   session = null;
+   logger;
+   constructor(logger) {
+     this.logger = logger;
+   }
+   async load(modelPath) {
+     const ort = await import("onnxruntime-node");
+     this.session = await ort.InferenceSession.create(modelPath, {
+       executionProviders: ["cpu"]
+     });
+     this.logger.info("CLIP text encoder loaded");
+   }
+   async encode(text) {
+     if (!this.session) throw new Error("Text encoder not loaded");
+     const ort = await import("onnxruntime-node");
+     const tokens = tokenize(text);
+     const tensor = new ort.Tensor("int64", tokens, [1, 77]);
+     const feeds = {};
+     const inputName = this.session.inputNames[0];
+     feeds[inputName] = tensor;
+     const results = await this.session.run(feeds);
+     const outputName = this.session.outputNames[0];
+     const output = results[outputName].data;
+     const embedding = new Float32Array(output.slice(0, CLIP_EMBEDDING_DIM));
+     return l2Normalize(embedding);
+   }
+   async dispose() {
+     if (this.session) {
+       await this.session.release?.();
+       this.session = null;
+     }
+   }
+ };
+
+ // src/scene-state/state-machine.ts
+ var import_types = require("@camstack/types");
+ var SceneStateMachine = class {
+   debounceFrames;
+   cameraStates = /* @__PURE__ */ new Map();
+   constructor(debounceFrames = 3) {
+     this.debounceFrames = debounceFrames;
+   }
+   evaluate(deviceId, embedding, referenceStates) {
+     if (referenceStates.length === 0) return null;
+     let cs = this.cameraStates.get(deviceId);
+     if (!cs) {
+       cs = { currentState: null, pendingState: null, pendingCount: 0, pendingConfidence: 0 };
+       this.cameraStates.set(deviceId, cs);
+     }
+     let bestState = null;
+     let bestScore = -1;
+     for (const ref of referenceStates) {
+       const similarity = (0, import_types.cosineSimilarity)(embedding, ref.referenceEmbedding);
+       if (similarity >= ref.threshold && similarity > bestScore) {
+         bestScore = similarity;
+         bestState = ref.name;
+       }
+     }
+     if (!bestState) return null;
+     if (bestState === cs.pendingState) {
+       cs.pendingCount++;
+       cs.pendingConfidence = bestScore;
+     } else {
+       cs.pendingState = bestState;
+       cs.pendingCount = 1;
+       cs.pendingConfidence = bestScore;
+     }
+     if (cs.pendingCount < this.debounceFrames) return null;
+     if (bestState === cs.currentState) return null;
+     const previousState = cs.currentState ?? "unknown";
+     cs.currentState = bestState;
+     cs.pendingState = null;
+     cs.pendingCount = 0;
+     return {
+       previousState,
+       currentState: bestState,
+       confidence: bestScore
+     };
+   }
+ };
+
+ // src/scene-state/reference-store.ts
+ var COLLECTION = "device-settings";
+ var ReferenceStore = class {
+   settingsBackend;
+   constructor(settingsBackend) {
+     this.settingsBackend = settingsBackend;
+   }
+   async getStatesForCamera(deviceId) {
+     const raw = await this.settingsBackend.get(COLLECTION, `scene-states:${deviceId}`);
+     if (!raw || !Array.isArray(raw)) return [];
+     return raw.map((s) => ({
+       id: s.id,
+       name: s.name,
+       referenceEmbedding: new Float32Array(s.referenceEmbedding),
+       threshold: s.threshold ?? 0.7
+     }));
+   }
+   async setStatesForCamera(deviceId, states) {
+     const serializable = states.map((s) => ({
+       id: s.id,
+       name: s.name,
+       referenceEmbedding: Array.from(s.referenceEmbedding),
+       threshold: s.threshold
+     }));
+     await this.settingsBackend.set(COLLECTION, `scene-states:${deviceId}`, serializable);
+   }
+ };
+
+ // src/search/search-service.ts
+ var INDEX_NAME = "clip-embeddings";
+ var SearchService = class {
+   backend;
+   index = null;
+   constructor(backend) {
+     this.backend = backend;
+   }
+   async initialize() {
+     this.index = await this.backend.openIndex(INDEX_NAME, CLIP_EMBEDDING_DIM);
+   }
+   async storeEmbedding(id, embedding, metadata) {
+     if (!this.index) return;
+     await this.index.insert(id, embedding, metadata);
+   }
+   async searchByVector(query, topK, filter) {
+     if (!this.index) return [];
+     return this.index.search(query, topK, filter);
+   }
+   async count() {
+     return this.index?.count() ?? 0;
+   }
+   async shutdown() {
+     await this.index?.flush();
+     this.index = null;
+   }
+ };
+
+ // src/addon.ts
+ var SceneIntelligenceAddon = class {
+   manifest = {
+     id: "scene-intelligence",
+     name: "Scene Intelligence",
+     version: "0.1.0",
+     description: "CLIP embeddings, semantic search, and scene state detection",
+     slot: "classifier",
+     inputClasses: [],
+     outputClasses: [],
+     labelOutputType: "classification",
+     passive: false,
+     capabilities: [
+       { name: "scene-intelligence", mode: "singleton" }
+     ],
+     defaultConfig: {
+       modelId: DEFAULT_CLIP_MODEL,
+       minConfidence: 0.5
+     }
+   };
+   logger;
+   imageEncoder = null;
+   textEncoder = null;
+   sceneStateMachine = null;
+   referenceStore = null;
+   searchService = null;
+   ctx = null;
+   async initialize(context) {
+     this.ctx = context;
+     this.logger = context.logger;
+     const modelId = context.addonConfig["modelId"] ?? DEFAULT_CLIP_MODEL;
+     if (context.models) {
+       try {
+         const imagePath = await context.models.ensure(modelId, "onnx");
+         this.imageEncoder = new ClipImageEncoder(modelId, this.logger);
+         await this.imageEncoder.load(imagePath);
+       } catch (err) {
+         this.logger.warn(`Failed to load CLIP image encoder: ${err}`);
+       }
+     }
+     this.sceneStateMachine = new SceneStateMachine(3);
+     if (context.settingsBackend) {
+       this.referenceStore = new ReferenceStore(context.settingsBackend);
+     }
+     if (context.embeddingsBackend) {
+       this.searchService = new SearchService(context.embeddingsBackend);
+       await this.searchService.initialize();
+     }
+     this.logger.info(`Scene Intelligence initialized (model=${modelId})`);
+   }
+   async shutdown() {
+     await this.imageEncoder?.dispose();
+     await this.textEncoder?.dispose();
+     await this.searchService?.shutdown();
+     this.imageEncoder = null;
+     this.textEncoder = null;
+     this.searchService = null;
+     this.sceneStateMachine = null;
+     this.referenceStore = null;
+     this.ctx = null;
+   }
+   getCapabilityProvider(name) {
+     if (name === "scene-intelligence") {
+       return this;
+     }
+     return null;
+   }
+   // --- IClassifierProvider (pipeline classifier slot) ---
+   async classify(input) {
+     if (!this.imageEncoder) {
+       return { classifications: [], inferenceMs: 0, modelId: "none" };
+     }
+     const start = performance.now();
+     try {
+       const sharp = await import("sharp").then((m) => m.default ?? m);
+       const { data, info } = await sharp(input.frame.data).extract({
+         left: Math.round(input.roi.x),
+         top: Math.round(input.roi.y),
+         width: Math.round(input.roi.w),
+         height: Math.round(input.roi.h)
+       }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+       const embedding = await this.imageEncoder.encode(data, info.width, info.height);
+       const inferenceMs = performance.now() - start;
+       if (this.searchService) {
+         const embeddingId = `${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+         this.searchService.storeEmbedding(embeddingId, embedding, {
+           timestamp: Date.now(),
+           className: input.parentDetection.class,
+           deviceId: ""
+         }).catch(() => {
+         });
+       }
+       return {
+         classifications: [{
+           class: "clip-embedding",
+           score: 1,
+           embedding,
+           metadata: { embeddingDim: embedding.length }
+         }],
+         inferenceMs,
+         modelId: this.manifest.defaultConfig?.["modelId"] ?? DEFAULT_CLIP_MODEL
+       };
+     } catch (err) {
+       this.logger.debug(`CLIP classify failed: ${err}`);
+       return { classifications: [], inferenceMs: performance.now() - start, modelId: "error" };
+     }
+   }
+   // --- ISceneIntelligence ---
+   async embed(deviceId, crop, metadata) {
+     if (!this.imageEncoder || !this.searchService) throw new Error("Not initialized");
+     const sharp = await import("sharp").then((m) => m.default ?? m);
+     const { data, info } = await sharp(crop).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+     const embedding = await this.imageEncoder.encode(data, info.width, info.height);
+     const id = `${deviceId}/${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+     await this.searchService.storeEmbedding(id, embedding, metadata);
+     return id;
+   }
+   async search(query, topK, filter) {
+     if (!this.searchService) return [];
+     const textEncoder = await this.ensureTextEncoder();
+     if (!textEncoder) return [];
+     const queryEmbedding = await textEncoder.encode(query);
+     const results = await this.searchService.searchByVector(queryEmbedding, topK, filter);
+     return [...results];
+   }
+   async searchByImage(image, topK, filter) {
+     if (!this.imageEncoder || !this.searchService) return [];
+     const sharp = await import("sharp").then((m) => m.default ?? m);
+     const { data, info } = await sharp(image).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+     const embedding = await this.imageEncoder.encode(data, info.width, info.height);
+     const results = await this.searchService.searchByVector(embedding, topK, filter);
+     return [...results];
+   }
+   async evaluateSceneState(deviceId, crop) {
+     if (!this.imageEncoder || !this.sceneStateMachine || !this.referenceStore) return null;
+     const sharp = await import("sharp").then((m) => m.default ?? m);
+     const { data, info } = await sharp(crop).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+     const embedding = await this.imageEncoder.encode(data, info.width, info.height);
+     const states = await this.referenceStore.getStatesForCamera(deviceId);
+     return this.sceneStateMachine.evaluate(deviceId, embedding, states);
+   }
+   // --- Private ---
+   async ensureTextEncoder() {
+     if (this.textEncoder) return this.textEncoder;
+     if (!this.ctx?.models) return null;
+     const modelId = this.ctx.addonConfig["modelId"] ?? DEFAULT_CLIP_MODEL;
+     const textModelId = getTextModelId(modelId);
+     try {
+       const textPath = await this.ctx.models.ensure(textModelId, "onnx");
+       this.textEncoder = new ClipTextEncoder(this.logger);
+       await this.textEncoder.load(textPath);
+       return this.textEncoder;
+     } catch (err) {
+       this.logger.warn(`Failed to load CLIP text encoder: ${err}`);
+       return null;
+     }
+   }
+ };
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   SceneIntelligenceAddon
+ });
+ //# sourceMappingURL=addon.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/addon.ts","../src/clip/preprocessing.ts","../src/clip/models.ts","../src/clip/image-encoder.ts","../src/clip/tokenizer.ts","../src/clip/text-encoder.ts","../src/scene-state/state-machine.ts","../src/scene-state/reference-store.ts","../src/search/search-service.ts"],"sourcesContent":["import type {\n ICamstackAddon, AddonManifest, AddonContext, CapabilityProviderMap,\n CropInput, ClassifierOutput, ISceneIntelligence, EmbeddingMetadata,\n EmbeddingFilter, VectorSearchResult, SceneStateResult, IScopedLogger,\n} from '@camstack/types'\nimport { ClipImageEncoder } from './clip/image-encoder.js'\nimport { ClipTextEncoder } from './clip/text-encoder.js'\nimport { DEFAULT_CLIP_MODEL, getTextModelId } from './clip/models.js'\nimport { SceneStateMachine } from './scene-state/state-machine.js'\nimport { ReferenceStore } from './scene-state/reference-store.js'\nimport { SearchService } from './search/search-service.js'\n\nexport class SceneIntelligenceAddon implements ICamstackAddon, ISceneIntelligence {\n readonly manifest: AddonManifest = {\n id: 'scene-intelligence',\n name: 'Scene Intelligence',\n version: '0.1.0',\n description: 'CLIP embeddings, semantic search, and scene state detection',\n slot: 'classifier',\n inputClasses: [],\n outputClasses: [],\n labelOutputType: 'classification',\n passive: false,\n capabilities: [\n { name: 'scene-intelligence', mode: 'singleton' as const },\n ],\n defaultConfig: {\n modelId: DEFAULT_CLIP_MODEL,\n minConfidence: 0.5,\n },\n }\n\n private logger!: IScopedLogger\n private imageEncoder: ClipImageEncoder | null = null\n private textEncoder: ClipTextEncoder | null = null\n private sceneStateMachine: SceneStateMachine | null = null\n private referenceStore: ReferenceStore | null = null\n private searchService: SearchService | null = null\n private ctx: AddonContext | null = null\n\n async initialize(context: AddonContext): Promise<void> {\n this.ctx = context\n this.logger = context.logger\n\n const modelId = (context.addonConfig['modelId'] as string) ?? 
DEFAULT_CLIP_MODEL\n\n // Load image encoder\n if (context.models) {\n try {\n const imagePath = await context.models.ensure(modelId, 'onnx')\n this.imageEncoder = new ClipImageEncoder(modelId, this.logger)\n await this.imageEncoder.load(imagePath)\n } catch (err) {\n this.logger.warn(`Failed to load CLIP image encoder: ${err}`)\n }\n }\n\n // Load text encoder lazily — only when search is requested to save memory\n\n // Scene state machine\n this.sceneStateMachine = new SceneStateMachine(3)\n\n // Reference store\n if (context.settingsBackend) {\n this.referenceStore = new ReferenceStore(context.settingsBackend)\n }\n\n // Search service\n if (context.embeddingsBackend) {\n this.searchService = new SearchService(context.embeddingsBackend)\n await this.searchService.initialize()\n }\n\n this.logger.info(`Scene Intelligence initialized (model=${modelId})`)\n }\n\n async shutdown(): Promise<void> {\n await this.imageEncoder?.dispose()\n await this.textEncoder?.dispose()\n await this.searchService?.shutdown()\n this.imageEncoder = null\n this.textEncoder = null\n this.searchService = null\n this.sceneStateMachine = null\n this.referenceStore = null\n this.ctx = null\n }\n\n getCapabilityProvider<K extends keyof CapabilityProviderMap>(\n name: K,\n ): CapabilityProviderMap[K] | null {\n if (name === 'scene-intelligence') {\n return this as unknown as CapabilityProviderMap[K]\n }\n return null\n }\n\n // --- IClassifierProvider (pipeline classifier slot) ---\n\n async classify(input: CropInput): Promise<ClassifierOutput> {\n if (!this.imageEncoder) {\n return { classifications: [], inferenceMs: 0, modelId: 'none' }\n }\n\n const start = performance.now()\n\n try {\n // Decode crop to RGB using sharp\n const sharp = await import('sharp').then(m => m.default ?? m)\n const { data, info } = await sharp(input.frame.data)\n .extract({\n left: Math.round(input.roi.x),\n top: Math.round(input.roi.y),\n width: Math.round(input.roi.w),\n height: Math.round(input.roi.h),\n })\n .removeAlpha()\n .raw()\n .toBuffer({ resolveWithObject: true })\n\n const embedding = await this.imageEncoder.encode(data, info.width, info.height)\n const inferenceMs = performance.now() - start\n\n // Store embedding async (fire-and-forget)\n if (this.searchService) {\n const embeddingId = `${Date.now()}-${Math.random().toString(36).slice(2, 8)}`\n this.searchService.storeEmbedding(embeddingId, embedding, {\n timestamp: Date.now(),\n className: input.parentDetection.class,\n deviceId: '',\n }).catch(() => {}) // fire-and-forget\n }\n\n return {\n classifications: [{\n class: 'clip-embedding',\n score: 1.0,\n embedding,\n metadata: { embeddingDim: embedding.length },\n }],\n inferenceMs,\n modelId: (this.manifest.defaultConfig?.['modelId'] as string) ?? DEFAULT_CLIP_MODEL,\n }\n } catch (err) {\n this.logger.debug(`CLIP classify failed: ${err}`)\n return { classifications: [], inferenceMs: performance.now() - start, modelId: 'error' }\n }\n }\n\n // --- ISceneIntelligence ---\n\n async embed(deviceId: string, crop: Buffer, metadata: EmbeddingMetadata): Promise<string> {\n if (!this.imageEncoder || !this.searchService) throw new Error('Not initialized')\n\n const sharp = await import('sharp').then(m => m.default ?? 
m)\n const { data, info } = await sharp(crop).removeAlpha().raw().toBuffer({ resolveWithObject: true })\n\n const embedding = await this.imageEncoder.encode(data, info.width, info.height)\n const id = `${deviceId}/${Date.now()}-${Math.random().toString(36).slice(2, 8)}`\n await this.searchService.storeEmbedding(id, embedding, metadata)\n return id\n }\n\n async search(query: string, topK: number, filter?: EmbeddingFilter): Promise<VectorSearchResult[]> {\n if (!this.searchService) return []\n\n const textEncoder = await this.ensureTextEncoder()\n if (!textEncoder) return []\n\n const queryEmbedding = await textEncoder.encode(query)\n const results = await this.searchService.searchByVector(queryEmbedding, topK, filter)\n return [...results]\n }\n\n async searchByImage(image: Buffer, topK: number, filter?: EmbeddingFilter): Promise<VectorSearchResult[]> {\n if (!this.imageEncoder || !this.searchService) return []\n\n const sharp = await import('sharp').then(m => m.default ?? m)\n const { data, info } = await sharp(image).removeAlpha().raw().toBuffer({ resolveWithObject: true })\n\n const embedding = await this.imageEncoder.encode(data, info.width, info.height)\n const results = await this.searchService.searchByVector(embedding, topK, filter)\n return [...results]\n }\n\n async evaluateSceneState(deviceId: string, crop: Buffer): Promise<SceneStateResult | null> {\n if (!this.imageEncoder || !this.sceneStateMachine || !this.referenceStore) return null\n\n const sharp = await import('sharp').then(m => m.default ?? m)\n const { data, info } = await sharp(crop).removeAlpha().raw().toBuffer({ resolveWithObject: true })\n\n const embedding = await this.imageEncoder.encode(data, info.width, info.height)\n const states = await this.referenceStore.getStatesForCamera(deviceId)\n return this.sceneStateMachine.evaluate(deviceId, embedding, states)\n }\n\n // --- Private ---\n\n private async ensureTextEncoder(): Promise<ClipTextEncoder | null> {\n if (this.textEncoder) return this.textEncoder\n\n if (!this.ctx?.models) return null\n\n const modelId = (this.ctx.addonConfig['modelId'] as string) ?? DEFAULT_CLIP_MODEL\n const textModelId = getTextModelId(modelId)\n\n try {\n const textPath = await this.ctx.models.ensure(textModelId, 'onnx')\n this.textEncoder = new ClipTextEncoder(this.logger)\n await this.textEncoder.load(textPath)\n return this.textEncoder\n } catch (err) {\n this.logger.warn(`Failed to load CLIP text encoder: ${err}`)\n return null\n }\n }\n}\n","// CLIP normalization constants (OpenAI CLIP)\nconst CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] as const\nconst CLIP_STD = [0.26862954, 0.26130258, 0.27577711] as const\n\n/**\n * Preprocess raw RGB buffer for CLIP inference.\n * Resizes (nearest-neighbor for speed), normalizes with CLIP mean/std, outputs NCHW Float32Array.\n * For production use, the caller should use sharp to resize the JPEG to targetW×targetH\n * before calling this with the raw RGB. 
This function handles normalization + layout.\n */\nexport function preprocessForClip(\n rgb: Buffer,\n srcWidth: number,\n srcHeight: number,\n targetWidth: number,\n targetHeight: number,\n): Float32Array {\n const pixels = targetWidth * targetHeight\n const result = new Float32Array(3 * pixels)\n\n for (let y = 0; y < targetHeight; y++) {\n for (let x = 0; x < targetWidth; x++) {\n // Nearest-neighbor sampling\n const srcX = Math.min(Math.floor((x / targetWidth) * srcWidth), srcWidth - 1)\n const srcY = Math.min(Math.floor((y / targetHeight) * srcHeight), srcHeight - 1)\n const srcIdx = (srcY * srcWidth + srcX) * 3\n const dstIdx = y * targetWidth + x\n\n for (let c = 0; c < 3; c++) {\n const val = (rgb[srcIdx + c] ?? 0) / 255.0\n const mean = CLIP_MEAN[c as 0 | 1 | 2]\n const std = CLIP_STD[c as 0 | 1 | 2]\n result[c * pixels + dstIdx] = (val - mean) / std\n }\n }\n }\n\n return result\n}\n\n/**\n * L2-normalize a vector in-place and return it.\n */\nexport function l2Normalize(vec: Float32Array): Float32Array {\n let norm = 0\n for (let i = 0; i < vec.length; i++) norm += vec[i]! * vec[i]!\n norm = Math.sqrt(norm)\n if (norm > 0) {\n for (let i = 0; i < vec.length; i++) vec[i]! /= norm\n }\n return vec\n}\n","import type { ModelCatalogEntry } from '@camstack/types'\n\nexport const CLIP_MODELS: readonly ModelCatalogEntry[] = [\n {\n id: 'mobileclip-s0',\n name: 'MobileCLIP-S0',\n description: 'Lightweight CLIP model optimized for edge devices (~60MB)',\n inputSize: { width: 256, height: 256 },\n labels: [],\n inputLayout: 'nchw',\n inputNormalization: 'none', // We handle CLIP normalization ourselves\n formats: {\n onnx: {\n url: 'https://huggingface.co/nicovdw/mobileclip-s0-onnx/resolve/main/image_encoder.onnx',\n sizeMB: 60,\n },\n },\n },\n {\n id: 'mobileclip-s0-text',\n name: 'MobileCLIP-S0 Text Encoder',\n description: 'Text encoder for MobileCLIP-S0',\n inputSize: { width: 0, height: 0 },\n labels: [],\n formats: {\n onnx: {\n url: 'https://huggingface.co/nicovdw/mobileclip-s0-onnx/resolve/main/text_encoder.onnx',\n sizeMB: 65,\n },\n },\n },\n {\n id: 'clip-vit-b32',\n name: 'CLIP ViT-B/32',\n description: 'Standard OpenAI CLIP model, higher accuracy (~340MB)',\n inputSize: { width: 224, height: 224 },\n labels: [],\n inputLayout: 'nchw',\n inputNormalization: 'none',\n formats: {\n onnx: {\n url: 'https://huggingface.co/nicovdw/clip-vit-b32-onnx/resolve/main/image_encoder.onnx',\n sizeMB: 340,\n },\n },\n },\n {\n id: 'clip-vit-b32-text',\n name: 'CLIP ViT-B/32 Text Encoder',\n description: 'Text encoder for CLIP ViT-B/32',\n inputSize: { width: 0, height: 0 },\n labels: [],\n formats: {\n onnx: {\n url: 'https://huggingface.co/nicovdw/clip-vit-b32-onnx/resolve/main/text_encoder.onnx',\n sizeMB: 170,\n },\n },\n },\n] as const\n\nexport const DEFAULT_CLIP_MODEL = 'mobileclip-s0'\nexport const CLIP_EMBEDDING_DIM = 512\nexport const MOBILECLIP_INPUT_SIZE = 256\nexport const VITB32_INPUT_SIZE = 224\n\nexport function getInputSize(modelId: string): number {\n if (modelId.startsWith('mobileclip')) return MOBILECLIP_INPUT_SIZE\n return VITB32_INPUT_SIZE\n}\n\nexport function getTextModelId(imageModelId: string): string {\n return `${imageModelId}-text`\n}\n","import type { IScopedLogger } from '@camstack/types'\nimport { preprocessForClip, l2Normalize } from './preprocessing.js'\nimport { getInputSize, CLIP_EMBEDDING_DIM } from './models.js'\n\nexport class ClipImageEncoder {\n private session: any | null = null // onnxruntime InferenceSession\n private readonly modelId: string\n 
private readonly inputSize: number\n private readonly logger: IScopedLogger\n\n constructor(modelId: string, logger: IScopedLogger) {\n this.modelId = modelId\n this.inputSize = getInputSize(modelId)\n this.logger = logger\n }\n\n async load(modelPath: string): Promise<void> {\n const ort = await import('onnxruntime-node')\n this.session = await ort.InferenceSession.create(modelPath, {\n executionProviders: ['cpu'],\n })\n this.logger.info(`CLIP image encoder loaded: ${this.modelId} (${this.inputSize}×${this.inputSize})`)\n }\n\n /**\n * Encode a raw RGB buffer into a CLIP embedding.\n * Caller must provide RGB buffer (use sharp to decode JPEG first).\n */\n async encode(rgb: Buffer, width: number, height: number): Promise<Float32Array> {\n if (!this.session) throw new Error('Image encoder not loaded')\n\n const ort = await import('onnxruntime-node')\n const input = preprocessForClip(rgb, width, height, this.inputSize, this.inputSize)\n const tensor = new ort.Tensor('float32', input, [1, 3, this.inputSize, this.inputSize])\n\n const feeds: Record<string, any> = {}\n const inputName = this.session.inputNames[0]\n feeds[inputName] = tensor\n\n const results = await this.session.run(feeds)\n const outputName = this.session.outputNames[0]\n const output = results[outputName].data as Float32Array\n\n // Extract embedding (first CLIP_EMBEDDING_DIM values) and L2-normalize\n const embedding = new Float32Array(output.slice(0, CLIP_EMBEDDING_DIM))\n return l2Normalize(embedding)\n }\n\n async dispose(): Promise<void> {\n if (this.session) {\n await this.session.release?.()\n this.session = null\n }\n }\n}\n","/**\n * Minimal CLIP tokenizer.\n * CLIP uses BPE with a 49152-token vocabulary.\n * For simplicity, we use a basic whitespace+lowercasing tokenizer that maps words\n * to token IDs. The full BPE vocab is loaded lazily from a JSON file.\n * For production, this should be replaced with a proper BPE implementation.\n */\n\nconst SOT_TOKEN = 49406 // <|startoftext|>\nconst EOT_TOKEN = 49407 // <|endoftext|>\nconst MAX_LENGTH = 77 // CLIP context length\n\n/**\n * Simple tokenization: split on whitespace, map to byte-level IDs.\n * This is a placeholder — real CLIP BPE tokenization requires the merges file.\n * For the ONNX text encoder, we pad/truncate to 77 tokens.\n */\nexport function tokenize(text: string): BigInt64Array {\n const tokens: number[] = [SOT_TOKEN]\n\n // Simple byte-level fallback: each character's codepoint becomes a token\n const cleaned = text.toLowerCase().trim()\n for (const char of cleaned) {\n const code = char.codePointAt(0) ?? 
0\n if (code < 256) {\n tokens.push(code + 1) // offset by 1 to avoid token 0\n }\n }\n\n tokens.push(EOT_TOKEN)\n\n // Pad or truncate to MAX_LENGTH\n while (tokens.length < MAX_LENGTH) tokens.push(0)\n if (tokens.length > MAX_LENGTH) tokens.length = MAX_LENGTH\n\n return BigInt64Array.from(tokens.map(t => BigInt(t)))\n}\n","import type { IScopedLogger } from '@camstack/types'\nimport { l2Normalize } from './preprocessing.js'\nimport { CLIP_EMBEDDING_DIM } from './models.js'\nimport { tokenize } from './tokenizer.js'\n\nexport class ClipTextEncoder {\n private session: any | null = null\n private readonly logger: IScopedLogger\n\n constructor(logger: IScopedLogger) {\n this.logger = logger\n }\n\n async load(modelPath: string): Promise<void> {\n const ort = await import('onnxruntime-node')\n this.session = await ort.InferenceSession.create(modelPath, {\n executionProviders: ['cpu'],\n })\n this.logger.info('CLIP text encoder loaded')\n }\n\n async encode(text: string): Promise<Float32Array> {\n if (!this.session) throw new Error('Text encoder not loaded')\n\n const ort = await import('onnxruntime-node')\n const tokens = tokenize(text)\n const tensor = new ort.Tensor('int64', tokens, [1, 77])\n\n const feeds: Record<string, any> = {}\n const inputName = this.session.inputNames[0]\n feeds[inputName] = tensor\n\n const results = await this.session.run(feeds)\n const outputName = this.session.outputNames[0]\n const output = results[outputName].data as Float32Array\n\n const embedding = new Float32Array(output.slice(0, CLIP_EMBEDDING_DIM))\n return l2Normalize(embedding)\n }\n\n async dispose(): Promise<void> {\n if (this.session) {\n await this.session.release?.()\n this.session = null\n }\n }\n}\n","import type { SceneStateResult } from '@camstack/types'\nimport { cosineSimilarity } from '@camstack/types'\n\nexport interface SceneStateDefinition {\n readonly id: string\n readonly name: string\n readonly referenceEmbedding: Float32Array\n readonly threshold: number\n}\n\ninterface CameraSceneState {\n currentState: string | null\n pendingState: string | null\n pendingCount: number\n pendingConfidence: number\n}\n\nexport class SceneStateMachine {\n private readonly debounceFrames: number\n private readonly cameraStates = new Map<string, CameraSceneState>()\n\n constructor(debounceFrames = 3) {\n this.debounceFrames = debounceFrames\n }\n\n evaluate(\n deviceId: string,\n embedding: Float32Array,\n referenceStates: readonly SceneStateDefinition[],\n ): SceneStateResult | null {\n if (referenceStates.length === 0) return null\n\n let cs = this.cameraStates.get(deviceId)\n if (!cs) {\n cs = { currentState: null, pendingState: null, pendingCount: 0, pendingConfidence: 0 }\n this.cameraStates.set(deviceId, cs)\n }\n\n // Find best matching state\n let bestState: string | null = null\n let bestScore = -1\n for (const ref of referenceStates) {\n const similarity = cosineSimilarity(embedding, ref.referenceEmbedding)\n if (similarity >= ref.threshold && similarity > bestScore) {\n bestScore = similarity\n bestState = ref.name\n }\n }\n\n if (!bestState) return null\n\n // Debounce: state must persist for N consecutive frames\n if (bestState === cs.pendingState) {\n cs.pendingCount++\n cs.pendingConfidence = bestScore\n } else {\n cs.pendingState = bestState\n cs.pendingCount = 1\n cs.pendingConfidence = bestScore\n }\n\n if (cs.pendingCount < this.debounceFrames) return null\n\n // State confirmed — check if it's actually a change\n if (bestState === cs.currentState) return null\n\n const 
previousState = cs.currentState ?? 'unknown'\n cs.currentState = bestState\n cs.pendingState = null\n cs.pendingCount = 0\n\n return {\n previousState,\n currentState: bestState,\n confidence: bestScore,\n }\n }\n}\n","import type { ISettingsBackend } from '@camstack/types'\nimport type { SceneStateDefinition } from './state-machine.js'\n\nconst COLLECTION = 'device-settings' as const\n\nexport class ReferenceStore {\n private readonly settingsBackend: ISettingsBackend\n\n constructor(settingsBackend: ISettingsBackend) {\n this.settingsBackend = settingsBackend\n }\n\n async getStatesForCamera(deviceId: string): Promise<SceneStateDefinition[]> {\n const raw = await this.settingsBackend.get(COLLECTION, `scene-states:${deviceId}`)\n if (!raw || !Array.isArray(raw)) return []\n return (raw as any[]).map(s => ({\n id: s.id,\n name: s.name,\n referenceEmbedding: new Float32Array(s.referenceEmbedding),\n threshold: s.threshold ?? 0.7,\n }))\n }\n\n async setStatesForCamera(deviceId: string, states: readonly SceneStateDefinition[]): Promise<void> {\n const serializable = states.map(s => ({\n id: s.id,\n name: s.name,\n referenceEmbedding: Array.from(s.referenceEmbedding),\n threshold: s.threshold,\n }))\n await this.settingsBackend.set(COLLECTION, `scene-states:${deviceId}`, serializable)\n }\n}\n","import type { IEmbeddingsBackend, IVectorIndex, VectorSearchResult, EmbeddingFilter, EmbeddingMetadata } from '@camstack/types'\nimport { CLIP_EMBEDDING_DIM } from '../clip/models.js'\n\nconst INDEX_NAME = 'clip-embeddings'\n\nexport class SearchService {\n private readonly backend: IEmbeddingsBackend\n private index: IVectorIndex | null = null\n\n constructor(backend: IEmbeddingsBackend) {\n this.backend = backend\n }\n\n async initialize(): Promise<void> {\n this.index = await this.backend.openIndex(INDEX_NAME, CLIP_EMBEDDING_DIM)\n }\n\n async storeEmbedding(\n id: string,\n embedding: Float32Array,\n metadata: EmbeddingMetadata,\n ): Promise<void> {\n if (!this.index) return\n await this.index.insert(id, embedding, metadata as unknown as Record<string, unknown>)\n }\n\n async searchByVector(\n query: Float32Array,\n topK: number,\n filter?: EmbeddingFilter,\n ): Promise<readonly VectorSearchResult[]> {\n if (!this.index) return []\n return this.index.search(query, topK, filter)\n }\n\n async count(): Promise<number> {\n return this.index?.count() ?? 
0\n }\n\n async shutdown(): Promise<void> {\n await this.index?.flush()\n this.index = null\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACCA,IAAM,YAAY,CAAC,YAAY,WAAW,UAAU;AACpD,IAAM,WAAW,CAAC,YAAY,YAAY,UAAU;AAQ7C,SAAS,kBACd,KACA,UACA,WACA,aACA,cACc;AACd,QAAM,SAAS,cAAc;AAC7B,QAAM,SAAS,IAAI,aAAa,IAAI,MAAM;AAE1C,WAAS,IAAI,GAAG,IAAI,cAAc,KAAK;AACrC,aAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AAEpC,YAAM,OAAO,KAAK,IAAI,KAAK,MAAO,IAAI,cAAe,QAAQ,GAAG,WAAW,CAAC;AAC5E,YAAM,OAAO,KAAK,IAAI,KAAK,MAAO,IAAI,eAAgB,SAAS,GAAG,YAAY,CAAC;AAC/E,YAAM,UAAU,OAAO,WAAW,QAAQ;AAC1C,YAAM,SAAS,IAAI,cAAc;AAEjC,eAAS,IAAI,GAAG,IAAI,GAAG,KAAK;AAC1B,cAAM,OAAO,IAAI,SAAS,CAAC,KAAK,KAAK;AACrC,cAAM,OAAO,UAAU,CAAc;AACrC,cAAM,MAAM,SAAS,CAAc;AACnC,eAAO,IAAI,SAAS,MAAM,KAAK,MAAM,QAAQ;AAAA,MAC/C;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAKO,SAAS,YAAY,KAAiC;AAC3D,MAAI,OAAO;AACX,WAAS,IAAI,GAAG,IAAI,IAAI,QAAQ,IAAK,SAAQ,IAAI,CAAC,IAAK,IAAI,CAAC;AAC5D,SAAO,KAAK,KAAK,IAAI;AACrB,MAAI,OAAO,GAAG;AACZ,aAAS,IAAI,GAAG,IAAI,IAAI,QAAQ,IAAK,KAAI,CAAC,KAAM;AAAA,EAClD;AACA,SAAO;AACT;;;ACUO,IAAM,qBAAqB;AAC3B,IAAM,qBAAqB;AAC3B,IAAM,wBAAwB;AAC9B,IAAM,oBAAoB;AAE1B,SAAS,aAAa,SAAyB;AACpD,MAAI,QAAQ,WAAW,YAAY,EAAG,QAAO;AAC7C,SAAO;AACT;AAEO,SAAS,eAAe,cAA8B;AAC3D,SAAO,GAAG,YAAY;AACxB;;;ACrEO,IAAM,mBAAN,MAAuB;AAAA,EACpB,UAAsB;AAAA;AAAA,EACb;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,SAAiB,QAAuB;AAClD,SAAK,UAAU;AACf,SAAK,YAAY,aAAa,OAAO;AACrC,SAAK,SAAS;AAAA,EAChB;AAAA,EAEA,MAAM,KAAK,WAAkC;AAC3C,UAAM,MAAM,MAAM,OAAO,kBAAkB;AAC3C,SAAK,UAAU,MAAM,IAAI,iBAAiB,OAAO,WAAW;AAAA,MAC1D,oBAAoB,CAAC,KAAK;AAAA,IAC5B,CAAC;AACD,SAAK,OAAO,KAAK,8BAA8B,KAAK,OAAO,KAAK,KAAK,SAAS,OAAI,KAAK,SAAS,GAAG;AAAA,EACrG;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,OAAO,KAAa,OAAe,QAAuC;AAC9E,QAAI,CAAC,KAAK,QAAS,OAAM,IAAI,MAAM,0BAA0B;AAE7D,UAAM,MAAM,MAAM,OAAO,kBAAkB;AAC3C,UAAM,QAAQ,kBAAkB,KAAK,OAAO,QAAQ,KAAK,WAAW,KAAK,SAAS;AAClF,UAAM,SAAS,IAAI,IAAI,OAAO,WAAW,OAAO,CAAC,GAAG,GAAG,KAAK,WAAW,KAAK,SAAS,CAAC;AAEtF,UAAM,QAA6B,CAAC;AACpC,UAAM,YAAY,KAAK,QAAQ,WAAW,CAAC;AAC3C,UAAM,SAAS,IAAI;AAEnB,UAAM,UAAU,MAAM,KAAK,QAAQ,IAAI,KAAK;AAC5C,UAAM,aAAa,KAAK,QAAQ,YAAY,CAAC;AAC7C,UAAM,SAAS,QAAQ,UAAU,EAAE;AAGnC,UAAM,YAAY,IAAI,aAAa,OAAO,MAAM,GAAG,kBAAkB,CAAC;AACtE,WAAO,YAAY,SAAS;AAAA,EAC9B;AAAA,EAEA,MAAM,UAAyB;AAC7B,QAAI,KAAK,SAAS;AAChB,YAAM,KAAK,QAAQ,UAAU;AAC7B,WAAK,UAAU;AAAA,IACjB;AAAA,EACF;AACF;;;AC9CA,IAAM,YAAY;AAClB,IAAM,YAAY;AAClB,IAAM,aAAa;AAOZ,SAAS,SAAS,MAA6B;AACpD,QAAM,SAAmB,CAAC,SAAS;AAGnC,QAAM,UAAU,KAAK,YAAY,EAAE,KAAK;AACxC,aAAW,QAAQ,SAAS;AAC1B,UAAM,OAAO,KAAK,YAAY,CAAC,KAAK;AACpC,QAAI,OAAO,KAAK;AACd,aAAO,KAAK,OAAO,CAAC;AAAA,IACtB;AAAA,EACF;AAEA,SAAO,KAAK,SAAS;AAGrB,SAAO,OAAO,SAAS,WAAY,QAAO,KAAK,CAAC;AAChD,MAAI,OAAO,SAAS,WAAY,QAAO,SAAS;AAEhD,SAAO,cAAc,KAAK,OAAO,IAAI,OAAK,OAAO,CAAC,CAAC,CAAC;AACtD;;;AC/BO,IAAM,kBAAN,MAAsB;AAAA,EACnB,UAAsB;AAAA,EACb;AAAA,EAEjB,YAAY,QAAuB;AACjC,SAAK,SAAS;AAAA,EAChB;AAAA,EAEA,MAAM,KAAK,WAAkC;AAC3C,UAAM,MAAM,MAAM,OAAO,kBAAkB;AAC3C,SAAK,UAAU,MAAM,IAAI,iBAAiB,OAAO,WAAW;AAAA,MAC1D,oBAAoB,CAAC,KAAK;AAAA,IAC5B,CAAC;AACD,SAAK,OAAO,KAAK,0BAA0B;AAAA,EAC7C;AAAA,EAEA,MAAM,OAAO,MAAqC;AAChD,QAAI,CAAC,KAAK,QAAS,OAAM,IAAI,MAAM,yBAAyB;AAE5D,UAAM,MAAM,MAAM,OAAO,kBAAkB;AAC3C,UAAM,SAAS,SAAS,IAAI;AAC5B,UAAM,SAAS,IAAI,IAAI,OAAO,SAAS,QAAQ,CAAC,GAAG,EAAE,CAAC;AAEtD,UAAM,QAA6B,CAAC;AACpC,UAAM,YAAY,KAAK,QAAQ,WAAW,CAAC;AAC3C,UAAM,SAAS,IAAI;AAEnB,UAAM,UAAU,MAAM,KAAK,QAAQ,IAAI,KAAK;AAC5C,UAAM,aAAa,KAAK,QAAQ,YAAY,CAAC;AAC7C,UAAM,SAAS,QAAQ,UAAU,EAAE;AAEnC,UAAM,YAAY,IAAI,aAAa,OAAO,MAAM,GAAG,kBAAkB,CAAC;AACtE,WAAO,YAAY,SAAS;AAAA,EAC9B;AAAA,EAEA,MAAM,UAAyB;AAC7B,QAAI,KAAK,SAAS;AAChB,YAAM,KAAK,QAAQ,UAAU;AAC7B,WAAK
,UAAU;AAAA,IACjB;AAAA,EACF;AACF;;;AC7CA,mBAAiC;AAgB1B,IAAM,oBAAN,MAAwB;AAAA,EACZ;AAAA,EACA,eAAe,oBAAI,IAA8B;AAAA,EAElE,YAAY,iBAAiB,GAAG;AAC9B,SAAK,iBAAiB;AAAA,EACxB;AAAA,EAEA,SACE,UACA,WACA,iBACyB;AACzB,QAAI,gBAAgB,WAAW,EAAG,QAAO;AAEzC,QAAI,KAAK,KAAK,aAAa,IAAI,QAAQ;AACvC,QAAI,CAAC,IAAI;AACP,WAAK,EAAE,cAAc,MAAM,cAAc,MAAM,cAAc,GAAG,mBAAmB,EAAE;AACrF,WAAK,aAAa,IAAI,UAAU,EAAE;AAAA,IACpC;AAGA,QAAI,YAA2B;AAC/B,QAAI,YAAY;AAChB,eAAW,OAAO,iBAAiB;AACjC,YAAM,iBAAa,+BAAiB,WAAW,IAAI,kBAAkB;AACrE,UAAI,cAAc,IAAI,aAAa,aAAa,WAAW;AACzD,oBAAY;AACZ,oBAAY,IAAI;AAAA,MAClB;AAAA,IACF;AAEA,QAAI,CAAC,UAAW,QAAO;AAGvB,QAAI,cAAc,GAAG,cAAc;AACjC,SAAG;AACH,SAAG,oBAAoB;AAAA,IACzB,OAAO;AACL,SAAG,eAAe;AAClB,SAAG,eAAe;AAClB,SAAG,oBAAoB;AAAA,IACzB;AAEA,QAAI,GAAG,eAAe,KAAK,eAAgB,QAAO;AAGlD,QAAI,cAAc,GAAG,aAAc,QAAO;AAE1C,UAAM,gBAAgB,GAAG,gBAAgB;AACzC,OAAG,eAAe;AAClB,OAAG,eAAe;AAClB,OAAG,eAAe;AAElB,WAAO;AAAA,MACL;AAAA,MACA,cAAc;AAAA,MACd,YAAY;AAAA,IACd;AAAA,EACF;AACF;;;AC1EA,IAAM,aAAa;AAEZ,IAAM,iBAAN,MAAqB;AAAA,EACT;AAAA,EAEjB,YAAY,iBAAmC;AAC7C,SAAK,kBAAkB;AAAA,EACzB;AAAA,EAEA,MAAM,mBAAmB,UAAmD;AAC1E,UAAM,MAAM,MAAM,KAAK,gBAAgB,IAAI,YAAY,gBAAgB,QAAQ,EAAE;AACjF,QAAI,CAAC,OAAO,CAAC,MAAM,QAAQ,GAAG,EAAG,QAAO,CAAC;AACzC,WAAQ,IAAc,IAAI,QAAM;AAAA,MAC9B,IAAI,EAAE;AAAA,MACN,MAAM,EAAE;AAAA,MACR,oBAAoB,IAAI,aAAa,EAAE,kBAAkB;AAAA,MACzD,WAAW,EAAE,aAAa;AAAA,IAC5B,EAAE;AAAA,EACJ;AAAA,EAEA,MAAM,mBAAmB,UAAkB,QAAwD;AACjG,UAAM,eAAe,OAAO,IAAI,QAAM;AAAA,MACpC,IAAI,EAAE;AAAA,MACN,MAAM,EAAE;AAAA,MACR,oBAAoB,MAAM,KAAK,EAAE,kBAAkB;AAAA,MACnD,WAAW,EAAE;AAAA,IACf,EAAE;AACF,UAAM,KAAK,gBAAgB,IAAI,YAAY,gBAAgB,QAAQ,IAAI,YAAY;AAAA,EACrF;AACF;;;AC7BA,IAAM,aAAa;AAEZ,IAAM,gBAAN,MAAoB;AAAA,EACR;AAAA,EACT,QAA6B;AAAA,EAErC,YAAY,SAA6B;AACvC,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,MAAM,aAA4B;AAChC,SAAK,QAAQ,MAAM,KAAK,QAAQ,UAAU,YAAY,kBAAkB;AAAA,EAC1E;AAAA,EAEA,MAAM,eACJ,IACA,WACA,UACe;AACf,QAAI,CAAC,KAAK,MAAO;AACjB,UAAM,KAAK,MAAM,OAAO,IAAI,WAAW,QAA8C;AAAA,EACvF;AAAA,EAEA,MAAM,eACJ,OACA,MACA,QACwC;AACxC,QAAI,CAAC,KAAK,MAAO,QAAO,CAAC;AACzB,WAAO,KAAK,MAAM,OAAO,OAAO,MAAM,MAAM;AAAA,EAC9C;AAAA,EAEA,MAAM,QAAyB;AAC7B,WAAO,KAAK,OAAO,MAAM,KAAK;AAAA,EAChC;AAAA,EAEA,MAAM,WAA0B;AAC9B,UAAM,KAAK,OAAO,MAAM;AACxB,SAAK,QAAQ;AAAA,EACf;AACF;;;AR/BO,IAAM,yBAAN,MAA2E;AAAA,EACvE,WAA0B;AAAA,IACjC,IAAI;AAAA,IACJ,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,IACb,MAAM;AAAA,IACN,cAAc,CAAC;AAAA,IACf,eAAe,CAAC;AAAA,IAChB,iBAAiB;AAAA,IACjB,SAAS;AAAA,IACT,cAAc;AAAA,MACZ,EAAE,MAAM,sBAAsB,MAAM,YAAqB;AAAA,IAC3D;AAAA,IACA,eAAe;AAAA,MACb,SAAS;AAAA,MACT,eAAe;AAAA,IACjB;AAAA,EACF;AAAA,EAEQ;AAAA,EACA,eAAwC;AAAA,EACxC,cAAsC;AAAA,EACtC,oBAA8C;AAAA,EAC9C,iBAAwC;AAAA,EACxC,gBAAsC;AAAA,EACtC,MAA2B;AAAA,EAEnC,MAAM,WAAW,SAAsC;AACrD,SAAK,MAAM;AACX,SAAK,SAAS,QAAQ;AAEtB,UAAM,UAAW,QAAQ,YAAY,SAAS,KAAgB;AAG9D,QAAI,QAAQ,QAAQ;AAClB,UAAI;AACF,cAAM,YAAY,MAAM,QAAQ,OAAO,OAAO,SAAS,MAAM;AAC7D,aAAK,eAAe,IAAI,iBAAiB,SAAS,KAAK,MAAM;AAC7D,cAAM,KAAK,aAAa,KAAK,SAAS;AAAA,MACxC,SAAS,KAAK;AACZ,aAAK,OAAO,KAAK,sCAAsC,GAAG,EAAE;AAAA,MAC9D;AAAA,IACF;AAKA,SAAK,oBAAoB,IAAI,kBAAkB,CAAC;AAGhD,QAAI,QAAQ,iBAAiB;AAC3B,WAAK,iBAAiB,IAAI,eAAe,QAAQ,eAAe;AAAA,IAClE;AAGA,QAAI,QAAQ,mBAAmB;AAC7B,WAAK,gBAAgB,IAAI,cAAc,QAAQ,iBAAiB;AAChE,YAAM,KAAK,cAAc,WAAW;AAAA,IACtC;AAEA,SAAK,OAAO,KAAK,yCAAyC,OAAO,GAAG;AAAA,EACtE;AAAA,EAEA,MAAM,WAA0B;AAC9B,UAAM,KAAK,cAAc,QAAQ;AACjC,UAAM,KAAK,aAAa,QAAQ;AAChC,UAAM,KAAK,eAAe,SAAS;AACnC,SAAK,eAAe;AACpB,SAAK,cAAc;AACnB,SAAK,gBAAgB;AACrB,SAAK,oBAAoB;AACzB,SAAK,iBAAiB;AACtB,SAAK,MAAM;AAAA,EACb;AAAA,EAEA,sBACE,MACiC;AACjC,QAAI,SAAS,sBAAsB;AACjC,aAAO;AAAA,IACT;AACA,WAAO;AAAA,EACT;AAAA;AAAA,EAIA,MAAM,SAAS,OAA6C;AAC1D,QAAI,CAAC,KAAK,cAAc;AACtB,aAAO,
EAAE,iBAAiB,CAAC,GAAG,aAAa,GAAG,SAAS,OAAO;AAAA,IAChE;AAEA,UAAM,QAAQ,YAAY,IAAI;AAE9B,QAAI;AAEF,YAAM,QAAQ,MAAM,OAAO,OAAO,EAAE,KAAK,OAAK,EAAE,WAAW,CAAC;AAC5D,YAAM,EAAE,MAAM,KAAK,IAAI,MAAM,MAAM,MAAM,MAAM,IAAI,EAChD,QAAQ;AAAA,QACP,MAAM,KAAK,MAAM,MAAM,IAAI,CAAC;AAAA,QAC5B,KAAK,KAAK,MAAM,MAAM,IAAI,CAAC;AAAA,QAC3B,OAAO,KAAK,MAAM,MAAM,IAAI,CAAC;AAAA,QAC7B,QAAQ,KAAK,MAAM,MAAM,IAAI,CAAC;AAAA,MAChC,CAAC,EACA,YAAY,EACZ,IAAI,EACJ,SAAS,EAAE,mBAAmB,KAAK,CAAC;AAEvC,YAAM,YAAY,MAAM,KAAK,aAAa,OAAO,MAAM,KAAK,OAAO,KAAK,MAAM;AAC9E,YAAM,cAAc,YAAY,IAAI,IAAI;AAGxC,UAAI,KAAK,eAAe;AACtB,cAAM,cAAc,GAAG,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,MAAM,GAAG,CAAC,CAAC;AAC3E,aAAK,cAAc,eAAe,aAAa,WAAW;AAAA,UACxD,WAAW,KAAK,IAAI;AAAA,UACpB,WAAW,MAAM,gBAAgB;AAAA,UACjC,UAAU;AAAA,QACZ,CAAC,EAAE,MAAM,MAAM;AAAA,QAAC,CAAC;AAAA,MACnB;AAEA,aAAO;AAAA,QACL,iBAAiB,CAAC;AAAA,UAChB,OAAO;AAAA,UACP,OAAO;AAAA,UACP;AAAA,UACA,UAAU,EAAE,cAAc,UAAU,OAAO;AAAA,QAC7C,CAAC;AAAA,QACD;AAAA,QACA,SAAU,KAAK,SAAS,gBAAgB,SAAS,KAAgB;AAAA,MACnE;AAAA,IACF,SAAS,KAAK;AACZ,WAAK,OAAO,MAAM,yBAAyB,GAAG,EAAE;AAChD,aAAO,EAAE,iBAAiB,CAAC,GAAG,aAAa,YAAY,IAAI,IAAI,OAAO,SAAS,QAAQ;AAAA,IACzF;AAAA,EACF;AAAA;AAAA,EAIA,MAAM,MAAM,UAAkB,MAAc,UAA8C;AACxF,QAAI,CAAC,KAAK,gBAAgB,CAAC,KAAK,cAAe,OAAM,IAAI,MAAM,iBAAiB;AAEhF,UAAM,QAAQ,MAAM,OAAO,OAAO,EAAE,KAAK,OAAK,EAAE,WAAW,CAAC;AAC5D,UAAM,EAAE,MAAM,KAAK,IAAI,MAAM,MAAM,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,SAAS,EAAE,mBAAmB,KAAK,CAAC;AAEjG,UAAM,YAAY,MAAM,KAAK,aAAa,OAAO,MAAM,KAAK,OAAO,KAAK,MAAM;AAC9E,UAAM,KAAK,GAAG,QAAQ,IAAI,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,MAAM,GAAG,CAAC,CAAC;AAC9E,UAAM,KAAK,cAAc,eAAe,IAAI,WAAW,QAAQ;AAC/D,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,OAAO,OAAe,MAAc,QAAyD;AACjG,QAAI,CAAC,KAAK,cAAe,QAAO,CAAC;AAEjC,UAAM,cAAc,MAAM,KAAK,kBAAkB;AACjD,QAAI,CAAC,YAAa,QAAO,CAAC;AAE1B,UAAM,iBAAiB,MAAM,YAAY,OAAO,KAAK;AACrD,UAAM,UAAU,MAAM,KAAK,cAAc,eAAe,gBAAgB,MAAM,MAAM;AACpF,WAAO,CAAC,GAAG,OAAO;AAAA,EACpB;AAAA,EAEA,MAAM,cAAc,OAAe,MAAc,QAAyD;AACxG,QAAI,CAAC,KAAK,gBAAgB,CAAC,KAAK,cAAe,QAAO,CAAC;AAEvD,UAAM,QAAQ,MAAM,OAAO,OAAO,EAAE,KAAK,OAAK,EAAE,WAAW,CAAC;AAC5D,UAAM,EAAE,MAAM,KAAK,IAAI,MAAM,MAAM,KAAK,EAAE,YAAY,EAAE,IAAI,EAAE,SAAS,EAAE,mBAAmB,KAAK,CAAC;AAElG,UAAM,YAAY,MAAM,KAAK,aAAa,OAAO,MAAM,KAAK,OAAO,KAAK,MAAM;AAC9E,UAAM,UAAU,MAAM,KAAK,cAAc,eAAe,WAAW,MAAM,MAAM;AAC/E,WAAO,CAAC,GAAG,OAAO;AAAA,EACpB;AAAA,EAEA,MAAM,mBAAmB,UAAkB,MAAgD;AACzF,QAAI,CAAC,KAAK,gBAAgB,CAAC,KAAK,qBAAqB,CAAC,KAAK,eAAgB,QAAO;AAElF,UAAM,QAAQ,MAAM,OAAO,OAAO,EAAE,KAAK,OAAK,EAAE,WAAW,CAAC;AAC5D,UAAM,EAAE,MAAM,KAAK,IAAI,MAAM,MAAM,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,SAAS,EAAE,mBAAmB,KAAK,CAAC;AAEjG,UAAM,YAAY,MAAM,KAAK,aAAa,OAAO,MAAM,KAAK,OAAO,KAAK,MAAM;AAC9E,UAAM,SAAS,MAAM,KAAK,eAAe,mBAAmB,QAAQ;AACpE,WAAO,KAAK,kBAAkB,SAAS,UAAU,WAAW,MAAM;AAAA,EACpE;AAAA;AAAA,EAIA,MAAc,oBAAqD;AACjE,QAAI,KAAK,YAAa,QAAO,KAAK;AAElC,QAAI,CAAC,KAAK,KAAK,OAAQ,QAAO;AAE9B,UAAM,UAAW,KAAK,IAAI,YAAY,SAAS,KAAgB;AAC/D,UAAM,cAAc,eAAe,OAAO;AAE1C,QAAI;AACF,YAAM,WAAW,MAAM,KAAK,IAAI,OAAO,OAAO,aAAa,MAAM;AACjE,WAAK,cAAc,IAAI,gBAAgB,KAAK,MAAM;AAClD,YAAM,KAAK,YAAY,KAAK,QAAQ;AACpC,aAAO,KAAK;AAAA,IACd,SAAS,KAAK;AACZ,WAAK,OAAO,KAAK,qCAAqC,GAAG,EAAE;AAC3D,aAAO;AAAA,IACT;AAAA,EACF;AACF;","names":[]}
package/dist/addon.mjs ADDED
@@ -0,0 +1,7 @@
+ import {
+   SceneIntelligenceAddon
+ } from "./chunk-KKV7JX7G.mjs";
+ export {
+   SceneIntelligenceAddon
+ };
+ //# sourceMappingURL=addon.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}