@blueharford/scrypted-spatial-awareness 0.6.24 → 0.6.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/main.nodejs.js +1 -1
- package/dist/main.nodejs.js.map +1 -1
- package/dist/plugin.zip +0 -0
- package/out/main.nodejs.js +203 -65
- package/out/main.nodejs.js.map +1 -1
- package/out/plugin.zip +0 -0
- package/package.json +1 -1
- package/src/core/spatial-reasoning.ts +108 -26
- package/src/core/topology-discovery.ts +84 -19
- package/src/core/tracking-engine.ts +42 -19
- package/src/main.ts +2 -2

package/out/plugin.zip
CHANGED

Binary file

package/package.json
CHANGED

package/src/core/spatial-reasoning.ts
CHANGED

@@ -436,26 +436,32 @@ export class SpatialReasoningEngine {
   private llmProvider: string | null = null;
   private llmProviderType: LlmProvider = 'unknown';

-
-  private
-
-
-
+  // Load balancing for multiple LLMs
+  private llmDevices: Array<{
+    device: ChatCompletionDevice;
+    id: string;
+    name: string;
+    providerType: LlmProvider;
+    lastUsed: number;
+    errorCount: number;
+  }> = [];
+  private llmIndex: number = 0;
+
+  /** Find ALL LLM devices for load balancing */
+  private async findAllLlmDevices(): Promise<void> {
+    if (this.llmSearched) return;
     this.llmSearched = true;

     try {
-      // Look for devices with ChatCompletion interface (the correct interface for @scrypted/llm)
       for (const id of Object.keys(systemManager.getSystemState())) {
         const device = systemManager.getDeviceById(id);
         if (!device) continue;

-        // Check if this device has ChatCompletion interface
-        // The @scrypted/llm plugin exposes ChatCompletion, not ObjectDetection
         if (device.interfaces?.includes('ChatCompletion')) {
           const deviceName = device.name?.toLowerCase() || '';
           const pluginId = (device as any).pluginId?.toLowerCase() || '';

-          // Identify the provider type for
+          // Identify the provider type for image format selection
           let providerType = 'Unknown';
           let providerTypeEnum: LlmProvider = 'unknown';


@@ -467,38 +473,104 @@ export class SpatialReasoningEngine {
             providerTypeEnum = 'anthropic';
           } else if (deviceName.includes('ollama')) {
             providerType = 'Ollama';
-            providerTypeEnum = 'openai';
+            providerTypeEnum = 'openai';
           } else if (deviceName.includes('gemini') || deviceName.includes('google')) {
             providerType = 'Google';
-            providerTypeEnum = 'openai';
+            providerTypeEnum = 'openai';
           } else if (deviceName.includes('llama')) {
             providerType = 'llama.cpp';
-            providerTypeEnum = 'openai';
+            providerTypeEnum = 'openai';
           } else if (pluginId.includes('@scrypted/llm') || pluginId.includes('llm')) {
             providerType = 'Scrypted LLM';
             providerTypeEnum = 'unknown';
           }

-          this.
-
-
-
-
-
-
+          this.llmDevices.push({
+            device: device as unknown as ChatCompletionDevice,
+            id,
+            name: device.name || id,
+            providerType: providerTypeEnum,
+            lastUsed: 0,
+            errorCount: 0,
+          });
+
+          this.console.log(`[LLM] Found ${providerType}: ${device.name}`);
         }
       }

-
-
-
-
+      if (this.llmDevices.length === 0) {
+        this.console.warn('[LLM] No ChatCompletion devices found. Install @scrypted/llm for enhanced descriptions.');
+      } else {
+        this.console.log(`[LLM] Load balancing across ${this.llmDevices.length} LLM device(s)`);
+      }
     } catch (e) {
-      this.console.error('[LLM] Error searching for LLM
+      this.console.error('[LLM] Error searching for LLM devices:', e);
     }
+  }

-
+  /** Get the next available LLM using round-robin with least-recently-used preference */
+  private async findLlmDevice(): Promise<ChatCompletionDevice | null> {
+    await this.findAllLlmDevices();
+
+    if (this.llmDevices.length === 0) return null;
+
+    // If only one LLM, just use it
+    if (this.llmDevices.length === 1) {
+      const llm = this.llmDevices[0];
+      this.llmDevice = llm.device;
+      this.llmProvider = llm.name;
+      this.llmProviderType = llm.providerType;
+      return llm.device;
+    }
+
+    // Find the LLM with the oldest lastUsed time (least recently used)
+    // Also prefer LLMs with fewer errors
+    let bestIndex = 0;
+    let bestScore = Infinity;
+
+    for (let i = 0; i < this.llmDevices.length; i++) {
+      const llm = this.llmDevices[i];
+      // Score = lastUsed time + (errorCount * 60 seconds penalty)
+      const score = llm.lastUsed + (llm.errorCount * 60000);
+      if (score < bestScore) {
+        bestScore = score;
+        bestIndex = i;
+      }
+    }
+
+    const selected = this.llmDevices[bestIndex];
+    this.llmDevice = selected.device;
+    this.llmProvider = selected.name;
+    this.llmProviderType = selected.providerType;
+
+    this.console.log(`[LLM] Selected: ${selected.name} (last used ${Math.round((Date.now() - selected.lastUsed) / 1000)}s ago, errors: ${selected.errorCount})`);
+
+    return selected.device;
+  }
+
+  /** Mark an LLM as used (for load balancing) */
+  private markLlmUsed(device: ChatCompletionDevice): void {
+    const llm = this.llmDevices.find(l => l.device === device);
+    if (llm) {
+      llm.lastUsed = Date.now();
+    }
+  }
+
+  /** Mark an LLM as having an error (for load balancing - will be deprioritized) */
+  private markLlmError(device: ChatCompletionDevice): void {
+    const llm = this.llmDevices.find(l => l.device === device);
+    if (llm) {
+      llm.errorCount++;
+      this.console.log(`[LLM] ${llm.name} error count: ${llm.errorCount}`);
+    }
+  }
+
+  /** Reset error count for an LLM after successful call */
+  private markLlmSuccess(device: ChatCompletionDevice): void {
+    const llm = this.llmDevices.find(l => l.device === device);
+    if (llm && llm.errorCount > 0) {
+      llm.errorCount = Math.max(0, llm.errorCount - 1); // Gradually reduce error count
+    }
   }

   /** Get the current LLM provider name */
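
The block above is the heart of this release: instead of binding to the first ChatCompletion device it finds, the engine now keeps every candidate in llmDevices and routes each request to the lowest-scoring entry. A minimal standalone sketch of that scoring rule (field names mirror the diff; the 60-second-per-error penalty comes straight from the llm.errorCount * 60000 term):

    // Sketch of the least-recently-used + error-penalty selection above.
    // LlmEntry mirrors the shape pushed into this.llmDevices, reduced to
    // the fields the scorer actually reads.
    interface LlmEntry {
      name: string;
      lastUsed: number;   // epoch ms of the last call; 0 if never used
      errorCount: number; // bumped on failure, decayed on success
    }

    function selectLlm(devices: LlmEntry[]): LlmEntry | null {
      let best: LlmEntry | null = null;
      let bestScore = Infinity;
      for (const llm of devices) {
        // Lowest score wins, so idle and healthy devices are preferred.
        const score = llm.lastUsed + llm.errorCount * 60_000;
        if (score < bestScore) {
          bestScore = score;
          best = llm;
        }
      }
      return best;
    }

    // B was used more recently, but A carries two errors (a 120s penalty),
    // so B still wins: 60_000 < 1_000 + 120_000.
    const pick = selectLlm([
      { name: 'A', lastUsed: 1_000, errorCount: 2 },
      { name: 'B', lastUsed: 60_000, errorCount: 0 },
    ]);
    console.log(pick?.name); // "B"

Note also that markLlmUsed fires before getChatCompletion is awaited (see the hunks below), so concurrent prefetches spread across devices instead of all selecting the same idle one.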

@@ -946,6 +1018,9 @@ export class SpatialReasoningEngine {
       messageContent = prompt;
     }

+    // Mark LLM as used for load balancing
+    this.markLlmUsed(llm);
+
     // Call LLM using ChatCompletion interface
     const result = await llm.getChatCompletion({
       messages: [

@@ -961,12 +1036,14 @@ export class SpatialReasoningEngine {
       // Extract description from ChatCompletion result
       const content = result?.choices?.[0]?.message?.content;
       if (content && typeof content === 'string') {
+        this.markLlmSuccess(llm);
         return content.trim();
       }

       return null;
     } catch (e) {
       this.console.warn('LLM description generation failed:', e);
+      this.markLlmError(llm);
       return null;
     }
   }
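
The bookkeeping above is deliberately asymmetric: markLlmError adds a full minute of scoring penalty per failure, while each markLlmSuccess walks only one back, so a flaky provider must string together successes to regain priority. A hypothetical trace under those rules:

    // Hypothetical errorCount trace; the rules are copied from the diff:
    // markLlmError -> errorCount++, markLlmSuccess -> max(0, errorCount - 1).
    let errorCount = 0;
    const markError = () => { errorCount++; };
    const markSuccess = () => { errorCount = Math.max(0, errorCount - 1); };

    markError(); markError(); markError(); // 3 failures -> 180s scoring penalty
    markSuccess();                         // 1 success  -> 120s penalty remains
    markSuccess(); markSuccess();          // fully recovered -> no penalty
    console.log(errorCount); // 0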

@@ -1049,6 +1126,9 @@ Examples of good descriptions:

 Generate ONLY the description, nothing else:`;

+    // Mark LLM as used for load balancing
+    this.markLlmUsed(llm);
+
     // Try multimodal format first, fall back to text-only if it fails
     let result: any;
     let usedVision = false;

@@ -1101,6 +1181,7 @@ Generate ONLY the description, nothing else:`;
       const content = result?.choices?.[0]?.message?.content;
       if (content && typeof content === 'string') {
         this.console.log(`[LLM] Got ${eventType} description (vision=${usedVision}): ${content.trim().substring(0, 50)}...`);
+        this.markLlmSuccess(llm);
         return content.trim();
       }


@@ -1108,6 +1189,7 @@ Generate ONLY the description, nothing else:`;
       return null;
     } catch (e) {
       this.console.warn(`[LLM] ${eventType} description generation failed:`, e);
+      this.markLlmError(llm);
       return null;
     }
   }

package/src/core/topology-discovery.ts
CHANGED

@@ -254,11 +254,19 @@ export class TopologyDiscoveryEngine {
     return this.config.discoveryIntervalHours > 0;
   }

-
-  private
-
-
-
+  // Load balancing for multiple LLMs
+  private llmDevices: Array<{
+    device: ChatCompletionDevice;
+    id: string;
+    name: string;
+    providerType: LlmProvider;
+    lastUsed: number;
+    errorCount: number;
+  }> = [];
+
+  /** Find ALL LLM devices for load balancing */
+  private async findAllLlmDevices(): Promise<void> {
+    if (this.llmSearched) return;
     this.llmSearched = true;

     try {

@@ -269,32 +277,84 @@ export class TopologyDiscoveryEngine {
         if (device.interfaces?.includes('ChatCompletion')) {
           const deviceName = device.name?.toLowerCase() || '';

-
+          let providerType: LlmProvider = 'unknown';
           if (deviceName.includes('openai') || deviceName.includes('gpt')) {
-
+            providerType = 'openai';
           } else if (deviceName.includes('anthropic') || deviceName.includes('claude')) {
-
+            providerType = 'anthropic';
           } else if (deviceName.includes('ollama') || deviceName.includes('gemini') ||
                      deviceName.includes('google') || deviceName.includes('llama')) {
-
-            this.llmProviderType = 'openai';
-          } else {
-            this.llmProviderType = 'unknown';
+            providerType = 'openai';
           }

-          this.
-
-
-
+          this.llmDevices.push({
+            device: device as unknown as ChatCompletionDevice,
+            id,
+            name: device.name || id,
+            providerType,
+            lastUsed: 0,
+            errorCount: 0,
+          });
+
+          this.console.log(`[Discovery] Found LLM: ${device.name}`);
         }
       }

-      this.
+      if (this.llmDevices.length === 0) {
+        this.console.warn('[Discovery] No ChatCompletion devices found. Vision-based discovery unavailable.');
+      } else {
+        this.console.log(`[Discovery] Load balancing across ${this.llmDevices.length} LLM device(s)`);
+      }
     } catch (e) {
-      this.console.error('[Discovery] Error finding LLM
+      this.console.error('[Discovery] Error finding LLM devices:', e);
+    }
+  }
+
+  /** Find LLM device with ChatCompletion interface - uses load balancing */
+  private async findLlmDevice(): Promise<ChatCompletionDevice | null> {
+    await this.findAllLlmDevices();
+
+    if (this.llmDevices.length === 0) return null;
+
+    // If only one LLM, just use it
+    if (this.llmDevices.length === 1) {
+      const llm = this.llmDevices[0];
+      this.llmDevice = llm.device;
+      this.llmProviderType = llm.providerType;
+      return llm.device;
     }

-
+    // Find the LLM with oldest lastUsed time (least recently used)
+    let bestIndex = 0;
+    let bestScore = Infinity;
+
+    for (let i = 0; i < this.llmDevices.length; i++) {
+      const llm = this.llmDevices[i];
+      const score = llm.lastUsed + (llm.errorCount * 60000);
+      if (score < bestScore) {
+        bestScore = score;
+        bestIndex = i;
+      }
+    }
+
+    const selected = this.llmDevices[bestIndex];
+    this.llmDevice = selected.device;
+    this.llmProviderType = selected.providerType;
+
+    // Mark as used
+    selected.lastUsed = Date.now();
+
+    this.console.log(`[Discovery] Selected LLM: ${selected.name}`);
+    return selected.device;
+  }
+
+  /** Mark an LLM as having an error */
+  private markLlmError(device: ChatCompletionDevice): void {
+    const llm = this.llmDevices.find(l => l.device === device);
+    if (llm) {
+      llm.errorCount++;
+      this.console.log(`[Discovery] ${llm.name} error count: ${llm.errorCount}`);
+    }
   }

   /** Get camera snapshot as ImageData */
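
Worth noting: this engine receives a trimmed copy of the spatial-reasoning load balancer rather than a shared helper. Selection stamps lastUsed inline, and there is no markLlmSuccess counterpart, so here errorCount only ever grows; a device that has failed N times keeps an N-minute scoring handicap for the life of the process:

    // In topology-discovery the penalty never decays (no success path):
    const penaltyMs = (errorCount: number) => errorCount * 60_000;
    penaltyMs(3); // 180_000 ms behind healthier devices, even after later successes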

@@ -495,6 +555,11 @@ Use the mount height to help estimate distances - objects at ground level will a

     // All formats failed
     if (lastError) {
+      // Track error for load balancing
+      if (llm) {
+        this.markLlmError(llm);
+      }
+
       const errorStr = String(lastError);
       if (isVisionFormatError(lastError)) {
         analysis.error = 'Vision/image analysis failed with all formats. Ensure you have a vision-capable model (e.g., gpt-4o, gpt-4-turbo, claude-3-sonnet) configured and the @scrypted/llm plugin supports vision.';

package/src/core/tracking-engine.ts
CHANGED

@@ -324,6 +324,16 @@ export class TrackingEngine {
     this.lastLlmCallTime = Date.now();
   }

+  /** Check and record LLM call - returns false if rate limited */
+  private tryLlmCall(): boolean {
+    if (!this.isLlmCallAllowed()) {
+      this.console.log('[LLM] Rate limited, skipping LLM call');
+      return false;
+    }
+    this.recordLlmCall();
+    return true;
+  }
+
   /** Get spatial reasoning result for movement (uses RAG + LLM) with debouncing and fallback */
   private async getSpatialDescription(
     tracked: TrackedObject,
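
Folding the check and the record into a single tryLlmCall() removes a hazard in the old shape, where isLlmCallAllowed and recordLlmCall were separate steps every call site had to pair correctly (the @@ -351,9 hunk below deletes one such standalone recordLlmCall). A sketch of the same check-and-record gate in isolation, assuming a debounce window in seconds like the llmDebounceInterval setting in main.ts, where 0 disables the limit:

    // Check-and-record rate gate; a sketch of the tryLlmCall() pattern.
    class LlmRateGate {
      private lastCallTime = -Infinity; // so the first call is always allowed
      constructor(private debounceSeconds: number) {}

      /** True (and the call is recorded) if outside the window, else false. */
      tryCall(now = Date.now()): boolean {
        const windowMs = this.debounceSeconds * 1000;
        if (windowMs > 0 && now - this.lastCallTime < windowMs) return false;
        this.lastCallTime = now;
        return true;
      }
    }

    const gate = new LlmRateGate(30);
    gate.tryCall(0);      // true  - first call
    gate.tryCall(10_000); // false - only 10s into the 30s window
    gate.tryCall(31_000); // true  - window elapsed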

@@ -337,8 +347,8 @@ export class TrackingEngine {

     try {
       // Check rate limiting - if not allowed, return null to use basic description
-      if (!this.
-        this.console.log('LLM rate-limited, using basic notification');
+      if (!this.tryLlmCall()) {
+        this.console.log('[Movement] LLM rate-limited, using basic notification');
         return null;
       }


@@ -351,9 +361,6 @@ export class TrackingEngine {
         }
       }

-      // Record that we're making an LLM call
-      this.recordLlmCall();
-
       // Use spatial reasoning engine for rich context-aware description
       // Apply timeout if fallback is enabled
       let result: SpatialReasoningResult;

@@ -567,16 +574,23 @@ export class TrackingEngine {
         spatialResult = await pendingDescription;
         this.console.log(`[Entry Alert] Prefetch result: "${spatialResult.description.substring(0, 60)}...", usedLlm=${spatialResult.usedLlm}`);
       } catch (e) {
-        this.console.warn(`[Entry Alert] Prefetch failed,
+        this.console.warn(`[Entry Alert] Prefetch failed, using basic description: ${e}`);
+        // Don't make another LLM call - use basic description (no mediaObject = no LLM)
         spatialResult = await this.spatialReasoning.generateEntryDescription(tracked, sighting.cameraId);
       }
       this.pendingDescriptions.delete(globalId);
     } else {
-      //
-      this.
-
-
-
+      // No prefetch available - only call LLM if rate limit allows
+      if (this.tryLlmCall()) {
+        this.console.log(`[Entry Alert] No prefetch, generating with LLM`);
+        const mediaObject = this.snapshotCache.get(globalId);
+        spatialResult = await this.spatialReasoning.generateEntryDescription(tracked, sighting.cameraId, mediaObject);
+        this.console.log(`[Entry Alert] Got description: "${spatialResult.description.substring(0, 60)}...", usedLlm=${spatialResult.usedLlm}`);
+      } else {
+        // Rate limited - use basic description (no LLM)
+        this.console.log(`[Entry Alert] Rate limited, using basic description`);
+        spatialResult = await this.spatialReasoning.generateEntryDescription(tracked, sighting.cameraId);
+      }
     }

     // Always use movement alert type for smart notifications with LLM descriptions

@@ -611,9 +625,9 @@ export class TrackingEngine {
       this.snapshotCache.set(globalId, mediaObject);
       this.console.log(`[Snapshot] Cached snapshot for ${globalId.slice(0, 8)} from ${cameraId}`);

-      // Start LLM analysis immediately in parallel (don't await)
+      // Start LLM analysis immediately in parallel (don't await) - but respect rate limits
       const tracked = this.state.getObject(globalId);
-      if (tracked && this.config.useLlmDescriptions) {
+      if (tracked && this.config.useLlmDescriptions && this.tryLlmCall()) {
         this.console.log(`[LLM Prefetch] Starting ${eventType} analysis for ${globalId.slice(0, 8)}`);
         const descriptionPromise = eventType === 'exit'
           ? this.spatialReasoning.generateExitDescription(tracked, cameraId, mediaObject)

@@ -627,6 +641,8 @@ export class TrackingEngine {
         }).catch(e => {
           this.console.warn(`[LLM Prefetch] Failed for ${globalId.slice(0, 8)}: ${e}`);
         });
+      } else if (tracked && this.config.useLlmDescriptions) {
+        this.console.log(`[LLM Prefetch] Skipped for ${globalId.slice(0, 8)} - rate limited`);
       }
     }
   }

@@ -706,16 +722,23 @@ export class TrackingEngine {
         spatialResult = await pendingDescription;
         this.console.log(`[Exit Alert] Prefetch result: "${spatialResult.description.substring(0, 60)}...", usedLlm=${spatialResult.usedLlm}`);
       } catch (e) {
-        this.console.warn(`[Exit Alert] Prefetch failed,
+        this.console.warn(`[Exit Alert] Prefetch failed, using basic description: ${e}`);
+        // Don't make another LLM call - use basic description
         spatialResult = await this.spatialReasoning.generateExitDescription(current, sighting.cameraId);
       }
       this.pendingDescriptions.delete(tracked.globalId);
     } else {
-      //
-      this.
-
-
-
+      // No prefetch available - only call LLM if rate limit allows
+      if (this.tryLlmCall()) {
+        this.console.log(`[Exit Alert] No prefetch, generating with LLM`);
+        const mediaObject = this.snapshotCache.get(tracked.globalId);
+        spatialResult = await this.spatialReasoning.generateExitDescription(current, sighting.cameraId, mediaObject);
+        this.console.log(`[Exit Alert] Got description: "${spatialResult.description.substring(0, 60)}...", usedLlm=${spatialResult.usedLlm}`);
+      } else {
+        // Rate limited - use basic description (no LLM)
+        this.console.log(`[Exit Alert] Rate limited, using basic description`);
+        spatialResult = await this.spatialReasoning.generateExitDescription(current, sighting.cameraId);
+      }
     }

     // Use movement alert for exit too - smart notifications with LLM descriptions
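
Entry and exit alerts now share the same three-way decision: consume the prefetched description if one is pending; otherwise generate with the LLM only if the rate gate opens; otherwise fall back to the basic description by omitting the media object (no media object means no LLM call). Condensed into a hypothetical helper, where generate stands in for generateEntryDescription / generateExitDescription:

    // Hypothetical condensation of the entry/exit alert flow above.
    async function describeEvent(
      pending: Promise<string> | undefined,
      tryLlmCall: () => boolean,
      snapshot: unknown | undefined,
      generate: (mediaObject?: unknown) => Promise<string>,
    ): Promise<string> {
      if (pending) {
        try {
          return await pending;    // prefetched LLM result
        } catch {
          return generate();       // prefetch failed: basic, no second LLM call
        }
      }
      if (tryLlmCall()) {
        return generate(snapshot); // no prefetch and the gate is open: call the LLM
      }
      return generate();           // rate limited: basic description
    }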

package/src/main.ts
CHANGED

@@ -130,8 +130,8 @@ export class SpatialAwarenessPlugin extends ScryptedDeviceBase
     llmDebounceInterval: {
       title: 'LLM Rate Limit (seconds)',
       type: 'number',
-      defaultValue:
-      description: 'Minimum time between LLM calls to prevent API
+      defaultValue: 30,
+      description: 'Minimum time between LLM calls to prevent API rate limiting. Increase if you get rate limit errors. (0 = no limit)',
       group: 'AI & Spatial Reasoning',
     },
     llmFallbackEnabled: {