@blueharford/scrypted-spatial-awareness 0.6.25 → 0.6.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/plugin.zip CHANGED
Binary file
@@ -35407,69 +35407,142 @@ class SpatialReasoningEngine {
  llmSearched = false;
  llmProvider = null;
  llmProviderType = 'unknown';
- /** Find or initialize LLM device - looks for ChatCompletion interface from @scrypted/llm plugin */
- async findLlmDevice() {
- if (this.llmDevice)
- return this.llmDevice;
+ // Load balancing for multiple LLMs
+ llmDevices = [];
+ llmIndex = 0;
+ /** Find LLM devices - uses configured device or auto-discovers all for load balancing */
+ async findAllLlmDevices() {
  if (this.llmSearched)
- return null; // Already searched and found nothing
+ return;
  this.llmSearched = true;
  try {
- // Look for devices with ChatCompletion interface (the correct interface for @scrypted/llm)
+ // If a specific LLM device is configured, use only that one
+ if (this.config.llmDeviceId) {
+ const device = systemManager.getDeviceById(this.config.llmDeviceId);
+ if (device?.interfaces?.includes('ChatCompletion')) {
+ const providerTypeEnum = this.detectProviderType(device);
+ this.llmDevices.push({
+ device: device,
+ id: this.config.llmDeviceId,
+ name: device.name || this.config.llmDeviceId,
+ providerType: providerTypeEnum,
+ lastUsed: 0,
+ errorCount: 0,
+ });
+ this.console.log(`[LLM] Using configured LLM: ${device.name}`);
+ return;
+ }
+ else {
+ this.console.warn(`[LLM] Configured device ${this.config.llmDeviceId} not found or doesn't support ChatCompletion`);
+ }
+ }
+ // Auto-discover all LLM devices for load balancing
  for (const id of Object.keys(systemManager.getSystemState())) {
  const device = systemManager.getDeviceById(id);
  if (!device)
  continue;
- // Check if this device has ChatCompletion interface
- // The @scrypted/llm plugin exposes ChatCompletion, not ObjectDetection
  if (device.interfaces?.includes('ChatCompletion')) {
- const deviceName = device.name?.toLowerCase() || '';
- const pluginId = device.pluginId?.toLowerCase() || '';
- // Identify the provider type for logging and image format selection
- let providerType = 'Unknown';
- let providerTypeEnum = 'unknown';
- if (deviceName.includes('openai') || deviceName.includes('gpt')) {
- providerType = 'OpenAI';
- providerTypeEnum = 'openai';
- }
- else if (deviceName.includes('anthropic') || deviceName.includes('claude')) {
- providerType = 'Anthropic';
- providerTypeEnum = 'anthropic';
- }
- else if (deviceName.includes('ollama')) {
- providerType = 'Ollama';
- providerTypeEnum = 'openai'; // Ollama uses OpenAI-compatible format
- }
- else if (deviceName.includes('gemini') || deviceName.includes('google')) {
- providerType = 'Google';
- providerTypeEnum = 'openai'; // Google uses OpenAI-compatible format
- }
- else if (deviceName.includes('llama')) {
- providerType = 'llama.cpp';
- providerTypeEnum = 'openai'; // llama.cpp uses OpenAI-compatible format
- }
- else if (pluginId.includes('@scrypted/llm') || pluginId.includes('llm')) {
- providerType = 'Scrypted LLM';
- providerTypeEnum = 'unknown';
- }
- this.llmDevice = device;
- this.llmProvider = `${providerType} (${device.name})`;
- this.llmProviderType = providerTypeEnum;
- this.console.log(`[LLM] Connected to ${providerType}: ${device.name}`);
- this.console.log(`[LLM] Plugin: ${pluginId || 'N/A'}`);
- this.console.log(`[LLM] Image format: ${providerTypeEnum}`);
- this.console.log(`[LLM] Interfaces: ${device.interfaces?.join(', ')}`);
- return this.llmDevice;
+ const providerTypeEnum = this.detectProviderType(device);
+ this.llmDevices.push({
+ device: device,
+ id,
+ name: device.name || id,
+ providerType: providerTypeEnum,
+ lastUsed: 0,
+ errorCount: 0,
+ });
+ this.console.log(`[LLM] Found: ${device.name}`);
  }
  }
- // If we get here, no LLM plugin found
- this.console.warn('[LLM] No ChatCompletion device found. Install @scrypted/llm for enhanced descriptions.');
- this.console.warn('[LLM] Falling back to rule-based descriptions using topology data.');
+ if (this.llmDevices.length === 0) {
+ this.console.warn('[LLM] No ChatCompletion devices found. Install @scrypted/llm for enhanced descriptions.');
+ }
+ else {
+ this.console.log(`[LLM] Load balancing across ${this.llmDevices.length} LLM device(s)`);
+ }
  }
  catch (e) {
- this.console.error('[LLM] Error searching for LLM device:', e);
+ this.console.error('[LLM] Error searching for LLM devices:', e);
+ }
+ }
+ /** Detect the provider type from device name */
+ detectProviderType(device) {
+ const deviceName = device.name?.toLowerCase() || '';
+ const pluginId = device.pluginId?.toLowerCase() || '';
+ if (deviceName.includes('openai') || deviceName.includes('gpt')) {
+ return 'openai';
+ }
+ else if (deviceName.includes('anthropic') || deviceName.includes('claude')) {
+ return 'anthropic';
+ }
+ else if (deviceName.includes('ollama')) {
+ return 'openai'; // Ollama uses OpenAI-compatible format
+ }
+ else if (deviceName.includes('gemini') || deviceName.includes('google')) {
+ return 'openai'; // Google uses OpenAI-compatible format
+ }
+ else if (deviceName.includes('llama')) {
+ return 'openai'; // llama.cpp uses OpenAI-compatible format
+ }
+ else if (pluginId.includes('@scrypted/llm') || pluginId.includes('llm')) {
+ return 'unknown';
+ }
+ return 'unknown';
+ }
+ /** Get the next available LLM using round-robin with least-recently-used preference */
+ async findLlmDevice() {
+ await this.findAllLlmDevices();
+ if (this.llmDevices.length === 0)
+ return null;
+ // If only one LLM, just use it
+ if (this.llmDevices.length === 1) {
+ const llm = this.llmDevices[0];
+ this.llmDevice = llm.device;
+ this.llmProvider = llm.name;
+ this.llmProviderType = llm.providerType;
+ return llm.device;
+ }
+ // Find the LLM with the oldest lastUsed time (least recently used)
+ // Also prefer LLMs with fewer errors
+ let bestIndex = 0;
+ let bestScore = Infinity;
+ for (let i = 0; i < this.llmDevices.length; i++) {
+ const llm = this.llmDevices[i];
+ // Score = lastUsed time + (errorCount * 60 seconds penalty)
+ const score = llm.lastUsed + (llm.errorCount * 60000);
+ if (score < bestScore) {
+ bestScore = score;
+ bestIndex = i;
+ }
+ }
+ const selected = this.llmDevices[bestIndex];
+ this.llmDevice = selected.device;
+ this.llmProvider = selected.name;
+ this.llmProviderType = selected.providerType;
+ this.console.log(`[LLM] Selected: ${selected.name} (last used ${Math.round((Date.now() - selected.lastUsed) / 1000)}s ago, errors: ${selected.errorCount})`);
+ return selected.device;
+ }
+ /** Mark an LLM as used (for load balancing) */
+ markLlmUsed(device) {
+ const llm = this.llmDevices.find(l => l.device === device);
+ if (llm) {
+ llm.lastUsed = Date.now();
+ }
+ }
+ /** Mark an LLM as having an error (for load balancing - will be deprioritized) */
+ markLlmError(device) {
+ const llm = this.llmDevices.find(l => l.device === device);
+ if (llm) {
+ llm.errorCount++;
+ this.console.log(`[LLM] ${llm.name} error count: ${llm.errorCount}`);
+ }
+ }
+ /** Reset error count for an LLM after successful call */
+ markLlmSuccess(device) {
+ const llm = this.llmDevices.find(l => l.device === device);
+ if (llm && llm.errorCount > 0) {
+ llm.errorCount = Math.max(0, llm.errorCount - 1); // Gradually reduce error count
  }
- return null;
  }
  /** Get the current LLM provider name */
  getLlmProvider() {
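The hunk above introduces the load balancer's full surface: `findLlmDevice()` picks the least-recently-used ChatCompletion device (each recorded error adds a 60-second penalty to its score), while `markLlmUsed`, `markLlmSuccess`, and `markLlmError` feed usage and outcome data back into the pool. A minimal sketch of how a caller is expected to thread these together, mirroring the call sites patched in the hunks below; the `LlmBalancer` interface, the `describeScene` wrapper, and the message shape are illustrative assumptions, not code from the plugin:

```ts
// Structural view of the engine methods used below (shape inferred from this diff).
interface LlmBalancer {
    findLlmDevice(): Promise<any | null>;
    markLlmUsed(device: any): void;
    markLlmSuccess(device: any): void;
    markLlmError(device: any): void;
}

// Hypothetical caller: pick a device, record usage, and report the outcome so the
// next findLlmDevice() call can route around a failing provider.
async function describeScene(engine: LlmBalancer, prompt: string): Promise<string | null> {
    const llm = await engine.findLlmDevice();   // LRU pick with error penalty
    if (!llm)
        return null;                            // no ChatCompletion devices installed
    engine.markLlmUsed(llm);                    // bump lastUsed before the call
    try {
        const result = await llm.getChatCompletion({
            messages: [{ role: 'user', content: prompt }],
        });
        const content = result?.choices?.[0]?.message?.content;
        if (typeof content === 'string') {
            engine.markLlmSuccess(llm);         // decays errorCount by one
            return content.trim();
        }
        return null;
    }
    catch (e) {
        engine.markLlmError(llm);               // adds a 60 s penalty to future scoring
        return null;
    }
}
```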
@@ -35804,6 +35877,8 @@ class SpatialReasoningEngine {
  // Fallback to text-only if image conversion failed
  messageContent = prompt;
  }
+ // Mark LLM as used for load balancing
+ this.markLlmUsed(llm);
  // Call LLM using ChatCompletion interface
  const result = await llm.getChatCompletion({
  messages: [
@@ -35818,12 +35893,14 @@ class SpatialReasoningEngine {
  // Extract description from ChatCompletion result
  const content = result?.choices?.[0]?.message?.content;
  if (content && typeof content === 'string') {
+ this.markLlmSuccess(llm);
  return content.trim();
  }
  return null;
  }
  catch (e) {
  this.console.warn('LLM description generation failed:', e);
+ this.markLlmError(llm);
  return null;
  }
  }
@@ -35892,6 +35969,8 @@ Examples of good descriptions:
  - "Landscaper with leaf blower heading to work truck"

  Generate ONLY the description, nothing else:`;
+ // Mark LLM as used for load balancing
+ this.markLlmUsed(llm);
  // Try multimodal format first, fall back to text-only if it fails
  let result;
  let usedVision = false;
@@ -35942,6 +36021,7 @@ Generate ONLY the description, nothing else:`;
  const content = result?.choices?.[0]?.message?.content;
  if (content && typeof content === 'string') {
  this.console.log(`[LLM] Got ${eventType} description (vision=${usedVision}): ${content.trim().substring(0, 50)}...`);
+ this.markLlmSuccess(llm);
  return content.trim();
  }
  this.console.warn(`[LLM] No content in response for ${eventType}`);
@@ -35949,6 +36029,7 @@ Generate ONLY the description, nothing else:`;
  }
  catch (e) {
  this.console.warn(`[LLM] ${eventType} description generation failed:`, e);
+ this.markLlmError(llm);
  return null;
  }
  }
@@ -36386,12 +36467,12 @@ class TopologyDiscoveryEngine {
  isEnabled() {
  return this.config.discoveryIntervalHours > 0;
  }
- /** Find LLM device with ChatCompletion interface */
- async findLlmDevice() {
- if (this.llmDevice)
- return this.llmDevice;
+ // Load balancing for multiple LLMs
+ llmDevices = [];
+ /** Find ALL LLM devices for load balancing */
+ async findAllLlmDevices() {
  if (this.llmSearched)
- return null;
+ return;
  this.llmSearched = true;
  try {
  for (const id of Object.keys(systemManager.getSystemState())) {
@@ -36400,33 +36481,77 @@ class TopologyDiscoveryEngine {
  continue;
  if (device.interfaces?.includes('ChatCompletion')) {
  const deviceName = device.name?.toLowerCase() || '';
- // Detect provider type for image format selection
+ let providerType = 'unknown';
  if (deviceName.includes('openai') || deviceName.includes('gpt')) {
- this.llmProviderType = 'openai';
+ providerType = 'openai';
  }
  else if (deviceName.includes('anthropic') || deviceName.includes('claude')) {
- this.llmProviderType = 'anthropic';
+ providerType = 'anthropic';
  }
  else if (deviceName.includes('ollama') || deviceName.includes('gemini') ||
  deviceName.includes('google') || deviceName.includes('llama')) {
- // These providers use OpenAI-compatible format
- this.llmProviderType = 'openai';
- }
- else {
- this.llmProviderType = 'unknown';
+ providerType = 'openai';
  }
- this.llmDevice = device;
- this.console.log(`[Discovery] Connected to LLM: ${device.name}`);
- this.console.log(`[Discovery] Image format: ${this.llmProviderType}`);
- return this.llmDevice;
+ this.llmDevices.push({
+ device: device,
+ id,
+ name: device.name || id,
+ providerType,
+ lastUsed: 0,
+ errorCount: 0,
+ });
+ this.console.log(`[Discovery] Found LLM: ${device.name}`);
  }
  }
- this.console.warn('[Discovery] No ChatCompletion device found. Vision-based discovery unavailable.');
+ if (this.llmDevices.length === 0) {
+ this.console.warn('[Discovery] No ChatCompletion devices found. Vision-based discovery unavailable.');
+ }
+ else {
+ this.console.log(`[Discovery] Load balancing across ${this.llmDevices.length} LLM device(s)`);
+ }
  }
  catch (e) {
- this.console.error('[Discovery] Error finding LLM device:', e);
+ this.console.error('[Discovery] Error finding LLM devices:', e);
+ }
+ }
+ /** Find LLM device with ChatCompletion interface - uses load balancing */
+ async findLlmDevice() {
+ await this.findAllLlmDevices();
+ if (this.llmDevices.length === 0)
+ return null;
+ // If only one LLM, just use it
+ if (this.llmDevices.length === 1) {
+ const llm = this.llmDevices[0];
+ this.llmDevice = llm.device;
+ this.llmProviderType = llm.providerType;
+ return llm.device;
+ }
+ // Find the LLM with oldest lastUsed time (least recently used)
+ let bestIndex = 0;
+ let bestScore = Infinity;
+ for (let i = 0; i < this.llmDevices.length; i++) {
+ const llm = this.llmDevices[i];
+ const score = llm.lastUsed + (llm.errorCount * 60000);
+ if (score < bestScore) {
+ bestScore = score;
+ bestIndex = i;
+ }
+ }
+ const selected = this.llmDevices[bestIndex];
+ this.llmDevice = selected.device;
+ this.llmProviderType = selected.providerType;
+ // Mark as used
+ selected.lastUsed = Date.now();
+ this.console.log(`[Discovery] Selected LLM: ${selected.name}`);
+ return selected.device;
+ }
+ /** Mark an LLM as having an error */
+ markLlmError(device) {
+ const llm = this.llmDevices.find(l => l.device === device);
+ if (llm) {
+ llm.errorCount++;
+ this.console.log(`[Discovery] ${llm.name} error count: ${llm.errorCount}`);
  }
- return null;
  }
  /** Get camera snapshot as ImageData */
  async getCameraSnapshot(cameraId) {
@@ -36602,6 +36727,10 @@ Use the mount height to help estimate distances - objects at ground level will a
  }
  // All formats failed
  if (lastError) {
+ // Track error for load balancing
+ if (llm) {
+ this.markLlmError(llm);
+ }
  const errorStr = String(lastError);
  if ((0, spatial_reasoning_1.isVisionFormatError)(lastError)) {
  analysis.error = 'Vision/image analysis failed with all formats. Ensure you have a vision-capable model (e.g., gpt-4o, gpt-4-turbo, claude-3-sonnet) configured and the @scrypted/llm plugin supports vision.';
@@ -37214,6 +37343,7 @@ class TrackingEngine {
  // Initialize spatial reasoning engine
  const spatialConfig = {
  enableLlm: config.useLlmDescriptions,
+ llmDeviceId: config.llmDeviceId,
  enableLandmarkLearning: config.enableLandmarkLearning ?? true,
  landmarkConfidenceThreshold: config.landmarkConfidenceThreshold ?? 0.7,
  contextCacheTtl: 60000, // 1 minute cache
@@ -39327,20 +39457,21 @@ class SpatialAwarenessPlugin extends sdk_1.ScryptedDeviceBase {
  defaultValue: 'scrypted/spatial-awareness',
  group: 'MQTT Integration',
  },
- // Alert Settings
- enableAlerts: {
- title: 'Enable Alerts',
- type: 'boolean',
- defaultValue: true,
- group: 'Alerts',
+ // Integrations
+ llmDevice: {
+ title: 'LLM Provider',
+ type: 'device',
+ deviceFilter: `interfaces.includes('ChatCompletion')`,
+ description: 'Select the LLM plugin to use for smart descriptions (e.g., OpenAI, Anthropic, Ollama)',
+ group: 'Integrations',
  },
  defaultNotifiers: {
- title: 'Notifiers',
+ title: 'Notification Service',
  type: 'device',
  multiple: true,
  deviceFilter: `interfaces.includes('${sdk_1.ScryptedInterface.Notifier}')`,
- description: 'Select one or more notifiers to receive alerts',
- group: 'Alerts',
+ description: 'Select one or more notifiers to receive alerts (e.g., Pushover, Home Assistant)',
+ group: 'Integrations',
  },
  // Tracked Cameras
  trackedCameras: {
@@ -39351,12 +39482,6 @@ class SpatialAwarenessPlugin extends sdk_1.ScryptedDeviceBase {
  group: 'Cameras',
  description: 'Select cameras with object detection to track',
  },
- // Alert Rules (stored as JSON)
- alertRules: {
- title: 'Alert Rules',
- type: 'string',
- hide: true,
- },
  });
  constructor(nativeId) {
  super(nativeId);
@@ -39443,7 +39568,8 @@ class SpatialAwarenessPlugin extends sdk_1.ScryptedDeviceBase {
  loiteringThreshold: (this.storageSettings.values.loiteringThreshold || 3) * 1000,
  objectAlertCooldown: (this.storageSettings.values.objectAlertCooldown || 30) * 1000,
  useLlmDescriptions: this.storageSettings.values.useLlmDescriptions ?? true,
- llmDebounceInterval: (this.storageSettings.values.llmDebounceInterval || 10) * 1000,
+ llmDeviceId: this.storageSettings.values.llmDevice || undefined,
+ llmDebounceInterval: (this.storageSettings.values.llmDebounceInterval || 30) * 1000,
  llmFallbackEnabled: this.storageSettings.values.llmFallbackEnabled ?? true,
  llmFallbackTimeout: (this.storageSettings.values.llmFallbackTimeout || 3) * 1000,
  enableTransitTimeLearning: this.storageSettings.values.enableTransitTimeLearning ?? true,
@@ -39736,84 +39862,16 @@ class SpatialAwarenessPlugin extends sdk_1.ScryptedDeviceBase {
  }
  // ==================== 5. Tracking ====================
  addGroup('Tracking');
- // ==================== 6. AI & Spatial Reasoning ====================
+ // ==================== 6. Integrations ====================
+ addGroup('Integrations');
+ // ==================== 7. AI & Spatial Reasoning ====================
  addGroup('AI & Spatial Reasoning');
- // ==================== 7. Auto-Topology Discovery ====================
+ // ==================== 8. Auto-Topology Discovery ====================
  addGroup('Auto-Topology Discovery');
- // ==================== 8. Alerts ====================
- addGroup('Alerts');
- // Add alert rules configuration UI
- const alertRules = this.alertManager.getRules();
- const rulesHtml = this.generateAlertRulesHtml(alertRules);
- settings.push({
- key: 'alertRulesEditor',
- title: 'Alert Rules',
- type: 'html',
- value: rulesHtml,
- group: 'Alerts',
- });
  // ==================== 9. MQTT Integration ====================
  addGroup('MQTT Integration');
  return settings;
  }
- generateAlertRulesHtml(rules) {
- const ruleRows = rules.map(rule => `
- <tr data-rule-id="${rule.id}">
- <td style="padding:8px;border-bottom:1px solid #333;">
- <input type="checkbox" ${rule.enabled ? 'checked' : ''}
- onchange="(function(el){var rules=JSON.parse(localStorage.getItem('sa-temp-rules')||'[]');var r=rules.find(x=>x.id==='${rule.id}');if(r)r.enabled=el.checked;localStorage.setItem('sa-temp-rules',JSON.stringify(rules));})(this)" />
- </td>
- <td style="padding:8px;border-bottom:1px solid #333;color:#fff;">${rule.name}</td>
- <td style="padding:8px;border-bottom:1px solid #333;color:#888;">${rule.type}</td>
- <td style="padding:8px;border-bottom:1px solid #333;">
- <span style="padding:2px 8px;border-radius:4px;font-size:12px;background:${rule.severity === 'critical' ? '#e94560' :
- rule.severity === 'warning' ? '#f39c12' : '#3498db'};color:white;">${rule.severity}</span>
- </td>
- <td style="padding:8px;border-bottom:1px solid #333;color:#888;">${Math.round(rule.cooldown / 1000)}s</td>
- </tr>
- `).join('');
- const initCode = `localStorage.setItem('sa-temp-rules',JSON.stringify(${JSON.stringify(rules)}))`;
- const saveCode = `(function(){var rules=JSON.parse(localStorage.getItem('sa-temp-rules')||'[]');fetch('/endpoint/@blueharford/scrypted-spatial-awareness/api/alert-rules',{method:'PUT',headers:{'Content-Type':'application/json'},body:JSON.stringify(rules)}).then(r=>r.json()).then(d=>{if(d.success)alert('Alert rules saved!');else alert('Error: '+d.error);}).catch(e=>alert('Error: '+e));})()`;
- return `
- <style>
- .sa-rules-table { width:100%; border-collapse:collapse; margin-top:10px; }
- .sa-rules-table th { text-align:left; padding:10px 8px; border-bottom:2px solid #e94560; color:#e94560; font-size:13px; }
- .sa-save-rules-btn {
- background: linear-gradient(135deg, #27ae60 0%, #2ecc71 100%);
- color: white;
- border: none;
- padding: 10px 20px;
- border-radius: 6px;
- font-size: 14px;
- font-weight: 600;
- cursor: pointer;
- margin-top: 15px;
- }
- .sa-save-rules-btn:hover { opacity: 0.9; }
- .sa-rules-container { background:#16213e; border-radius:8px; padding:15px; }
- .sa-rules-desc { color:#888; font-size:13px; margin-bottom:10px; }
- </style>
- <div class="sa-rules-container">
- <p class="sa-rules-desc">Enable or disable alert types. Movement alerts notify you when someone moves between cameras.</p>
- <table class="sa-rules-table">
- <thead>
- <tr>
- <th style="width:40px;">On</th>
- <th>Alert Type</th>
- <th>Event</th>
- <th>Severity</th>
- <th>Cooldown</th>
- </tr>
- </thead>
- <tbody>
- ${ruleRows}
- </tbody>
- </table>
- <button class="sa-save-rules-btn" onclick="${saveCode}">Save Alert Rules</button>
- <script>(function(){${initCode}})();</script>
- </div>
- `;
- }
  async putSetting(key, value) {
  await this.storageSettings.putSetting(key, value);
  // Handle setting changes that require engine restart
@@ -39828,6 +39886,7 @@ class SpatialAwarenessPlugin extends sdk_1.ScryptedDeviceBase {
  key === 'llmDebounceInterval' ||
  key === 'llmFallbackEnabled' ||
  key === 'llmFallbackTimeout' ||
+ key === 'llmDevice' ||
  key === 'enableTransitTimeLearning' ||
  key === 'enableConnectionSuggestions' ||
  key === 'enableLandmarkLearning' ||
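For reference on the selection rule both engines now share: the score is `lastUsed + errorCount * 60000` in milliseconds and the lowest score wins, so each recorded error effectively pushes a device 60 seconds into the "recently used" future. A self-contained sketch with hypothetical device names and timestamps (not taken from the plugin):

```ts
// Score = lastUsed + errorCount * 60000 (ms); lowest score wins.
interface LlmCandidate { name: string; lastUsed: number; errorCount: number; }

// Assumes a non-empty candidate list, matching the guarded call sites in the diff.
function pickLlm(devices: LlmCandidate[]): LlmCandidate {
    let best = devices[0];
    for (const llm of devices) {
        const score = llm.lastUsed + llm.errorCount * 60000;
        const bestScore = best.lastUsed + best.errorCount * 60000;
        if (score < bestScore)
            best = llm;
    }
    return best;
}

// Hypothetical example: "ollama" ran 5 s ago with no errors; "openai" ran 90 s ago
// but has one recent error. The 60 s penalty is smaller than the 85 s idle gap,
// so "openai" still scores lower and is selected.
const now = Date.now();
console.log(pickLlm([
    { name: 'ollama', lastUsed: now - 5_000, errorCount: 0 },
    { name: 'openai', lastUsed: now - 90_000, errorCount: 1 },
]).name); // -> "openai"
```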