matrice-analytics 0.1.60__py3-none-any.whl → 0.1.89__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. matrice_analytics/post_processing/config.py +2 -2
  2. matrice_analytics/post_processing/core/base.py +1 -1
  3. matrice_analytics/post_processing/face_reg/embedding_manager.py +8 -8
  4. matrice_analytics/post_processing/face_reg/face_recognition.py +886 -201
  5. matrice_analytics/post_processing/face_reg/face_recognition_client.py +68 -2
  6. matrice_analytics/post_processing/usecases/advanced_customer_service.py +908 -498
  7. matrice_analytics/post_processing/usecases/color_detection.py +18 -18
  8. matrice_analytics/post_processing/usecases/customer_service.py +356 -9
  9. matrice_analytics/post_processing/usecases/fire_detection.py +149 -11
  10. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +548 -40
  11. matrice_analytics/post_processing/usecases/people_counting.py +11 -11
  12. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +34 -34
  13. matrice_analytics/post_processing/usecases/weapon_detection.py +98 -22
  14. matrice_analytics/post_processing/utils/alert_instance_utils.py +950 -0
  15. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1245 -0
  16. matrice_analytics/post_processing/utils/incident_manager_utils.py +1657 -0
  17. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/METADATA +1 -1
  18. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/RECORD +21 -18
  19. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/WHEEL +0 -0
  20. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/licenses/LICENSE.txt +0 -0
  21. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/utils/alert_instance_utils.py
@@ -0,0 +1,950 @@
1
+ """
2
+ alert_instance_utils.py
3
+
4
+ PRODUCTION-READY VERSION
5
+ Robust JSON parsing with fallback handling.
6
+ """
7
+
8
+ import json
9
+ import time
10
+ import threading
11
+ import logging
12
+ from typing import Dict, List, Optional, Any, Tuple
13
+ from datetime import datetime, timezone
14
+ from dataclasses import dataclass, field
15
+
16
+
17
+ @dataclass
18
+ class AlertConfig:
19
+ """Represents an instant alert configuration."""
20
+ instant_alert_id: str
21
+ camera_id: str
22
+ app_deployment_id: str
23
+ application_id: str
24
+ alert_name: str
25
+ detection_config: Dict[str, Any]
26
+ severity_level: str
27
+ is_active: bool
28
+ action: str
29
+ timestamp: str
30
+ last_updated: float = field(default_factory=time.time)
31
+
32
+ @classmethod
33
+ def from_dict(cls, data: Dict[str, Any]) -> "AlertConfig":
34
+ """Create AlertConfig from dictionary."""
35
+ # Handle is_active as string (e.g., "True" or "true" -> True)
36
+ is_active_raw = data.get("is_active", True)
37
+ if isinstance(is_active_raw, str):
38
+ is_active = is_active_raw.lower() in ("true", "1", "yes")
39
+ else:
40
+ is_active = bool(is_active_raw)
41
+
42
+ return cls(
43
+ instant_alert_id=data.get("instant_alert_id", ""),
44
+ camera_id=data.get("camera_id", ""),
45
+ app_deployment_id=data.get("app_deployment_id", ""),
46
+ application_id=data.get("application_id", ""),
47
+ alert_name=data.get("alert_name", ""),
48
+ detection_config=data.get("detection_config", {}),
49
+ severity_level=data.get("severity_level", "medium"),
50
+ is_active=is_active,
51
+ action=data.get("action", "create"),
52
+ timestamp=data.get("timestamp", datetime.now(timezone.utc).isoformat()),
53
+ last_updated=time.time()
54
+ )
55
+
56
+
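To make the coercion above concrete, a minimal sketch of feeding `AlertConfig.from_dict` a hypothetical payload (all values made up):

sample = {
    "instant_alert_id": "alert-123",
    "camera_id": "cam-7",
    "alert_name": "Watchlist plate",
    "detection_config": {"targetPlates": ["ABC123"], "minConfidence": 0.5},
    "severity_level": "high",
    "is_active": "True",              # string form, coerced by from_dict
    "action": "create",
}
cfg = AlertConfig.from_dict(sample)
assert cfg.is_active is True          # "True"/"true"/"1"/"yes" all coerce to True
assert cfg.app_deployment_id == ""    # missing keys fall back to their defaults
assert cfg.action == "create"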
57
+ class ALERT_INSTANCE:
58
+ """
59
+ Manages instant alert configurations and evaluates detection events.
60
+
61
+ This class handles:
62
+ - Polling alert configs from Redis/Kafka every polling_interval seconds
63
+ - Maintaining in-memory alert state
64
+ - Evaluating detection events against alert criteria
65
+ - Publishing trigger messages when matches occur
66
+
67
+ Transport Priority:
68
+ - Redis is primary for both config reading and trigger publishing
69
+ - Kafka is fallback when Redis operations fail
70
+ """
71
+
72
+ def __init__(
73
+ self,
74
+ redis_client: Optional[Any] = None,
75
+ kafka_client: Optional[Any] = None,
76
+ config_topic: str = "alert_instant_config_request",
77
+ trigger_topic: str = "alert_instant_triggered",
78
+ polling_interval: int = 10,
79
+ logger: Optional[logging.Logger] = None
80
+ ):
81
+ """
82
+ Initialize ALERT_INSTANCE.
83
+
84
+ Args:
85
+ redis_client: MatriceStream instance configured for Redis (primary transport)
86
+ kafka_client: MatriceStream instance configured for Kafka (fallback transport)
87
+ config_topic: Topic/stream name for receiving alert configs
88
+ trigger_topic: Topic/stream name for publishing triggers
89
+ polling_interval: Seconds between config polling
90
+ logger: Python logger instance
91
+ """
92
+ self.redis_client = redis_client
93
+ self.kafka_client = kafka_client
94
+ self.config_topic = config_topic
95
+ self.trigger_topic = trigger_topic
96
+ self.polling_interval = polling_interval
97
+ self.logger = logger or logging.getLogger(__name__)
98
+
99
+ # In-memory alert storage: {instant_alert_id: AlertConfig}
100
+ self._alerts: Dict[str, AlertConfig] = {}
101
+ self._alerts_lock = threading.Lock()
102
+
103
+ # Cooldown tracking: {(instant_alert_id, detection_key): last_trigger_timestamp}
104
+ # detection_key = plateNumber for LPR, objectClass for count/intrusion, "fire_smoke" for fire/smoke
105
+ self._cooldown_cache: Dict[tuple, float] = {}
106
+ self._cooldown_lock = threading.Lock()
107
+ self._cooldown_seconds = 5 # 5 second cooldown per alert+detection combination
108
+
109
+ # Polling thread control
110
+ self._polling_thread: Optional[threading.Thread] = None
111
+ self._stop_event = threading.Event()
112
+ self._running = False
113
+
114
+ self.logger.info(
115
+ f"Initialized ALERT_INSTANCE with config_topic={config_topic}, "
116
+ f"trigger_topic={trigger_topic}, polling_interval={polling_interval}s, "
117
+ f"cooldown={self._cooldown_seconds}s"
118
+ )
119
+
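A minimal usage sketch. It assumes only the client interface this class actually calls — `get_message(timeout=...)` and `add_message(topic_or_channel=..., message=..., key=...)` — so `StubStream` below is a hypothetical stand-in for a MatriceStream instance:

import json
import queue

class StubStream:
    """Hypothetical stand-in for a MatriceStream client; only the two methods ALERT_INSTANCE calls."""

    def __init__(self):
        self.inbox = queue.Queue()   # config payloads to be "read" by the poller
        self.published = []          # trigger messages captured by add_message

    def get_message(self, timeout=0.1):
        try:
            return {"value": self.inbox.get(timeout=timeout)}
        except queue.Empty:
            return None

    def add_message(self, topic_or_channel, message, key=""):
        self.published.append((topic_or_channel, json.loads(message), key))

redis_stub = StubStream()
alerts = ALERT_INSTANCE(redis_client=redis_stub, kafka_client=None, polling_interval=2)
alerts.start()    # background thread begins polling redis_stub for alert configs
# ... push config dicts into redis_stub.inbox, call alerts.process_detection_event(...),
# then inspect redis_stub.published for trigger messages ...
alerts.stop()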
120
+ def start(self):
121
+ """Start the background polling thread for config updates."""
122
+ if self._running:
123
+ self.logger.warning("ALERT_INSTANCE already running")
124
+ return
125
+
126
+ self._running = True
127
+ self._stop_event.clear()
128
+ self._polling_thread = threading.Thread(
129
+ target=self._polling_loop,
130
+ daemon=True,
131
+ name="AlertConfigPoller"
132
+ )
133
+ self._polling_thread.start()
134
+ self.logger.info("Started alert config polling thread")
135
+
136
+ def stop(self):
137
+ """Stop the background polling thread gracefully."""
138
+ if not self._running:
139
+ return
140
+
141
+ self.logger.info("Stopping ALERT_INSTANCE...")
142
+ self._running = False
143
+ self._stop_event.set()
144
+
145
+ if self._polling_thread and self._polling_thread.is_alive():
146
+ self._polling_thread.join(timeout=5)
147
+
148
+ self.logger.info("ALERT_INSTANCE stopped")
149
+
150
+ def _polling_loop(self):
151
+ """Background thread that polls for config updates every polling_interval seconds."""
152
+ self.logger.info(f"Alert config polling loop started (interval: {self.polling_interval}s)")
153
+
154
+ while not self._stop_event.is_set():
155
+ try:
156
+ self._fetch_and_update_configs()
157
+ except Exception as e:
158
+ self.logger.error(f"Error in polling loop: {e}", exc_info=True)
159
+
160
+ # Sleep in small increments to allow quick shutdown
161
+ for _ in range(self.polling_interval):
162
+ if self._stop_event.is_set():
163
+ break
164
+ time.sleep(1)
165
+
166
+ self.logger.info("Alert config polling loop exited")
167
+
168
+ def _fetch_and_update_configs(self):
169
+ """Fetch config messages from Redis (primary) or Kafka (fallback)."""
170
+ configs = []
171
+
172
+ # Try Redis first (primary)
173
+ if self.redis_client:
174
+ try:
175
+ self.logger.debug(f"Fetching configs from Redis stream: {self.config_topic}")
176
+ configs = self._read_from_redis(self.config_topic)
177
+ if configs:
178
+ self.logger.info(f"Fetched {len(configs)} config(s) from Redis")
179
+ except Exception as e:
180
+ self.logger.error(f"Redis config fetch failed: {e}", exc_info=True)
181
+
182
+ # Fallback to Kafka if Redis failed or no client
183
+ if not configs and self.kafka_client:
184
+ try:
185
+ self.logger.debug(f"Falling back to Kafka topic: {self.config_topic}")
186
+ configs = self._read_from_kafka(self.config_topic)
187
+ if configs:
188
+ self.logger.info(f"Fetched {len(configs)} config(s) from Kafka")
189
+ except Exception as e:
190
+ self.logger.error(f"Kafka config fetch failed: {e}", exc_info=True)
191
+
192
+ # Update in-memory alert configs
193
+ for config_data in configs:
194
+ try:
195
+ self._handle_config_message(config_data)
196
+ except Exception as e:
197
+ self.logger.error(f"Error handling config message: {e}", exc_info=True)
198
+
199
+ def _read_from_redis(self, topic: str, max_messages: int = 100) -> List[Dict[str, Any]]:
200
+ """
201
+ Read messages from Redis stream.
202
+
203
+ Args:
204
+ topic: Redis stream name
205
+ max_messages: Maximum messages to fetch
206
+
207
+ Returns:
208
+ List of parsed message dictionaries
209
+ """
210
+ messages = []
211
+ try:
212
+ self.logger.debug(f"[ALERT_DEBUG] Reading from Redis topic: {topic}, max_messages: {max_messages}")
213
+ for msg_count in range(max_messages):
214
+ msg = self.redis_client.get_message(timeout=0.1)
215
+ if not msg:
216
+ self.logger.debug(f"[ALERT_DEBUG] No more messages from Redis after {msg_count} messages")
217
+ break
218
+
219
+ self.logger.debug(f"[ALERT_DEBUG] Raw message #{msg_count + 1} received: {msg}")
220
+ value = msg.get('value') or msg.get('data') or msg.get('message')
221
+ if value:
222
+ self.logger.debug(f"[ALERT_DEBUG] Extracted value type: {type(value)}, length: {len(value) if hasattr(value, '__len__') else 'N/A'}")
223
+
224
+ # Handle case where value is already a dict (Redis stream format)
225
+ if isinstance(value, dict):
226
+ self.logger.debug(f"[ALERT_DEBUG] Value is already a dict, keys: {list(value.keys())}")
227
+ # Check if there's a nested 'data' key (common Redis stream pattern)
228
+ if 'data' in value and isinstance(value['data'], dict):
229
+ parsed = value['data']
230
+ self.logger.info(f"[ALERT_DEBUG] Extracted nested 'data' dict: {parsed}")
231
+ messages.append(parsed)
232
+ else:
233
+ # Use the dict directly
234
+ self.logger.info(f"[ALERT_DEBUG] Using dict directly: {value}")
235
+ messages.append(value)
236
+ continue
237
+
238
+ if isinstance(value, bytes):
239
+ value = value.decode('utf-8')
240
+ self.logger.debug(f"[ALERT_DEBUG] Decoded bytes to string: {value[:200]}...")
241
+ if isinstance(value, str):
242
+ self.logger.debug(f"[ALERT_DEBUG] Raw JSON string: {value}")
243
+ # Robust JSON parsing with error handling
244
+ try:
245
+ parsed = json.loads(value)
246
+ self.logger.info(f"[ALERT_DEBUG] Successfully parsed JSON: {parsed}")
247
+ messages.append(parsed)
248
+ except json.JSONDecodeError as e:
249
+ self.logger.error(f"[ALERT_DEBUG] JSON parse error: {e}")
250
+ self.logger.error(f"[ALERT_DEBUG] Invalid JSON (first 500 chars): {value[:500]}")
251
+ # Try to fix common issues
252
+ try:
253
+ # Replace Python booleans with JSON booleans
254
+ self.logger.debug(f"[ALERT_DEBUG] Attempting to fix Python-style formatting...")
255
+ fixed = value
256
+
257
+ # Fix Python booleans (True/False -> true/false)
258
+ fixed = fixed.replace(": True", ": true").replace(": False", ": false")
259
+ fixed = fixed.replace(":True", ":true").replace(":False", ":false")
260
+ fixed = fixed.replace(" True,", " true,").replace(" False,", " false,")
261
+ fixed = fixed.replace(" True}", " true}").replace(" False}", " false}")
262
+ fixed = fixed.replace("{True", "{true").replace("{False", "{false")
263
+
264
+ # Fix Python None (None -> null)
265
+ fixed = fixed.replace(": None", ": null").replace(":None", ":null")
266
+ fixed = fixed.replace(" None,", " null,").replace(" None}", " null}")
267
+
268
+ # Fix single quotes (Python dict style) -> double quotes (JSON style)
269
+ # This is a simple replacement that works for most cases
270
+ if "'" in fixed and '"' not in fixed:
271
+ self.logger.debug(f"[ALERT_DEBUG] Detected single quotes, replacing with double quotes")
272
+ fixed = fixed.replace("'", '"')
273
+
274
+ self.logger.debug(f"[ALERT_DEBUG] Fixed JSON string: {fixed[:500]}...")
275
+ parsed = json.loads(fixed)
276
+ self.logger.info(f"[ALERT_DEBUG] Successfully fixed and parsed JSON: {parsed}")
277
+ messages.append(parsed)
278
+ except Exception as fix_error:
279
+ self.logger.error(f"[ALERT_DEBUG] Could not fix JSON: {fix_error}, skipping message")
280
+ continue
281
+ else:
282
+ self.logger.warning(f"[ALERT_DEBUG] Message has no value/data/message field: {msg}")
283
+ except Exception as e:
284
+ self.logger.error(f"[ALERT_DEBUG] Error reading from Redis: {e}", exc_info=True)
285
+ raise
286
+
287
+ self.logger.info(f"[ALERT_DEBUG] Total messages parsed from Redis: {len(messages)}")
288
+ return messages
289
+
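The string replacements above cover the common Python-repr artifacts but are inherently fragile (they also rewrite matching substrings inside string values). Where a payload is known to be the `repr()` of a dict, `ast.literal_eval` is a stricter fallback; a sketch of that alternative, not what this module does:

import ast
import json

def parse_payload(raw: str) -> dict:
    """Parse JSON first; fall back to ast.literal_eval for Python-repr dicts."""
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        value = ast.literal_eval(raw)  # evaluates literals only, never arbitrary code
        if not isinstance(value, dict):
            raise ValueError(f"expected a dict payload, got {type(value).__name__}")
        return value

parse_payload('{"is_active": true}')                  # plain JSON
parse_payload("{'is_active': True, 'note': None}")    # Python repr with True/None/single quotes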
290
+ def _read_from_kafka(self, topic: str, max_messages: int = 100) -> List[Dict[str, Any]]:
291
+ """
292
+ Read messages from Kafka topic.
293
+
294
+ Args:
295
+ topic: Kafka topic name
296
+ max_messages: Maximum messages to fetch
297
+
298
+ Returns:
299
+ List of parsed message dictionaries
300
+ """
301
+ messages = []
302
+ try:
303
+ self.logger.debug(f"[ALERT_DEBUG] Reading from Kafka topic: {topic}, max_messages: {max_messages}")
304
+ for msg_count in range(max_messages):
305
+ msg = self.kafka_client.get_message(timeout=0.1)
306
+ if not msg:
307
+ self.logger.debug(f"[ALERT_DEBUG] No more messages from Kafka after {msg_count} messages")
308
+ break
309
+
310
+ self.logger.debug(f"[ALERT_DEBUG] Raw Kafka message #{msg_count + 1} received: {msg}")
311
+ value = msg.get('value') or msg.get('data') or msg.get('message')
312
+ if value:
313
+ self.logger.debug(f"[ALERT_DEBUG] Extracted value type: {type(value)}, length: {len(value) if hasattr(value, '__len__') else 'N/A'}")
314
+
315
+ # Handle case where value is already a dict (Kafka message format)
316
+ if isinstance(value, dict):
317
+ self.logger.debug(f"[ALERT_DEBUG] Value is already a dict, keys: {list(value.keys())}")
318
+ # Check if there's a nested 'data' key (common Kafka message pattern)
319
+ if 'data' in value and isinstance(value['data'], dict):
320
+ parsed = value['data']
321
+ self.logger.info(f"[ALERT_DEBUG] Extracted nested 'data' dict: {parsed}")
322
+ messages.append(parsed)
323
+ else:
324
+ # Use the dict directly
325
+ self.logger.info(f"[ALERT_DEBUG] Using dict directly: {value}")
326
+ messages.append(value)
327
+ continue
328
+
329
+ if isinstance(value, bytes):
330
+ value = value.decode('utf-8')
331
+ self.logger.debug(f"[ALERT_DEBUG] Decoded bytes to string: {value[:200]}...")
332
+ if isinstance(value, str):
333
+ self.logger.debug(f"[ALERT_DEBUG] Raw JSON string: {value}")
334
+ try:
335
+ parsed = json.loads(value)
336
+ self.logger.info(f"[ALERT_DEBUG] Successfully parsed Kafka JSON: {parsed}")
337
+ messages.append(parsed)
338
+ except json.JSONDecodeError as e:
339
+ self.logger.error(f"[ALERT_DEBUG] Kafka JSON parse error: {e}")
340
+ self.logger.error(f"[ALERT_DEBUG] Invalid JSON (first 500 chars): {value[:500]}")
341
+ # Try to fix common issues
342
+ try:
343
+ self.logger.debug(f"[ALERT_DEBUG] Attempting to fix Python-style formatting...")
344
+ fixed = value
345
+
346
+ # Fix Python booleans (True/False -> true/false)
347
+ fixed = fixed.replace(": True", ": true").replace(": False", ": false")
348
+ fixed = fixed.replace(":True", ":true").replace(":False", ":false")
349
+ fixed = fixed.replace(" True,", " true,").replace(" False,", " false,")
350
+ fixed = fixed.replace(" True}", " true}").replace(" False}", " false}")
351
+ fixed = fixed.replace("{True", "{true").replace("{False", "{false")
352
+
353
+ # Fix Python None (None -> null)
354
+ fixed = fixed.replace(": None", ": null").replace(":None", ":null")
355
+ fixed = fixed.replace(" None,", " null,").replace(" None}", " null}")
356
+
357
+ # Fix single quotes (Python dict style) -> double quotes (JSON style)
358
+ # This is a simple replacement that works for most cases
359
+ if "'" in fixed and '"' not in fixed:
360
+ self.logger.debug(f"[ALERT_DEBUG] Detected single quotes, replacing with double quotes")
361
+ fixed = fixed.replace("'", '"')
362
+
363
+ self.logger.debug(f"[ALERT_DEBUG] Fixed JSON string: {fixed[:500]}...")
364
+ parsed = json.loads(fixed)
365
+ self.logger.info(f"[ALERT_DEBUG] Successfully fixed and parsed Kafka JSON: {parsed}")
366
+ messages.append(parsed)
367
+ except Exception as fix_error:
368
+ self.logger.error(f"[ALERT_DEBUG] Could not fix Kafka JSON: {fix_error}, skipping message")
369
+ continue
370
+ else:
371
+ self.logger.warning(f"[ALERT_DEBUG] Kafka message has no value/data/message field: {msg}")
372
+ except Exception as e:
373
+ self.logger.error(f"[ALERT_DEBUG] Error reading from Kafka: {e}", exc_info=True)
374
+ raise
375
+
376
+ self.logger.info(f"[ALERT_DEBUG] Total messages parsed from Kafka: {len(messages)}")
377
+ return messages
378
+
379
+ def _handle_config_message(self, config_data: Dict[str, Any]):
380
+ """
381
+ Handle a single config message (create/update/delete).
382
+
383
+ Args:
384
+ config_data: Alert configuration dictionary
385
+ """
386
+ try:
387
+ self.logger.info(f"[ALERT_DEBUG] ========== HANDLING CONFIG MESSAGE ==========")
388
+ self.logger.info(f"[ALERT_DEBUG] Raw config_data type: {type(config_data)}")
389
+ self.logger.info(f"[ALERT_DEBUG] Raw config_data keys: {list(config_data.keys()) if isinstance(config_data, dict) else 'N/A'}")
390
+ self.logger.info(f"[ALERT_DEBUG] Raw config_data: {config_data}")
391
+
392
+ # Skip if this is a wrapper with 'raw' key (from failed JSON parse)
393
+ if 'raw' in config_data and len(config_data) == 1:
394
+ self.logger.warning("[ALERT_DEBUG] Skipping malformed config with 'raw' key only")
395
+ return
396
+
397
+ # Log individual fields before creating AlertConfig
398
+ self.logger.debug(f"[ALERT_DEBUG] Extracted fields from config_data:")
399
+ self.logger.debug(f"[ALERT_DEBUG] - instant_alert_id: '{config_data.get('instant_alert_id', 'MISSING')}'")
400
+ self.logger.debug(f"[ALERT_DEBUG] - camera_id: '{config_data.get('camera_id', 'MISSING')}'")
401
+ self.logger.debug(f"[ALERT_DEBUG] - app_deployment_id: '{config_data.get('app_deployment_id', 'MISSING')}'")
402
+ self.logger.debug(f"[ALERT_DEBUG] - application_id: '{config_data.get('application_id', 'MISSING')}'")
403
+ self.logger.debug(f"[ALERT_DEBUG] - alert_name: '{config_data.get('alert_name', 'MISSING')}'")
404
+ self.logger.debug(f"[ALERT_DEBUG] - detection_config: {config_data.get('detection_config', 'MISSING')}")
405
+ self.logger.debug(f"[ALERT_DEBUG] - severity_level: '{config_data.get('severity_level', 'MISSING')}'")
406
+ self.logger.debug(f"[ALERT_DEBUG] - is_active: {config_data.get('is_active', 'MISSING')}")
407
+ self.logger.debug(f"[ALERT_DEBUG] - action: '{config_data.get('action', 'MISSING')}'")
408
+ self.logger.debug(f"[ALERT_DEBUG] - timestamp: '{config_data.get('timestamp', 'MISSING')}'")
409
+
410
+ alert_config = AlertConfig.from_dict(config_data)
411
+
412
+ self.logger.info(f"[ALERT_DEBUG] AlertConfig created successfully")
413
+ self.logger.info(f"[ALERT_DEBUG] AlertConfig fields:")
414
+ self.logger.info(f"[ALERT_DEBUG] - instant_alert_id: '{alert_config.instant_alert_id}'")
415
+ self.logger.info(f"[ALERT_DEBUG] - camera_id: '{alert_config.camera_id}'")
416
+ self.logger.info(f"[ALERT_DEBUG] - app_deployment_id: '{alert_config.app_deployment_id}'")
417
+ self.logger.info(f"[ALERT_DEBUG] - application_id: '{alert_config.application_id}'")
418
+ self.logger.info(f"[ALERT_DEBUG] - alert_name: '{alert_config.alert_name}'")
419
+ self.logger.info(f"[ALERT_DEBUG] - detection_config: {alert_config.detection_config}")
420
+ self.logger.info(f"[ALERT_DEBUG] - severity_level: '{alert_config.severity_level}'")
421
+ self.logger.info(f"[ALERT_DEBUG] - is_active: {alert_config.is_active}")
422
+ self.logger.info(f"[ALERT_DEBUG] - action: '{alert_config.action}'")
423
+ self.logger.info(f"[ALERT_DEBUG] - timestamp: '{alert_config.timestamp}'")
424
+
425
+ action = alert_config.action.lower()
426
+ alert_id = alert_config.instant_alert_id
427
+
428
+ self.logger.info(f"[ALERT_DEBUG] Action (lowercase): '{action}'")
429
+ self.logger.info(f"[ALERT_DEBUG] Alert ID: '{alert_id}'")
430
+
431
+ # Validate required fields
432
+ if not alert_id:
433
+ self.logger.error(f"[ALERT_DEBUG] ❌ VALIDATION FAILED: Missing 'instant_alert_id'")
434
+ self.logger.error(f"[ALERT_DEBUG] Full config data: {config_data}")
435
+ return
436
+ if not alert_config.camera_id:
437
+ self.logger.warning(f"[ALERT_DEBUG] camera_id missing for alert '{alert_id}', defaulting to empty and proceeding")
438
+
439
+ self.logger.info(f"[ALERT_DEBUG] ✓ Validation passed")
440
+
441
+ with self._alerts_lock:
442
+ if action == "create":
443
+ if alert_id in self._alerts:
444
+ self.logger.info(f"[ALERT_DEBUG] Alert {alert_id} already exists, treating as update")
445
+ self._alerts[alert_id] = alert_config
446
+ self.logger.info(f"[ALERT_DEBUG] ✓ Created/Updated alert: {alert_id} ({alert_config.alert_name})")
447
+ self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")
448
+
449
+ elif action == "update":
450
+ self._alerts[alert_id] = alert_config
451
+ self.logger.info(f"[ALERT_DEBUG] ✓ Updated alert: {alert_id} ({alert_config.alert_name})")
452
+ self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")
453
+
454
+ elif action == "delete":
455
+ if alert_id in self._alerts:
456
+ del self._alerts[alert_id]
457
+ self.logger.info(f"[ALERT_DEBUG] ✓ Deleted alert: {alert_id}")
458
+ self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")
459
+ else:
460
+ self.logger.warning(f"[ALERT_DEBUG] Delete requested for non-existent alert: {alert_id}")
461
+
462
+ # Also deactivate if is_active is False
463
+ if not alert_config.is_active and alert_id in self._alerts:
464
+ del self._alerts[alert_id]
465
+ self.logger.info(f"[ALERT_DEBUG] ✓ Deactivated alert: {alert_id}")
466
+ self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")
467
+
468
+ self.logger.info(f"[ALERT_DEBUG] ========== CONFIG MESSAGE HANDLED ==========")
469
+
470
+ except Exception as e:
471
+ self.logger.error(f"[ALERT_DEBUG] ❌ EXCEPTION in _handle_config_message: {e}", exc_info=True)
472
+ self.logger.error(f"[ALERT_DEBUG] Failed config_data: {config_data}")
473
+
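Given the handler above, a short illustration of the create/delete lifecycle (payload values hypothetical; in production this method is fed by the polling loop rather than called directly):

alerts = ALERT_INSTANCE()            # no transports needed for this illustration

create_msg = {
    "instant_alert_id": "alert-123",
    "camera_id": "cam-7",
    "alert_name": "Watchlist plate",
    "action": "create",
    "detection_config": {"targetPlates": ["ABC123"]},
}
alerts._handle_config_message(create_msg)
assert alerts.get_active_alerts_count() == 1

# Either action="delete" or is_active=False removes the alert from memory.
alerts._handle_config_message({**create_msg, "action": "delete"})
assert alerts.get_active_alerts_count() == 0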
474
+ def process_detection_event(self, detection_payload: Dict[str, Any]):
475
+ """
476
+ Process a detection event and evaluate against active alerts.
477
+
478
+ Args:
479
+ detection_payload: Detection event data
480
+ """
481
+ try:
482
+ self.logger.info(f"[ALERT_DEBUG] ========== PROCESSING DETECTION EVENT ==========")
483
+ self.logger.info(f"[ALERT_DEBUG] Detection payload: {detection_payload}")
484
+
485
+ camera_id = detection_payload.get("camera_id")
486
+ self.logger.debug(f"[ALERT_DEBUG] Camera ID: '{camera_id}'")
487
+
488
+ if not camera_id:
489
+ self.logger.warning("[ALERT_DEBUG] Detection event missing camera_id; defaulting to empty and evaluating against all active alerts")
490
+ camera_id = ""
491
+
492
+ # Get all active alerts for this camera
493
+ matching_alerts = self._get_alerts_for_camera(camera_id)
494
+
495
+ # Fallback: if no alerts found for this camera (or camera_id missing), evaluate against all active alerts
496
+ if not matching_alerts:
497
+ self.logger.info(f"[ALERT_DEBUG] No camera-specific alerts for '{camera_id}'. Evaluating against all active alerts.")
498
+ with self._alerts_lock:
499
+ matching_alerts = [a for a in self._alerts.values() if a.is_active]
500
+
501
+ self.logger.info(f"[ALERT_DEBUG] Found {len(matching_alerts)} active alert(s) for camera '{camera_id}'")
502
+ for i, alert in enumerate(matching_alerts):
503
+ self.logger.debug(f"[ALERT_DEBUG] Alert #{i+1}: ID={alert.instant_alert_id}, Name={alert.alert_name}")
504
+
505
+ if not matching_alerts:
506
+ self.logger.debug(f"[ALERT_DEBUG] No active alerts for camera: {camera_id}")
507
+ self.logger.debug(f"[ALERT_DEBUG] Total alerts in system: {len(self._alerts)}")
508
+ with self._alerts_lock:
509
+ all_camera_ids = [a.camera_id for a in self._alerts.values()]
510
+ self.logger.debug(f"[ALERT_DEBUG] All camera IDs in alert system: {all_camera_ids}")
511
+ #return
512
+
513
+ # Evaluate each alert
514
+ for alert in matching_alerts:
515
+ try:
516
+ self.logger.info(f"[ALERT_DEBUG] Evaluating alert: {alert.instant_alert_id} ({alert.alert_name})")
517
+
518
+ # First check if alert criteria match
519
+ if self._evaluate_alert(alert, detection_payload):
520
+ # Extract detection key for cooldown check
521
+ detection_key = self._get_detection_key(detection_payload)
522
+
523
+ # Atomically acquire cooldown slot (check+set)
524
+ acquired, prev_time = self._try_acquire_cooldown(alert.instant_alert_id, detection_key)
525
+ if acquired:
526
+ self.logger.info(f"[ALERT_DEBUG] ✓ Alert matched and cooldown acquired, publishing trigger...")
527
+ publish_ok = self._publish_trigger(alert, detection_payload)
528
+ if not publish_ok:
529
+ # Rollback cooldown if publish failed
530
+ self._rollback_cooldown(alert.instant_alert_id, detection_key, prev_time)
531
+ self.logger.warning(
532
+ f"[ALERT_DEBUG] Publish failed, cooldown rolled back: "
533
+ f"alert={alert.instant_alert_id}, detection_key={detection_key}"
534
+ )
535
+ else:
536
+ # Cooldown active, skip publish
537
+ self.logger.info(
538
+ f"[ALERT_DEBUG] ⏱️ Alert matched but in cooldown period, skipping: "
539
+ f"alert={alert.instant_alert_id}, detection_key={detection_key}"
540
+ )
541
+ else:
542
+ self.logger.debug(f"[ALERT_DEBUG] ✗ Alert did not match criteria")
543
+ except Exception as e:
544
+ self.logger.error(
545
+ f"[ALERT_DEBUG] ❌ Error evaluating alert {alert.instant_alert_id}: {e}",
546
+ exc_info=True
547
+ )
548
+
549
+ self.logger.info(f"[ALERT_DEBUG] ========== DETECTION EVENT PROCESSED ==========")
550
+
551
+ except Exception as e:
552
+ self.logger.error(f"[ALERT_DEBUG] ❌ Error processing detection event: {e}", exc_info=True)
553
+
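The exact fields consumed here are defined by the evaluators further down; for a license-plate event the minimal payload looks roughly like this (values hypothetical, `alerts` being an ALERT_INSTANCE as sketched earlier):

detection = {
    "camera_id": "cam-7",
    "detectionType": "license_plate",   # routes to _evaluate_lpr_alert
    "plateNumber": "ABC123",            # also used as the cooldown key
    "confidence": 0.92,
    "coordinates": {"x": 120, "y": 80, "w": 200, "h": 60},
    "cameraName": "Gate A",
}
alerts.process_detection_event(detection)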
554
+ def _get_alerts_for_camera(self, camera_id: str) -> List[AlertConfig]:
555
+ """Get all active alerts for a specific camera."""
556
+ with self._alerts_lock:
557
+ return [
558
+ alert for alert in self._alerts.values()
559
+ if alert.camera_id == camera_id and alert.is_active
560
+ ]
561
+
562
+ def _get_detection_key(self, detection: Dict[str, Any]) -> str:
563
+ """
564
+ Extract the unique detection key based on detection type.
565
+
566
+ Returns:
567
+ - plateNumber for license_plate
568
+ - objectClass for object_count/intrusion
569
+ - "fire_smoke" for fire_smoke detection
570
+ """
571
+ detection_type = detection.get("detectionType", "").lower()
572
+
573
+ if detection_type == "license_plate":
574
+ return detection.get("plateNumber", "").upper().strip()
575
+ elif detection_type in ["object_count", "intrusion"]:
576
+ return detection.get("objectClass", "unknown")
577
+ elif detection_type == "fire_smoke":
578
+ return "fire_smoke"
579
+ else:
580
+ return "unknown"
581
+
582
+ def _check_cooldown(self, alert_id: str, detection_key: str) -> bool:
583
+ """
584
+ Check if alert+detection is in cooldown period.
585
+
586
+ Args:
587
+ alert_id: instant_alert_id
588
+ detection_key: plateNumber, objectClass, or detection type
589
+
590
+ Returns:
591
+ True if allowed to trigger (not in cooldown), False if in cooldown
592
+ """
593
+ cooldown_key = (alert_id, detection_key)
594
+ current_time = time.time()
595
+
596
+ with self._cooldown_lock:
597
+ last_trigger_time = self._cooldown_cache.get(cooldown_key, 0)
598
+ time_since_last = current_time - last_trigger_time
599
+
600
+ if time_since_last < self._cooldown_seconds:
601
+ remaining = self._cooldown_seconds - time_since_last
602
+ self.logger.debug(
603
+ f"[ALERT_DEBUG] ⏱️ COOLDOWN ACTIVE: alert={alert_id}, key={detection_key}, "
604
+ f"remaining={remaining:.1f}s"
605
+ )
606
+ return False
607
+
608
+ return True
609
+
610
+ def _update_cooldown(self, alert_id: str, detection_key: str):
611
+ """
612
+ Update the cooldown timestamp for alert+detection combination.
613
+
614
+ Args:
615
+ alert_id: instant_alert_id
616
+ detection_key: plateNumber, objectClass, or detection type
617
+ """
618
+ cooldown_key = (alert_id, detection_key)
619
+ current_time = time.time()
620
+
621
+ with self._cooldown_lock:
622
+ self._cooldown_cache[cooldown_key] = current_time
623
+ self.logger.debug(
624
+ f"[ALERT_DEBUG] ⏱️ COOLDOWN SET: alert={alert_id}, key={detection_key}, "
625
+ f"duration={self._cooldown_seconds}s"
626
+ )
627
+
628
+ # Clean up old entries (older than 2x cooldown period)
629
+ cleanup_threshold = current_time - (self._cooldown_seconds * 2)
630
+ keys_to_remove = [
631
+ key for key, timestamp in self._cooldown_cache.items()
632
+ if timestamp < cleanup_threshold
633
+ ]
634
+ for key in keys_to_remove:
635
+ del self._cooldown_cache[key]
636
+
637
+ if keys_to_remove:
638
+ self.logger.debug(f"[ALERT_DEBUG] Cleaned up {len(keys_to_remove)} old cooldown entries")
639
+
640
+ def _try_acquire_cooldown(self, alert_id: str, detection_key: str) -> Tuple[bool, float]:
641
+ """
642
+ Atomically check and set cooldown.
643
+
644
+ Returns:
645
+ (acquired, prev_timestamp)
646
+ - acquired: True if cooldown slot acquired (allowed to publish)
647
+ - prev_timestamp: previous timestamp to support rollback if publish fails
648
+ """
649
+ cooldown_key = (alert_id, detection_key)
650
+ current_time = time.time()
651
+
652
+ with self._cooldown_lock:
653
+ prev_timestamp = self._cooldown_cache.get(cooldown_key, 0)
654
+ time_since_last = current_time - prev_timestamp
655
+
656
+ if time_since_last < self._cooldown_seconds:
657
+ remaining = self._cooldown_seconds - time_since_last
658
+ self.logger.debug(
659
+ f"[ALERT_DEBUG] ⏱️ COOLDOWN ACTIVE (acquire failed): alert={alert_id}, key={detection_key}, "
660
+ f"remaining={remaining:.1f}s"
661
+ )
662
+ return False, prev_timestamp
663
+
664
+ # Acquire slot by setting to now
665
+ self._cooldown_cache[cooldown_key] = current_time
666
+ self.logger.debug(
667
+ f"[ALERT_DEBUG] ⏱️ COOLDOWN ACQUIRED: alert={alert_id}, key={detection_key}, "
668
+ f"timestamp={current_time:.3f}"
669
+ )
670
+ return True, prev_timestamp
671
+
672
+ def _rollback_cooldown(self, alert_id: str, detection_key: str, prev_timestamp: float):
673
+ """Rollback cooldown to the previous timestamp (used when publish fails)."""
674
+ cooldown_key = (alert_id, detection_key)
675
+ with self._cooldown_lock:
676
+ if prev_timestamp == 0:
677
+ # Remove key entirely if there was no previous value
678
+ self._cooldown_cache.pop(cooldown_key, None)
679
+ else:
680
+ self._cooldown_cache[cooldown_key] = prev_timestamp
681
+ self.logger.debug(
682
+ f"[ALERT_DEBUG] ⏱️ COOLDOWN ROLLBACK: alert={alert_id}, key={detection_key}, "
683
+ f"restored_timestamp={prev_timestamp:.3f}"
684
+ )
685
+
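To make the acquire/rollback contract concrete, a small illustration on a hypothetical instance (these are internal methods; the snippet only demonstrates the semantics):

alerts = ALERT_INSTANCE()

ok, prev = alerts._try_acquire_cooldown("alert-123", "ABC123")
assert ok and prev == 0                 # first acquisition always succeeds

ok2, _ = alerts._try_acquire_cooldown("alert-123", "ABC123")
assert not ok2                          # within the 5 s window, further triggers are suppressed

# If the publish that followed the first acquisition had failed:
alerts._rollback_cooldown("alert-123", "ABC123", prev)
ok3, _ = alerts._try_acquire_cooldown("alert-123", "ABC123")
assert ok3                              # prev == 0 removes the key, re-opening the slot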
686
+ def _evaluate_alert(self, alert: AlertConfig, detection: Dict[str, Any]) -> bool:
687
+ """Evaluate if a detection matches alert criteria."""
688
+ detection_type = detection.get("detectionType", "").lower()
689
+ config = alert.detection_config
690
+
691
+ if detection_type == "license_plate":
692
+ return self._evaluate_lpr_alert(alert, detection, config)
693
+ elif detection_type == "object_count":
694
+ return self._evaluate_count_alert(alert, detection, config)
695
+ elif detection_type == "fire_smoke":
696
+ return self._evaluate_fire_smoke_alert(alert, detection, config)
697
+ elif detection_type == "intrusion":
698
+ return self._evaluate_intrusion_alert(alert, detection, config)
699
+ else:
700
+ self.logger.warning(f"Unknown detection type: {detection_type}")
701
+ return False
702
+
703
+ def _evaluate_lpr_alert(
704
+ self,
705
+ alert: AlertConfig,
706
+ detection: Dict[str, Any],
707
+ config: Dict[str, Any]
708
+ ) -> bool:
709
+ """Evaluate license plate detection against alert criteria."""
710
+ self.logger.debug(f"[ALERT_DEBUG] ========== EVALUATING LPR ALERT ==========")
711
+ self.logger.debug(f"[ALERT_DEBUG] Alert ID: {alert.instant_alert_id}")
712
+ self.logger.debug(f"[ALERT_DEBUG] Alert Name: {alert.alert_name}")
713
+ self.logger.debug(f"[ALERT_DEBUG] Detection config: {config}")
714
+ self.logger.debug(f"[ALERT_DEBUG] Detection data: {detection}")
715
+
716
+ target_plates = config.get("targetPlates", [])
717
+ min_confidence = config.get("minConfidence", 0.0)
718
+
719
+ self.logger.debug(f"[ALERT_DEBUG] Target plates: {target_plates}")
720
+ self.logger.debug(f"[ALERT_DEBUG] Min confidence: {min_confidence}")
721
+
722
+ plate_number = detection.get("plateNumber", "").upper().strip()
723
+ confidence = detection.get("confidence", 0.0)
724
+
725
+ self.logger.debug(f"[ALERT_DEBUG] Detected plate (normalized): '{plate_number}'")
726
+ self.logger.debug(f"[ALERT_DEBUG] Detection confidence: {confidence}")
727
+
728
+ # Check if plate matches target list (case-insensitive)
729
+ plate_match = any(
730
+ plate_number == str(target).upper().strip()
731
+ for target in target_plates
732
+ )
733
+
734
+ self.logger.debug(f"[ALERT_DEBUG] Plate match result: {plate_match}")
735
+ if not plate_match and target_plates:
736
+ normalized_targets = [str(t).upper().strip() for t in target_plates]
737
+ self.logger.debug(f"[ALERT_DEBUG] Normalized target plates: {normalized_targets}")
738
+ self.logger.debug(f"[ALERT_DEBUG] Plate '{plate_number}' not in {normalized_targets}")
739
+
740
+ # Check confidence threshold
741
+ min_confidence = 0.05
742
+ confidence_match = confidence >= min_confidence
743
+
744
+ self.logger.debug(f"[ALERT_DEBUG] Confidence match result: {confidence_match} ({confidence} >= {min_confidence})")
745
+
746
+ if plate_match:
747
+ self.logger.info(
748
+ f"[ALERT_DEBUG] ✓ LPR ALERT TRIGGERED: {alert.alert_name} - "
749
+ f"Plate: {plate_number}, Confidence: {confidence:.2f}"
750
+ )
751
+ return True
752
+ else:
753
+ self.logger.debug(
754
+ f"[ALERT_DEBUG] ✗ LPR alert NOT triggered: {alert.alert_name} - "
755
+ f"Plate match: {plate_match}"
756
+ )
757
+
758
+ return False
759
+
760
+ def _evaluate_count_alert(
761
+ self,
762
+ alert: AlertConfig,
763
+ detection: Dict[str, Any],
764
+ config: Dict[str, Any]
765
+ ) -> bool:
766
+ """Evaluate object count against threshold."""
767
+ threshold_count = config.get("thresholdCount", 0)
768
+ current_count = detection.get("currentCount", 0)
769
+
770
+ if current_count >= threshold_count:
771
+ self.logger.info(
772
+ f"Count alert triggered: {alert.alert_name} - "
773
+ f"Count: {current_count}, Threshold: {threshold_count}"
774
+ )
775
+ return True
776
+
777
+ return False
778
+
779
+ def _evaluate_fire_smoke_alert(
780
+ self,
781
+ alert: AlertConfig,
782
+ detection: Dict[str, Any],
783
+ config: Dict[str, Any]
784
+ ) -> bool:
785
+ """Evaluate fire/smoke detection."""
786
+ min_confidence = config.get("minConfidence", 0.0)
787
+ confidence = detection.get("confidence", 0.0)
788
+
789
+ fire_detected = detection.get("fireDetected", False)
790
+ smoke_detected = detection.get("smokeDetected", False)
791
+ min_confidence = 0.05
792
+
793
+ if (fire_detected or smoke_detected) and confidence >= min_confidence:
794
+ self.logger.info(
795
+ f"Fire/Smoke alert triggered: {alert.alert_name} - "
796
+ f"Fire: {fire_detected}, Smoke: {smoke_detected}, Confidence: {confidence:.2f}"
797
+ )
798
+ return True
799
+
800
+ return False
801
+
802
+ def _evaluate_intrusion_alert(
803
+ self,
804
+ alert: AlertConfig,
805
+ detection: Dict[str, Any],
806
+ config: Dict[str, Any]
807
+ ) -> bool:
808
+ """Evaluate intrusion detection."""
809
+ min_confidence = config.get("minConfidence", 0.0)
810
+ confidence = detection.get("confidence", 0.0)
811
+ min_confidence = 0.05
812
+
813
+ if confidence >= min_confidence:
814
+ self.logger.info(
815
+ f"Intrusion alert triggered: {alert.alert_name} - "
816
+ f"Confidence: {confidence:.2f}"
817
+ )
818
+ return True
819
+
820
+ return False
821
+
822
+ def _publish_trigger(self, alert: AlertConfig, detection: Dict[str, Any]) -> bool:
823
+ """Publish trigger message to backend. Returns True if published successfully."""
824
+ self.logger.info(f"[ALERT_DEBUG] ========== PUBLISHING TRIGGER ==========")
825
+ self.logger.info(f"[ALERT_DEBUG] Alert ID: {alert.instant_alert_id}")
826
+ self.logger.info(f"[ALERT_DEBUG] Alert Name: {alert.alert_name}")
827
+
828
+ trigger_message = self._build_trigger_message(alert, detection)
829
+
830
+ self.logger.info(f"[ALERT_DEBUG] Built trigger message: {trigger_message}")
831
+
832
+ # Publish via Redis (primary) or Kafka (fallback)
833
+ success = False
834
+
835
+ if self.redis_client:
836
+ try:
837
+ self.logger.debug(f"[ALERT_DEBUG] Publishing trigger to Redis stream: {self.trigger_topic}")
838
+ self._publish_to_redis(self.trigger_topic, trigger_message)
839
+ self.logger.info(f"[ALERT_DEBUG] ✓ Trigger published to Redis for alert: {alert.instant_alert_id}")
840
+ success = True
841
+ except Exception as e:
842
+ self.logger.error(f"[ALERT_DEBUG] ❌ Redis publish failed: {e}", exc_info=True)
843
+
844
+ if not success and self.kafka_client:
845
+ try:
846
+ self.logger.debug(f"[ALERT_DEBUG] Falling back to Kafka topic: {self.trigger_topic}")
847
+ self._publish_to_kafka(self.trigger_topic, trigger_message)
848
+ self.logger.info(f"[ALERT_DEBUG] ✓ Trigger published to Kafka for alert: {alert.instant_alert_id}")
849
+ success = True
850
+ except Exception as e:
851
+ self.logger.error(f"[ALERT_DEBUG] ❌ Kafka publish failed: {e}", exc_info=True)
852
+
853
+ if success:
854
+ self.logger.info(f"[ALERT_DEBUG] ========== TRIGGER PUBLISHED ==========")
855
+ else:
856
+ self.logger.error(f"[ALERT_DEBUG] ❌ TRIGGER NOT PUBLISHED (both transports failed) ==========")
857
+ return success
858
+
859
+ def _build_trigger_message(
860
+ self,
861
+ alert: AlertConfig,
862
+ detection: Dict[str, Any]
863
+ ) -> Dict[str, Any]:
864
+ """Build trigger message in exact format specified in documentation."""
865
+ detection_type_raw = detection.get("detectionType", "").lower()
866
+
867
+ context_data = {
868
+ "detectionType": detection_type_raw,
869
+ "confidence": detection.get("confidence", 0.0),
870
+ "coordinates": detection.get("coordinates", {}),
871
+ "cameraName": detection.get("cameraName", "")
872
+ }
873
+
874
+ # Add type-specific fields
875
+ if detection_type_raw == "license_plate":
876
+ context_data.update({
877
+ "plateNumber": detection.get("plateNumber", ""),
878
+ # "vehicleType": detection.get("vehicleType", ""),
879
+ # "vehicleColor": detection.get("vehicleColor", "")
880
+ })
881
+ elif detection_type_raw == "object_count":
882
+ context_data.update({
883
+ "objectClass": detection.get("objectClass", "person"),
884
+ "currentCount": detection.get("currentCount", 0),
885
+ "thresholdCount": alert.detection_config.get("thresholdCount", 0)
886
+ })
887
+ elif detection_type_raw == "fire_smoke":
888
+ context_data.update({
889
+ "fireDetected": detection.get("fireDetected", False),
890
+ "smokeDetected": detection.get("smokeDetected", False),
891
+ "severity": alert.severity_level
892
+ })
893
+ elif detection_type_raw == "intrusion":
894
+ context_data.update({
895
+ "objectClass": detection.get("objectClass", "person"),
896
+ "zoneName": detection.get("zoneName", ""),
897
+ "personCount": detection.get("personCount", 1)
898
+ })
899
+
900
+ trigger_message = {
901
+ "instant_alert_id": alert.instant_alert_id,
902
+ "camera_id": alert.camera_id,
903
+ "triggered_at": datetime.now(timezone.utc).isoformat(),
904
+ "context_data": context_data
905
+ }
906
+
907
+ return trigger_message
908
+
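For the license-plate example used earlier, the trigger message returned above would look roughly like this (values hypothetical, timestamp abbreviated):

{
    "instant_alert_id": "alert-123",
    "camera_id": "cam-7",
    "triggered_at": "2024-01-01T12:00:00+00:00",
    "context_data": {
        "detectionType": "license_plate",
        "confidence": 0.92,
        "coordinates": {"x": 120, "y": 80, "w": 200, "h": 60},
        "cameraName": "Gate A",
        "plateNumber": "ABC123",
    },
}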
909
+ def _publish_to_redis(self, topic: str, message: Dict[str, Any]):
910
+ """Publish message to Redis stream."""
911
+ try:
912
+ self.redis_client.add_message(
913
+ topic_or_channel=topic,
914
+ message=json.dumps(message),
915
+ key=message.get("instant_alert_id", "")
916
+ )
917
+ except Exception as e:
918
+ self.logger.error(f"Redis publish error: {e}")
919
+ raise
920
+
921
+ def _publish_to_kafka(self, topic: str, message: Dict[str, Any]):
922
+ """Publish message to Kafka topic."""
923
+ try:
924
+ self.kafka_client.add_message(
925
+ topic_or_channel=topic,
926
+ message=json.dumps(message),
927
+ key=message.get("instant_alert_id", "")
928
+ )
929
+ except Exception as e:
930
+ self.logger.error(f"Kafka publish error: {e}")
931
+ raise
932
+
933
+ def get_active_alerts_count(self) -> int:
934
+ """Get count of active alerts."""
935
+ with self._alerts_lock:
936
+ return len(self._alerts)
937
+
938
+ def get_alerts_for_camera(self, camera_id: str) -> List[Dict[str, Any]]:
939
+ """Get all active alerts for a camera (for debugging/monitoring)."""
940
+ with self._alerts_lock:
941
+ return [
942
+ {
943
+ "instant_alert_id": alert.instant_alert_id,
944
+ "alert_name": alert.alert_name,
945
+ "severity_level": alert.severity_level,
946
+ "detection_config": alert.detection_config
947
+ }
948
+ for alert in self._alerts.values()
949
+ if alert.camera_id == camera_id and alert.is_active
950
+ ]