matrice-analytics 0.1.70__py3-none-any.whl → 0.1.96__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/__init__.py +8 -2
- matrice_analytics/post_processing/config.py +4 -2
- matrice_analytics/post_processing/core/base.py +1 -1
- matrice_analytics/post_processing/core/config.py +40 -3
- matrice_analytics/post_processing/face_reg/face_recognition.py +1014 -201
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +171 -29
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
- matrice_analytics/post_processing/post_processor.py +4 -0
- matrice_analytics/post_processing/usecases/__init__.py +4 -1
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +913 -500
- matrice_analytics/post_processing/usecases/color_detection.py +19 -18
- matrice_analytics/post_processing/usecases/customer_service.py +356 -9
- matrice_analytics/post_processing/usecases/fire_detection.py +241 -23
- matrice_analytics/post_processing/usecases/footfall.py +750 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +638 -40
- matrice_analytics/post_processing/usecases/people_counting.py +66 -33
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +35 -34
- matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
- matrice_analytics/post_processing/utils/alert_instance_utils.py +1018 -0
- matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1338 -0
- matrice_analytics/post_processing/utils/incident_manager_utils.py +1754 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +26 -22
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1018 @@
|
|
|
1
|
+
"""
|
|
2
|
+
alert_instance_utils.py
|
|
3
|
+
|
|
4
|
+
PRODUCTION-READY VERSION
|
|
5
|
+
Robust JSON parsing with fallback handling.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import time
|
|
10
|
+
import threading
|
|
11
|
+
import logging
|
|
12
|
+
from typing import Dict, List, Optional, Any
|
|
13
|
+
from datetime import datetime, timezone
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class AlertConfig:
    """Represents an instant alert configuration."""
    instant_alert_id: str
    camera_id: str
    app_deployment_id: str
    application_id: str
    alert_name: str
    detection_config: Dict[str, Any]
    severity_level: str
    is_active: bool
    action: str
    timestamp: str
    last_updated: float = field(default_factory=time.time)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AlertConfig":
        """Build an AlertConfig from a plain dict, tolerating missing keys.

        Upstream sometimes serialises ``is_active`` as a string; the values
        "true"/"1"/"yes" (case-insensitive) mean active, any other string is
        inactive, and non-string values are coerced with ``bool()``.
        """
        raw_active = data.get("is_active", True)
        active = (
            raw_active.lower() in ("true", "1", "yes")
            if isinstance(raw_active, str)
            else bool(raw_active)
        )

        get = data.get
        return cls(
            instant_alert_id=get("instant_alert_id", ""),
            camera_id=get("camera_id", ""),
            app_deployment_id=get("app_deployment_id", ""),
            application_id=get("application_id", ""),
            alert_name=get("alert_name", ""),
            detection_config=get("detection_config", {}),
            severity_level=get("severity_level", "medium"),
            is_active=active,
            action=get("action", "create"),
            timestamp=get("timestamp", datetime.now(timezone.utc).isoformat()),
            last_updated=time.time(),
        )
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class ALERT_INSTANCE:
|
|
58
|
+
"""
|
|
59
|
+
Manages instant alert configurations and evaluates detection events.
|
|
60
|
+
|
|
61
|
+
This class handles:
|
|
62
|
+
- Polling alert configs from Redis/Kafka every polling_interval seconds
|
|
63
|
+
- Maintaining in-memory alert state
|
|
64
|
+
- Evaluating detection events against alert criteria
|
|
65
|
+
- Publishing trigger messages when matches occur
|
|
66
|
+
|
|
67
|
+
Transport Priority:
|
|
68
|
+
- Redis is primary for both config reading and trigger publishing
|
|
69
|
+
- Kafka is fallback when Redis operations fail
|
|
70
|
+
"""
|
|
71
|
+
|
|
72
|
+
def __init__(
    self,
    redis_client: Optional[Any] = None,
    kafka_client: Optional[Any] = None,
    config_topic: str = "alert_instant_config_request",
    trigger_topic: str = "alert_instant_triggered",
    polling_interval: int = 10,
    logger: Optional[logging.Logger] = None,
    app_deployment_id: Optional[str] = None
):
    """
    Initialize ALERT_INSTANCE.

    Args:
        redis_client: MatriceStream instance configured for Redis (primary transport)
        kafka_client: MatriceStream instance configured for Kafka (fallback transport)
        config_topic: Topic/stream name for receiving alert configs
        trigger_topic: Topic/stream name for publishing triggers
        polling_interval: Seconds between config polling
        logger: Python logger instance
        app_deployment_id: App deployment ID to filter incoming alerts (only process alerts matching this ID)
    """
    # Transport handles and topic topology.
    self.redis_client = redis_client
    self.kafka_client = kafka_client
    self.config_topic = config_topic
    self.trigger_topic = trigger_topic
    self.polling_interval = polling_interval
    self.app_deployment_id = app_deployment_id
    self.logger = logger if logger is not None else logging.getLogger(__name__)

    # In-memory alert storage: {instant_alert_id: AlertConfig}, guarded by its lock.
    self._alerts: Dict[str, AlertConfig] = {}
    self._alerts_lock = threading.Lock()

    # Cooldown tracking: {(instant_alert_id, detection_key): last_trigger_timestamp}.
    # detection_key = plateNumber for LPR, objectClass for count/intrusion,
    # "fire_smoke" for fire/smoke detections.
    self._cooldown_cache: Dict[tuple, float] = {}
    self._cooldown_lock = threading.Lock()
    self._cooldown_seconds = 5  # 5 second cooldown per alert+detection combination

    # Background poller state (created lazily in start()).
    self._polling_thread: Optional[threading.Thread] = None
    self._stop_event = threading.Event()
    self._running = False

    self.logger.info(
        f"Initialized ALERT_INSTANCE with config_topic={config_topic}, "
        f"trigger_topic={trigger_topic}, polling_interval={polling_interval}s, "
        f"cooldown={self._cooldown_seconds}s, app_deployment_id={app_deployment_id}"
    )
|
|
122
|
+
|
|
123
|
+
def start(self):
    """Start the background polling thread for config updates.

    Idempotent: calling start() while already running only logs a warning.
    """
    if self._running:
        self.logger.warning("ALERT_INSTANCE already running")
        return

    self._running = True
    self._stop_event.clear()
    # Daemon thread so it never blocks interpreter shutdown.
    self._polling_thread = threading.Thread(
        target=self._polling_loop,
        daemon=True,
        name="AlertConfigPoller"
    )
    self._polling_thread.start()
    self.logger.info("Started alert config polling thread")
|
|
138
|
+
|
|
139
|
+
def stop(self):
    """Stop the background polling thread gracefully.

    No-op when the instance was never started (or already stopped).
    """
    if not self._running:
        return

    self.logger.info("Stopping ALERT_INSTANCE...")
    self._running = False
    self._stop_event.set()

    # Bounded join so shutdown cannot hang on a stuck poll cycle.
    if self._polling_thread and self._polling_thread.is_alive():
        self._polling_thread.join(timeout=5)

    self.logger.info("ALERT_INSTANCE stopped")
|
|
152
|
+
|
|
153
|
+
def _polling_loop(self):
    """Background thread body: poll for config updates until stop() is called.

    Runs _fetch_and_update_configs every polling_interval seconds. Any
    exception from a poll is logged and swallowed so a single bad cycle
    cannot kill the thread.
    """
    self.logger.info(f"Alert config polling loop started (interval: {self.polling_interval}s)")

    while not self._stop_event.is_set():
        try:
            self._fetch_and_update_configs()
        except Exception as e:
            self.logger.error(f"Error in polling loop: {e}", exc_info=True)

        # Event.wait() sleeps for up to polling_interval seconds but returns
        # immediately when stop() sets the event, so shutdown is prompt
        # without the original hand-rolled 1-second sleep/check loop.
        self._stop_event.wait(self.polling_interval)

    self.logger.info("Alert config polling loop exited")
|
|
170
|
+
|
|
171
|
+
def _fetch_and_update_configs(self):
    """Fetch config messages from Redis (primary) or Kafka (fallback).

    Kafka is consulted only when Redis returned nothing (no client, an
    error, or an empty stream). Every fetched message is applied to the
    in-memory alert table via _handle_config_message.
    """
    configs = []

    # Try Redis first (primary)
    if self.redis_client:
        try:
            self.logger.debug(f"Fetching configs from Redis stream: {self.config_topic}")
            configs = self._read_from_redis(self.config_topic)
            if configs:
                self.logger.info(f"Fetched {len(configs)} config(s) from Redis")
        except Exception as e:
            self.logger.error(f"Redis config fetch failed: {e}", exc_info=True)

    # Fallback to Kafka if Redis failed or no client
    if not configs and self.kafka_client:
        try:
            self.logger.debug(f"Falling back to Kafka topic: {self.config_topic}")
            configs = self._read_from_kafka(self.config_topic)
            if configs:
                self.logger.info(f"Fetched {len(configs)} config(s) from Kafka")
        except Exception as e:
            self.logger.error(f"Kafka config fetch failed: {e}", exc_info=True)

    # Update in-memory alert configs.
    # Each message is handled independently so one malformed config
    # cannot block the rest of the batch.
    for config_data in configs:
        try:
            self._handle_config_message(config_data)
        except Exception as e:
            self.logger.error(f"Error handling config message: {e}", exc_info=True)
|
|
201
|
+
|
|
202
|
+
def _read_from_redis(self, topic: str, max_messages: int = 100) -> List[Dict[str, Any]]:
    """
    Read messages from Redis stream.

    Values may arrive already parsed (dict), as bytes, or as JSON text.
    Text that fails strict JSON parsing is retried after rewriting common
    Python-literal artefacts (True/False/None, single quotes) into JSON
    form; messages that still fail are skipped.

    Args:
        topic: Redis stream name
        max_messages: Maximum messages to fetch

    Returns:
        List of parsed message dictionaries
    """
    messages = []
    try:
        self.logger.debug(f"[ALERT_DEBUG] Reading from Redis topic: {topic}, max_messages: {max_messages}")
        for msg_count in range(max_messages):
            # Short timeout: drain whatever is queued without blocking the poller.
            msg = self.redis_client.get_message(timeout=0.1)
            if not msg:
                self.logger.debug(f"[ALERT_DEBUG] No more messages from Redis after {msg_count} messages")
                break

            self.logger.debug(f"[ALERT_DEBUG] Raw message #{msg_count + 1} received: {msg}")
            # Payload field name varies by producer; probe the known candidates.
            value = msg.get('value') or msg.get('data') or msg.get('message')
            if value:
                self.logger.debug(f"[ALERT_DEBUG] Extracted value type: {type(value)}, length: {len(value) if hasattr(value, '__len__') else 'N/A'}")

                # Handle case where value is already a dict (Redis stream format)
                if isinstance(value, dict):
                    self.logger.debug(f"[ALERT_DEBUG] Value is already a dict, keys: {list(value.keys())}")
                    # Check if there's a nested 'data' key (common Redis stream pattern)
                    if 'data' in value and isinstance(value['data'], dict):
                        parsed = value['data']
                        self.logger.info(f"[ALERT_DEBUG] Extracted nested 'data' dict: {parsed}")
                        messages.append(parsed)
                    else:
                        # Use the dict directly
                        self.logger.info(f"[ALERT_DEBUG] Using dict directly: {value}")
                        messages.append(value)
                    continue

                if isinstance(value, bytes):
                    value = value.decode('utf-8')
                    self.logger.debug(f"[ALERT_DEBUG] Decoded bytes to string: {value[:200]}...")
                if isinstance(value, str):
                    self.logger.debug(f"[ALERT_DEBUG] Raw JSON string: {value}")
                    # Robust JSON parsing with error handling
                    try:
                        parsed = json.loads(value)
                        self.logger.info(f"[ALERT_DEBUG] Successfully parsed JSON: {parsed}")
                        messages.append(parsed)
                    except json.JSONDecodeError as e:
                        self.logger.error(f"[ALERT_DEBUG] JSON parse error: {e}")
                        self.logger.error(f"[ALERT_DEBUG] Invalid JSON (first 500 chars): {value[:500]}")
                        # Try to fix common issues
                        try:
                            # Replace Python booleans with JSON booleans
                            self.logger.debug(f"[ALERT_DEBUG] Attempting to fix Python-style formatting...")
                            fixed = value

                            # Fix Python booleans (True/False -> true/false)
                            # NOTE(review): plain substring replaces — could corrupt
                            # string values that contain these patterns; best-effort only.
                            fixed = fixed.replace(": True", ": true").replace(": False", ": false")
                            fixed = fixed.replace(":True", ":true").replace(":False", ":false")
                            fixed = fixed.replace(" True,", " true,").replace(" False,", " false,")
                            fixed = fixed.replace(" True}", " true}").replace(" False}", " false}")
                            fixed = fixed.replace("{True", "{true").replace("{False", "{false")

                            # Fix Python None (None -> null)
                            fixed = fixed.replace(": None", ": null").replace(":None", ":null")
                            fixed = fixed.replace(" None,", " null,").replace(" None}", " null}")

                            # Fix single quotes (Python dict style) -> double quotes (JSON style)
                            # This is a simple replacement that works for most cases
                            if "'" in fixed and '"' not in fixed:
                                self.logger.debug(f"[ALERT_DEBUG] Detected single quotes, replacing with double quotes")
                                fixed = fixed.replace("'", '"')

                            self.logger.debug(f"[ALERT_DEBUG] Fixed JSON string: {fixed[:500]}...")
                            parsed = json.loads(fixed)
                            self.logger.info(f"[ALERT_DEBUG] Successfully fixed and parsed JSON: {parsed}")
                            messages.append(parsed)
                        except Exception as fix_error:
                            self.logger.error(f"[ALERT_DEBUG] Could not fix JSON: {fix_error}, skipping message")
                            continue
            else:
                self.logger.warning(f"[ALERT_DEBUG] Message has no value/data/message field: {msg}")
    except Exception as e:
        # Re-raise so the caller can fall back to Kafka.
        self.logger.error(f"[ALERT_DEBUG] Error reading from Redis: {e}", exc_info=True)
        raise

    self.logger.info(f"[ALERT_DEBUG] Total messages parsed from Redis: {len(messages)}")
    return messages
|
|
292
|
+
|
|
293
|
+
def _read_from_kafka(self, topic: str, max_messages: int = 100) -> List[Dict[str, Any]]:
    """
    Read messages from Kafka topic.

    Mirrors _read_from_redis: values may be dicts, bytes, or JSON text,
    and unparseable text is retried after best-effort Python-literal to
    JSON rewriting before being skipped.

    Args:
        topic: Kafka topic name
        max_messages: Maximum messages to fetch

    Returns:
        List of parsed message dictionaries
    """
    messages = []
    try:
        self.logger.debug(f"[ALERT_DEBUG] Reading from Kafka topic: {topic}, max_messages: {max_messages}")
        for msg_count in range(max_messages):
            # Short timeout: drain the backlog without blocking the poller.
            msg = self.kafka_client.get_message(timeout=0.1)
            if not msg:
                self.logger.debug(f"[ALERT_DEBUG] No more messages from Kafka after {msg_count} messages")
                break

            self.logger.debug(f"[ALERT_DEBUG] Raw Kafka message #{msg_count + 1} received: {msg}")
            # Payload field name varies by producer; probe the known candidates.
            value = msg.get('value') or msg.get('data') or msg.get('message')
            if value:
                self.logger.debug(f"[ALERT_DEBUG] Extracted value type: {type(value)}, length: {len(value) if hasattr(value, '__len__') else 'N/A'}")

                # Handle case where value is already a dict (Kafka message format)
                if isinstance(value, dict):
                    self.logger.debug(f"[ALERT_DEBUG] Value is already a dict, keys: {list(value.keys())}")
                    # Check if there's a nested 'data' key (common Kafka message pattern)
                    if 'data' in value and isinstance(value['data'], dict):
                        parsed = value['data']
                        self.logger.info(f"[ALERT_DEBUG] Extracted nested 'data' dict: {parsed}")
                        messages.append(parsed)
                    else:
                        # Use the dict directly
                        self.logger.info(f"[ALERT_DEBUG] Using dict directly: {value}")
                        messages.append(value)
                    continue

                if isinstance(value, bytes):
                    value = value.decode('utf-8')
                    self.logger.debug(f"[ALERT_DEBUG] Decoded bytes to string: {value[:200]}...")
                if isinstance(value, str):
                    self.logger.debug(f"[ALERT_DEBUG] Raw JSON string: {value}")
                    try:
                        parsed = json.loads(value)
                        self.logger.info(f"[ALERT_DEBUG] Successfully parsed Kafka JSON: {parsed}")
                        messages.append(parsed)
                    except json.JSONDecodeError as e:
                        self.logger.error(f"[ALERT_DEBUG] Kafka JSON parse error: {e}")
                        self.logger.error(f"[ALERT_DEBUG] Invalid JSON (first 500 chars): {value[:500]}")
                        # Try to fix common issues
                        try:
                            self.logger.debug(f"[ALERT_DEBUG] Attempting to fix Python-style formatting...")
                            fixed = value

                            # Fix Python booleans (True/False -> true/false)
                            # NOTE(review): plain substring replaces — best-effort only.
                            fixed = fixed.replace(": True", ": true").replace(": False", ": false")
                            fixed = fixed.replace(":True", ":true").replace(":False", ":false")
                            fixed = fixed.replace(" True,", " true,").replace(" False,", " false,")
                            fixed = fixed.replace(" True}", " true}").replace(" False}", " false}")
                            fixed = fixed.replace("{True", "{true").replace("{False", "{false")

                            # Fix Python None (None -> null)
                            fixed = fixed.replace(": None", ": null").replace(":None", ":null")
                            fixed = fixed.replace(" None,", " null,").replace(" None}", " null}")

                            # Fix single quotes (Python dict style) -> double quotes (JSON style)
                            # This is a simple replacement that works for most cases
                            if "'" in fixed and '"' not in fixed:
                                self.logger.debug(f"[ALERT_DEBUG] Detected single quotes, replacing with double quotes")
                                fixed = fixed.replace("'", '"')

                            self.logger.debug(f"[ALERT_DEBUG] Fixed JSON string: {fixed[:500]}...")
                            parsed = json.loads(fixed)
                            self.logger.info(f"[ALERT_DEBUG] Successfully fixed and parsed Kafka JSON: {parsed}")
                            messages.append(parsed)
                        except Exception as fix_error:
                            self.logger.error(f"[ALERT_DEBUG] Could not fix Kafka JSON: {fix_error}, skipping message")
                            continue
            else:
                self.logger.warning(f"[ALERT_DEBUG] Kafka message has no value/data/message field: {msg}")
    except Exception as e:
        # Re-raise so the caller can log the Kafka fetch failure.
        self.logger.error(f"[ALERT_DEBUG] Error reading from Kafka: {e}", exc_info=True)
        raise

    self.logger.info(f"[ALERT_DEBUG] Total messages parsed from Kafka: {len(messages)}")
    return messages
|
|
381
|
+
|
|
382
|
+
def _handle_config_message(self, config_data: Dict[str, Any]):
    """
    Handle a single config message (create/update/delete).

    Filters on app_deployment_id (when set), builds an AlertConfig, then
    applies the requested action to the in-memory alert table under the
    alerts lock. An inactive config removes the alert regardless of action.

    Args:
        config_data: Alert configuration dictionary
    """
    try:
        self.logger.info(f"[ALERT_DEBUG] ========== HANDLING CONFIG MESSAGE ==========")
        self.logger.info(f"[ALERT_DEBUG] Raw config_data type: {type(config_data)}")
        self.logger.info(f"[ALERT_DEBUG] Raw config_data keys: {list(config_data.keys()) if isinstance(config_data, dict) else 'N/A'}")
        self.logger.info(f"[ALERT_DEBUG] Raw config_data: {config_data}")

        # Skip if this is a wrapper with 'raw' key (from failed JSON parse)
        if 'raw' in config_data and len(config_data) == 1:
            self.logger.warning("[ALERT_DEBUG] Skipping malformed config with 'raw' key only")
            return

        # Log detection_service field (informational only, no filtering)
        detection_service = config_data.get('detection_service', '')
        self.logger.info(f"[ALERT_DEBUG] detection_service: '{detection_service}'")

        # Filter by app_deployment_id - only process alerts that match our app_deployment_id
        incoming_app_deployment_id = config_data.get('app_deployment_id', '')
        if self.app_deployment_id:
            if incoming_app_deployment_id != self.app_deployment_id:
                self.logger.info(
                    f"[ALERT_DEBUG] Skipping alert - app_deployment_id mismatch: "
                    f"incoming='{incoming_app_deployment_id}', ours='{self.app_deployment_id}'"
                )
                return
            else:
                self.logger.info(
                    f"[ALERT_DEBUG] ✓ app_deployment_id match: '{incoming_app_deployment_id}'"
                )
        else:
            # No filter configured: every alert is accepted.
            self.logger.warning(
                f"[ALERT_DEBUG] No app_deployment_id filter set, processing all alerts. "
                f"Incoming app_deployment_id: '{incoming_app_deployment_id}'"
            )

        # Log individual fields before creating AlertConfig
        self.logger.debug(f"[ALERT_DEBUG] Extracted fields from config_data:")
        self.logger.debug(f"[ALERT_DEBUG] - instant_alert_id: '{config_data.get('instant_alert_id', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - camera_id: '{config_data.get('camera_id', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - app_deployment_id: '{config_data.get('app_deployment_id', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - application_id: '{config_data.get('application_id', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - alert_name: '{config_data.get('alert_name', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - detection_config: {config_data.get('detection_config', 'MISSING')}")
        self.logger.debug(f"[ALERT_DEBUG] - severity_level: '{config_data.get('severity_level', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - is_active: {config_data.get('is_active', 'MISSING')}")
        self.logger.debug(f"[ALERT_DEBUG] - action: '{config_data.get('action', 'MISSING')}'")
        self.logger.debug(f"[ALERT_DEBUG] - timestamp: '{config_data.get('timestamp', 'MISSING')}'")

        alert_config = AlertConfig.from_dict(config_data)

        self.logger.info(f"[ALERT_DEBUG] AlertConfig created successfully")
        self.logger.info(f"[ALERT_DEBUG] AlertConfig fields:")
        self.logger.info(f"[ALERT_DEBUG] - instant_alert_id: '{alert_config.instant_alert_id}'")
        self.logger.info(f"[ALERT_DEBUG] - camera_id: '{alert_config.camera_id}'")
        self.logger.info(f"[ALERT_DEBUG] - app_deployment_id: '{alert_config.app_deployment_id}'")
        self.logger.info(f"[ALERT_DEBUG] - application_id: '{alert_config.application_id}'")
        self.logger.info(f"[ALERT_DEBUG] - alert_name: '{alert_config.alert_name}'")
        self.logger.info(f"[ALERT_DEBUG] - detection_config: {alert_config.detection_config}")
        self.logger.info(f"[ALERT_DEBUG] - severity_level: '{alert_config.severity_level}'")
        self.logger.info(f"[ALERT_DEBUG] - is_active: {alert_config.is_active}")
        self.logger.info(f"[ALERT_DEBUG] - action: '{alert_config.action}'")
        self.logger.info(f"[ALERT_DEBUG] - timestamp: '{alert_config.timestamp}'")

        action = alert_config.action.lower()
        alert_id = alert_config.instant_alert_id

        self.logger.info(f"[ALERT_DEBUG] Action (lowercase): '{action}'")
        self.logger.info(f"[ALERT_DEBUG] Alert ID: '{alert_id}'")

        # Validate required fields
        if not alert_id:
            self.logger.error(f"[ALERT_DEBUG] ❌ VALIDATION FAILED: Missing 'instant_alert_id'")
            self.logger.error(f"[ALERT_DEBUG] Full config data: {config_data}")
            return
        # camera_id is optional: detections fall back to all-alerts matching.
        if not alert_config.camera_id:
            self.logger.warning(f"[ALERT_DEBUG] camera_id missing for alert '{alert_id}', defaulting to empty and proceeding")

        self.logger.info(f"[ALERT_DEBUG] ✓ Validation passed")

        with self._alerts_lock:
            if action == "create":
                # "create" on an existing id is treated as an update.
                if alert_id in self._alerts:
                    self.logger.info(f"[ALERT_DEBUG] Alert {alert_id} already exists, treating as update")
                self._alerts[alert_id] = alert_config
                self.logger.info(f"[ALERT_DEBUG] ✓ Created/Updated alert: {alert_id} ({alert_config.alert_name})")
                self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")

            elif action == "update":
                self._alerts[alert_id] = alert_config
                self.logger.info(f"[ALERT_DEBUG] ✓ Updated alert: {alert_id} ({alert_config.alert_name})")
                self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")

            elif action == "delete":
                if alert_id in self._alerts:
                    del self._alerts[alert_id]
                    self.logger.info(f"[ALERT_DEBUG] ✓ Deleted alert: {alert_id}")
                    self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")
                else:
                    self.logger.warning(f"[ALERT_DEBUG] Delete requested for non-existent alert: {alert_id}")

            # Also deactivate if is_active is False
            # (applies after any action above, so an inactive "create"/"update"
            # ends up removed from the table).
            if not alert_config.is_active and alert_id in self._alerts:
                del self._alerts[alert_id]
                self.logger.info(f"[ALERT_DEBUG] ✓ Deactivated alert: {alert_id}")
                self.logger.info(f"[ALERT_DEBUG] Total active alerts now: {len(self._alerts)}")

        self.logger.info(f"[ALERT_DEBUG] ========== CONFIG MESSAGE HANDLED ==========")

    except Exception as e:
        self.logger.error(f"[ALERT_DEBUG] ❌ EXCEPTION in _handle_config_message: {e}", exc_info=True)
        self.logger.error(f"[ALERT_DEBUG] Failed config_data: {config_data}")
|
|
499
|
+
|
|
500
|
+
def process_detection_event(self, detection_payload: Dict[str, Any]):
    """
    Process a detection event and evaluate against active alerts.

    Matching order: camera-specific alerts first; when none exist (or
    camera_id is missing) all active alerts are evaluated. A matching
    alert publishes a trigger only if its per-(alert, detection_key)
    cooldown slot can be acquired; a failed publish rolls the slot back.

    Args:
        detection_payload: Detection event data
    """
    try:
        self.logger.info(f"[ALERT_DEBUG] ========== PROCESSING DETECTION EVENT ==========")
        self.logger.info(f"[ALERT_DEBUG] Detection payload: {detection_payload}")

        camera_id = detection_payload.get("camera_id")
        self.logger.debug(f"[ALERT_DEBUG] Camera ID: '{camera_id}'")

        if not camera_id:
            self.logger.warning("[ALERT_DEBUG] Detection event missing camera_id; defaulting to empty and evaluating against all active alerts")
            camera_id = ''

        # Get all active alerts for this camera
        matching_alerts = self._get_alerts_for_camera(camera_id)

        # Fallback: if no alerts found for this camera (or camera_id missing), evaluate against all active alerts
        if not matching_alerts:
            self.logger.info(f"[ALERT_DEBUG] No camera-specific alerts for '{camera_id}'. Evaluating against all active alerts.")
            with self._alerts_lock:
                matching_alerts = [a for a in self._alerts.values() if a.is_active]

        self.logger.info(f"[ALERT_DEBUG] Found {len(matching_alerts)} active alert(s) for camera '{camera_id}'")
        for i, alert in enumerate(matching_alerts):
            self.logger.debug(f"[ALERT_DEBUG] Alert #{i+1}: ID={alert.instant_alert_id}, Name={alert.alert_name}")

        # Diagnostics only: the early return was deliberately disabled (see
        # the commented-out 'return' below), so processing continues even
        # with an empty alert list.
        # NOTE(review): len(self._alerts) below is read without the lock —
        # fine for a log line, but not a synchronized snapshot.
        if not matching_alerts:
            self.logger.debug(f"[ALERT_DEBUG] No active alerts for camera: {camera_id}")
            self.logger.debug(f"[ALERT_DEBUG] Total alerts in system: {len(self._alerts)}")
            with self._alerts_lock:
                all_camera_ids = [a.camera_id for a in self._alerts.values()]
                self.logger.debug(f"[ALERT_DEBUG] All camera IDs in alert system: {all_camera_ids}")
            #return

        # Evaluate each alert
        for alert in matching_alerts:
            try:
                self.logger.info(f"[ALERT_DEBUG] Evaluating alert: {alert.instant_alert_id} ({alert.alert_name})")

                # First check if alert criteria match
                if self._evaluate_alert(alert, detection_payload):
                    # Extract detection key for cooldown check
                    detection_key = self._get_detection_key(detection_payload)

                    # Atomically acquire cooldown slot (check+set)
                    acquired, prev_time = self._try_acquire_cooldown(alert.instant_alert_id, detection_key)
                    if acquired:
                        self.logger.info(f"[ALERT_DEBUG] ✓ Alert matched and cooldown acquired, publishing trigger...")
                        publish_ok = self._publish_trigger(alert, detection_payload)
                        if not publish_ok:
                            # Rollback cooldown if publish failed
                            self._rollback_cooldown(alert.instant_alert_id, detection_key, prev_time)
                            self.logger.warning(
                                f"[ALERT_DEBUG] Publish failed, cooldown rolled back: "
                                f"alert={alert.instant_alert_id}, detection_key={detection_key}"
                            )
                    else:
                        # Cooldown active, skip publish
                        self.logger.info(
                            f"[ALERT_DEBUG] ⏱️ Alert matched but in cooldown period, skipping: "
                            f"alert={alert.instant_alert_id}, detection_key={detection_key}"
                        )
                else:
                    self.logger.debug(f"[ALERT_DEBUG] ✗ Alert did not match criteria")
            except Exception as e:
                # Per-alert isolation: one failing alert must not stop the rest.
                self.logger.error(
                    f"[ALERT_DEBUG] ❌ Error evaluating alert {alert.instant_alert_id}: {e}",
                    exc_info=True
                )

        self.logger.info(f"[ALERT_DEBUG] ========== DETECTION EVENT PROCESSED ==========")

    except Exception as e:
        self.logger.error(f"[ALERT_DEBUG] ❌ Error processing detection event: {e}", exc_info=True)
|
|
579
|
+
|
|
580
|
+
def _get_alerts_for_camera(self, camera_id: str) -> List[AlertConfig]:
    """Return every active alert registered for the given camera id."""
    with self._alerts_lock:
        matches = []
        for cfg in self._alerts.values():
            if cfg.is_active and cfg.camera_id == camera_id:
                matches.append(cfg)
        return matches
|
|
587
|
+
|
|
588
|
+
def _get_detection_key(self, detection: Dict[str, Any]) -> str:
|
|
589
|
+
"""
|
|
590
|
+
Extract the unique detection key based on detection type.
|
|
591
|
+
|
|
592
|
+
Returns:
|
|
593
|
+
- plateNumber for license_plate
|
|
594
|
+
- objectClass for object_count/intrusion
|
|
595
|
+
- "fire_smoke" for fire_smoke detection
|
|
596
|
+
"""
|
|
597
|
+
detection_type = detection.get("detectionType", "").lower()
|
|
598
|
+
|
|
599
|
+
if detection_type == "license_plate":
|
|
600
|
+
return detection.get("plateNumber", "").upper().strip()
|
|
601
|
+
elif detection_type in ["object_count", "intrusion"]:
|
|
602
|
+
return detection.get("objectClass", "unknown")
|
|
603
|
+
elif detection_type == "fire_smoke":
|
|
604
|
+
return "fire_smoke"
|
|
605
|
+
else:
|
|
606
|
+
return "unknown"
|
|
607
|
+
|
|
608
|
+
def _check_cooldown(self, alert_id: str, detection_key: str) -> bool:
|
|
609
|
+
"""
|
|
610
|
+
Check if alert+detection is in cooldown period.
|
|
611
|
+
|
|
612
|
+
Args:
|
|
613
|
+
alert_id: instant_alert_id
|
|
614
|
+
detection_key: plateNumber, objectClass, or detection type
|
|
615
|
+
|
|
616
|
+
Returns:
|
|
617
|
+
True if allowed to trigger (not in cooldown), False if in cooldown
|
|
618
|
+
"""
|
|
619
|
+
cooldown_key = (alert_id, detection_key)
|
|
620
|
+
current_time = time.time()
|
|
621
|
+
|
|
622
|
+
with self._cooldown_lock:
|
|
623
|
+
last_trigger_time = self._cooldown_cache.get(cooldown_key, 0)
|
|
624
|
+
time_since_last = current_time - last_trigger_time
|
|
625
|
+
|
|
626
|
+
if time_since_last < self._cooldown_seconds:
|
|
627
|
+
remaining = self._cooldown_seconds - time_since_last
|
|
628
|
+
self.logger.debug(
|
|
629
|
+
f"[ALERT_DEBUG] ⏱️ COOLDOWN ACTIVE: alert={alert_id}, key={detection_key}, "
|
|
630
|
+
f"remaining={remaining:.1f}s"
|
|
631
|
+
)
|
|
632
|
+
return False
|
|
633
|
+
|
|
634
|
+
return True
|
|
635
|
+
|
|
636
|
+
def _update_cooldown(self, alert_id: str, detection_key: str):
|
|
637
|
+
"""
|
|
638
|
+
Update the cooldown timestamp for alert+detection combination.
|
|
639
|
+
|
|
640
|
+
Args:
|
|
641
|
+
alert_id: instant_alert_id
|
|
642
|
+
detection_key: plateNumber, objectClass, or detection type
|
|
643
|
+
"""
|
|
644
|
+
cooldown_key = (alert_id, detection_key)
|
|
645
|
+
current_time = time.time()
|
|
646
|
+
|
|
647
|
+
with self._cooldown_lock:
|
|
648
|
+
self._cooldown_cache[cooldown_key] = current_time
|
|
649
|
+
self.logger.debug(
|
|
650
|
+
f"[ALERT_DEBUG] ⏱️ COOLDOWN SET: alert={alert_id}, key={detection_key}, "
|
|
651
|
+
f"duration={self._cooldown_seconds}s"
|
|
652
|
+
)
|
|
653
|
+
|
|
654
|
+
# Clean up old entries (older than 2x cooldown period)
|
|
655
|
+
cleanup_threshold = current_time - (self._cooldown_seconds * 2)
|
|
656
|
+
keys_to_remove = [
|
|
657
|
+
key for key, timestamp in self._cooldown_cache.items()
|
|
658
|
+
if timestamp < cleanup_threshold
|
|
659
|
+
]
|
|
660
|
+
for key in keys_to_remove:
|
|
661
|
+
del self._cooldown_cache[key]
|
|
662
|
+
|
|
663
|
+
if keys_to_remove:
|
|
664
|
+
self.logger.debug(f"[ALERT_DEBUG] Cleaned up {len(keys_to_remove)} old cooldown entries")
|
|
665
|
+
|
|
666
|
+
def _try_acquire_cooldown(self, alert_id: str, detection_key: str) -> (bool, float):
|
|
667
|
+
"""
|
|
668
|
+
Atomically check and set cooldown.
|
|
669
|
+
|
|
670
|
+
Returns:
|
|
671
|
+
(acquired, prev_timestamp)
|
|
672
|
+
- acquired: True if cooldown slot acquired (allowed to publish)
|
|
673
|
+
- prev_timestamp: previous timestamp to support rollback if publish fails
|
|
674
|
+
"""
|
|
675
|
+
cooldown_key = (alert_id, detection_key)
|
|
676
|
+
current_time = time.time()
|
|
677
|
+
|
|
678
|
+
with self._cooldown_lock:
|
|
679
|
+
prev_timestamp = self._cooldown_cache.get(cooldown_key, 0)
|
|
680
|
+
time_since_last = current_time - prev_timestamp
|
|
681
|
+
|
|
682
|
+
if time_since_last < self._cooldown_seconds:
|
|
683
|
+
remaining = self._cooldown_seconds - time_since_last
|
|
684
|
+
self.logger.debug(
|
|
685
|
+
f"[ALERT_DEBUG] ⏱️ COOLDOWN ACTIVE (acquire failed): alert={alert_id}, key={detection_key}, "
|
|
686
|
+
f"remaining={remaining:.1f}s"
|
|
687
|
+
)
|
|
688
|
+
return False, prev_timestamp
|
|
689
|
+
|
|
690
|
+
# Acquire slot by setting to now
|
|
691
|
+
self._cooldown_cache[cooldown_key] = current_time
|
|
692
|
+
self.logger.debug(
|
|
693
|
+
f"[ALERT_DEBUG] ⏱️ COOLDOWN ACQUIRED: alert={alert_id}, key={detection_key}, "
|
|
694
|
+
f"timestamp={current_time:.3f}"
|
|
695
|
+
)
|
|
696
|
+
return True, prev_timestamp
|
|
697
|
+
|
|
698
|
+
def _rollback_cooldown(self, alert_id: str, detection_key: str, prev_timestamp: float):
|
|
699
|
+
"""Rollback cooldown to the previous timestamp (used when publish fails)."""
|
|
700
|
+
cooldown_key = (alert_id, detection_key)
|
|
701
|
+
with self._cooldown_lock:
|
|
702
|
+
if prev_timestamp == 0:
|
|
703
|
+
# Remove key entirely if there was no previous value
|
|
704
|
+
self._cooldown_cache.pop(cooldown_key, None)
|
|
705
|
+
else:
|
|
706
|
+
self._cooldown_cache[cooldown_key] = prev_timestamp
|
|
707
|
+
self.logger.debug(
|
|
708
|
+
f"[ALERT_DEBUG] ⏱️ COOLDOWN ROLLBACK: alert={alert_id}, key={detection_key}, "
|
|
709
|
+
f"restored_timestamp={prev_timestamp:.3f}"
|
|
710
|
+
)
|
|
711
|
+
|
|
712
|
+
def _evaluate_alert(self, alert: AlertConfig, detection: Dict[str, Any]) -> bool:
|
|
713
|
+
"""Evaluate if a detection matches alert criteria."""
|
|
714
|
+
detection_type = detection.get("detectionType", "").lower()
|
|
715
|
+
config = alert.detection_config
|
|
716
|
+
|
|
717
|
+
if detection_type == "license_plate":
|
|
718
|
+
return self._evaluate_lpr_alert(alert, detection, config)
|
|
719
|
+
elif detection_type == "object_count":
|
|
720
|
+
return self._evaluate_count_alert(alert, detection, config)
|
|
721
|
+
elif detection_type == "fire_smoke":
|
|
722
|
+
return self._evaluate_fire_smoke_alert(alert, detection, config)
|
|
723
|
+
elif detection_type == "intrusion":
|
|
724
|
+
return self._evaluate_intrusion_alert(alert, detection, config)
|
|
725
|
+
else:
|
|
726
|
+
self.logger.warning(f"Unknown detection type: {detection_type}")
|
|
727
|
+
return False
|
|
728
|
+
|
|
729
|
+
def _evaluate_lpr_alert(
|
|
730
|
+
self,
|
|
731
|
+
alert: AlertConfig,
|
|
732
|
+
detection: Dict[str, Any],
|
|
733
|
+
config: Dict[str, Any]
|
|
734
|
+
) -> bool:
|
|
735
|
+
"""
|
|
736
|
+
Evaluate license plate detection against alert criteria.
|
|
737
|
+
|
|
738
|
+
Supports two alert conditions:
|
|
739
|
+
- "in_list" (BLACKLIST): Alert ONLY when detected plate IS in targetPlates list
|
|
740
|
+
- "not_in_list" (WHITELIST): Alert when detected plate is NOT in targetPlates list
|
|
741
|
+
"""
|
|
742
|
+
self.logger.debug(f"[ALERT_DEBUG] ========== EVALUATING LPR ALERT ==========")
|
|
743
|
+
self.logger.debug(f"[ALERT_DEBUG] Alert ID: {alert.instant_alert_id}")
|
|
744
|
+
self.logger.debug(f"[ALERT_DEBUG] Alert Name: {alert.alert_name}")
|
|
745
|
+
self.logger.debug(f"[ALERT_DEBUG] Detection config: {config}")
|
|
746
|
+
self.logger.debug(f"[ALERT_DEBUG] Detection data: {detection}")
|
|
747
|
+
|
|
748
|
+
target_plates = config.get("targetPlates", [])
|
|
749
|
+
min_confidence = config.get("minConfidence", 0.0)
|
|
750
|
+
# Get alertCondition: "in_list" (blacklist) or "not_in_list" (whitelist)
|
|
751
|
+
alert_condition = config.get("alertCondition", "in_list")
|
|
752
|
+
|
|
753
|
+
self.logger.debug(f"[ALERT_DEBUG] Target plates: {target_plates}")
|
|
754
|
+
self.logger.debug(f"[ALERT_DEBUG] Min confidence: {min_confidence}")
|
|
755
|
+
self.logger.info(f"[ALERT_DEBUG] Alert condition: '{alert_condition}' (in_list=blacklist, not_in_list=whitelist)")
|
|
756
|
+
|
|
757
|
+
plate_number = detection.get("plateNumber", "").upper().strip()
|
|
758
|
+
confidence = detection.get("confidence", 0.0)
|
|
759
|
+
|
|
760
|
+
self.logger.debug(f"[ALERT_DEBUG] Detected plate (normalized): '{plate_number}'")
|
|
761
|
+
self.logger.debug(f"[ALERT_DEBUG] Detection confidence: {confidence}")
|
|
762
|
+
|
|
763
|
+
# Skip empty plate numbers
|
|
764
|
+
if not plate_number:
|
|
765
|
+
self.logger.debug(f"[ALERT_DEBUG] ✗ Empty plate number, skipping")
|
|
766
|
+
return False
|
|
767
|
+
|
|
768
|
+
# Check if plate matches target list (case-insensitive)
|
|
769
|
+
normalized_targets = [str(t).upper().strip() for t in target_plates]
|
|
770
|
+
plate_in_list = plate_number in normalized_targets
|
|
771
|
+
|
|
772
|
+
self.logger.debug(f"[ALERT_DEBUG] Normalized target plates: {normalized_targets}")
|
|
773
|
+
self.logger.debug(f"[ALERT_DEBUG] Plate '{plate_number}' in list: {plate_in_list}")
|
|
774
|
+
|
|
775
|
+
# Check confidence threshold (minimum 0.05)
|
|
776
|
+
min_confidence = max(0.05, min_confidence)
|
|
777
|
+
confidence_match = confidence >= min_confidence
|
|
778
|
+
|
|
779
|
+
self.logger.debug(f"[ALERT_DEBUG] Confidence match result: {confidence_match} ({confidence} >= {min_confidence})")
|
|
780
|
+
|
|
781
|
+
# Determine if alert should trigger based on alertCondition
|
|
782
|
+
should_trigger = False
|
|
783
|
+
|
|
784
|
+
if alert_condition == "in_list":
|
|
785
|
+
# BLACKLIST: Alert only when plate IS in the target list
|
|
786
|
+
if plate_in_list and confidence_match:
|
|
787
|
+
should_trigger = True
|
|
788
|
+
self.logger.info(
|
|
789
|
+
f"[ALERT_DEBUG] ✓ LPR BLACKLIST ALERT TRIGGERED: {alert.alert_name} - "
|
|
790
|
+
f"Plate: {plate_number} IS in blacklist, Confidence: {confidence:.2f}"
|
|
791
|
+
)
|
|
792
|
+
else:
|
|
793
|
+
self.logger.debug(
|
|
794
|
+
f"[ALERT_DEBUG] ✗ LPR blacklist alert NOT triggered: {alert.alert_name} - "
|
|
795
|
+
f"Plate '{plate_number}' in_list={plate_in_list}, confidence_match={confidence_match}"
|
|
796
|
+
)
|
|
797
|
+
|
|
798
|
+
elif alert_condition == "not_in_list":
|
|
799
|
+
# WHITELIST: Alert when plate is NOT in the target list
|
|
800
|
+
if not plate_in_list and confidence_match:
|
|
801
|
+
should_trigger = True
|
|
802
|
+
self.logger.info(
|
|
803
|
+
f"[ALERT_DEBUG] ✓ LPR WHITELIST ALERT TRIGGERED: {alert.alert_name} - "
|
|
804
|
+
f"Plate: {plate_number} is NOT in whitelist, Confidence: {confidence:.2f}"
|
|
805
|
+
)
|
|
806
|
+
else:
|
|
807
|
+
self.logger.debug(
|
|
808
|
+
f"[ALERT_DEBUG] ✗ LPR whitelist alert NOT triggered: {alert.alert_name} - "
|
|
809
|
+
f"Plate '{plate_number}' in_list={plate_in_list} (whitelisted), confidence_match={confidence_match}"
|
|
810
|
+
)
|
|
811
|
+
|
|
812
|
+
else:
|
|
813
|
+
# Unknown condition, default to blacklist behavior for backward compatibility
|
|
814
|
+
self.logger.warning(
|
|
815
|
+
f"[ALERT_DEBUG] Unknown alertCondition '{alert_condition}', defaulting to 'in_list' (blacklist) behavior"
|
|
816
|
+
)
|
|
817
|
+
if plate_in_list and confidence_match:
|
|
818
|
+
should_trigger = True
|
|
819
|
+
self.logger.info(
|
|
820
|
+
f"[ALERT_DEBUG] ✓ LPR ALERT TRIGGERED (default): {alert.alert_name} - "
|
|
821
|
+
f"Plate: {plate_number}, Confidence: {confidence:.2f}"
|
|
822
|
+
)
|
|
823
|
+
|
|
824
|
+
return should_trigger
|
|
825
|
+
|
|
826
|
+
def _evaluate_count_alert(
|
|
827
|
+
self,
|
|
828
|
+
alert: AlertConfig,
|
|
829
|
+
detection: Dict[str, Any],
|
|
830
|
+
config: Dict[str, Any]
|
|
831
|
+
) -> bool:
|
|
832
|
+
"""Evaluate object count against threshold."""
|
|
833
|
+
threshold_count = config.get("thresholdCount", 0)
|
|
834
|
+
current_count = detection.get("currentCount", 0)
|
|
835
|
+
|
|
836
|
+
if current_count >= threshold_count:
|
|
837
|
+
self.logger.info(
|
|
838
|
+
f"Count alert triggered: {alert.alert_name} - "
|
|
839
|
+
f"Count: {current_count}, Threshold: {threshold_count}"
|
|
840
|
+
)
|
|
841
|
+
return True
|
|
842
|
+
|
|
843
|
+
return False
|
|
844
|
+
|
|
845
|
+
def _evaluate_fire_smoke_alert(
|
|
846
|
+
self,
|
|
847
|
+
alert: AlertConfig,
|
|
848
|
+
detection: Dict[str, Any],
|
|
849
|
+
config: Dict[str, Any]
|
|
850
|
+
) -> bool:
|
|
851
|
+
"""Evaluate fire/smoke detection."""
|
|
852
|
+
min_confidence = config.get("minConfidence", 0.0)
|
|
853
|
+
confidence = detection.get("confidence", 0.0)
|
|
854
|
+
|
|
855
|
+
fire_detected = detection.get("fireDetected", False)
|
|
856
|
+
smoke_detected = detection.get("smokeDetected", False)
|
|
857
|
+
min_confidence=0.05
|
|
858
|
+
|
|
859
|
+
if (fire_detected or smoke_detected) and confidence >= min_confidence:
|
|
860
|
+
self.logger.info(
|
|
861
|
+
f"Fire/Smoke alert triggered: {alert.alert_name} - "
|
|
862
|
+
f"Fire: {fire_detected}, Smoke: {smoke_detected}, Confidence: {confidence:.2f}"
|
|
863
|
+
)
|
|
864
|
+
return True
|
|
865
|
+
|
|
866
|
+
return False
|
|
867
|
+
|
|
868
|
+
def _evaluate_intrusion_alert(
|
|
869
|
+
self,
|
|
870
|
+
alert: AlertConfig,
|
|
871
|
+
detection: Dict[str, Any],
|
|
872
|
+
config: Dict[str, Any]
|
|
873
|
+
) -> bool:
|
|
874
|
+
"""Evaluate intrusion detection."""
|
|
875
|
+
min_confidence = config.get("minConfidence", 0.0)
|
|
876
|
+
confidence = detection.get("confidence", 0.0)
|
|
877
|
+
min_confidence=0.05
|
|
878
|
+
|
|
879
|
+
if confidence >= min_confidence:
|
|
880
|
+
self.logger.info(
|
|
881
|
+
f"Intrusion alert triggered: {alert.alert_name} - "
|
|
882
|
+
f"Confidence: {confidence:.2f}"
|
|
883
|
+
)
|
|
884
|
+
return True
|
|
885
|
+
|
|
886
|
+
return False
|
|
887
|
+
|
|
888
|
+
def _publish_trigger(self, alert: AlertConfig, detection: Dict[str, Any]) -> bool:
|
|
889
|
+
"""Publish trigger message to backend. Returns True if published successfully."""
|
|
890
|
+
self.logger.info(f"[ALERT_DEBUG] ========== PUBLISHING TRIGGER ==========")
|
|
891
|
+
self.logger.info(f"[ALERT_DEBUG] Alert ID: {alert.instant_alert_id}")
|
|
892
|
+
self.logger.info(f"[ALERT_DEBUG] Alert Name: {alert.alert_name}")
|
|
893
|
+
|
|
894
|
+
trigger_message = self._build_trigger_message(alert, detection)
|
|
895
|
+
|
|
896
|
+
self.logger.info(f"[ALERT_DEBUG] Built trigger message: {trigger_message}")
|
|
897
|
+
|
|
898
|
+
# Publish via Redis (primary) or Kafka (fallback)
|
|
899
|
+
success = False
|
|
900
|
+
|
|
901
|
+
if self.redis_client:
|
|
902
|
+
try:
|
|
903
|
+
self.logger.debug(f"[ALERT_DEBUG] Publishing trigger to Redis stream: {self.trigger_topic}")
|
|
904
|
+
self._publish_to_redis(self.trigger_topic, trigger_message)
|
|
905
|
+
self.logger.info(f"[ALERT_DEBUG] ✓ Trigger published to Redis for alert: {alert.instant_alert_id}")
|
|
906
|
+
success = True
|
|
907
|
+
except Exception as e:
|
|
908
|
+
self.logger.error(f"[ALERT_DEBUG] ❌ Redis publish failed: {e}", exc_info=True)
|
|
909
|
+
|
|
910
|
+
if not success and self.kafka_client:
|
|
911
|
+
try:
|
|
912
|
+
self.logger.debug(f"[ALERT_DEBUG] Falling back to Kafka topic: {self.trigger_topic}")
|
|
913
|
+
self._publish_to_kafka(self.trigger_topic, trigger_message)
|
|
914
|
+
self.logger.info(f"[ALERT_DEBUG] ✓ Trigger published to Kafka for alert: {alert.instant_alert_id}")
|
|
915
|
+
success = True
|
|
916
|
+
except Exception as e:
|
|
917
|
+
self.logger.error(f"[ALERT_DEBUG] ❌ Kafka publish failed: {e}", exc_info=True)
|
|
918
|
+
|
|
919
|
+
if success:
|
|
920
|
+
self.logger.info(f"[ALERT_DEBUG] ========== TRIGGER PUBLISHED ==========")
|
|
921
|
+
else:
|
|
922
|
+
self.logger.error(f"[ALERT_DEBUG] ❌ TRIGGER NOT PUBLISHED (both transports failed) ==========")
|
|
923
|
+
return success
|
|
924
|
+
|
|
925
|
+
def _build_trigger_message(
|
|
926
|
+
self,
|
|
927
|
+
alert: AlertConfig,
|
|
928
|
+
detection: Dict[str, Any]
|
|
929
|
+
) -> Dict[str, Any]:
|
|
930
|
+
"""Build trigger message in exact format specified in documentation."""
|
|
931
|
+
detection_type_raw = detection.get("detectionType", "").lower()
|
|
932
|
+
|
|
933
|
+
context_data = {
|
|
934
|
+
"detectionType": detection_type_raw,
|
|
935
|
+
"confidence": detection.get("confidence", 0.0),
|
|
936
|
+
"coordinates": detection.get("coordinates", {}),
|
|
937
|
+
"cameraName": detection.get("cameraName", ""),
|
|
938
|
+
"locationName": detection.get("locationName", "")
|
|
939
|
+
}
|
|
940
|
+
|
|
941
|
+
# Add type-specific fields
|
|
942
|
+
if detection_type_raw == "license_plate":
|
|
943
|
+
context_data.update({
|
|
944
|
+
"plateNumber": detection.get("plateNumber", ""),
|
|
945
|
+
# "vehicleType": detection.get("vehicleType", ""),
|
|
946
|
+
# "vehicleColor": detection.get("vehicleColor", "")
|
|
947
|
+
})
|
|
948
|
+
elif detection_type_raw == "object_count":
|
|
949
|
+
context_data.update({
|
|
950
|
+
"objectClass": detection.get("objectClass", "person"),
|
|
951
|
+
"currentCount": detection.get("currentCount", 0),
|
|
952
|
+
"thresholdCount": alert.detection_config.get("thresholdCount", 0)
|
|
953
|
+
})
|
|
954
|
+
elif detection_type_raw == "fire_smoke":
|
|
955
|
+
context_data.update({
|
|
956
|
+
"fireDetected": detection.get("fireDetected", False),
|
|
957
|
+
"smokeDetected": detection.get("smokeDetected", False),
|
|
958
|
+
"severity": alert.severity_level
|
|
959
|
+
})
|
|
960
|
+
elif detection_type_raw == "intrusion":
|
|
961
|
+
context_data.update({
|
|
962
|
+
"objectClass": detection.get("objectClass", "person"),
|
|
963
|
+
"zoneName": detection.get("zoneName", ""),
|
|
964
|
+
"personCount": detection.get("personCount", 1)
|
|
965
|
+
})
|
|
966
|
+
|
|
967
|
+
trigger_message = {
|
|
968
|
+
"instant_alert_id": alert.instant_alert_id,
|
|
969
|
+
"camera_id": alert.camera_id,
|
|
970
|
+
"frame_id": detection.get("frame_id", ""),
|
|
971
|
+
"triggered_at": datetime.now(timezone.utc).isoformat(),
|
|
972
|
+
"context_data": context_data
|
|
973
|
+
}
|
|
974
|
+
|
|
975
|
+
return trigger_message
|
|
976
|
+
|
|
977
|
+
def _publish_to_redis(self, topic: str, message: Dict[str, Any]):
|
|
978
|
+
"""Publish message to Redis stream."""
|
|
979
|
+
try:
|
|
980
|
+
self.redis_client.add_message(
|
|
981
|
+
topic_or_channel=topic,
|
|
982
|
+
message=json.dumps(message),
|
|
983
|
+
key=message.get("instant_alert_id", "")
|
|
984
|
+
)
|
|
985
|
+
except Exception as e:
|
|
986
|
+
self.logger.error(f"Redis publish error: {e}")
|
|
987
|
+
raise
|
|
988
|
+
|
|
989
|
+
def _publish_to_kafka(self, topic: str, message: Dict[str, Any]):
|
|
990
|
+
"""Publish message to Kafka topic."""
|
|
991
|
+
try:
|
|
992
|
+
self.kafka_client.add_message(
|
|
993
|
+
topic_or_channel=topic,
|
|
994
|
+
message=json.dumps(message),
|
|
995
|
+
key=message.get("instant_alert_id", "")
|
|
996
|
+
)
|
|
997
|
+
except Exception as e:
|
|
998
|
+
self.logger.error(f"Kafka publish error: {e}")
|
|
999
|
+
raise
|
|
1000
|
+
|
|
1001
|
+
def get_active_alerts_count(self) -> int:
    """Return the number of alerts currently held in the registry."""
    with self._alerts_lock:
        total = len(self._alerts)
    return total
|
1006
|
+
def get_alerts_for_camera(self, camera_id: str) -> List[Dict[str, Any]]:
    """Get all active alerts for a camera (for debugging/monitoring)."""
    summaries = []
    with self._alerts_lock:
        for alert in self._alerts.values():
            # Skip alerts for other cameras and disabled alerts.
            if alert.camera_id != camera_id or not alert.is_active:
                continue
            summaries.append({
                "instant_alert_id": alert.instant_alert_id,
                "alert_name": alert.alert_name,
                "severity_level": alert.severity_level,
                "detection_config": alert.detection_config
            })
    return summaries