matrice-analytics 0.1.60__py3-none-any.whl → 0.1.89__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. matrice_analytics/post_processing/config.py +2 -2
  2. matrice_analytics/post_processing/core/base.py +1 -1
  3. matrice_analytics/post_processing/face_reg/embedding_manager.py +8 -8
  4. matrice_analytics/post_processing/face_reg/face_recognition.py +886 -201
  5. matrice_analytics/post_processing/face_reg/face_recognition_client.py +68 -2
  6. matrice_analytics/post_processing/usecases/advanced_customer_service.py +908 -498
  7. matrice_analytics/post_processing/usecases/color_detection.py +18 -18
  8. matrice_analytics/post_processing/usecases/customer_service.py +356 -9
  9. matrice_analytics/post_processing/usecases/fire_detection.py +149 -11
  10. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +548 -40
  11. matrice_analytics/post_processing/usecases/people_counting.py +11 -11
  12. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +34 -34
  13. matrice_analytics/post_processing/usecases/weapon_detection.py +98 -22
  14. matrice_analytics/post_processing/utils/alert_instance_utils.py +950 -0
  15. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1245 -0
  16. matrice_analytics/post_processing/utils/incident_manager_utils.py +1657 -0
  17. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/METADATA +1 -1
  18. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/RECORD +21 -18
  19. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/WHEEL +0 -0
  20. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/licenses/LICENSE.txt +0 -0
  21. {matrice_analytics-0.1.60.dist-info → matrice_analytics-0.1.89.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1657 @@
+ """
+ incident_manager_utils.py
+
+ Manages incident publishing to Redis/Kafka when severity levels change.
+ Implements consecutive-frame validation before publishing:
+ - 5 consecutive frames for medium/significant/critical
+ - 10 consecutive frames for low (stricter)
+ - 101 consecutive empty frames to send 'info' (incident ended)
+
+ Polls 'incident_modification_config' topic for dynamic threshold settings.
+ Publishes to 'incident_res' topic.
+
+ PRODUCTION-READY VERSION
+ """
+
+ import json
+ import time
+ import threading
+ import logging
+ import os
+ import urllib.request
+ import base64
+ import re
+ from typing import Dict, List, Optional, Any, Tuple
+ from datetime import datetime, timezone
+ from dataclasses import dataclass, field
+ from pathlib import Path
+
+
+ # Severity level ordering for comparison (none = no incident)
+ SEVERITY_LEVELS = ["none", "info", "low", "medium", "significant", "critical"]
+
+ # Default thresholds if none provided (same as fire_detection.py defaults)
+ DEFAULT_THRESHOLDS = [
+     {"level": "low", "percentage": 0.0001},
+     {"level": "medium", "percentage": 3},
+     {"level": "significant", "percentage": 13},
+     {"level": "critical", "percentage": 30}
+ ]
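These bands are lower bounds: a quant value falls into the highest band whose percentage it meets, and anything below the lowest band is "none" (this is what `_calculate_severity_from_quant` below implements). A minimal standalone sketch of that rule, with hypothetical quant values:

    from matrice_analytics.post_processing.utils.incident_manager_utils import DEFAULT_THRESHOLDS

    # Classify a quant value against ascending (level, percentage) bands.
    def classify(quant, bands):
        severity = "none"
        for band in sorted(bands, key=lambda b: float(b["percentage"])):
            if quant >= float(band["percentage"]):
                severity = band["level"]
            else:
                break
        return severity

    assert classify(0.5, DEFAULT_THRESHOLDS) == "low"        # >= 0.0001, < 3
    assert classify(7, DEFAULT_THRESHOLDS) == "medium"       # >= 3, < 13
    assert classify(45, DEFAULT_THRESHOLDS) == "critical"    # >= 30
    assert classify(0.00001, DEFAULT_THRESHOLDS) == "none"   # below the "low" floor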
+
+
+ @dataclass
+ class IncidentState:
+     """Tracks the current incident state for a camera/usecase."""
+     current_level: str = "none"  # Current confirmed severity level
+     pending_level: str = "none"  # Level being validated (needs consecutive frames)
+     consecutive_count: int = 0  # Consecutive frames with pending_level
+     last_published_level: str = "none"  # Last level that was published (for spam prevention)
+     incident_cycle_id: int = 1  # Starts at 1, incremented when cycle resets (after info sent)
+     empty_frames_count: int = 0  # Consecutive empty incident frames (for "info" detection)
+     current_incident_id: str = ""  # Current incident_id for this cycle (managed per camera)
+     incident_active: bool = False  # Whether an incident is currently active in this cycle
+
+
+ @dataclass
+ class ThresholdConfig:
+     """Stores threshold configuration for a camera."""
+     camera_id: str
+     application_id: str = ""
+     app_deployment_id: str = ""
+     incident_type: str = ""
+     thresholds: List[Dict[str, Any]] = field(default_factory=lambda: DEFAULT_THRESHOLDS.copy())
+     last_updated: float = field(default_factory=time.time)
+     camera_name: str = ""  # Store camera_name from config
+
+
+ class INCIDENT_MANAGER:
+     """
+     Manages incident severity level tracking and publishing.
+
+     Key behaviors:
+     - Polls 'incident_modification_config' topic for dynamic threshold settings
+     - Calculates severity_level from incident_quant using thresholds
+     - Publishes incidents ONLY when severity level changes
+     - Requires different consecutive frames based on level:
+       - 5 frames for medium/significant/critical
+       - 10 frames for low (stricter to avoid false positives)
+       - 101 empty frames to send "info" (incident ended)
+     - Supports both Redis and Kafka transports
+     - Thread-safe operations
+
+     Usage:
+         manager = INCIDENT_MANAGER(redis_client=..., kafka_client=...)
+         manager.start()  # Start config polling
+         manager.process_incident(camera_id, incident_data, stream_info)
+         manager.stop()  # Stop polling on shutdown
+     """
+
+     # Frame thresholds for different severity levels
+     CONSECUTIVE_FRAMES_DEFAULT = 5  # For medium, significant, critical
+     CONSECUTIVE_FRAMES_LOW = 10  # For low level (stricter)
+     CONSECUTIVE_FRAMES_EMPTY = 101  # For sending "info" after no detections
+
+     CONFIG_POLLING_INTERVAL = 10  # Poll every 10 seconds
+     CONFIG_TOPIC = "incident_modification_config"
+     INCIDENT_TOPIC = "incident_res"
+
+     def __init__(
+         self,
+         redis_client: Optional[Any] = None,
+         kafka_client: Optional[Any] = None,
+         incident_topic: str = "incident_res",
+         config_topic: str = "incident_modification_config",
+         logger: Optional[logging.Logger] = None
+     ):
+         """
+         Initialize INCIDENT_MANAGER.
+
+         Args:
+             redis_client: MatriceStream instance configured for Redis
+             kafka_client: MatriceStream instance configured for Kafka
+             incident_topic: Topic/stream name for publishing incidents
+             config_topic: Topic/stream name for receiving threshold configs
+             logger: Python logger instance
+         """
+         self.redis_client = redis_client
+         self.kafka_client = kafka_client
+         self.incident_topic = incident_topic
+         self.config_topic = config_topic
+         self.logger = logger or logging.getLogger(__name__)
+
+         # Per-camera incident state tracking: {camera_id: IncidentState}
+         self._incident_states: Dict[str, IncidentState] = {}
+         self._states_lock = threading.Lock()
+
+         # Per-camera threshold configuration: {camera_id: ThresholdConfig}
+         self._threshold_configs: Dict[str, ThresholdConfig] = {}
+         self._config_lock = threading.Lock()
+
+         # Config polling thread control
+         self._polling_thread: Optional[threading.Thread] = None
+         self._stop_event = threading.Event()
+         self._running = False
+
+         # Store factory reference for fetching camera info
+         self._factory_ref: Optional['IncidentManagerFactory'] = None
+
+         self.logger.info(
+             f"[INCIDENT_MANAGER] Initialized with incident_topic={incident_topic}, "
+             f"config_topic={config_topic}, "
+             f"low_frames={self.CONSECUTIVE_FRAMES_LOW}, "
+             f"default_frames={self.CONSECUTIVE_FRAMES_DEFAULT}, "
+             f"empty_frames_for_info={self.CONSECUTIVE_FRAMES_EMPTY}, "
+             f"polling_interval={self.CONFIG_POLLING_INTERVAL}s"
+         )
+
+     def set_factory_ref(self, factory: 'IncidentManagerFactory'):
+         """Set reference to factory for accessing deployment info."""
+         self._factory_ref = factory
+
+     def start(self):
+         """Start the background config polling thread."""
+         if self._running:
+             self.logger.warning("[INCIDENT_MANAGER] Already running")
+             return
+
+         self._running = True
+         self._stop_event.clear()
+         self._polling_thread = threading.Thread(
+             target=self._config_polling_loop,
+             daemon=True,
+             name="IncidentConfigPoller"
+         )
+         self._polling_thread.start()
+         self.logger.info("[INCIDENT_MANAGER] ✓ Started config polling thread")
+
+     def stop(self):
+         """Stop the background polling thread gracefully."""
+         if not self._running:
+             return
+
+         self.logger.info("[INCIDENT_MANAGER] Stopping...")
+         self._running = False
+         self._stop_event.set()
+
+         if self._polling_thread and self._polling_thread.is_alive():
+             self._polling_thread.join(timeout=5)
+
+         self.logger.info("[INCIDENT_MANAGER] ✓ Stopped")
+
+     def _config_polling_loop(self):
+         """Background thread that polls for config updates every CONFIG_POLLING_INTERVAL seconds."""
+         self.logger.info(f"[INCIDENT_MANAGER] Config polling loop started (interval: {self.CONFIG_POLLING_INTERVAL}s)")
+
+         while not self._stop_event.is_set():
+             try:
+                 self._fetch_and_update_configs()
+             except Exception as e:
+                 self.logger.error(f"[INCIDENT_MANAGER] Error in config polling loop: {e}", exc_info=True)
+
+             # Sleep in small increments to allow quick shutdown
+             for _ in range(self.CONFIG_POLLING_INTERVAL):
+                 if self._stop_event.is_set():
+                     break
+                 time.sleep(1)
+
+         self.logger.info("[INCIDENT_MANAGER] Config polling loop exited")
+
+     def _fetch_and_update_configs(self):
+         """Fetch config messages from Redis (primary) or Kafka (fallback)."""
+         configs = []
+
+         # Try Redis first (primary)
+         if self.redis_client:
+             try:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Fetching configs from Redis: {self.config_topic}")
+                 configs = self._read_configs_from_redis(max_messages=100)
+                 if configs:
+                     self.logger.info(f"[INCIDENT_MANAGER] Fetched {len(configs)} config(s) from Redis")
+             except Exception as e:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Redis config fetch: {e}")
+
+         # Fallback to Kafka if Redis failed or no messages
+         if not configs and self.kafka_client:
+             try:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Fetching configs from Kafka: {self.config_topic}")
+                 configs = self._read_configs_from_kafka(max_messages=100)
+                 if configs:
+                     self.logger.info(f"[INCIDENT_MANAGER] Fetched {len(configs)} config(s) from Kafka")
+             except Exception as e:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Kafka config fetch: {e}")
+
+         # Update in-memory threshold configs
+         for config_data in configs:
+             try:
+                 self._handle_config_message(config_data)
+             except Exception as e:
+                 self.logger.error(f"[INCIDENT_MANAGER] Error handling config message: {e}", exc_info=True)
+
+     def _read_configs_from_redis(self, max_messages: int = 100) -> List[Dict[str, Any]]:
+         """Read config messages from Redis stream."""
+         messages = []
+         try:
+             for _ in range(max_messages):
+                 msg = self.redis_client.get_message(timeout=0.1)
+                 if not msg:
+                     break
+
+                 value = msg.get('value') or msg.get('data') or msg.get('message')
+                 if value:
+                     parsed = self._parse_message_value(value)
+                     if parsed:
+                         messages.append(parsed)
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Error reading from Redis: {e}")
+
+         return messages
+
+     def _read_configs_from_kafka(self, max_messages: int = 100) -> List[Dict[str, Any]]:
+         """Read config messages from Kafka topic."""
+         messages = []
+         try:
+             for _ in range(max_messages):
+                 msg = self.kafka_client.get_message(timeout=0.1)
+                 if not msg:
+                     break
+
+                 value = msg.get('value') or msg.get('data') or msg.get('message')
+                 if value:
+                     parsed = self._parse_message_value(value)
+                     if parsed:
+                         messages.append(parsed)
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Error reading from Kafka: {e}")
+
+         return messages
+
+     def _parse_message_value(self, value: Any) -> Optional[Dict[str, Any]]:
+         """Parse message value into a dictionary."""
+         try:
+             # Already a dict
+             if isinstance(value, dict):
+                 if 'data' in value and isinstance(value['data'], dict):
+                     return value['data']
+                 return value
+
+             # Bytes to string
+             if isinstance(value, bytes):
+                 value = value.decode('utf-8')
+
+             # Parse JSON string
+             if isinstance(value, str):
+                 try:
+                     return json.loads(value)
+                 except json.JSONDecodeError:
+                     # Try fixing Python-style formatting
+                     fixed = value
+                     fixed = fixed.replace(": True", ": true").replace(": False", ": false")
+                     fixed = fixed.replace(":True", ":true").replace(":False", ":false")
+                     fixed = fixed.replace(": None", ": null").replace(":None", ":null")
+                     if "'" in fixed and '"' not in fixed:
+                         fixed = fixed.replace("'", '"')
+                     return json.loads(fixed)
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Failed to parse message: {e}")
+
+         return None
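The repair path above exists because some producers serialize Python dict reprs rather than JSON. For illustration, a minimal standalone version of the same fix (the payload is hypothetical):

    import json

    raw = "{'camera_id': 'cam1', 'enabled': True, 'note': None}"  # Python repr, not JSON
    fixed = raw.replace(": True", ": true").replace(": False", ": false")
    fixed = fixed.replace(": None", ": null")
    if "'" in fixed and '"' not in fixed:
        fixed = fixed.replace("'", '"')
    assert json.loads(fixed) == {"camera_id": "cam1", "enabled": True, "note": None}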
+
+     def _handle_config_message(self, config_data: Dict[str, Any]):
+         """
+         Handle a threshold config message.
+
+         Expected format:
+             {
+                 "camera_id": "68f9d95cfaff6151c774e0e7",
+                 "application_id": "...",
+                 "app_deployment_id": "...",
+                 "incident_type": "fire",
+                 "camera_name": "camera_1",
+                 "thresholds": [
+                     {"level": "low", "percentage": 0.0001},
+                     {"level": "medium", "percentage": 3},
+                     {"level": "significant", "percentage": 13},
+                     {"level": "critical", "percentage": 30}
+                 ]
+             }
+         """
+         try:
+             camera_id = config_data.get("camera_id", "")
+             if not camera_id:
+                 self.logger.debug("[INCIDENT_MANAGER] Config message missing camera_id, skipping")
+                 return
+
+             # Extract fields with defaults
+             application_id = config_data.get("application_id", "")
+             app_deployment_id = config_data.get("app_deployment_id", "")
+             incident_type = config_data.get("incident_type", "")
+             camera_name = config_data.get("camera_name", "")
+             thresholds = config_data.get("thresholds", [])
+
+             # Validate thresholds - use defaults if invalid
+             if not thresholds or not isinstance(thresholds, list):
+                 thresholds = DEFAULT_THRESHOLDS.copy()
+                 self.logger.debug(f"[INCIDENT_MANAGER] Using default thresholds for camera: {camera_id}")
+             else:
+                 # Validate each threshold has required fields
+                 # Also map "high" -> "significant" (backend uses "high", we use "significant")
+                 valid_thresholds = []
+                 for t in thresholds:
+                     if isinstance(t, dict) and "level" in t and "percentage" in t:
+                         level = t.get("level", "").lower().strip()
+                         # Map "high" to "significant" when receiving from backend
+                         if level == "high":
+                             self.logger.debug(f"[INCIDENT_MANAGER] Mapping level 'high' -> 'significant' for camera {camera_id}")
+                             t = dict(t)  # Make a copy to avoid modifying original
+                             t["level"] = "significant"
+                         valid_thresholds.append(t)
+
+                 if not valid_thresholds:
+                     thresholds = DEFAULT_THRESHOLDS.copy()
+                 else:
+                     thresholds = valid_thresholds
+
+             # Create or update threshold config
+             with self._config_lock:
+                 self._threshold_configs[camera_id] = ThresholdConfig(
+                     camera_id=camera_id,
+                     application_id=application_id,
+                     app_deployment_id=app_deployment_id,
+                     incident_type=incident_type,
+                     thresholds=thresholds,
+                     last_updated=time.time(),
+                     camera_name=camera_name
+                 )
+
+             self.logger.info(
+                 f"[INCIDENT_MANAGER] ✓ Updated thresholds for camera: {camera_id}, "
+                 f"thresholds: {thresholds}"
+             )
+
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Error handling config message: {e}", exc_info=True)
+
+     def _get_thresholds_for_camera(self, camera_id: str) -> Tuple[List[Dict[str, Any]], Optional[ThresholdConfig]]:
+         """
+         Get thresholds for a specific camera, or defaults if not configured.
+
+         Returns:
+             Tuple of (thresholds list, ThresholdConfig or None)
+         """
+         with self._config_lock:
+             config = self._threshold_configs.get(camera_id)
+             if config:
+                 return config.thresholds, config
+         return DEFAULT_THRESHOLDS, None
+
+     def _calculate_severity_from_quant(
+         self,
+         incident_quant: float,
+         thresholds: List[Dict[str, Any]]
+     ) -> str:
+         """
+         Calculate severity level from incident_quant using thresholds.
+
+         Args:
+             incident_quant: The quantitative value (e.g., intensity percentage)
+             thresholds: List of threshold configs sorted by percentage
+
+         Returns:
+             Severity level string (none, low, medium, significant, critical)
+         """
+         if incident_quant is None or incident_quant < 0:
+             return "none"
+
+         # Sort thresholds by percentage (ascending)
+         sorted_thresholds = sorted(thresholds, key=lambda x: float(x.get("percentage", 0)))
+
+         # Find the highest level where percentage threshold is met
+         severity = "none"
+         for t in sorted_thresholds:
+             level = t.get("level", "").lower()
+             percentage = float(t.get("percentage", 0))
+
+             if incident_quant >= percentage:
+                 severity = level
+             else:
+                 break  # Since sorted ascending, no need to check further
+
+         # Validate severity
+         if severity not in SEVERITY_LEVELS:
+             severity = "none"
+
+         return severity
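Because thresholds are re-sorted on every call, the caller can pass bands in any order, and a custom band set shifts where the same quant lands. A short check against the method itself (a client-less manager and hypothetical values, assuming the module path this wheel installs):

    from matrice_analytics.post_processing.utils.incident_manager_utils import INCIDENT_MANAGER

    mgr = INCIDENT_MANAGER()  # no transports needed for the pure calculation
    custom = [{"level": "critical", "percentage": 50}, {"level": "low", "percentage": 5}]
    assert mgr._calculate_severity_from_quant(20, custom) == "low"       # >= 5, < 50
    assert mgr._calculate_severity_from_quant(60, custom) == "critical"  # >= 50
    assert mgr._calculate_severity_from_quant(-1, custom) == "none"      # negative quant guard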
+
+     def _get_frames_required_for_level(self, level: str) -> int:
+         """
+         Get the number of consecutive frames required to confirm a level.
+
+         Args:
+             level: Severity level string
+
+         Returns:
+             Number of consecutive frames required
+         """
+         if level == "low":
+             return self.CONSECUTIVE_FRAMES_LOW  # 10 frames for low (stricter)
+         return self.CONSECUTIVE_FRAMES_DEFAULT  # 5 frames for others
+
+     def _extract_camera_info_from_stream(
+         self,
+         stream_info: Optional[Dict[str, Any]]
+     ) -> Dict[str, str]:
+         """
+         Extract camera info from stream_info (similar to ResultsIngestor pattern).
+
+         Stream info structure example:
+             {
+                 'broker': 'localhost:9092',
+                 'topic': '692d7bde42582ffde3611908_input_topic',  # camera_id is prefix before _input_topic
+                 'stream_time': '2025-12-02-05:09:53.914224 UTC',
+                 'camera_info': {
+                     'camera_name': 'cusstomer-cam-1',
+                     'camera_group': 'staging-customer-1',
+                     'location': '6908756db129880c34f2e09a'
+                 },
+                 'frame_id': '...'
+             }
+
+         Args:
+             stream_info: Stream metadata from usecase
+
+         Returns:
+             Dict with camera_id, camera_name, app_deployment_id, application_id
+         """
+         result = {
+             "camera_id": "",
+             "camera_name": "",
+             "app_deployment_id": "",
+             "application_id": ""
+         }
+
+         if not stream_info:
+             return result
+
+         try:
+             # Try multiple paths to get camera info (like ResultsIngestor)
+             # Path 1: Direct camera_info in stream_info
+             camera_info = stream_info.get("camera_info", {}) or {}
+
+             # Path 2: From input_settings -> input_stream pattern
+             input_settings = stream_info.get("input_settings", {}) or {}
+             input_stream = input_settings.get("input_stream", {}) or {}
+             input_camera_info = input_stream.get("camera_info", {}) or {}
+
+             # Path 3: From input_streams array (like ResultsIngestor)
+             input_streams = stream_info.get("input_streams", [])
+             if input_streams and len(input_streams) > 0:
+                 input_data = input_streams[0] if isinstance(input_streams[0], dict) else {}
+                 input_stream_inner = input_data.get("input_stream", input_data)
+                 input_camera_info = input_stream_inner.get("camera_info", {}) or input_camera_info
+
+             # Merge all sources, preferring non-empty values
+             # camera_name - check all possible locations
+             result["camera_name"] = (
+                 camera_info.get("camera_name", "") or
+                 camera_info.get("cameraName", "") or
+                 input_camera_info.get("camera_name", "") or
+                 input_camera_info.get("cameraName", "") or
+                 stream_info.get("camera_name", "") or
+                 stream_info.get("cameraName", "") or
+                 input_settings.get("camera_name", "") or
+                 input_settings.get("cameraName", "") or
+                 ""
+             )
+
+             # camera_id - check direct fields first
+             result["camera_id"] = (
+                 camera_info.get("camera_id", "") or
+                 camera_info.get("cameraId", "") or
+                 input_camera_info.get("camera_id", "") or
+                 input_camera_info.get("cameraId", "") or
+                 stream_info.get("camera_id", "") or
+                 stream_info.get("cameraId", "") or
+                 input_settings.get("camera_id", "") or
+                 input_settings.get("cameraId", "") or
+                 ""
+             )
+
+             # If camera_id still not found, extract from topic
+             # Topic format: {camera_id}_input_topic (e.g., "692d7bde42582ffde3611908_input_topic")
+             if not result["camera_id"]:
+                 topic = stream_info.get("topic", "")
+                 if topic:
+                     extracted_camera_id = ""
+                     if topic.endswith("_input_topic"):
+                         extracted_camera_id = topic[: -len("_input_topic")]
+                         self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic (underscore): {extracted_camera_id}")
+                     elif topic.endswith("_input-topic"):
+                         extracted_camera_id = topic[: -len("_input-topic")]
+                         self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic (hyphen): {extracted_camera_id}")
+                     else:
+                         if "_input_topic" in topic:
+                             extracted_camera_id = topic.split("_input_topic")[0]
+                             self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic split (underscore): {extracted_camera_id}")
+                         elif "_input-topic" in topic:
+                             extracted_camera_id = topic.split("_input-topic")[0]
+                             self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic split (hyphen): {extracted_camera_id}")
+                     if extracted_camera_id:
+                         result["camera_id"] = extracted_camera_id
+
+             # app_deployment_id
+             result["app_deployment_id"] = (
+                 stream_info.get("app_deployment_id", "") or
+                 stream_info.get("appDeploymentId", "") or
+                 stream_info.get("app_deploymentId", "") or
+                 input_settings.get("app_deployment_id", "") or
+                 input_settings.get("appDeploymentId", "") or
+                 camera_info.get("app_deployment_id", "") or
+                 camera_info.get("appDeploymentId", "") or
+                 ""
+             )
+
+             # application_id
+             result["application_id"] = (
+                 stream_info.get("application_id", "") or
+                 stream_info.get("applicationId", "") or
+                 stream_info.get("app_id", "") or
+                 stream_info.get("appId", "") or
+                 input_settings.get("application_id", "") or
+                 input_settings.get("applicationId", "") or
+                 camera_info.get("application_id", "") or
+                 camera_info.get("applicationId", "") or
+                 ""
+             )
+
+             self.logger.debug(
+                 f"[INCIDENT_MANAGER] Extracted from stream_info - "
+                 f"camera_id={result['camera_id']}, camera_name={result['camera_name']}, "
+                 f"app_deployment_id={result['app_deployment_id']}, application_id={result['application_id']}"
+             )
+
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Error extracting camera info: {e}")
+
+         return result
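As a concrete illustration of the topic-suffix fallback, using the sample topic from the docstring above (the rest of the payload is hypothetical):

    from matrice_analytics.post_processing.utils.incident_manager_utils import INCIDENT_MANAGER

    mgr = INCIDENT_MANAGER()
    info = {
        "topic": "692d7bde42582ffde3611908_input_topic",
        "camera_info": {"camera_name": "cam-1"},
    }
    extracted = mgr._extract_camera_info_from_stream(info)
    assert extracted["camera_id"] == "692d7bde42582ffde3611908"  # stripped "_input_topic" suffix
    assert extracted["camera_name"] == "cam-1"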
+
+     def _map_level_from_backend(self, level: str) -> str:
+         """Map level from backend terminology to internal terminology.
+
+         Backend uses 'high', we use 'significant' internally.
+         """
+         if level and level.lower().strip() == "high":
+             return "significant"
+         return level
+
+     def _map_level_to_backend(self, level: str) -> str:
+         """Map level from internal terminology to backend terminology.
+
+         We use 'significant' internally, backend expects 'high'.
+         """
+         if level and level.lower().strip() == "significant":
+             return "high"
+         return level
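Both directions of the terminology mapping are plain string swaps; every other level passes through unchanged. For illustration:

    from matrice_analytics.post_processing.utils.incident_manager_utils import INCIDENT_MANAGER

    mgr = INCIDENT_MANAGER()
    assert mgr._map_level_from_backend("high") == "significant"   # backend -> internal
    assert mgr._map_level_to_backend("significant") == "high"     # internal -> backend
    assert mgr._map_level_to_backend("medium") == "medium"        # unchanged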
+
+     def _generate_incident_id(self, camera_id: str, cycle_id: int) -> str:
+         """Generate a unique incident_id for a camera's cycle."""
+         return f"incident_{camera_id}_{cycle_id}"
+
+     def process_incident(
+         self,
+         camera_id: str,
+         incident_data: Dict[str, Any],
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Process an incident and publish if severity level changed.
+
+         This method:
+         1. Gets incident_quant from incident_data
+         2. Calculates severity_level using dynamic thresholds for this camera
+         3. Updates incident_data with new severity_level
+         4. Tracks level changes with consecutive-frame validation:
+            - 5 frames for medium/significant/critical
+            - 10 frames for low (stricter)
+         5. Tracks empty incidents and publishes "info" after 101 consecutive empty frames
+         6. Publishes on level change
+         7. Manages incident_id per camera per cycle (increments after info is sent)
+
+         Args:
+             camera_id: Unique camera identifier
+             incident_data: Incident dictionary from usecase (must include incident_quant)
+             stream_info: Stream metadata
+
+         Returns:
+             True if incident was published, False otherwise
+         """
+         try:
+             self.logger.debug(f"[INCIDENT_MANAGER] Processing incident for camera: {camera_id}")
+
+             # Get or create state for this camera
+             with self._states_lock:
+                 if camera_id not in self._incident_states:
+                     new_state = IncidentState()
+                     # Initialize incident_id for new camera
+                     new_state.current_incident_id = self._generate_incident_id(camera_id, new_state.incident_cycle_id)
+                     self._incident_states[camera_id] = new_state
+                     self.logger.info(
+                         f"[INCIDENT_MANAGER] Created new state for camera: {camera_id}, "
+                         f"initial incident_id: {new_state.current_incident_id}"
+                     )
+
+                 state = self._incident_states[camera_id]
+
+                 # Ensure incident_id is set (for existing states that may not have it)
+                 if not state.current_incident_id:
+                     state.current_incident_id = self._generate_incident_id(camera_id, state.incident_cycle_id)
+                     self.logger.info(f"[INCIDENT_MANAGER] Generated incident_id for existing state: {state.current_incident_id}")
+
+             # Handle empty incident data - track for "info" level
+             is_empty_incident = (not incident_data or incident_data == {})
+
+             if is_empty_incident:
+                 self.logger.debug("[INCIDENT_MANAGER] Empty incident data, tracking for info level")
+                 return self._handle_empty_incident(camera_id, state, stream_info)
+
+             # Step 1: Get thresholds for this camera
+             thresholds, threshold_config = self._get_thresholds_for_camera(camera_id)
+
+             # Step 2: Get incident_quant and calculate severity level dynamically
+             incident_quant = incident_data.get("incident_quant")
+
+             if incident_quant is not None:
+                 # Calculate severity from quant using dynamic thresholds
+                 severity_level = self._calculate_severity_from_quant(incident_quant, thresholds)
+
+                 # Update incident_data with new severity level
+                 incident_data["severity_level"] = severity_level
+
+                 self.logger.debug(
+                     f"[INCIDENT_MANAGER] Calculated severity from incident_quant={incident_quant}: "
+                     f"severity_level={severity_level}"
+                 )
+             else:
+                 # Fallback to existing severity_level in incident_data
+                 severity_level = incident_data.get("severity_level", "none")
+                 if not severity_level:
+                     severity_level = "none"
+
+             # Store threshold config info in incident_data for output message
+             if threshold_config:
+                 incident_data["_config_camera_id"] = threshold_config.camera_id
+                 incident_data["_config_application_id"] = threshold_config.application_id
+                 incident_data["_config_app_deployment_id"] = threshold_config.app_deployment_id
+                 incident_data["_config_camera_name"] = threshold_config.camera_name
+
+             severity_level = severity_level.lower().strip()
+
+             self.logger.debug(f"[INCIDENT_MANAGER] Final severity_level: '{severity_level}'")
+
+             # Validate severity level
+             if severity_level not in SEVERITY_LEVELS:
+                 self.logger.warning(
+                     f"[INCIDENT_MANAGER] Unknown severity level '{severity_level}', treating as 'none'"
+                 )
+                 severity_level = "none"
+
+             # If level is "none", treat as empty incident (DO NOT reset empty_frames_count here!)
+             if severity_level == "none":
+                 return self._handle_empty_incident(camera_id, state, stream_info)
+
+             # We have a real detection (severity != none), reset empty frame counter
+             with self._states_lock:
+                 state.empty_frames_count = 0
+                 self.logger.debug(
+                     f"[INCIDENT_MANAGER] Current state - "
+                     f"current_level={state.current_level}, "
+                     f"pending_level={state.pending_level}, "
+                     f"consecutive_count={state.consecutive_count}, "
+                     f"last_published_level={state.last_published_level}, "
+                     f"incident_id={state.current_incident_id}, "
+                     f"cycle_id={state.incident_cycle_id}, "
+                     f"incident_active={state.incident_active}"
+                 )
+
+             # Check if this is a new pending level or continuation
+             if severity_level == state.pending_level:
+                 # Same level, increment counter
+                 state.consecutive_count += 1
+                 self.logger.debug(
+                     f"[INCIDENT_MANAGER] Same pending level, "
+                     f"consecutive_count now: {state.consecutive_count}"
+                 )
+             else:
+                 # Different level, reset counter
+                 state.pending_level = severity_level
+                 state.consecutive_count = 1
+                 self.logger.debug(
+                     f"[INCIDENT_MANAGER] New pending level: {severity_level}, "
+                     f"reset consecutive_count to 1"
+                 )
+
+             # Get required frames for this level
+             frames_required = self._get_frames_required_for_level(severity_level)
+
+             # Check if we've reached the threshold for confirmation
+             if state.consecutive_count >= frames_required:
+                 # Level is confirmed after required consecutive frames
+                 old_level = state.current_level
+                 new_level = state.pending_level
+
+                 self.logger.info(
+                     f"[INCIDENT_MANAGER] Level confirmed after {state.consecutive_count} frames "
+                     f"(required: {frames_required}): {old_level} -> {new_level}"
+                 )
+
+                 # Check if level actually changed
+                 if new_level != state.current_level:
+                     state.current_level = new_level
+
+                     # Check if we should publish
+                     # 1. Don't publish "none" level (no incident)
+                     # 2. Don't publish same level again (spam prevention)
+                     should_publish = (
+                         new_level != "none" and
+                         new_level != state.last_published_level
+                     )
+
+                     self.logger.info(
+                         f"[INCIDENT_MANAGER] Level changed: {old_level} -> {new_level}, "
+                         f"should_publish={should_publish} "
+                         f"(last_published={state.last_published_level})"
+                     )
+
+                     if should_publish:
+                         # Mark incident as active for this cycle
+                         state.incident_active = True
+
+                         # Use the managed incident_id for this cycle
+                         incident_data["incident_id"] = state.current_incident_id
+
+                         # Publish the incident
+                         success = self._publish_incident(
+                             camera_id, incident_data, stream_info
+                         )
+                         if success:
+                             state.last_published_level = new_level
+                             self.logger.info(
+                                 f"[INCIDENT_MANAGER] ✓ Published incident for level: {new_level}, "
+                                 f"incident_id: {state.current_incident_id}"
+                             )
+                         return success
+                     else:
+                         self.logger.debug(
+                             f"[INCIDENT_MANAGER] Skipping publish - "
+                             f"level={new_level}, already published"
+                         )
+                 else:
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] No level change, staying at: {state.current_level}"
+                     )
+
+             return False
+
+         except Exception as e:
+             self.logger.error(
+                 f"[INCIDENT_MANAGER] Error processing incident: {e}",
+                 exc_info=True
+             )
+             return False
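To see the consecutive-frame gate end to end, `process_incident` can be driven with synthetic frames and no transports configured, so nothing is actually sent (camera id and quant values are hypothetical):

    from matrice_analytics.post_processing.utils.incident_manager_utils import INCIDENT_MANAGER

    mgr = INCIDENT_MANAGER()  # no redis/kafka clients: publish attempts return False
    for _ in range(5):
        mgr.process_incident("cam-test", {"incident_quant": 20, "incident_type": "fire"})
    state = mgr.get_camera_state("cam-test")
    assert state["current_level"] == "significant"  # 20 >= 13: confirmed after 5 matching frames
    assert state["consecutive_count"] == 5
    assert state["incident_active"] is True         # a publish was attempted for this cycle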
+
+     def _handle_empty_incident(
+         self,
+         camera_id: str,
+         state: IncidentState,
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Handle empty incident (no detection).
+
+         After 101 consecutive empty frames, send "info" level if an incident was active.
+         Info uses the SAME incident_id as the current cycle, then starts a new cycle.
+
+         Args:
+             camera_id: Camera identifier
+             state: Current incident state
+             stream_info: Stream metadata
+
+         Returns:
+             True if "info" incident was published, False otherwise
+         """
+         with self._states_lock:
+             state.empty_frames_count += 1
+
+             self.logger.debug(
+                 f"[INCIDENT_MANAGER] Empty frame count for camera {camera_id}: "
+                 f"{state.empty_frames_count}/{self.CONSECUTIVE_FRAMES_EMPTY}, "
+                 f"incident_active={state.incident_active}, "
+                 f"current_incident_id={state.current_incident_id}"
+             )
+
+             # Reset pending level tracking when empty
+             if state.pending_level not in ("none", "info"):
+                 state.pending_level = "none"
+                 state.consecutive_count = 0
+
+             # Check if we should send "info" (incident ended)
+             if state.empty_frames_count >= self.CONSECUTIVE_FRAMES_EMPTY:
+                 # Only send "info" if:
+                 # 1. An incident was actually active in this cycle (we published something)
+                 # 2. Last published level was NOT "info" (don't send duplicate info)
+                 should_send_info = (
+                     state.incident_active and
+                     state.last_published_level not in ("info", "none")
+                 )
+
+                 if should_send_info:
+                     self.logger.info(
+                         f"[INCIDENT_MANAGER] {self.CONSECUTIVE_FRAMES_EMPTY} consecutive empty frames for camera {camera_id}, "
+                         f"sending 'info' level to close incident cycle "
+                         f"(last_published={state.last_published_level}, incident_id={state.current_incident_id})"
+                     )
+
+                     # Get incident_type from threshold config if available
+                     incident_type = "fire_smoke_detection"  # Default
+                     with self._config_lock:
+                         config = self._threshold_configs.get(camera_id)
+                         if config and config.incident_type:
+                             incident_type = config.incident_type
+
+                     # Create info incident data - USE THE SAME incident_id from this cycle!
+                     info_incident = {
+                         "incident_id": state.current_incident_id,  # Same incident_id for this cycle
+                         "incident_type": incident_type,
+                         "severity_level": "info",
+                         "human_text": "Incident ended"
+                     }
+
+                     # Update state BEFORE publishing
+                     state.current_level = "info"
+                     state.empty_frames_count = 0  # Reset counter
+
+                     # Publish info incident
+                     success = self._publish_incident(camera_id, info_incident, stream_info)
+                     if success:
+                         state.last_published_level = "info"
+
+                         # END THIS CYCLE - Start a new cycle for future incidents
+                         old_cycle_id = state.incident_cycle_id
+                         old_incident_id = state.current_incident_id
+
+                         state.incident_cycle_id += 1  # Increment cycle
+                         state.current_incident_id = self._generate_incident_id(camera_id, state.incident_cycle_id)
+                         state.incident_active = False  # No active incident in new cycle yet
+                         state.current_level = "none"  # Reset level for new cycle
+                         state.pending_level = "none"
+                         state.consecutive_count = 0
+                         # Note: We keep last_published_level as "info" to prevent duplicate info sends
+
+                         self.logger.info(
+                             f"[INCIDENT_MANAGER] ✓ Published 'info' for camera {camera_id}, "
+                             f"closed incident_id={old_incident_id} (cycle {old_cycle_id}), "
+                             f"started new cycle {state.incident_cycle_id} with incident_id={state.current_incident_id}"
+                         )
+                     return success
+                 else:
+                     # No active incident or already sent info
+                     if not state.incident_active:
+                         self.logger.debug(
+                             f"[INCIDENT_MANAGER] Skipping 'info' for camera {camera_id} - "
+                             f"no incident was active in this cycle"
+                         )
+                     else:
+                         self.logger.debug(
+                             f"[INCIDENT_MANAGER] Skipping 'info' for camera {camera_id} - "
+                             f"last_published is already '{state.last_published_level}'"
+                         )
+
+                     # Reset empty frame counter if we decide not to send info
+                     # to avoid repeated checks every frame after 101
+                     state.empty_frames_count = 0
+
+         return False
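The 'info' close only fires when a non-info level was actually published earlier in the cycle; otherwise the 101-frame check simply clears the counter. A self-contained sketch without transports (the earlier publish fails, `last_published_level` stays "none", so the cycle does not roll over; with a working client the final state would instead show `incident_cycle_id == 2` and a fresh incident_id):

    from matrice_analytics.post_processing.utils.incident_manager_utils import INCIDENT_MANAGER

    mgr = INCIDENT_MANAGER()
    for _ in range(5):    # confirm a level first (publish attempt fails: no clients)
        mgr.process_incident("cam-test", {"incident_quant": 20, "incident_type": "fire"})
    for _ in range(101):  # 101 consecutive empty frames
        mgr.process_incident("cam-test", {})
    state = mgr.get_camera_state("cam-test")
    assert state["incident_cycle_id"] == 1   # nothing was published, so no 'info' close
    assert state["empty_frames_count"] == 0  # counter cleared after the 101-frame check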
+
+     def _publish_incident(
+         self,
+         camera_id: str,
+         incident_data: Dict[str, Any],
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Publish incident to Redis/Kafka topic.
+
+         Args:
+             camera_id: Camera identifier
+             incident_data: Incident dictionary
+             stream_info: Stream metadata
+
+         Returns:
+             True if published successfully, False otherwise
+         """
+         self.logger.info("[INCIDENT_MANAGER] ========== PUBLISHING INCIDENT ==========")
+
+         try:
+             # Build the incident message
+             message = self._build_incident_message(camera_id, incident_data, stream_info)
+
+             self.logger.info(f"[INCIDENT_MANAGER] Built incident message: {json.dumps(message, default=str)[:500]}...")
+
+             success = False
+
+             # Try Redis first (primary)
+             if self.redis_client:
+                 try:
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] Publishing to Redis stream: {self.incident_topic}"
+                     )
+                     self._publish_to_redis(self.incident_topic, message)
+                     self.logger.info(
+                         "[INCIDENT_MANAGER] ✓ Incident published to Redis"
+                     )
+                     success = True
+                 except Exception as e:
+                     self.logger.error(
+                         f"[INCIDENT_MANAGER] ❌ Redis publish failed: {e}",
+                         exc_info=True
+                     )
+
+             # Fallback to Kafka if Redis failed or no Redis client
+             if not success and self.kafka_client:
+                 try:
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] Publishing to Kafka topic: {self.incident_topic}"
+                     )
+                     self._publish_to_kafka(self.incident_topic, message)
+                     self.logger.info(
+                         "[INCIDENT_MANAGER] ✓ Incident published to Kafka"
+                     )
+                     success = True
+                 except Exception as e:
+                     self.logger.error(
+                         f"[INCIDENT_MANAGER] ❌ Kafka publish failed: {e}",
+                         exc_info=True
+                     )
+
+             if success:
+                 self.logger.info("[INCIDENT_MANAGER] ========== INCIDENT PUBLISHED ==========")
+             else:
+                 self.logger.error(
+                     "[INCIDENT_MANAGER] ❌ INCIDENT NOT PUBLISHED (both transports failed)"
+                 )
+
+             return success
+
+         except Exception as e:
+             self.logger.error(
+                 f"[INCIDENT_MANAGER] Error publishing incident: {e}",
+                 exc_info=True
+             )
+             return False
+
+     def _build_incident_message(
+         self,
+         camera_id: str,
+         incident_data: Dict[str, Any],
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> Dict[str, Any]:
+         """
+         Build the incident message in the required format.
+
+         Output format (STRICT):
+             {
+                 "camera_id": "...",
+                 "app_deployment_id": "...",
+                 "application_id": "...",
+                 "camera_name": "...",
+                 "incidents": [{
+                     "incident_id": "...",
+                     "incident_type": "...",
+                     "severity_level": "...",
+                     "human_text": "..."
+                 }]
+             }
+
+         Keys to REMOVE: "alerts", "alert_settings", "duration", "incident_quant",
+         "start_time", "end_time", "camera_info", "level_settings"
+         """
+
+         # Extract camera info from multiple sources
+         stream_camera_info = self._extract_camera_info_from_stream(stream_info)
+
+         # Get IDs from threshold config (if available - set by config polling)
+         config_camera_id = incident_data.get("_config_camera_id", "")
+         config_application_id = incident_data.get("_config_application_id", "")
+         config_app_deployment_id = incident_data.get("_config_app_deployment_id", "")
+         config_camera_name = incident_data.get("_config_camera_name", "")
+
+         # Get IDs from factory (from action_details)
+         factory_app_deployment_id = ""
+         factory_application_id = ""
+         if self._factory_ref:
+             factory_app_deployment_id = self._factory_ref._app_deployment_id or ""
+             factory_application_id = self._factory_ref._application_id or ""
+
+         # Priority: stream_info > threshold_config > factory > camera_id param
+         final_camera_id = (
+             stream_camera_info.get("camera_id") or
+             config_camera_id or
+             camera_id or
+             ""
+         )
+
+         final_camera_name = (
+             stream_camera_info.get("camera_name") or
+             config_camera_name or
+             ""
+         )
+
+         final_app_deployment_id = (
+             stream_camera_info.get("app_deployment_id") or
+             config_app_deployment_id or
+             factory_app_deployment_id or
+             ""
+         )
+
+         final_application_id = (
+             stream_camera_info.get("application_id") or
+             config_application_id or
+             factory_application_id or
+             ""
+         )
+
+         self.logger.info(
+             f"[INCIDENT_MANAGER] Building message with - "
+             f"camera_id={final_camera_id}, camera_name={final_camera_name}, "
+             f"app_deployment_id={final_app_deployment_id}, application_id={final_application_id}"
+         )
+
+         # Build incident - ONLY include required fields
+         # Map "significant" -> "high" for backend (we use "significant" internally, backend expects "high")
+         severity_level = incident_data.get("severity_level", "")
+         if severity_level.lower().strip() == "significant":
+             severity_level = "high"
+             self.logger.debug("[INCIDENT_MANAGER] Mapped severity_level 'significant' -> 'high' for publishing")
+
+         incident = {
+             "incident_id": incident_data.get("incident_id", ""),
+             "incident_type": incident_data.get("incident_type", "fire_smoke_detection"),
+             "severity_level": severity_level,
+             "human_text": incident_data.get("human_text", "")
+         }
+
+         # Build final message in STRICT format
+         message = {
+             "camera_id": final_camera_id,
+             "app_deployment_id": final_app_deployment_id,
+             "application_id": final_application_id,
+             "camera_name": final_camera_name,
+             "incidents": [incident]
+         }
+
+         return message
+
+     def _publish_to_redis(self, topic: str, message: Dict[str, Any]):
+         """Publish message to Redis stream."""
+         try:
+             self.redis_client.add_message(
+                 topic_or_channel=topic,
+                 message=json.dumps(message),
+                 key=message.get("camera_id", "")
+             )
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Redis publish error: {e}")
+             raise
+
+     def _publish_to_kafka(self, topic: str, message: Dict[str, Any]):
+         """Publish message to Kafka topic."""
+         try:
+             self.kafka_client.add_message(
+                 topic_or_channel=topic,
+                 message=json.dumps(message),
+                 key=message.get("camera_id", "")
+             )
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Kafka publish error: {e}")
+             raise
+
+     def reset_camera_state(self, camera_id: str):
+         """Reset incident state for a specific camera."""
+         with self._states_lock:
+             if camera_id in self._incident_states:
+                 self._incident_states[camera_id] = IncidentState()
+                 self.logger.info(f"[INCIDENT_MANAGER] Reset state for camera: {camera_id}")
+
+     def get_camera_state(self, camera_id: str) -> Optional[Dict[str, Any]]:
+         """Get current incident state for a camera (for debugging)."""
+         with self._states_lock:
+             state = self._incident_states.get(camera_id)
+             if state:
+                 return {
+                     "current_level": state.current_level,
+                     "pending_level": state.pending_level,
+                     "consecutive_count": state.consecutive_count,
+                     "last_published_level": state.last_published_level,
+                     "incident_cycle_id": state.incident_cycle_id,
+                     "empty_frames_count": state.empty_frames_count,
+                     "current_incident_id": state.current_incident_id,
+                     "incident_active": state.incident_active
+                 }
+         return None
+
+     def get_all_camera_states(self) -> Dict[str, Dict[str, Any]]:
+         """Get all camera states for debugging/monitoring."""
+         with self._states_lock:
+             return {
+                 cam_id: {
+                     "current_level": state.current_level,
+                     "pending_level": state.pending_level,
+                     "consecutive_count": state.consecutive_count,
+                     "last_published_level": state.last_published_level,
+                     "incident_cycle_id": state.incident_cycle_id,
+                     "empty_frames_count": state.empty_frames_count,
+                     "current_incident_id": state.current_incident_id,
+                     "incident_active": state.incident_active
+                 }
+                 for cam_id, state in self._incident_states.items()
+             }
+
+     def get_threshold_config(self, camera_id: str) -> Optional[Dict[str, Any]]:
+         """Get threshold configuration for a camera (for debugging)."""
+         with self._config_lock:
+             config = self._threshold_configs.get(camera_id)
+             if config:
+                 return {
+                     "camera_id": config.camera_id,
+                     "application_id": config.application_id,
+                     "app_deployment_id": config.app_deployment_id,
+                     "incident_type": config.incident_type,
+                     "thresholds": config.thresholds,
+                     "last_updated": config.last_updated,
+                     "camera_name": config.camera_name
+                 }
+         return None
+
+     def set_thresholds_for_camera(
+         self,
+         camera_id: str,
+         thresholds: List[Dict[str, Any]],
+         application_id: str = "",
+         app_deployment_id: str = "",
+         incident_type: str = "",
+         camera_name: str = ""
+     ):
+         """
+         Manually set thresholds for a camera (useful for testing or direct config).
+
+         Args:
+             camera_id: Camera identifier
+             thresholds: List of threshold configs
+             application_id: Application ID
+             app_deployment_id: App deployment ID
+             incident_type: Incident type (e.g., "fire")
+             camera_name: Camera name
+         """
+         # Map "high" -> "significant" in thresholds (backend uses "high", we use "significant")
+         mapped_thresholds = []
+         if thresholds:
+             for t in thresholds:
+                 if isinstance(t, dict):
+                     level = t.get("level", "").lower().strip()
+                     if level == "high":
+                         t = dict(t)  # Copy to avoid modifying original
+                         t["level"] = "significant"
+                         self.logger.debug("[INCIDENT_MANAGER] Mapped threshold level 'high' -> 'significant'")
+                     mapped_thresholds.append(t)
+
+         with self._config_lock:
+             self._threshold_configs[camera_id] = ThresholdConfig(
+                 camera_id=camera_id,
+                 application_id=application_id,
+                 app_deployment_id=app_deployment_id,
+                 incident_type=incident_type,
+                 thresholds=mapped_thresholds if mapped_thresholds else DEFAULT_THRESHOLDS.copy(),
+                 last_updated=time.time(),
+                 camera_name=camera_name
+             )
+         self.logger.info(f"[INCIDENT_MANAGER] Manually set thresholds for camera: {camera_id}")
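For local testing without the config topic, thresholds can be injected directly; a backend-style "high" band is stored as "significant" on ingest (camera id and bands are hypothetical):

    from matrice_analytics.post_processing.utils.incident_manager_utils import INCIDENT_MANAGER

    mgr = INCIDENT_MANAGER()
    mgr.set_thresholds_for_camera(
        "cam-test",
        thresholds=[{"level": "low", "percentage": 1},
                    {"level": "high", "percentage": 10}],
        incident_type="fire",
    )
    cfg = mgr.get_threshold_config("cam-test")
    assert cfg["thresholds"][1]["level"] == "significant"  # "high" remapped on ingest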
1219
+
1220
+
1221
+ class IncidentManagerFactory:
1222
+ """
1223
+ Factory class for creating INCIDENT_MANAGER instances.
1224
+
1225
+ Handles session initialization and Redis/Kafka client creation
1226
+ following the same pattern as license_plate_monitoring.py.
1227
+ """
1228
+
1229
+ ACTION_ID_PATTERN = re.compile(r"^[0-9a-f]{8,}$", re.IGNORECASE)
1230
+
1231
+ def __init__(self, logger: Optional[logging.Logger] = None):
1232
+ self.logger = logger or logging.getLogger(__name__)
1233
+ self._initialized = False
1234
+ self._incident_manager: Optional[INCIDENT_MANAGER] = None
1235
+
1236
+ # Store these for later access
1237
+ self._session = None
1238
+ self._action_id: Optional[str] = None
1239
+ self._instance_id: Optional[str] = None
1240
+ self._deployment_id: Optional[str] = None
1241
+ self._app_deployment_id: Optional[str] = None
1242
+ self._application_id: Optional[str] = None # Store application_id from action_details
1243
+ self._external_ip: Optional[str] = None
1244
+
1245
+ def initialize(self, config: Any) -> Optional[INCIDENT_MANAGER]:
1246
+ """
1247
+ Initialize and return INCIDENT_MANAGER with Redis/Kafka clients.
1248
+
1249
+ This follows the same pattern as license_plate_monitoring.py for
1250
+ session initialization and Redis/Kafka client creation.
1251
+
1252
+ Args:
1253
+ config: Configuration object with session, server_id, etc.
1254
+
1255
+ Returns:
1256
+ INCIDENT_MANAGER instance or None if initialization failed
1257
+ """
1258
+ if self._initialized and self._incident_manager is not None:
1259
+ self.logger.debug("[INCIDENT_MANAGER_FACTORY] Already initialized, returning existing instance")
1260
+ return self._incident_manager
1261
+
1262
+ try:
1263
+ # Import required modules
1264
+ from matrice_common.stream.matrice_stream import MatriceStream, StreamType
1265
+ from matrice_common.session import Session
1266
+
1267
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ===== STARTING INITIALIZATION =====")
1268
+
1269
+ # Get or create session
1270
+ self._session = getattr(config, 'session', None)
1271
+ if not self._session:
1272
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] No session in config, creating from environment...")
1273
+ account_number = os.getenv("MATRICE_ACCOUNT_NUMBER", "")
1274
+ access_key_id = os.getenv("MATRICE_ACCESS_KEY_ID", "")
1275
+ secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
1276
+ project_id = os.getenv("MATRICE_PROJECT_ID", "")
1277
+
1278
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] Env vars - account: {'SET' if account_number else 'NOT SET'}, "
1279
+ f"access_key: {'SET' if access_key_id else 'NOT SET'}, "
1280
+ f"secret: {'SET' if secret_key else 'NOT SET'}")
1281
+
1282
+
1283
+ self._session = Session(
1284
+ account_number=account_number,
1285
+ access_key=access_key_id,
1286
+ secret_key=secret_key,
1287
+ project_id=project_id,
1288
+ )
1289
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ✓ Created session from environment")
1290
+ else:
1291
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ✓ Using session from config")
1292
+
1293
+ rpc = self._session.rpc
1294
+
1295
+ # Discover action_id
1296
+ self._action_id = self._discover_action_id()
1297
+ if not self._action_id:
1298
+ self.logger.error("[INCIDENT_MANAGER_FACTORY] ❌ Could not discover action_id")
1299
+ print("----- INCIDENT MANAGER ACTION DISCOVERY -----")
1300
+ print("action_id: NOT FOUND")
1301
+ print("---------------------------------------------")
1302
+ self._initialized = True
1303
+ return None
1304
+
1305
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] ✓ Discovered action_id: {self._action_id}")
1306
+
1307
+ # Fetch action details
1308
+ action_details = {}
1309
+ try:
1310
+ action_url = f"/v1/actions/action/{self._action_id}/details"
1311
+ action_resp = rpc.get(action_url)
1312
+ if not (action_resp and action_resp.get("success", False)):
1313
+ raise RuntimeError(
1314
+ action_resp.get("message", "Unknown error")
1315
+ if isinstance(action_resp, dict) else "Unknown error"
1316
+ )
1317
+ action_doc = action_resp.get("data", {}) if isinstance(action_resp, dict) else {}
1318
+ action_details = action_doc.get("actionDetails", {}) if isinstance(action_doc, dict) else {}
1319
+
1320
+ # IMPORTANT: jobParams contains application_id
1321
+ # Structure: response['data']['jobParams']['application_id']
1322
+ job_params = action_doc.get("jobParams", {}) if isinstance(action_doc, dict) else {}
1323
+
1324
+ # Extract server details
1325
+ server_id = (
1326
+ action_details.get("serverId")
1327
+ or action_details.get("server_id")
1328
+ or action_details.get("serverID")
1329
+ or action_details.get("redis_server_id")
1330
+ or action_details.get("kafka_server_id")
1331
+ )
1332
+ server_type = (
1333
+ action_details.get("serverType")
1334
+ or action_details.get("server_type")
1335
+ or action_details.get("type")
1336
+ )
1337
+
1338
+ # Store identifiers
1339
+ self._deployment_id = action_details.get("_idDeployment") or action_details.get("deployment_id")
1340
+
1341
+ # app_deployment_id: check actionDetails first, then jobParams
1342
+ self._app_deployment_id = (
1343
+ action_details.get("app_deployment_id") or
1344
+ action_details.get("appDeploymentId") or
1345
+ action_details.get("app_deploymentId") or
1346
+ job_params.get("app_deployment_id") or
1347
+ job_params.get("appDeploymentId") or
1348
+ job_params.get("app_deploymentId") or
1349
+ ""
1350
+ )
1351
+
1352
+ # application_id: PRIMARILY from jobParams (this is where it lives!)
1353
+ # response['data']['jobParams'].get('application_id', '')
1354
+ self._application_id = (
1355
+ job_params.get("application_id") or
1356
+ job_params.get("applicationId") or
1357
+ job_params.get("app_id") or
1358
+ job_params.get("appId") or
1359
+ action_details.get("application_id") or
1360
+ action_details.get("applicationId") or
1361
+ ""
1362
+ )
1363
+
1364
+ self._instance_id = action_details.get("instanceID") or action_details.get("instanceId")
1365
+ self._external_ip = action_details.get("externalIP") or action_details.get("externalIp")
1366
+
1367
+ print("----- INCIDENT MANAGER ACTION DETAILS -----")
1368
+ print(f"action_id: {self._action_id}")
1369
+ print(f"server_type: {server_type}")
1370
+ print(f"server_id: {server_id}")
1371
+ print(f"deployment_id: {self._deployment_id}")
1372
+ print(f"app_deployment_id: {self._app_deployment_id}")
1373
+ print(f"application_id: {self._application_id}")
1374
+ print(f"instance_id: {self._instance_id}")
1375
+ print(f"external_ip: {self._external_ip}")
1376
+ print(f"jobParams keys: {list(job_params.keys()) if job_params else []}")
1377
+ print("--------------------------------------------")
1378
+
1379
+ self.logger.info(
1380
+ f"[INCIDENT_MANAGER_FACTORY] Action details - server_type={server_type}, "
1381
+ f"instance_id={self._instance_id}, "
1382
+ f"app_deployment_id={self._app_deployment_id}, application_id={self._application_id}"
1383
+ )
1384
+
1385
+ # Log all available keys for debugging
1386
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] actionDetails keys: {list(action_details.keys())}")
1387
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] jobParams keys: {list(job_params.keys()) if job_params else []}")
1388
+
1389
+ except Exception as e:
1390
+ self.logger.error(f"[INCIDENT_MANAGER_FACTORY] ❌ Failed to fetch action details: {e}", exc_info=True)
1391
+ print("----- INCIDENT MANAGER ACTION DETAILS ERROR -----")
1392
+ print(f"action_id: {self._action_id}")
1393
+ print(f"error: {e}")
1394
+ print("-------------------------------------------------")
1395
+ self._initialized = True
1396
+ return None
1397
+
1398
+ # Determine localhost vs cloud using externalIP from action_details
1399
+ is_localhost = False
1400
+ public_ip = self._get_public_ip()
1401
+
1402
+ # Get server host from action_details (user's method - no dependency on server_id)
1403
+ server_host = (
1404
+ action_details.get("externalIP")
1405
+ or action_details.get("external_IP")
1406
+ or action_details.get("externalip")
1407
+ or action_details.get("external_ip")
1408
+ or action_details.get("externalIp")
1409
+ or action_details.get("external_Ip")
1410
+ )
1411
+ print(f"server_host: {server_host}")
1412
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] DEBUG - server_host: {server_host}")
1413
+
1414
+ localhost_indicators = ["localhost", "127.0.0.1", "0.0.0.0"]
1415
+ if server_host in localhost_indicators:
1416
+ is_localhost = True
1417
+ self.logger.info(
1418
+ f"[INCIDENT_MANAGER_FACTORY] Detected Localhost environment "
1419
+ f"(Public IP={public_ip}, Server IP={server_host})"
1420
+ )
1421
+ else:
1422
+ is_localhost = False
1423
+ self.logger.info(
1424
+ f"[INCIDENT_MANAGER_FACTORY] Detected Cloud environment "
1425
+ f"(Public IP={public_ip}, Server IP={server_host})"
1426
+ )
1427
+
1428
+         redis_client = None
+         kafka_client = None
+
+         # STRICT SWITCH: Only Redis if localhost, Only Kafka if cloud
+         if is_localhost:
+             # Initialize Redis client (ONLY) using instance_id
+             if not self._instance_id:
+                 self.logger.error("[INCIDENT_MANAGER_FACTORY] ❌ Localhost mode but instance_id missing")
+             else:
+                 try:
+                     url = f"/v1/actions/get_redis_server_by_instance_id/{self._instance_id}"
+                     self.logger.info(f"[INCIDENT_MANAGER_FACTORY] Fetching Redis server info for instance: {self._instance_id}")
+                     response = rpc.get(url)
+
+                     if isinstance(response, dict) and response.get("success", False):
+                         data = response.get("data", {})
+                         host = data.get("host")
+                         port = data.get("port")
+                         username = data.get("username")
+                         password = data.get("password", "")
+                         db_index = data.get("db", 0)
+                         conn_timeout = data.get("connection_timeout", 120)
+
+                         print("----- INCIDENT MANAGER REDIS SERVER PARAMS -----")
+                         print(f"instance_id: {self._instance_id}")
+                         print(f"host: {host}")
+                         print(f"port: {port}")
+                         print(f"username: {username}")
+                         print(f"password: {'*' * len(password) if password else ''}")
+                         print(f"db: {db_index}")
+                         print(f"connection_timeout: {conn_timeout}")
+                         print("------------------------------------------------")
+
+                         self.logger.info(
+                             f"[INCIDENT_MANAGER_FACTORY] Redis params - host={host}, port={port}, user={username}"
+                         )
+
+                         redis_client = MatriceStream(
+                             StreamType.REDIS,
+                             host=host,
+                             port=int(port),
+                             password=password,
+                             username=username,
+                             db=db_index,
+                             connection_timeout=conn_timeout
+                         )
+                         # Set up for both config polling and incident publishing
+                         redis_client.setup("incident_modification_config")
+                         self.logger.info("[INCIDENT_MANAGER_FACTORY] ✓ Redis client initialized")
+                     else:
+                         self.logger.warning(
+                             f"[INCIDENT_MANAGER_FACTORY] Failed to fetch Redis server info: "
+                             f"{response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}"
+                         )
+                 except Exception as e:
+                     self.logger.warning(f"[INCIDENT_MANAGER_FACTORY] Redis initialization failed: {e}")
+
1485
+         else:
+             # Initialize Kafka client (ONLY) using global info endpoint
+             try:
+                 url = "/v1/actions/get_kafka_info"
+                 self.logger.info("[INCIDENT_MANAGER_FACTORY] Fetching Kafka server info for Cloud mode")
+                 response = rpc.get(url)
+
+                 if isinstance(response, dict) and response.get("success", False):
+                     data = response.get("data", {})
+                     enc_ip = data.get("ip")
+                     enc_port = data.get("port")
+
+                     # Decode base64 encoded values
+                     ip_addr = None
+                     port = None
+                     try:
+                         ip_addr = base64.b64decode(str(enc_ip)).decode("utf-8")
+                     except Exception:
+                         ip_addr = enc_ip
+                     try:
+                         port = base64.b64decode(str(enc_port)).decode("utf-8")
+                     except Exception:
+                         port = enc_port
+
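+                     # For example, an encoded value of "MTkyLjE2OC4xLjE=" decodes to
+                     # "192.168.1.1" (illustrative address only); on any decode failure
+                     # the raw value is kept as-is.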
1509
+ print("----- INCIDENT MANAGER KAFKA SERVER PARAMS -----")
1510
+ print(f"ipAddress: {ip_addr}")
1511
+ print(f"port: {port}")
1512
+ print("------------------------------------------------")
1513
+
1514
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] Kafka params - ip={ip_addr}, port={port}")
1515
+
1516
+ bootstrap_servers = f"{ip_addr}:{port}"
1517
+ kafka_client = MatriceStream(
1518
+ StreamType.KAFKA,
1519
+ bootstrap_servers=bootstrap_servers,
1520
+ sasl_mechanism="SCRAM-SHA-256",
1521
+ sasl_username="matrice-sdk-user",
1522
+ sasl_password="matrice-sdk-password",
1523
+ security_protocol="SASL_PLAINTEXT"
1524
+ )
1525
+ # Setup for both config polling and incident publishing
1526
+ kafka_client.setup("incident_modification_config", consumer_group_id="py_analytics_incidents")
1527
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] ✓ Kafka client initialized (servers={bootstrap_servers})")
1528
+ else:
1529
+ self.logger.warning(
1530
+ f"[INCIDENT_MANAGER_FACTORY] Failed to fetch Kafka server info: "
1531
+ f"{response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}"
1532
+ )
1533
+ except Exception as e:
1534
+ self.logger.warning(f"[INCIDENT_MANAGER_FACTORY] Kafka initialization failed: {e}")
1535
+
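+         # At this point at most one transport is set:
+         #   localhost -> redis_client only (None if instance_id was missing or setup failed)
+         #   cloud     -> kafka_client only (None if setup failed)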
1536
+         # Create incident manager if we have at least one transport
+         if redis_client or kafka_client:
+             self._incident_manager = INCIDENT_MANAGER(
+                 redis_client=redis_client,
+                 kafka_client=kafka_client,
+                 incident_topic="incident_res",
+                 config_topic="incident_modification_config",
+                 logger=self.logger
+             )
+             # Set factory reference for accessing deployment info
+             self._incident_manager.set_factory_ref(self)
+             # Start the config polling thread
+             self._incident_manager.start()
+
+             transport = "Redis" if redis_client else "Kafka"
+             self.logger.info(f"[INCIDENT_MANAGER_FACTORY] ✓ Incident manager created with {transport}")
+             print(f"----- INCIDENT MANAGER INITIALIZED ({transport}) -----")
+         else:
+             self.logger.warning(
+                 f"[INCIDENT_MANAGER_FACTORY] No {'Redis' if is_localhost else 'Kafka'} client available, "
+                 f"incident manager not created"
+             )
+
+         self._initialized = True
+         self.logger.info("[INCIDENT_MANAGER_FACTORY] ===== INITIALIZATION COMPLETE =====")
+         return self._incident_manager
+
+     except ImportError as e:
+         self.logger.error(f"[INCIDENT_MANAGER_FACTORY] Import error: {e}")
+         self._initialized = True
+         return None
+     except Exception as e:
+         self.logger.error(f"[INCIDENT_MANAGER_FACTORY] Initialization failed: {e}", exc_info=True)
+         self._initialized = True
+         return None
+
1572
+     def _discover_action_id(self) -> Optional[str]:
+         """Discover action_id from the current working directory name (and parents), falling back to /usr/src subdirectories."""
+         try:
+             candidates: List[str] = []
+
+             try:
+                 cwd = Path.cwd()
+                 candidates.append(cwd.name)
+                 for parent in cwd.parents:
+                     candidates.append(parent.name)
+             except Exception:
+                 pass
+
+             try:
+                 usr_src = Path("/usr/src")
+                 if usr_src.exists():
+                     for child in usr_src.iterdir():
+                         if child.is_dir():
+                             candidates.append(child.name)
+             except Exception:
+                 pass
+
+             for candidate in candidates:
+                 if candidate and len(candidate) >= 8 and self.ACTION_ID_PATTERN.match(candidate):
+                     return candidate
+         except Exception:
+             pass
+         return None
+
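+     # For example, a worker launched from /usr/src/<24-char-hex-id> would return
+     # that directory name, provided it matches ACTION_ID_PATTERN (the pattern is
+     # defined on the class; the hex shape here is an assumption for illustration).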
1601
+     def _get_public_ip(self) -> str:
+         """Get the public IP address of this machine."""
+         self.logger.info("[INCIDENT_MANAGER_FACTORY] Fetching public IP address...")
+         try:
+             public_ip = urllib.request.urlopen(
+                 "https://v4.ident.me", timeout=120
+             ).read().decode("utf8").strip()
+             self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] Public IP: {public_ip}")
+             return public_ip
+         except Exception as e:
+             self.logger.warning(f"[INCIDENT_MANAGER_FACTORY] Error fetching public IP: {e}")
+             return "localhost"
+
1614
+     def _get_backend_base_url(self) -> str:
+         """Resolve the backend base URL from the ENV environment variable."""
+         env = os.getenv("ENV", "prod").strip().lower()
+         if env in ("prod", "production"):
+             host = "prod.backend.app.matrice.ai"
+         elif env in ("dev", "development"):
+             host = "dev.backend.app.matrice.ai"
+         else:
+             host = "staging.backend.app.matrice.ai"
+         return f"https://{host}"
+
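+     # Resulting mapping, for reference:
+     #   ENV=prod / production (or unset) -> https://prod.backend.app.matrice.ai
+     #   ENV=dev / development            -> https://dev.backend.app.matrice.ai
+     #   any other value                  -> https://staging.backend.app.matrice.ai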
1625
+     @property
+     def is_initialized(self) -> bool:
+         return self._initialized
+
+     @property
+     def incident_manager(self) -> Optional[INCIDENT_MANAGER]:
+         return self._incident_manager
+
+
+ # Module-level factory instance for convenience
+ _default_factory: Optional[IncidentManagerFactory] = None
+
+
+ def get_incident_manager(config: Any, logger: Optional[logging.Logger] = None) -> Optional[INCIDENT_MANAGER]:
+     """
+     Get or create an INCIDENT_MANAGER instance.
+
+     This is a convenience function that uses a module-level factory.
+     For more control, use IncidentManagerFactory directly.
+
+     Args:
+         config: Configuration object with session, server_id, etc.
+         logger: Logger instance
+
+     Returns:
+         INCIDENT_MANAGER instance, or None if initialization failed
+     """
+     global _default_factory
+
+     if _default_factory is None:
+         _default_factory = IncidentManagerFactory(logger=logger)
+
+     return _default_factory.initialize(config)
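+
+
+ # Usage sketch (illustrative; assumes `config` carries the session/action details
+ # that initialize() reads, and that logging is configured by the caller):
+ #
+ #     manager = get_incident_manager(config, logger=logging.getLogger(__name__))
+ #     if manager is not None:
+ #         # The factory already called manager.start(), which launches the config
+ #         # polling thread; incidents are published to the 'incident_res' topic.
+ #         ...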