matrice-analytics 0.1.70__py3-none-any.whl → 0.1.96__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. matrice_analytics/post_processing/__init__.py +8 -2
  2. matrice_analytics/post_processing/config.py +4 -2
  3. matrice_analytics/post_processing/core/base.py +1 -1
  4. matrice_analytics/post_processing/core/config.py +40 -3
  5. matrice_analytics/post_processing/face_reg/face_recognition.py +1014 -201
  6. matrice_analytics/post_processing/face_reg/face_recognition_client.py +171 -29
  7. matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
  8. matrice_analytics/post_processing/post_processor.py +4 -0
  9. matrice_analytics/post_processing/usecases/__init__.py +4 -1
  10. matrice_analytics/post_processing/usecases/advanced_customer_service.py +913 -500
  11. matrice_analytics/post_processing/usecases/color_detection.py +19 -18
  12. matrice_analytics/post_processing/usecases/customer_service.py +356 -9
  13. matrice_analytics/post_processing/usecases/fire_detection.py +241 -23
  14. matrice_analytics/post_processing/usecases/footfall.py +750 -0
  15. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +638 -40
  16. matrice_analytics/post_processing/usecases/people_counting.py +66 -33
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +35 -34
  18. matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
  19. matrice_analytics/post_processing/utils/alert_instance_utils.py +1018 -0
  20. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1338 -0
  21. matrice_analytics/post_processing/utils/incident_manager_utils.py +1754 -0
  22. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
  23. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +26 -22
  24. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
  25. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
  26. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1754 @@
+ """
+ incident_manager_utils.py
+
+ Manages incident publishing to Redis/Kafka when severity levels change.
+ Implements consecutive-frame validation before publishing:
+ - 5 consecutive frames for medium/significant/critical
+ - 10 consecutive frames for low (stricter)
+ - 101 consecutive empty frames to send 'info' (incident ended)
+
+ Polls 'incident_modification_config' topic for dynamic threshold settings.
+ Publishes to 'incident_res' topic.
+
+ PRODUCTION-READY VERSION
+ """
+
+ import json
+ import time
+ import threading
+ import logging
+ import os
+ import urllib.request
+ import base64
+ import re
+ from typing import Dict, List, Optional, Any, Tuple
+ from datetime import datetime, timezone
+ from dataclasses import dataclass, field
+ from pathlib import Path
+
+
+ # Severity level ordering for comparison (none = no incident)
+ SEVERITY_LEVELS = ["none", "info", "low", "medium", "significant", "critical"]
+
+ # Default thresholds if none provided (same as fire_detection.py defaults)
+ DEFAULT_THRESHOLDS = [
+     {"level": "low", "percentage": 0.0001},
+     {"level": "medium", "percentage": 3},
+     {"level": "significant", "percentage": 13},
+     {"level": "critical", "percentage": 30}
+ ]
+
+ # Cache for location names to avoid repeated API calls
+ _location_name_cache: Dict[str, str] = {}
+
+
+ @dataclass
+ class IncidentState:
+     """Tracks the current incident state for a camera/usecase."""
+     current_level: str = "none"          # Current confirmed severity level
+     pending_level: str = "none"          # Level being validated (needs consecutive frames)
+     consecutive_count: int = 0           # Consecutive frames with pending_level
+     last_published_level: str = "none"   # Last level that was published (for spam prevention)
+     incident_cycle_id: int = 1           # Starts at 1, incremented when cycle resets (after info sent)
+     empty_frames_count: int = 0          # Consecutive empty incident frames (for "info" detection)
+     current_incident_id: str = ""        # Current incident_id for this cycle (managed per camera)
+     incident_active: bool = False        # Whether an incident is currently active in this cycle
+
+
+ @dataclass
+ class ThresholdConfig:
+     """Stores threshold configuration for a camera."""
+     camera_id: str
+     application_id: str = ""
+     app_deployment_id: str = ""
+     incident_type: str = ""
+     thresholds: List[Dict[str, Any]] = field(default_factory=lambda: DEFAULT_THRESHOLDS.copy())
+     last_updated: float = field(default_factory=time.time)
+     camera_name: str = ""                # Store camera_name from config
+
+
+ class INCIDENT_MANAGER:
+     """
+     Manages incident severity level tracking and publishing.
+
+     Key behaviors:
+     - Polls 'incident_modification_config' topic for dynamic threshold settings
+     - Calculates severity_level from incident_quant using thresholds
+     - Publishes incidents ONLY when severity level changes
+     - Requires different consecutive frames based on level:
+       - 5 frames for medium/significant/critical
+       - 10 frames for low (stricter to avoid false positives)
+       - 101 empty frames to send "info" (incident ended)
+     - Supports both Redis and Kafka transports
+     - Thread-safe operations
+
+     Usage:
+         manager = INCIDENT_MANAGER(redis_client=..., kafka_client=...)
+         manager.start()   # Start config polling
+         manager.process_incident(camera_id, incident_data, stream_info)
+         manager.stop()    # Stop polling on shutdown
+     """
+
+     # Frame thresholds for different severity levels
+     CONSECUTIVE_FRAMES_DEFAULT = 5    # For medium, significant, critical
+     CONSECUTIVE_FRAMES_LOW = 10       # For low level (stricter)
+     CONSECUTIVE_FRAMES_EMPTY = 101    # For sending "info" after no detections
+
+     CONFIG_POLLING_INTERVAL = 10      # Poll every 10 seconds
+     CONFIG_TOPIC = "incident_modification_config"
+     INCIDENT_TOPIC = "incident_res"
+
+     def __init__(
+         self,
+         redis_client: Optional[Any] = None,
+         kafka_client: Optional[Any] = None,
+         incident_topic: str = "incident_res",
+         config_topic: str = "incident_modification_config",
+         logger: Optional[logging.Logger] = None
+     ):
+         """
+         Initialize INCIDENT_MANAGER.
+
+         Args:
+             redis_client: MatriceStream instance configured for Redis
+             kafka_client: MatriceStream instance configured for Kafka
+             incident_topic: Topic/stream name for publishing incidents
+             config_topic: Topic/stream name for receiving threshold configs
+             logger: Python logger instance
+         """
+         self.redis_client = redis_client
+         self.kafka_client = kafka_client
+         self.incident_topic = incident_topic
+         self.config_topic = config_topic
+         self.logger = logger or logging.getLogger(__name__)
+
+         # Per-camera incident state tracking: {camera_id: IncidentState}
+         self._incident_states: Dict[str, IncidentState] = {}
+         self._states_lock = threading.Lock()
+
+         # Per-camera threshold configuration: {camera_id: ThresholdConfig}
+         self._threshold_configs: Dict[str, ThresholdConfig] = {}
+         self._config_lock = threading.Lock()
+
+         # Config polling thread control
+         self._polling_thread: Optional[threading.Thread] = None
+         self._stop_event = threading.Event()
+         self._running = False
+
+         # Store factory reference for fetching camera info
+         self._factory_ref: Optional['IncidentManagerFactory'] = None
+
+         self.logger.info(
+             f"[INCIDENT_MANAGER] Initialized with incident_topic={incident_topic}, "
+             f"config_topic={config_topic}, "
+             f"low_frames={self.CONSECUTIVE_FRAMES_LOW}, "
+             f"default_frames={self.CONSECUTIVE_FRAMES_DEFAULT}, "
+             f"empty_frames_for_info={self.CONSECUTIVE_FRAMES_EMPTY}, "
+             f"polling_interval={self.CONFIG_POLLING_INTERVAL}s"
+         )
+
+     def set_factory_ref(self, factory: 'IncidentManagerFactory'):
+         """Set reference to factory for accessing deployment info."""
+         self._factory_ref = factory
+
+     def start(self):
+         """Start the background config polling thread."""
+         if self._running:
+             self.logger.warning("[INCIDENT_MANAGER] Already running")
+             return
+
+         self._running = True
+         self._stop_event.clear()
+         self._polling_thread = threading.Thread(
+             target=self._config_polling_loop,
+             daemon=True,
+             name="IncidentConfigPoller"
+         )
+         self._polling_thread.start()
+         self.logger.info("[INCIDENT_MANAGER] ✓ Started config polling thread")
+
+     def stop(self):
+         """Stop the background polling thread gracefully."""
+         if not self._running:
+             return
+
+         self.logger.info("[INCIDENT_MANAGER] Stopping...")
+         self._running = False
+         self._stop_event.set()
+
+         if self._polling_thread and self._polling_thread.is_alive():
+             self._polling_thread.join(timeout=5)
+
+         self.logger.info("[INCIDENT_MANAGER] ✓ Stopped")
+
+     def _config_polling_loop(self):
+         """Background thread that polls for config updates every CONFIG_POLLING_INTERVAL seconds."""
+         self.logger.info(f"[INCIDENT_MANAGER] Config polling loop started (interval: {self.CONFIG_POLLING_INTERVAL}s)")
+
+         while not self._stop_event.is_set():
+             try:
+                 self._fetch_and_update_configs()
+             except Exception as e:
+                 self.logger.error(f"[INCIDENT_MANAGER] Error in config polling loop: {e}", exc_info=True)
+
+             # Sleep in small increments to allow quick shutdown
+             for _ in range(self.CONFIG_POLLING_INTERVAL):
+                 if self._stop_event.is_set():
+                     break
+                 time.sleep(1)
+
+         self.logger.info("[INCIDENT_MANAGER] Config polling loop exited")
+
+     def _fetch_and_update_configs(self):
+         """Fetch config messages from Redis (primary) or Kafka (fallback)."""
+         configs = []
+
+         # Try Redis first (primary)
+         if self.redis_client:
+             try:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Fetching configs from Redis: {self.config_topic}")
+                 configs = self._read_configs_from_redis(max_messages=100)
+                 if configs:
+                     self.logger.info(f"[INCIDENT_MANAGER] Fetched {len(configs)} config(s) from Redis")
+             except Exception as e:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Redis config fetch: {e}")
+
+         # Fallback to Kafka if Redis failed or no messages
+         if not configs and self.kafka_client:
+             try:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Fetching configs from Kafka: {self.config_topic}")
+                 configs = self._read_configs_from_kafka(max_messages=100)
+                 if configs:
+                     self.logger.info(f"[INCIDENT_MANAGER] Fetched {len(configs)} config(s) from Kafka")
+             except Exception as e:
+                 self.logger.debug(f"[INCIDENT_MANAGER] Kafka config fetch: {e}")
+
+         # Update in-memory threshold configs
+         for config_data in configs:
+             try:
+                 self._handle_config_message(config_data)
+             except Exception as e:
+                 self.logger.error(f"[INCIDENT_MANAGER] Error handling config message: {e}", exc_info=True)
+
+     def _read_configs_from_redis(self, max_messages: int = 100) -> List[Dict[str, Any]]:
+         """Read config messages from Redis stream."""
+         messages = []
+         try:
+             for _ in range(max_messages):
+                 msg = self.redis_client.get_message(timeout=0.1)
+                 if not msg:
+                     break
+
+                 value = msg.get('value') or msg.get('data') or msg.get('message')
+                 if value:
+                     parsed = self._parse_message_value(value)
+                     if parsed:
+                         messages.append(parsed)
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Error reading from Redis: {e}")
+
+         return messages
+
+     def _read_configs_from_kafka(self, max_messages: int = 100) -> List[Dict[str, Any]]:
+         """Read config messages from Kafka topic."""
+         messages = []
+         try:
+             for _ in range(max_messages):
+                 msg = self.kafka_client.get_message(timeout=0.1)
+                 if not msg:
+                     break
+
+                 value = msg.get('value') or msg.get('data') or msg.get('message')
+                 if value:
+                     parsed = self._parse_message_value(value)
+                     if parsed:
+                         messages.append(parsed)
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Error reading from Kafka: {e}")
+
+         return messages
+
+     def _parse_message_value(self, value: Any) -> Optional[Dict[str, Any]]:
+         """Parse message value into a dictionary."""
+         try:
+             # Already a dict
+             if isinstance(value, dict):
+                 if 'data' in value and isinstance(value['data'], dict):
+                     return value['data']
+                 return value
+
+             # Bytes to string
+             if isinstance(value, bytes):
+                 value = value.decode('utf-8')
+
+             # Parse JSON string
+             if isinstance(value, str):
+                 try:
+                     return json.loads(value)
+                 except json.JSONDecodeError:
+                     # Try fixing Python-style formatting
+                     fixed = value
+                     fixed = fixed.replace(": True", ": true").replace(": False", ": false")
+                     fixed = fixed.replace(":True", ":true").replace(":False", ":false")
+                     fixed = fixed.replace(": None", ": null").replace(":None", ":null")
+                     if "'" in fixed and '"' not in fixed:
+                         fixed = fixed.replace("'", '"')
+                     return json.loads(fixed)
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Failed to parse message: {e}")
+
+         return None
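The fallback branch above tolerates producers that str() a Python dict instead of JSON-encoding it. The same repair in isolation (hypothetical payload):

    import json

    raw = "{'camera_id': 'cam-1', 'enabled': True, 'note': None}"  # repr-style, not valid JSON
    fixed = raw.replace(": True", ": true").replace(": None", ": null")
    if "'" in fixed and '"' not in fixed:
        fixed = fixed.replace("'", '"')
    print(json.loads(fixed))  # {'camera_id': 'cam-1', 'enabled': True, 'note': None}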
+
+     def _handle_config_message(self, config_data: Dict[str, Any]):
+         """
+         Handle a threshold config message.
+
+         Expected format:
+             {
+                 "camera_id": "68f9d95cfaff6151c774e0e7",
+                 "application_id": "...",
+                 "app_deployment_id": "...",
+                 "incident_type": "fire",
+                 "camera_name": "camera_1",
+                 "thresholds": [
+                     {"level": "low", "percentage": 0.0001},
+                     {"level": "medium", "percentage": 3},
+                     {"level": "significant", "percentage": 13},
+                     {"level": "critical", "percentage": 30}
+                 ]
+             }
+         """
+         try:
+             camera_id = config_data.get("camera_id", "")
+             if not camera_id:
+                 self.logger.debug("[INCIDENT_MANAGER] Config message missing camera_id, skipping")
+                 return
+
+             # Extract fields with defaults
+             application_id = config_data.get("application_id", "")
+             app_deployment_id = config_data.get("app_deployment_id", "")
+             incident_type = config_data.get("incident_type", "")
+             camera_name = config_data.get("camera_name", "")
+             thresholds = config_data.get("thresholds", [])
+
+             # Validate thresholds - use defaults if invalid
+             if not thresholds or not isinstance(thresholds, list):
+                 thresholds = DEFAULT_THRESHOLDS.copy()
+                 self.logger.debug(f"[INCIDENT_MANAGER] Using default thresholds for camera: {camera_id}")
+             else:
+                 # Validate each threshold has required fields
+                 # Also map "high" -> "significant" (backend uses "high", we use "significant")
+                 valid_thresholds = []
+                 for t in thresholds:
+                     if isinstance(t, dict) and "level" in t and "percentage" in t:
+                         level = t.get("level", "").lower().strip()
+                         # Map "high" to "significant" when receiving from backend
+                         if level == "high":
+                             self.logger.debug(f"[INCIDENT_MANAGER] Mapping level 'high' -> 'significant' for camera {camera_id}")
+                             t = dict(t)  # Make a copy to avoid modifying original
+                             t["level"] = "significant"
+                         valid_thresholds.append(t)
+
+                 if not valid_thresholds:
+                     thresholds = DEFAULT_THRESHOLDS.copy()
+                 else:
+                     thresholds = valid_thresholds
+
+             # Create or update threshold config
+             with self._config_lock:
+                 self._threshold_configs[camera_id] = ThresholdConfig(
+                     camera_id=camera_id,
+                     application_id=application_id,
+                     app_deployment_id=app_deployment_id,
+                     incident_type=incident_type,
+                     thresholds=thresholds,
+                     last_updated=time.time(),
+                     camera_name=camera_name
+                 )
+
+             self.logger.info(
+                 f"[INCIDENT_MANAGER] ✓ Updated thresholds for camera: {camera_id}, "
+                 f"thresholds: {thresholds}"
+             )
+
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Error handling config message: {e}", exc_info=True)
+
+     def _get_thresholds_for_camera(self, camera_id: str) -> Tuple[List[Dict[str, Any]], Optional[ThresholdConfig]]:
+         """
+         Get thresholds for a specific camera, or defaults if not configured.
+
+         Returns:
+             Tuple of (thresholds list, ThresholdConfig or None)
+         """
+         with self._config_lock:
+             config = self._threshold_configs.get(camera_id)
+             if config:
+                 return config.thresholds, config
+         return DEFAULT_THRESHOLDS, None
+
+     def _calculate_severity_from_quant(
+         self,
+         incident_quant: float,
+         thresholds: List[Dict[str, Any]]
+     ) -> str:
+         """
+         Calculate severity level from incident_quant using thresholds.
+
+         Args:
+             incident_quant: The quantitative value (e.g., intensity percentage)
+             thresholds: List of threshold configs sorted by percentage
+
+         Returns:
+             Severity level string (none, low, medium, significant, critical)
+         """
+         if incident_quant is None or incident_quant < 0:
+             return "none"
+
+         # Sort thresholds by percentage (ascending)
+         sorted_thresholds = sorted(thresholds, key=lambda x: float(x.get("percentage", 0)))
+
+         # Find the highest level where the percentage threshold is met
+         severity = "none"
+         for t in sorted_thresholds:
+             level = t.get("level", "").lower()
+             percentage = float(t.get("percentage", 0))
+
+             if incident_quant >= percentage:
+                 severity = level
+             else:
+                 break  # Since sorted ascending, no need to check further
+
+         # Validate severity
+         if severity not in SEVERITY_LEVELS:
+             severity = "none"
+
+         return severity
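Worked through with DEFAULT_THRESHOLDS, an incident_quant of 15 lands on "significant": it clears 0.0001 (low), 3 (medium), and 13 (significant), but not 30 (critical). The same walk as a self-contained sketch:

    thresholds = [
        {"level": "low", "percentage": 0.0001},
        {"level": "medium", "percentage": 3},
        {"level": "significant", "percentage": 13},
        {"level": "critical", "percentage": 30},
    ]
    quant, severity = 15, "none"
    for t in sorted(thresholds, key=lambda x: x["percentage"]):
        if quant >= t["percentage"]:
            severity = t["level"]
    print(severity)  # significant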
+
+     def _get_frames_required_for_level(self, level: str) -> int:
+         """
+         Get the number of consecutive frames required to confirm a level.
+
+         Args:
+             level: Severity level string
+
+         Returns:
+             Number of consecutive frames required
+         """
+         if level == "low":
+             return self.CONSECUTIVE_FRAMES_LOW      # 10 frames for low (stricter)
+         return self.CONSECUTIVE_FRAMES_DEFAULT      # 5 frames for others
+
+     def _extract_camera_info_from_stream(
+         self,
+         stream_info: Optional[Dict[str, Any]]
+     ) -> Dict[str, str]:
+         """
+         Extract camera info from stream_info (similar to ResultsIngestor pattern).
+
+         Stream info structure example:
+             {
+                 'broker': 'localhost:9092',
+                 'topic': '692d7bde42582ffde3611908_input_topic',  # camera_id is prefix before _input_topic
+                 'stream_time': '2025-12-02-05:09:53.914224 UTC',
+                 'camera_info': {
+                     'camera_name': 'cusstomer-cam-1',
+                     'camera_group': 'staging-customer-1',
+                     'location': '6908756db129880c34f2e09a'
+                 },
+                 'frame_id': '7b94e2f668fb456f95b73c3084e17f8a'
+             }
+
+         Args:
+             stream_info: Stream metadata from usecase
+
+         Returns:
+             Dict with camera_id, camera_name, app_deployment_id, application_id, frame_id, location_id
+         """
+         result = {
+             "camera_id": "",
+             "camera_name": "",
+             "app_deployment_id": "",
+             "application_id": "",
+             "frame_id": "",
+             "location_id": ""
+         }
+
+         if not stream_info:
+             return result
+
+         try:
+             # Try multiple paths to get camera info (like ResultsIngestor)
+             # Path 1: Direct camera_info in stream_info
+             camera_info = stream_info.get("camera_info", {}) or {}
+
+             # Path 2: From input_settings -> input_stream pattern
+             input_settings = stream_info.get("input_settings", {}) or {}
+             input_stream = input_settings.get("input_stream", {}) or {}
+             input_camera_info = input_stream.get("camera_info", {}) or {}
+
+             # Path 3: From input_streams array (like ResultsIngestor)
+             input_streams = stream_info.get("input_streams", [])
+             if input_streams and len(input_streams) > 0:
+                 input_data = input_streams[0] if isinstance(input_streams[0], dict) else {}
+                 input_stream_inner = input_data.get("input_stream", input_data)
+                 input_camera_info = input_stream_inner.get("camera_info", {}) or input_camera_info
+
+             # Merge all sources, preferring non-empty values
+             # camera_name - check all possible locations
+             result["camera_name"] = (
+                 camera_info.get("camera_name", "") or
+                 camera_info.get("cameraName", "") or
+                 input_camera_info.get("camera_name", "") or
+                 input_camera_info.get("cameraName", "") or
+                 stream_info.get("camera_name", "") or
+                 stream_info.get("cameraName", "") or
+                 input_settings.get("camera_name", "") or
+                 input_settings.get("cameraName", "") or
+                 ""
+             )
+
+             # camera_id - check direct fields first
+             result["camera_id"] = (
+                 camera_info.get("camera_id", "") or
+                 camera_info.get("cameraId", "") or
+                 input_camera_info.get("camera_id", "") or
+                 input_camera_info.get("cameraId", "") or
+                 stream_info.get("camera_id", "") or
+                 stream_info.get("cameraId", "") or
+                 input_settings.get("camera_id", "") or
+                 input_settings.get("cameraId", "") or
+                 ""
+             )
+
+             # If camera_id still not found, extract from topic
+             # Topic format: {camera_id}_input_topic (e.g., "692d7bde42582ffde3611908_input_topic")
+             if not result["camera_id"]:
+                 topic = stream_info.get("topic", "")
+                 if topic:
+                     extracted_camera_id = ""
+                     if topic.endswith("_input_topic"):
+                         extracted_camera_id = topic[: -len("_input_topic")]
+                         self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic (underscore): {extracted_camera_id}")
+                     elif topic.endswith("_input-topic"):
+                         extracted_camera_id = topic[: -len("_input-topic")]
+                         self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic (hyphen): {extracted_camera_id}")
+                     else:
+                         if "_input_topic" in topic:
+                             extracted_camera_id = topic.split("_input_topic")[0]
+                             self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic split (underscore): {extracted_camera_id}")
+                         elif "_input-topic" in topic:
+                             extracted_camera_id = topic.split("_input-topic")[0]
+                             self.logger.debug(f"[INCIDENT_MANAGER] Extracted camera_id from topic split (hyphen): {extracted_camera_id}")
+                     if extracted_camera_id:
+                         result["camera_id"] = extracted_camera_id
+
+             # app_deployment_id
+             result["app_deployment_id"] = (
+                 stream_info.get("app_deployment_id", "") or
+                 stream_info.get("appDeploymentId", "") or
+                 stream_info.get("app_deploymentId", "") or
+                 input_settings.get("app_deployment_id", "") or
+                 input_settings.get("appDeploymentId", "") or
+                 camera_info.get("app_deployment_id", "") or
+                 camera_info.get("appDeploymentId", "") or
+                 ""
+             )
+
+             # application_id
+             result["application_id"] = (
+                 stream_info.get("application_id", "") or
+                 stream_info.get("applicationId", "") or
+                 stream_info.get("app_id", "") or
+                 stream_info.get("appId", "") or
+                 input_settings.get("application_id", "") or
+                 input_settings.get("applicationId", "") or
+                 camera_info.get("application_id", "") or
+                 camera_info.get("applicationId", "") or
+                 ""
+             )
+
+             # frame_id - at top level of stream_info
+             result["frame_id"] = (
+                 stream_info.get("frame_id", "") or
+                 stream_info.get("frameId", "") or
+                 input_settings.get("frame_id", "") or
+                 input_settings.get("frameId", "") or
+                 ""
+             )
+
+             # location_id - from camera_info.location
+             result["location_id"] = (
+                 camera_info.get("location", "") or
+                 camera_info.get("location_id", "") or
+                 camera_info.get("locationId", "") or
+                 input_camera_info.get("location", "") or
+                 input_camera_info.get("location_id", "") or
+                 ""
+             )
+
+             self.logger.debug(
+                 f"[INCIDENT_MANAGER] Extracted from stream_info - "
+                 f"camera_id={result['camera_id']}, camera_name={result['camera_name']}, "
+                 f"app_deployment_id={result['app_deployment_id']}, application_id={result['application_id']}, "
+                 f"frame_id={result['frame_id']}, location_id={result['location_id']}"
+             )
+
+         except Exception as e:
+             self.logger.debug(f"[INCIDENT_MANAGER] Error extracting camera info: {e}")
+
+         return result
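The topic fallback above is the last resort for camera_id. In isolation, using the topic from the docstring example:

    topic = "692d7bde42582ffde3611908_input_topic"
    if topic.endswith("_input_topic"):
        camera_id = topic[: -len("_input_topic")]
    print(camera_id)  # 692d7bde42582ffde3611908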
+
+     def _map_level_from_backend(self, level: str) -> str:
+         """Map level from backend terminology to internal terminology.
+
+         Backend uses 'high', we use 'significant' internally.
+         """
+         if level and level.lower().strip() == "high":
+             return "significant"
+         return level
+
+     def _map_level_to_backend(self, level: str) -> str:
+         """Map level from internal terminology to backend terminology.
+
+         We use 'significant' internally, backend expects 'high'.
+         """
+         if level and level.lower().strip() == "significant":
+             return "high"
+         return level
+
+     def _fetch_location_name(self, location_id: str) -> str:
+         """
+         Fetch location name from API using location_id.
+
+         Args:
+             location_id: The location ID to look up
+
+         Returns:
+             Location name string, or 'Entry Reception' as default if API fails
+         """
+         global _location_name_cache
+         default_location = "Entry Reception"
+
+         if not location_id:
+             self.logger.debug(f"[INCIDENT_MANAGER] No location_id provided, using default: '{default_location}'")
+             return default_location
+
+         # Check cache first
+         if location_id in _location_name_cache:
+             cached_name = _location_name_cache[location_id]
+             self.logger.debug(f"[INCIDENT_MANAGER] Using cached location name for '{location_id}': '{cached_name}'")
+             return cached_name
+
+         # Need factory reference with session to make API call
+         if not self._factory_ref or not self._factory_ref._session:
+             self.logger.warning(f"[INCIDENT_MANAGER] No session available for location API, using default: '{default_location}'")
+             return default_location
+
+         try:
+             endpoint = f"/v1/inference/get_location/{location_id}"
+             self.logger.info(f"[INCIDENT_MANAGER] Fetching location name from API: {endpoint}")
+
+             response = self._factory_ref._session.rpc.get(endpoint)
+
+             if response and isinstance(response, dict):
+                 success = response.get("success", False)
+                 if success:
+                     data = response.get("data", {})
+                     location_name = data.get("locationName", default_location)
+                     self.logger.info(f"[INCIDENT_MANAGER] ✓ Fetched location name: '{location_name}' for location_id: '{location_id}'")
+
+                     # Cache the result
+                     _location_name_cache[location_id] = location_name
+                     return location_name
+                 else:
+                     self.logger.warning(
+                         f"[INCIDENT_MANAGER] API returned success=false for location_id '{location_id}': "
+                         f"{response.get('message', 'Unknown error')}"
+                     )
+             else:
+                 self.logger.warning(f"[INCIDENT_MANAGER] Invalid response format from API: {response}")
+
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Error fetching location name for '{location_id}': {e}", exc_info=True)
+
+         # Use default on any failure
+         self.logger.info(f"[INCIDENT_MANAGER] Using default location name: '{default_location}'")
+         _location_name_cache[location_id] = default_location
+         return default_location
+
+     def _generate_incident_id(self, camera_id: str, cycle_id: int) -> str:
+         """Generate a unique incident_id for a camera's cycle."""
+         return f"incident_{camera_id}_{cycle_id}"
+
+     def process_incident(
+         self,
+         camera_id: str,
+         incident_data: Dict[str, Any],
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Process an incident and publish if severity level changed.
+
+         This method:
+         1. Gets incident_quant from incident_data
+         2. Calculates severity_level using dynamic thresholds for this camera
+         3. Updates incident_data with new severity_level
+         4. Tracks level changes with consecutive-frame validation:
+            - 5 frames for medium/significant/critical
+            - 10 frames for low (stricter)
+         5. Tracks empty incidents and publishes "info" after 101 consecutive empty frames
+         6. Publishes on level change
+         7. Manages incident_id per camera per cycle (increments after info is sent)
+
+         Args:
+             camera_id: Unique camera identifier
+             incident_data: Incident dictionary from usecase (must include incident_quant)
+             stream_info: Stream metadata
+
+         Returns:
+             True if incident was published, False otherwise
+         """
+         try:
+             self.logger.debug(f"[INCIDENT_MANAGER] Processing incident for camera: {camera_id}")
+
+             # Get or create state for this camera
+             with self._states_lock:
+                 if camera_id not in self._incident_states:
+                     new_state = IncidentState()
+                     # Initialize incident_id for new camera
+                     new_state.current_incident_id = self._generate_incident_id(camera_id, new_state.incident_cycle_id)
+                     self._incident_states[camera_id] = new_state
+                     self.logger.info(
+                         f"[INCIDENT_MANAGER] Created new state for camera: {camera_id}, "
+                         f"initial incident_id: {new_state.current_incident_id}"
+                     )
+
+                 state = self._incident_states[camera_id]
+
+                 # Ensure incident_id is set (for existing states that may not have it)
+                 if not state.current_incident_id:
+                     state.current_incident_id = self._generate_incident_id(camera_id, state.incident_cycle_id)
+                     self.logger.info(f"[INCIDENT_MANAGER] Generated incident_id for existing state: {state.current_incident_id}")
+
+             # Handle empty incident data - track for "info" level
+             is_empty_incident = (not incident_data or incident_data == {})
+
+             if is_empty_incident:
+                 self.logger.debug("[INCIDENT_MANAGER] Empty incident data, tracking for info level")
+                 return self._handle_empty_incident(camera_id, state, stream_info)
+
+             # Step 1: Get thresholds for this camera
+             thresholds, threshold_config = self._get_thresholds_for_camera(camera_id)
+
+             # Step 2: Get incident_quant and calculate severity level dynamically
+             incident_quant = incident_data.get("incident_quant")
+
+             if incident_quant is not None:
+                 # Calculate severity from quant using dynamic thresholds
+                 severity_level = self._calculate_severity_from_quant(incident_quant, thresholds)
+
+                 # Update incident_data with new severity level
+                 incident_data["severity_level"] = severity_level
+
+                 self.logger.debug(
+                     f"[INCIDENT_MANAGER] Calculated severity from incident_quant={incident_quant}: "
+                     f"severity_level={severity_level}"
+                 )
+             else:
+                 # Fallback to existing severity_level in incident_data
+                 severity_level = incident_data.get("severity_level", "none")
+                 if not severity_level or severity_level == "":
+                     severity_level = "none"
+
+             # Store threshold config info in incident_data for output message
+             if threshold_config:
+                 incident_data["_config_camera_id"] = threshold_config.camera_id
+                 incident_data["_config_application_id"] = threshold_config.application_id
+                 incident_data["_config_app_deployment_id"] = threshold_config.app_deployment_id
+                 incident_data["_config_camera_name"] = threshold_config.camera_name
+
+             severity_level = severity_level.lower().strip()
+
+             self.logger.debug(f"[INCIDENT_MANAGER] Final severity_level: '{severity_level}'")
+
+             # Validate severity level
+             if severity_level not in SEVERITY_LEVELS:
+                 self.logger.warning(
+                     f"[INCIDENT_MANAGER] Unknown severity level '{severity_level}', treating as 'none'"
+                 )
+                 severity_level = "none"
+
+             # If level is "none", treat as empty incident (DO NOT reset empty_frames_count here!)
+             if severity_level == "none":
+                 return self._handle_empty_incident(camera_id, state, stream_info)
+
+             # We have a real detection (severity != none), reset empty frame counter
+             with self._states_lock:
+                 state.empty_frames_count = 0
+
+             with self._states_lock:
+                 self.logger.debug(
+                     f"[INCIDENT_MANAGER] Current state - "
+                     f"current_level={state.current_level}, "
+                     f"pending_level={state.pending_level}, "
+                     f"consecutive_count={state.consecutive_count}, "
+                     f"last_published_level={state.last_published_level}, "
+                     f"incident_id={state.current_incident_id}, "
+                     f"cycle_id={state.incident_cycle_id}, "
+                     f"incident_active={state.incident_active}"
+                 )
+
+                 # Check if this is a new pending level or continuation
+                 if severity_level == state.pending_level:
+                     # Same level, increment counter
+                     state.consecutive_count += 1
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] Same pending level, "
+                         f"consecutive_count now: {state.consecutive_count}"
+                     )
+                 else:
+                     # Different level, reset counter
+                     state.pending_level = severity_level
+                     state.consecutive_count = 1
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] New pending level: {severity_level}, "
+                         f"reset consecutive_count to 1"
+                     )
+
+                 # Get required frames for this level
+                 frames_required = self._get_frames_required_for_level(severity_level)
+
+                 # Check if we've reached the threshold for confirmation
+                 if state.consecutive_count >= frames_required:
+                     # Level is confirmed after required consecutive frames
+                     old_level = state.current_level
+                     new_level = state.pending_level
+
+                     self.logger.info(
+                         f"[INCIDENT_MANAGER] Level confirmed after {state.consecutive_count} frames "
+                         f"(required: {frames_required}): {old_level} -> {new_level}"
+                     )
+
+                     # Check if level actually changed
+                     if new_level != state.current_level:
+                         state.current_level = new_level
+
+                         # Check if we should publish
+                         # 1. Don't publish "none" level (no incident)
+                         # 2. Don't publish same level again (spam prevention)
+                         should_publish = (
+                             new_level != "none" and
+                             new_level != state.last_published_level
+                         )
+
+                         self.logger.info(
+                             f"[INCIDENT_MANAGER] Level changed: {old_level} -> {new_level}, "
+                             f"should_publish={should_publish} "
+                             f"(last_published={state.last_published_level})"
+                         )
+
+                         if should_publish:
+                             # Mark incident as active for this cycle
+                             state.incident_active = True
+
+                             # Use the managed incident_id for this cycle
+                             incident_data["incident_id"] = state.current_incident_id
+
+                             # Publish the incident
+                             success = self._publish_incident(
+                                 camera_id, incident_data, stream_info
+                             )
+                             if success:
+                                 state.last_published_level = new_level
+                                 self.logger.info(
+                                     f"[INCIDENT_MANAGER] ✓ Published incident for level: {new_level}, "
+                                     f"incident_id: {state.current_incident_id}"
+                                 )
+                             return success
+                         else:
+                             self.logger.debug(
+                                 f"[INCIDENT_MANAGER] Skipping publish - "
+                                 f"level={new_level}, already published"
+                             )
+                     else:
+                         self.logger.debug(
+                             f"[INCIDENT_MANAGER] No level change, staying at: {state.current_level}"
+                         )
+
+             return False
+
+         except Exception as e:
+             self.logger.error(
+                 f"[INCIDENT_MANAGER] Error processing incident: {e}",
+                 exc_info=True
+             )
+             return False
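A minimal end-to-end exercise of process_incident, assuming this module is importable; _StubStream is a hypothetical stand-in exposing the same get_message/add_message surface the manager calls on its MatriceStream clients:

    import logging

    class _StubStream:
        def __init__(self):
            self.published = []
        def get_message(self, timeout=0.1):
            return None  # no threshold-config messages in this sketch
        def add_message(self, topic_or_channel, message, key=""):
            self.published.append((topic_or_channel, message))

    manager = INCIDENT_MANAGER(redis_client=_StubStream(), logger=logging.getLogger("demo"))
    for _ in range(5):  # five consecutive frames at incident_quant=5 -> "medium"
        manager.process_incident("cam-1", {"incident_quant": 5})
    print(len(manager.redis_client.published))  # 1 message published on "incident_res"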
+
+     def _handle_empty_incident(
+         self,
+         camera_id: str,
+         state: IncidentState,
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Handle empty incident (no detection).
+
+         After 101 consecutive empty frames, send "info" level if an incident was active.
+         Info uses the SAME incident_id as the current cycle, then starts a new cycle.
+
+         Args:
+             camera_id: Camera identifier
+             state: Current incident state
+             stream_info: Stream metadata
+
+         Returns:
+             True if "info" incident was published, False otherwise
+         """
+         with self._states_lock:
+             state.empty_frames_count += 1
+
+             self.logger.debug(
+                 f"[INCIDENT_MANAGER] Empty frame count for camera {camera_id}: "
+                 f"{state.empty_frames_count}/{self.CONSECUTIVE_FRAMES_EMPTY}, "
+                 f"incident_active={state.incident_active}, "
+                 f"current_incident_id={state.current_incident_id}"
+             )
+
+             # Reset pending level tracking when empty
+             if state.pending_level not in ("none", "info"):
+                 state.pending_level = "none"
+                 state.consecutive_count = 0
+
+             # Check if we should send "info" (incident ended)
+             if state.empty_frames_count >= self.CONSECUTIVE_FRAMES_EMPTY:
+                 # Only send "info" if:
+                 # 1. An incident was actually active in this cycle (we published something)
+                 # 2. Last published level was NOT "info" (don't send duplicate info)
+                 should_send_info = (
+                     state.incident_active and
+                     state.last_published_level not in ("info", "none")
+                 )
+
+                 if should_send_info:
+                     self.logger.info(
+                         f"[INCIDENT_MANAGER] {self.CONSECUTIVE_FRAMES_EMPTY} consecutive empty frames for camera {camera_id}, "
+                         f"sending 'info' level to close incident cycle "
+                         f"(last_published={state.last_published_level}, incident_id={state.current_incident_id})"
+                     )
+
+                     # Get incident_type from threshold config if available
+                     incident_type = "fire_smoke_detection"  # Default
+                     with self._config_lock:
+                         config = self._threshold_configs.get(camera_id)
+                         if config and config.incident_type:
+                             incident_type = config.incident_type
+
+                     # Create info incident data - USE THE SAME incident_id from this cycle!
+                     info_incident = {
+                         "incident_id": state.current_incident_id,  # Same incident_id for this cycle
+                         "incident_type": incident_type,
+                         "severity_level": "info",
+                         "human_text": "Incident ended"
+                     }
+
+                     # Update state BEFORE publishing
+                     state.current_level = "info"
+                     state.empty_frames_count = 0  # Reset counter
+
+                     # Publish info incident
+                     success = self._publish_incident(camera_id, info_incident, stream_info)
+                     if success:
+                         state.last_published_level = "info"
+
+                         # END THIS CYCLE - Start a new cycle for future incidents
+                         old_cycle_id = state.incident_cycle_id
+                         old_incident_id = state.current_incident_id
+
+                         state.incident_cycle_id += 1  # Increment cycle
+                         state.current_incident_id = self._generate_incident_id(camera_id, state.incident_cycle_id)
+                         state.incident_active = False  # No active incident in new cycle yet
+                         state.current_level = "none"   # Reset level for new cycle
+                         state.pending_level = "none"
+                         state.consecutive_count = 0
+                         # Note: We keep last_published_level as "info" to prevent duplicate info sends
+
+                         self.logger.info(
+                             f"[INCIDENT_MANAGER] ✓ Published 'info' for camera {camera_id}, "
+                             f"closed incident_id={old_incident_id} (cycle {old_cycle_id}), "
+                             f"started new cycle {state.incident_cycle_id} with incident_id={state.current_incident_id}"
+                         )
+                     return success
+                 else:
+                     # No active incident or already sent info
+                     if not state.incident_active:
+                         self.logger.debug(
+                             f"[INCIDENT_MANAGER] Skipping 'info' for camera {camera_id} - "
+                             f"no incident was active in this cycle"
+                         )
+                     else:
+                         self.logger.debug(
+                             f"[INCIDENT_MANAGER] Skipping 'info' for camera {camera_id} - "
+                             f"last_published is already '{state.last_published_level}'"
+                         )
+
+                     # Reset empty frame counter if we decide not to send info
+                     # to avoid repeated checks every frame after 101
+                     state.empty_frames_count = 0
+
+         return False
+
+     def _publish_incident(
+         self,
+         camera_id: str,
+         incident_data: Dict[str, Any],
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Publish incident to Redis/Kafka topic.
+
+         Args:
+             camera_id: Camera identifier
+             incident_data: Incident dictionary
+             stream_info: Stream metadata
+
+         Returns:
+             True if published successfully, False otherwise
+         """
+         self.logger.info("[INCIDENT_MANAGER] ========== PUBLISHING INCIDENT ==========")
+
+         try:
+             # Build the incident message
+             message = self._build_incident_message(camera_id, incident_data, stream_info)
+
+             self.logger.info(f"[INCIDENT_MANAGER] Built incident message: {json.dumps(message, default=str)[:500]}...")
+
+             success = False
+
+             # Try Redis first (primary)
+             if self.redis_client:
+                 try:
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] Publishing to Redis stream: {self.incident_topic}"
+                     )
+                     self._publish_to_redis(self.incident_topic, message)
+                     self.logger.info(
+                         "[INCIDENT_MANAGER] ✓ Incident published to Redis"
+                     )
+                     success = True
+                 except Exception as e:
+                     self.logger.error(
+                         f"[INCIDENT_MANAGER] ❌ Redis publish failed: {e}",
+                         exc_info=True
+                     )
+
+             # Fallback to Kafka if Redis failed or no Redis client
+             if not success and self.kafka_client:
+                 try:
+                     self.logger.debug(
+                         f"[INCIDENT_MANAGER] Publishing to Kafka topic: {self.incident_topic}"
+                     )
+                     self._publish_to_kafka(self.incident_topic, message)
+                     self.logger.info(
+                         "[INCIDENT_MANAGER] ✓ Incident published to Kafka"
+                     )
+                     success = True
+                 except Exception as e:
+                     self.logger.error(
+                         f"[INCIDENT_MANAGER] ❌ Kafka publish failed: {e}",
+                         exc_info=True
+                     )
+
+             if success:
+                 self.logger.info("[INCIDENT_MANAGER] ========== INCIDENT PUBLISHED ==========")
+             else:
+                 self.logger.error(
+                     "[INCIDENT_MANAGER] ❌ INCIDENT NOT PUBLISHED (both transports failed)"
+                 )
+
+             return success
+
+         except Exception as e:
+             self.logger.error(
+                 f"[INCIDENT_MANAGER] Error publishing incident: {e}",
+                 exc_info=True
+             )
+             return False
+
+     def _build_incident_message(
+         self,
+         camera_id: str,
+         incident_data: Dict[str, Any],
+         stream_info: Optional[Dict[str, Any]] = None
+     ) -> Dict[str, Any]:
+         """
+         Build the incident message in the required format.
+
+         Output format (STRICT):
+             {
+                 "camera_id": "...",
+                 "app_deployment_id": "...",
+                 "application_id": "...",
+                 "camera_name": "...",
+                 "frame_id": "...",
+                 "location_name": "...",
+                 "incidents": [{
+                     "incident_id": "...",
+                     "incident_type": "...",
+                     "severity_level": "...",
+                     "human_text": "..."
+                 }]
+             }
+
+         Keys to REMOVE: "alerts", "alert_settings", "duration", "incident_quant",
+         "start_time", "end_time", "camera_info", "level_settings"
+         """
+
+         # Extract camera info from multiple sources
+         stream_camera_info = self._extract_camera_info_from_stream(stream_info)
+
+         # Get IDs from threshold config (if available - set by config polling)
+         config_camera_id = incident_data.get("_config_camera_id", "")
+         config_application_id = incident_data.get("_config_application_id", "")
+         config_app_deployment_id = incident_data.get("_config_app_deployment_id", "")
+         config_camera_name = incident_data.get("_config_camera_name", "")
+
+         # Get IDs from factory (from action_details)
+         factory_app_deployment_id = ""
+         factory_application_id = ""
+         if self._factory_ref:
+             factory_app_deployment_id = self._factory_ref._app_deployment_id or ""
+             factory_application_id = self._factory_ref._application_id or ""
+
+         # Priority: stream_info > threshold_config > factory > camera_id param
+         final_camera_id = (
+             stream_camera_info.get("camera_id") or
+             config_camera_id or
+             camera_id or
+             ""
+         )
+
+         final_camera_name = (
+             stream_camera_info.get("camera_name") or
+             config_camera_name or
+             ""
+         )
+
+         final_app_deployment_id = (
+             stream_camera_info.get("app_deployment_id") or
+             config_app_deployment_id or
+             factory_app_deployment_id or
+             ""
+         )
+
+         final_application_id = (
+             stream_camera_info.get("application_id") or
+             config_application_id or
+             factory_application_id or
+             ""
+         )
+
+         # Extract frame_id from stream_info
+         final_frame_id = stream_camera_info.get("frame_id", "")
+
+         # Fetch location_name from API using location_id
+         location_id = stream_camera_info.get("location_id", "")
+         final_location_name = self._fetch_location_name(location_id)
+
+         self.logger.info(
+             f"[INCIDENT_MANAGER] Building message with - "
+             f"camera_id={final_camera_id}, camera_name={final_camera_name}, "
+             f"app_deployment_id={final_app_deployment_id}, application_id={final_application_id}, "
+             f"frame_id={final_frame_id}, location_name={final_location_name}"
+         )
+
+         # Build incident - ONLY include required fields
+         # Map "significant" -> "high" for backend (we use "significant" internally, backend expects "high")
+         severity_level = incident_data.get("severity_level", "")
+         if severity_level.lower().strip() == "significant":
+             severity_level = "high"
+             self.logger.debug("[INCIDENT_MANAGER] Mapped severity_level 'significant' -> 'high' for publishing")
+
+         incident = {
+             "incident_id": incident_data.get("incident_id", ""),
+             "incident_type": incident_data.get("incident_type", "fire_smoke_detection"),
+             "severity_level": severity_level,
+             "human_text": incident_data.get("human_text", "")
+         }
+
+         # Build final message with all required fields
+         message = {
+             "camera_id": final_camera_id,
+             "app_deployment_id": final_app_deployment_id,
+             "application_id": final_application_id,
+             "camera_name": final_camera_name,
+             "frame_id": final_frame_id,
+             "location_name": final_location_name,
+             "incidents": [incident]
+         }
+
+         return message
+
+     def _publish_to_redis(self, topic: str, message: Dict[str, Any]):
+         """Publish message to Redis stream."""
+         try:
+             self.redis_client.add_message(
+                 topic_or_channel=topic,
+                 message=json.dumps(message),
+                 key=message.get("camera_id", "")
+             )
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Redis publish error: {e}")
+             raise
+
+     def _publish_to_kafka(self, topic: str, message: Dict[str, Any]):
+         """Publish message to Kafka topic."""
+         try:
+             self.kafka_client.add_message(
+                 topic_or_channel=topic,
+                 message=json.dumps(message),
+                 key=message.get("camera_id", "")
+             )
+         except Exception as e:
+             self.logger.error(f"[INCIDENT_MANAGER] Kafka publish error: {e}")
+             raise
+
+     def reset_camera_state(self, camera_id: str):
+         """Reset incident state for a specific camera."""
+         with self._states_lock:
+             if camera_id in self._incident_states:
+                 self._incident_states[camera_id] = IncidentState()
+                 self.logger.info(f"[INCIDENT_MANAGER] Reset state for camera: {camera_id}")
+
+     def get_camera_state(self, camera_id: str) -> Optional[Dict[str, Any]]:
+         """Get current incident state for a camera (for debugging)."""
+         with self._states_lock:
+             state = self._incident_states.get(camera_id)
+             if state:
+                 return {
+                     "current_level": state.current_level,
+                     "pending_level": state.pending_level,
+                     "consecutive_count": state.consecutive_count,
+                     "last_published_level": state.last_published_level,
+                     "incident_cycle_id": state.incident_cycle_id,
+                     "empty_frames_count": state.empty_frames_count,
+                     "current_incident_id": state.current_incident_id,
+                     "incident_active": state.incident_active
+                 }
+         return None
+
+     def get_all_camera_states(self) -> Dict[str, Dict[str, Any]]:
+         """Get all camera states for debugging/monitoring."""
+         with self._states_lock:
+             return {
+                 cam_id: {
+                     "current_level": state.current_level,
+                     "pending_level": state.pending_level,
+                     "consecutive_count": state.consecutive_count,
+                     "last_published_level": state.last_published_level,
+                     "incident_cycle_id": state.incident_cycle_id,
+                     "empty_frames_count": state.empty_frames_count,
+                     "current_incident_id": state.current_incident_id,
+                     "incident_active": state.incident_active
+                 }
+                 for cam_id, state in self._incident_states.items()
+             }
+
+     def get_threshold_config(self, camera_id: str) -> Optional[Dict[str, Any]]:
+         """Get threshold configuration for a camera (for debugging)."""
+         with self._config_lock:
+             config = self._threshold_configs.get(camera_id)
+             if config:
+                 return {
+                     "camera_id": config.camera_id,
+                     "application_id": config.application_id,
+                     "app_deployment_id": config.app_deployment_id,
+                     "incident_type": config.incident_type,
+                     "thresholds": config.thresholds,
+                     "last_updated": config.last_updated,
+                     "camera_name": config.camera_name
+                 }
+         return None
+
+     def set_thresholds_for_camera(
+         self,
+         camera_id: str,
+         thresholds: List[Dict[str, Any]],
+         application_id: str = "",
+         app_deployment_id: str = "",
+         incident_type: str = "",
+         camera_name: str = ""
+     ):
+         """
+         Manually set thresholds for a camera (useful for testing or direct config).
+
+         Args:
+             camera_id: Camera identifier
+             thresholds: List of threshold configs
+             application_id: Application ID
+             app_deployment_id: App deployment ID
+             incident_type: Incident type (e.g., "fire")
+             camera_name: Camera name
+         """
+         # Map "high" -> "significant" in thresholds (backend uses "high", we use "significant")
+         mapped_thresholds = []
+         if thresholds:
+             for t in thresholds:
+                 if isinstance(t, dict):
+                     level = t.get("level", "").lower().strip()
+                     if level == "high":
+                         t = dict(t)  # Copy to avoid modifying original
+                         t["level"] = "significant"
+                         self.logger.debug("[INCIDENT_MANAGER] Mapped threshold level 'high' -> 'significant'")
+                     mapped_thresholds.append(t)
+
+         with self._config_lock:
+             self._threshold_configs[camera_id] = ThresholdConfig(
+                 camera_id=camera_id,
+                 application_id=application_id,
+                 app_deployment_id=app_deployment_id,
+                 incident_type=incident_type,
+                 thresholds=mapped_thresholds if mapped_thresholds else DEFAULT_THRESHOLDS.copy(),
+                 last_updated=time.time(),
+                 camera_name=camera_name
+             )
+         self.logger.info(f"[INCIDENT_MANAGER] Manually set thresholds for camera: {camera_id}")
+
+
+ class IncidentManagerFactory:
+     """
+     Factory class for creating INCIDENT_MANAGER instances.
+
+     Handles session initialization and Redis/Kafka client creation
+     following the same pattern as license_plate_monitoring.py.
+     """
+
+     ACTION_ID_PATTERN = re.compile(r"^[0-9a-f]{8,}$", re.IGNORECASE)
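ACTION_ID_PATTERN accepts hex-style IDs of eight or more characters, e.g. (hypothetical inputs):

    import re

    ACTION_ID_PATTERN = re.compile(r"^[0-9a-f]{8,}$", re.IGNORECASE)
    print(bool(ACTION_ID_PATTERN.match("68f9d95cfaff6151c774e0e7")))  # True
    print(bool(ACTION_ID_PATTERN.match("not-an-action-id")))          # False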
1327
+
1328
+ def __init__(self, logger: Optional[logging.Logger] = None):
1329
+ self.logger = logger or logging.getLogger(__name__)
1330
+ self._initialized = False
1331
+ self._incident_manager: Optional[INCIDENT_MANAGER] = None
1332
+
1333
+ # Store these for later access
1334
+ self._session = None
1335
+ self._action_id: Optional[str] = None
1336
+ self._instance_id: Optional[str] = None
1337
+ self._deployment_id: Optional[str] = None
1338
+ self._app_deployment_id: Optional[str] = None
1339
+ self._application_id: Optional[str] = None # Store application_id from action_details
1340
+ self._external_ip: Optional[str] = None
1341
+
1342
+ def initialize(self, config: Any) -> Optional[INCIDENT_MANAGER]:
1343
+ """
1344
+ Initialize and return INCIDENT_MANAGER with Redis/Kafka clients.
1345
+
1346
+ This follows the same pattern as license_plate_monitoring.py for
1347
+ session initialization and Redis/Kafka client creation.
1348
+
1349
+ Args:
1350
+ config: Configuration object with session, server_id, etc.
1351
+
1352
+ Returns:
1353
+ INCIDENT_MANAGER instance or None if initialization failed
1354
+ """
1355
+ if self._initialized and self._incident_manager is not None:
1356
+ self.logger.debug("[INCIDENT_MANAGER_FACTORY] Already initialized, returning existing instance")
1357
+ return self._incident_manager
1358
+
1359
+ try:
1360
+ # Import required modules
1361
+ from matrice_common.stream.matrice_stream import MatriceStream, StreamType
1362
+ from matrice_common.session import Session
1363
+
1364
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ===== STARTING INITIALIZATION =====")
1365
+
1366
+ # Get or create session
1367
+ self._session = getattr(config, 'session', None)
1368
+ if not self._session:
1369
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] No session in config, creating from environment...")
1370
+ account_number = os.getenv("MATRICE_ACCOUNT_NUMBER", "")
1371
+ access_key_id = os.getenv("MATRICE_ACCESS_KEY_ID", "")
1372
+ secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
1373
+ project_id = os.getenv("MATRICE_PROJECT_ID", "")
1374
+
1375
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] Env vars - account: {'SET' if account_number else 'NOT SET'}, "
1376
+ f"access_key: {'SET' if access_key_id else 'NOT SET'}, "
1377
+ f"secret: {'SET' if secret_key else 'NOT SET'}")
1378
+
1379
+
1380
+ self._session = Session(
1381
+ account_number=account_number,
1382
+ access_key=access_key_id,
1383
+ secret_key=secret_key,
1384
+ project_id=project_id,
1385
+ )
1386
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ✓ Created session from environment")
1387
+ else:
1388
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ✓ Using session from config")
1389
+
1390
+ rpc = self._session.rpc
1391
+
1392
+ # Discover action_id
1393
+ self._action_id = self._discover_action_id()
1394
+ if not self._action_id:
1395
+ self.logger.error("[INCIDENT_MANAGER_FACTORY] ❌ Could not discover action_id")
1396
+ print("----- INCIDENT MANAGER ACTION DISCOVERY -----")
1397
+ print("action_id: NOT FOUND")
1398
+ print("---------------------------------------------")
1399
+ self._initialized = True
1400
+ return None
1401
+
1402
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] ✓ Discovered action_id: {self._action_id}")
1403
+
1404
+ # Fetch action details
1405
+ action_details = {}
1406
+ try:
1407
+ action_url = f"/v1/actions/action/{self._action_id}/details"
1408
+ action_resp = rpc.get(action_url)
1409
+ if not (action_resp and action_resp.get("success", False)):
1410
+ raise RuntimeError(
1411
+ action_resp.get("message", "Unknown error")
1412
+ if isinstance(action_resp, dict) else "Unknown error"
1413
+ )
1414
+ action_doc = action_resp.get("data", {}) if isinstance(action_resp, dict) else {}
1415
+ action_details = action_doc.get("actionDetails", {}) if isinstance(action_doc, dict) else {}
1416
+
1417
+ # IMPORTANT: jobParams contains application_id
1418
+ # Structure: response['data']['jobParams']['application_id']
1419
+ job_params = action_doc.get("jobParams", {}) if isinstance(action_doc, dict) else {}
1420
+
1421
+ # Extract server details
1422
+ server_id = (
1423
+ action_details.get("serverId")
1424
+ or action_details.get("server_id")
1425
+ or action_details.get("serverID")
1426
+ or action_details.get("redis_server_id")
1427
+ or action_details.get("kafka_server_id")
1428
+ )
1429
+ server_type = (
1430
+ action_details.get("serverType")
1431
+ or action_details.get("server_type")
1432
+ or action_details.get("type")
1433
+ )
1434
+
1435
+ # Store identifiers
1436
+ self._deployment_id = action_details.get("_idDeployment") or action_details.get("deployment_id")
1437
+
1438
+ # app_deployment_id: check actionDetails first, then jobParams
1439
+ self._app_deployment_id = (
1440
+ action_details.get("app_deployment_id") or
1441
+ action_details.get("appDeploymentId") or
1442
+ action_details.get("app_deploymentId") or
1443
+ job_params.get("app_deployment_id") or
1444
+ job_params.get("appDeploymentId") or
1445
+ job_params.get("app_deploymentId") or
1446
+ ""
1447
+ )
1448
+
1449
+ # application_id: PRIMARILY from jobParams (this is where it lives!)
1450
+ # response['data']['jobParams'].get('application_id', '')
1451
+ self._application_id = (
1452
+ job_params.get("application_id") or
1453
+ job_params.get("applicationId") or
1454
+ job_params.get("app_id") or
1455
+ job_params.get("appId") or
1456
+ action_details.get("application_id") or
1457
+ action_details.get("applicationId") or
1458
+ ""
1459
+ )
1460
+
1461
+ self._instance_id = action_details.get("instanceID") or action_details.get("instanceId")
1462
+ self._external_ip = action_details.get("externalIP") or action_details.get("externalIp")
1463
+
1464
+ print("----- INCIDENT MANAGER ACTION DETAILS -----")
1465
+ print(f"action_id: {self._action_id}")
1466
+ print(f"server_type: {server_type}")
1467
+ print(f"server_id: {server_id}")
1468
+ print(f"deployment_id: {self._deployment_id}")
1469
+ print(f"app_deployment_id: {self._app_deployment_id}")
1470
+ print(f"application_id: {self._application_id}")
1471
+ print(f"instance_id: {self._instance_id}")
1472
+ print(f"external_ip: {self._external_ip}")
1473
+ print(f"jobParams keys: {list(job_params.keys()) if job_params else []}")
1474
+ print("--------------------------------------------")
1475
+
1476
+ self.logger.info(
1477
+ f"[INCIDENT_MANAGER_FACTORY] Action details - server_type={server_type}, "
1478
+ f"instance_id={self._instance_id}, "
1479
+ f"app_deployment_id={self._app_deployment_id}, application_id={self._application_id}"
1480
+ )
1481
+
1482
+ # Log all available keys for debugging
1483
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] actionDetails keys: {list(action_details.keys())}")
1484
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] jobParams keys: {list(job_params.keys()) if job_params else []}")
1485
+
1486
+ except Exception as e:
1487
+ self.logger.error(f"[INCIDENT_MANAGER_FACTORY] ❌ Failed to fetch action details: {e}", exc_info=True)
1488
+ print("----- INCIDENT MANAGER ACTION DETAILS ERROR -----")
1489
+ print(f"action_id: {self._action_id}")
1490
+ print(f"error: {e}")
1491
+ print("-------------------------------------------------")
1492
+ self._initialized = True
1493
+ return None
1494
+
1495
+ # Determine localhost vs cloud using externalIP from action_details
1496
+ is_localhost = False
1497
+ public_ip = self._get_public_ip()
1498
+
1499
+ # Resolve the server host directly from action_details (no dependency on server_id)
1500
+ server_host = (
1501
+ action_details.get("externalIP")
1502
+ or action_details.get("external_IP")
1503
+ or action_details.get("externalip")
1504
+ or action_details.get("external_ip")
1505
+ or action_details.get("externalIp")
1506
+ or action_details.get("external_Ip")
1507
+ )
1508
+ print(f"server_host: {server_host}")
1509
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] DEBUG - server_host: {server_host}")
1510
+
1511
+ localhost_indicators = ["localhost", "127.0.0.1", "0.0.0.0"]
1512
+ if server_host in localhost_indicators:
1513
+ is_localhost = True
1514
+ self.logger.info(
1515
+ f"[INCIDENT_MANAGER_FACTORY] Detected Localhost environment "
1516
+ f"(Public IP={public_ip}, Server IP={server_host})"
1517
+ )
1518
+ else:
1519
+ is_localhost = False
1520
+ self.logger.info(
1521
+ f"[INCIDENT_MANAGER_FACTORY] Detected Cloud environment "
1522
+ f"(Public IP={public_ip}, Server IP={server_host})"
1523
+ )
1524
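+ # Transport selection in short (hosts illustrative):
+ #   server_host == "127.0.0.1" -> localhost -> Redis only
+ #   server_host == "34.12.0.8" -> cloud     -> Kafka only
+ #   server_host is None        -> falls through to cloud (not a localhost indicator)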
+
1525
+ redis_client = None
1526
+ kafka_client = None
1527
+
1528
+ # STRICT SWITCH: Only Redis if localhost, Only Kafka if cloud
1529
+ if is_localhost:
1530
+ # Initialize Redis client (ONLY) using instance_id
1531
+ if not self._instance_id:
1532
+ self.logger.error("[INCIDENT_MANAGER_FACTORY] ❌ Localhost mode but instance_id missing")
1533
+ else:
1534
+ try:
1535
+ url = f"/v1/actions/get_redis_server_by_instance_id/{self._instance_id}"
1536
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] Fetching Redis server info for instance: {self._instance_id}")
1537
+ response = rpc.get(url)
1538
+
1539
+ if isinstance(response, dict) and response.get("success", False):
1540
+ data = response.get("data", {})
1541
+ host = data.get("host")
1542
+ port = data.get("port")
1543
+ username = data.get("username")
1544
+ password = data.get("password", "")
1545
+ db_index = data.get("db", 0)
1546
+ conn_timeout = data.get("connection_timeout", 120)
1547
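+ # Hypothetical payload this branch expects (values illustrative):
+ # {"success": true, "data": {"host": "127.0.0.1", "port": 6379, "username": "default",
+ #                            "password": "...", "db": 0, "connection_timeout": 120}}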
+
1548
+ print("----- INCIDENT MANAGER REDIS SERVER PARAMS -----")
1549
+ print(f"instance_id: {self._instance_id}")
1550
+ print(f"host: {host}")
1551
+ print(f"port: {port}")
1552
+ print(f"username: {username}")
1553
+ print(f"password: {'*' * len(password) if password else ''}")
1554
+ print(f"db: {db_index}")
1555
+ print(f"connection_timeout: {conn_timeout}")
1556
+ print("------------------------------------------------")
1557
+
1558
+ self.logger.info(
1559
+ f"[INCIDENT_MANAGER_FACTORY] Redis params - host={host}, port={port}, user={username}"
1560
+ )
1561
+
1562
+ redis_client = MatriceStream(
1563
+ StreamType.REDIS,
1564
+ host=host,
1565
+ port=int(port),
1566
+ password=password,
1567
+ username=username,
1568
+ db=db_index,
1569
+ connection_timeout=conn_timeout
1570
+ )
1571
+ # Setup for both config polling and incident publishing
1572
+ redis_client.setup("incident_modification_config")
1573
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ✓ Redis client initialized")
1574
+ else:
1575
+ self.logger.warning(
1576
+ f"[INCIDENT_MANAGER_FACTORY] Failed to fetch Redis server info: "
1577
+ f"{response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}"
1578
+ )
1579
+ except Exception as e:
1580
+ self.logger.warning(f"[INCIDENT_MANAGER_FACTORY] Redis initialization failed: {e}")
1581
+
1582
+ else:
1583
+ # Initialize Kafka client (ONLY) using global info endpoint
1584
+ try:
1585
+ url = f"/v1/actions/get_kafka_info"
1586
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] Fetching Kafka server info for Cloud mode")
1587
+ response = rpc.get(url)
1588
+
1589
+ if isinstance(response, dict) and response.get("success", False):
1590
+ data = response.get("data", {})
1591
+ enc_ip = data.get("ip")
1592
+ enc_port = data.get("port")
1593
+
1594
+ # Decode base64 encoded values
1595
+ ip_addr = None
1596
+ port = None
1597
+ try:
1598
+ ip_addr = base64.b64decode(str(enc_ip)).decode("utf-8")
1599
+ except Exception:
1600
+ ip_addr = enc_ip
1601
+ try:
1602
+ port = base64.b64decode(str(enc_port)).decode("utf-8")
1603
+ except Exception:
1604
+ port = enc_port
1605
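+ # Decode sketch: base64.b64decode("MTkyLjE2OC4xLjU=").decode("utf-8") == "192.168.1.5";
+ # if decoding raises for a given value, the raw encoded value is kept as a fallback.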
+
1606
+ print("----- INCIDENT MANAGER KAFKA SERVER PARAMS -----")
1607
+ print(f"ipAddress: {ip_addr}")
1608
+ print(f"port: {port}")
1609
+ print("------------------------------------------------")
1610
+
1611
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] Kafka params - ip={ip_addr}, port={port}")
1612
+
1613
+ bootstrap_servers = f"{ip_addr}:{port}"
1614
+ kafka_client = MatriceStream(
1615
+ StreamType.KAFKA,
1616
+ bootstrap_servers=bootstrap_servers,
1617
+ sasl_mechanism="SCRAM-SHA-256",
1618
+ sasl_username="matrice-sdk-user",
1619
+ sasl_password="matrice-sdk-password",
1620
+ security_protocol="SASL_PLAINTEXT"
1621
+ )
1622
+ # Setup for both config polling and incident publishing
1623
+ kafka_client.setup("incident_modification_config", consumer_group_id="py_analytics_incidents")
1624
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] ✓ Kafka client initialized (servers={bootstrap_servers})")
1625
+ else:
1626
+ self.logger.warning(
1627
+ f"[INCIDENT_MANAGER_FACTORY] Failed to fetch Kafka server info: "
1628
+ f"{response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}"
1629
+ )
1630
+ except Exception as e:
1631
+ self.logger.warning(f"[INCIDENT_MANAGER_FACTORY] Kafka initialization failed: {e}")
1632
+
1633
+ # Create incident manager if we have at least one transport
1634
+ if redis_client or kafka_client:
1635
+ self._incident_manager = INCIDENT_MANAGER(
1636
+ redis_client=redis_client,
1637
+ kafka_client=kafka_client,
1638
+ incident_topic="incident_res",
1639
+ config_topic="incident_modification_config",
1640
+ logger=self.logger
1641
+ )
1642
+ # Set factory reference for accessing deployment info
1643
+ self._incident_manager.set_factory_ref(self)
1644
+ # Start the config polling thread
1645
+ self._incident_manager.start()
1646
+
1647
+ transport = "Redis" if redis_client else "Kafka"
1648
+ self.logger.info(f"[INCIDENT_MANAGER_FACTORY] ✓ Incident manager created with {transport}")
1649
+ print(f"----- INCIDENT MANAGER INITIALIZED ({transport}) -----")
1650
+ else:
1651
+ self.logger.warning(
1652
+ f"[INCIDENT_MANAGER_FACTORY] No {'Redis' if is_localhost else 'Kafka'} client available, "
1653
+ f"incident manager not created"
1654
+ )
1655
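+ # When a manager is created above it holds exactly one transport: Redis when
+ # localhost was detected, Kafka otherwise. The INCIDENT_MANAGER constructor
+ # accepts either (or both), so a dual-transport setup would only require
+ # passing both clients.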
+
1656
+ self._initialized = True
1657
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] ===== INITIALIZATION COMPLETE =====")
1658
+ return self._incident_manager
1659
+
1660
+ except ImportError as e:
1661
+ self.logger.error(f"[INCIDENT_MANAGER_FACTORY] Import error: {e}")
1662
+ self._initialized = True
1663
+ return None
1664
+ except Exception as e:
1665
+ self.logger.error(f"[INCIDENT_MANAGER_FACTORY] Initialization failed: {e}", exc_info=True)
1666
+ self._initialized = True
1667
+ return None
1668
+
1669
+ def _discover_action_id(self) -> Optional[str]:
1670
+ """Discover action_id from current working directory name (and parents)."""
1671
+ try:
1672
+ candidates: List[str] = []
1673
+
1674
+ try:
1675
+ cwd = Path.cwd()
1676
+ candidates.append(cwd.name)
1677
+ for parent in cwd.parents:
1678
+ candidates.append(parent.name)
1679
+ except Exception:
1680
+ pass
1681
+
1682
+ try:
1683
+ usr_src = Path("/usr/src")
1684
+ if usr_src.exists():
1685
+ for child in usr_src.iterdir():
1686
+ if child.is_dir():
1687
+ candidates.append(child.name)
1688
+ except Exception:
1689
+ pass
1690
+
1691
+ for candidate in candidates:
1692
+ if candidate and len(candidate) >= 8 and self.ACTION_ID_PATTERN.match(candidate):
1693
+ return candidate
1694
+ except Exception:
1695
+ pass
1696
+ return None
1697
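+ # Example (hypothetical): running under /usr/src/665a1b2c3d4e5f6a7b8c9d0e surfaces
+ # "665a1b2c3d4e5f6a7b8c9d0e" as a candidate; it is returned only if it is at
+ # least 8 characters long and matches ACTION_ID_PATTERN.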
+
1698
+ def _get_public_ip(self) -> str:
1699
+ """Get the public IP address of this machine."""
1700
+ self.logger.info("[INCIDENT_MANAGER_FACTORY] Fetching public IP address...")
1701
+ try:
1702
+ public_ip = urllib.request.urlopen(
1703
+ "https://v4.ident.me", timeout=120
1704
+ ).read().decode("utf8").strip()
1705
+ self.logger.debug(f"[INCIDENT_MANAGER_FACTORY] Public IP: {public_ip}")
1706
+ return public_ip
1707
+ except Exception as e:
1708
+ self.logger.warning(f"[INCIDENT_MANAGER_FACTORY] Error fetching public IP: {e}")
1709
+ return "localhost"
1710
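+ # v4.ident.me returns the caller's public IPv4 as plain text (e.g. "203.0.113.7",
+ # an address from the documentation range, shown here purely as illustration);
+ # any failure falls back to "localhost".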
+
1711
+ def _get_backend_base_url(self) -> str:
1712
+ """Resolve backend base URL based on ENV variable."""
1713
+ env = os.getenv("ENV", "prod").strip().lower()
1714
+ if env in ("prod", "production"):
1715
+ host = "prod.backend.app.matrice.ai"
1716
+ elif env in ("dev", "development"):
1717
+ host = "dev.backend.app.matrice.ai"
1718
+ else:
1719
+ host = "staging.backend.app.matrice.ai"
1720
+ return f"https://{host}"
1721
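+ # Mapping sketch: ENV unset or "prod"/"production" -> prod host;
+ # "dev"/"development" -> dev host; any other value -> staging host.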
+
1722
+ @property
1723
+ def is_initialized(self) -> bool:
1724
+ return self._initialized
1725
+
1726
+ @property
1727
+ def incident_manager(self) -> Optional[INCIDENT_MANAGER]:
1728
+ return self._incident_manager
1729
+
1730
+
1731
+ # Module-level factory instance for convenience
1732
+ _default_factory: Optional[IncidentManagerFactory] = None
1733
+
1734
+
1735
+ def get_incident_manager(config: Any, logger: Optional[logging.Logger] = None) -> Optional[INCIDENT_MANAGER]:
1736
+ """
1737
+ Get or create INCIDENT_MANAGER instance.
1738
+
1739
+ This is a convenience function that uses a module-level factory.
1740
+ For more control, use IncidentManagerFactory directly.
1741
+
1742
+ Args:
1743
+ config: Configuration object; its 'session' attribute is reused when present, otherwise a Session is built from environment variables.
1744
+ logger: Logger instance
1745
+
1746
+ Returns:
1747
+ INCIDENT_MANAGER instance or None
1748
+ """
1749
+ global _default_factory
1750
+
1751
+ if _default_factory is None:
1752
+ _default_factory = IncidentManagerFactory(logger=logger)
1753
+
1754
+ return _default_factory.initialize(config)
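+
+
+ # Usage sketch (config is any object; hypothetical caller shown for illustration):
+ #
+ #   manager = get_incident_manager(config, logger=logging.getLogger(__name__))
+ #   if manager is not None:
+ #       # config polling already runs: the factory called manager.start()
+ #       ...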