matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1146 @@
"""
Fire and Smoke Detection use case implementation.

This module provides a structured implementation of fire and smoke detection
with counting, insights generation, alerting, and tracking.
"""
from datetime import datetime, timezone, timedelta
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
import time
import re
from collections import Counter

from ..core.base import (
    BaseProcessor,
    ProcessingContext,
    ProcessingResult,
    ConfigProtocol,
)
from ..core.config import BaseConfig, AlertConfig
from ..utils import (
    filter_by_confidence,
    apply_category_mapping,
    calculate_counting_summary,
    match_results_structure,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)

# ======================
# Config Definition
# ======================


@dataclass
class FireSmokeConfig(BaseConfig):
    confidence_threshold: float = 0.85

    # Only fire and smoke categories are included here; the "normal" class is excluded.
    fire_smoke_categories: List[str] = field(
        default_factory=lambda: ["fire", "smoke"]
    )
    target_categories: List[str] = field(
        default_factory=lambda: ['fire']
    )

    alert_config: Optional[AlertConfig] = field(
        default_factory=lambda: AlertConfig(
            count_thresholds={"fire": 0},
            alert_type=["email"],
            alert_value=["FIRE_INFO@matrice.ai"],
            alert_incident_category=["FIRE-ALERT"]
        )
    )

    time_window_minutes: int = 60
    enable_unique_counting: bool = True

    # Map model indices 0 and 1 to fire and smoke; any "normal" class index is omitted.
    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "fire",
            1: "smoke",
        }
    )

    # BBox smoothing configuration
    enable_smoothing: bool = False
    smoothing_algorithm: str = "linear"
    smoothing_window_size: int = 5
    smoothing_cooldown_frames: int = 10
    smoothing_confidence_range_factor: float = 0.2
    threshold_area: Optional[float] = 250200.0

    def __post_init__(self):
        if not (0.0 <= self.confidence_threshold <= 1.0):
            raise ValueError("confidence_threshold must be between 0.0 and 1.0")

        # Normalize category names to lowercase for consistent matching
        self.fire_smoke_categories = [cat.lower() for cat in self.fire_smoke_categories]
        if self.index_to_category:
            self.index_to_category = {k: v.lower() for k, v in self.index_to_category.items()}
        if self.target_categories:
            self.target_categories = [cat.lower() for cat in self.target_categories]

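# A minimal usage sketch (illustrative, not part of the shipped module; it assumes
# BaseConfig's own fields have defaults): __post_init__ validates the threshold and
# lowercases category names, so mixed-case inputs are safe.
#
#     cfg = FireSmokeConfig(confidence_threshold=0.5, target_categories=["FIRE"])
#     cfg.target_categories                      # -> ['fire']
#     FireSmokeConfig(confidence_threshold=1.5)  # raises ValueError
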
# ======================

# ======================
class FireSmokeUseCase(BaseProcessor):
    def __init__(self):
        super().__init__("fire_smoke_detection")
        self.category = "hazard"
        self.CASE_TYPE: Optional[str] = 'fire_smoke_detection'
        self.CASE_VERSION: Optional[str] = '1.3'

        self.smoothing_tracker = None  # Required for bbox smoothing
        self._fire_smoke_recent_history = []
        self.target_categories = ['fire']

        self._ascending_alert_list: List[str] = []
        self.current_incident_end_timestamp: str = "N/A"
        self.id_hit_list = ["low", "medium", "significant", "critical", "low"]
        self.id_hit_counter = 0
        self.latest_stack: Optional[str] = None
        self.id_timing_list = []
        self.return_id_counter = 1
        self.start_timer = None
        self._tracking_start_time = None

    def process(
        self,
        data: Any,
        config: ConfigProtocol,
        context: Optional[ProcessingContext] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> ProcessingResult:
        """Process the fire and smoke detection use case."""
        start_time = time.time()

        try:
            # Step 0: Validate config
            if not isinstance(config, FireSmokeConfig):
                return self.create_error_result(
                    "Invalid configuration type for fire and smoke detection",
                    usecase=self.name,
                    category=self.category,
                    context=context,
                )

            # Step 1: Initialize context
            if context is None:
                context = ProcessingContext()
            input_format = match_results_structure(data)
            context.input_format = input_format
            context.confidence_threshold = config.confidence_threshold
            self.logger.info(
                f"Processing fire and smoke detection with format: {input_format.value} "
                f"and threshold: {config.confidence_threshold}"
            )

            # Step 2: Confidence thresholding
            processed_data = data
            if config.confidence_threshold is not None:
                processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
                self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")

            # Step 3: Category mapping
            if config.index_to_category:
                processed_data = apply_category_mapping(processed_data, config.index_to_category)
                self.logger.debug("Applied category mapping")

            if self.target_categories:
                processed_data = [d for d in processed_data if d.get('category', '').lower() in self.target_categories]
                self.logger.debug("Applied category filtering")

            # Step 3.5: BBox smoothing for fire/smoke
            if config.enable_smoothing:
                if self.smoothing_tracker is None:
                    smoothing_config = BBoxSmoothingConfig(
                        smoothing_algorithm=config.smoothing_algorithm,
                        window_size=config.smoothing_window_size,
                        cooldown_frames=config.smoothing_cooldown_frames,
                        confidence_threshold=config.confidence_threshold,
                        confidence_range_factor=config.smoothing_confidence_range_factor,
                        enable_smoothing=True
                    )
                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)

                smooth_categories = {"fire", "smoke"}
                fire_smoke_detections = [d for d in processed_data if d.get("category", "").lower() in smooth_categories]

                smoothed_detections = bbox_smoothing(
                    fire_smoke_detections,
                    self.smoothing_tracker.config,
                    self.smoothing_tracker
                )
                non_smoothed_detections = [d for d in processed_data if d.get("category", "").lower() not in smooth_categories]

                processed_data = non_smoothed_detections + smoothed_detections
                self.logger.debug("Applied bbox smoothing for fire/smoke categories")

            # Step 4: Summarization
            fire_smoke_summary = self._calculate_fire_smoke_summary(processed_data, config)
            general_summary = calculate_counting_summary(processed_data)

            # Step 5: Predictions
            predictions = self._extract_predictions(processed_data, config)

            # Step 6: Frame number extraction
            frame_number = None
            if stream_info:
                input_settings = stream_info.get("input_settings", {})
                start_frame = input_settings.get("start_frame")
                end_frame = input_settings.get("end_frame")
                if start_frame is not None and end_frame is not None and start_frame == end_frame:
                    frame_number = start_frame
                elif start_frame is not None:
                    frame_number = start_frame

            # Step 7: Alerts
            # Ensure we have an AlertConfig object. `dataclasses.field` is only
            # meant for class-level default declarations; using it at runtime
            # produces a `Field` object which later breaks JSON serialization.
            if config.alert_config is None:
                config.alert_config = AlertConfig(
                    count_thresholds={"fire": 0},
                    alert_type=["email"],
                    alert_value=["FIRE_INFO@matrice.ai"],
                    alert_incident_category=["FIRE-ALERT"]
                )

            alerts = self._check_alerts(fire_smoke_summary, frame_number, config, stream_info)

            # Step 8: Incidents and tracking stats
            incidents_list = self._generate_incidents(fire_smoke_summary, alerts, config, frame_number=frame_number, stream_info=stream_info)
            tracking_stats_list = self._generate_tracking_stats(
                fire_smoke_summary, alerts, config,
                frame_number=frame_number,
                stream_info=stream_info
            )
            business_analytics_list = self._generate_business_analytics(fire_smoke_summary, alerts, config, stream_info, is_empty=True)

            # Step 9: Human-readable summary
            summary_list = self._generate_summary(fire_smoke_summary, general_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

            # Finalize context and return result
            context.processing_time = time.time() - start_time

            incidents = incidents_list[0] if incidents_list else {}
            tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}

            # EVENT ENDED SIGNAL: _generate_tracking_stats may return extra
            # entries carrying the end-of-event alerts and incident.
            if len(tracking_stats_list) > 1:
                alerts = tracking_stats_list[1]
                incidents = tracking_stats_list[2]
                tracking_stats = tracking_stats_list[0]

            business_analytics = business_analytics_list[0] if business_analytics_list else []
            summary = summary_list[0] if summary_list else {}
            agg_summary = {str(frame_number): {
                "incidents": [incidents],
                "tracking_stats": [tracking_stats],
                "business_analytics": business_analytics,
                "alerts": alerts,
                "human_text": summary}
            }

            context.mark_completed()

            result = self.create_result(
                data={"agg_summary": agg_summary},
                usecase=self.name,
                category=self.category,
                context=context)
            return result

        except Exception as e:
            self.logger.error(f"Error in fire and smoke processing: {str(e)}")
            return self.create_error_result(
                f"Fire and smoke processing failed: {str(e)}",
                error_type="FireSmokeProcessingError",
                usecase=self.name,
                category=self.category,
                context=context,
            )

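    # Example call (a sketch; assumes raw detections in the list-of-dicts format
    # that match_results_structure recognizes, and a frame-addressed stream_info;
    # attribute access on the returned ProcessingResult is illustrative):
    #
    #     processor = FireSmokeUseCase()
    #     cfg = processor.create_default_config(confidence_threshold=0.5)
    #     data = [{"category": 0, "confidence": 0.91,
    #              "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 210, "ymax": 220}}]
    #     result = processor.process(data, cfg,
    #                                stream_info={"input_settings": {"start_frame": 42, "end_frame": 42}})
    #     result.data["agg_summary"]["42"]  # incidents, tracking_stats, alerts, human_text
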
    # ==== Internal Utilities ====
    def _check_alerts(
        self, summary: Dict, frame_number: Any, config: FireSmokeConfig, stream_info: Optional[Dict[str, Any]] = None
    ) -> List[Dict]:
        """Raise alerts if fire or smoke is detected, with severity based on intensity."""
        def get_trend(data, lookback=23, prior=14):
            '''
            Determine whether the severity trend is ascending or descending by
            comparing the dominant level of an earlier window against the most
            recent entries.
            '''
            if len(data) < lookback:
                return True
            post = lookback - prior - 1
            levels_list = ["low", "medium", "significant", "critical", "low"]

            current_dominant_incident = Counter(data[-lookback:][:-prior]).most_common(1)[0][0]  # dominant level of the first lookback - prior entries of the last `lookback`
            potential_dominant_incident = Counter(data[-post:]).most_common(1)[0][0]  # dominant level of the last `post` entries
            current_dominant_incident_index = levels_list.index(current_dominant_incident)
            potential_dominant_incident_index = levels_list.index(potential_dominant_incident)

            return current_dominant_incident_index <= potential_dominant_incident_index

        alerts = []
        total = summary.get("total_objects", 0)
        by_category = summary.get("by_category", {})
        detections = summary.get("detections", [])
        frame_key = str(frame_number) if frame_number is not None else "current_frame"

        if total == 0:
            return []
        if not config.alert_config:
            return alerts

        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            # Fetch the last recorded severity level (defaults to "low" when there is no history yet)
            last_level = self._ascending_alert_list[-1] if self._ascending_alert_list else "low"
            rank_ids, alert_id = self._get_alert_incident_ids(last_level, stream_info)
            if rank_ids not in [1, 2, 3, 4, 5]:
                alert_id = 1

            count_thresholds = {}
            if config.alert_config and hasattr(config.alert_config, "count_thresholds"):
                count_thresholds = config.alert_config.count_thresholds or {}

            for category, threshold in count_thresholds.items():
                alert_serial = getattr(config.alert_config, 'alert_type', ['Default'])
                alert_serial = alert_serial[0]
                if category == "all" and total > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": "alert_" + category + '_' + alert_serial + '_' + str(alert_id),
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=23, prior=14),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))
                                     }
                    })
                elif category in summary.get("per_category_count", {}):
                    count = summary.get("per_category_count", {})[category]
                    if count > threshold:  # Alert only when the count exceeds the threshold
                        alerts.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                            "alert_id": "alert_" + category + '_' + alert_serial + '_' + str(alert_id),
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": threshold,
                            "ascending": get_trend(self._ascending_alert_list, lookback=23, prior=14),
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                              getattr(config.alert_config, 'alert_value', ['JSON']))
                                         }
                        })

        return alerts

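    # Worked example of get_trend on a hypothetical severity history: with
    # lookback=23 and prior=14, the dominant level of the first 9 of the last
    # 23 entries is compared against the dominant level of the last 8 entries;
    # the trend counts as "ascending" when the recent level ranks at least as high.
    #
    #     data = ["low"] * 15 + ["critical"] * 8
    #     get_trend(data)  # -> True ("low" has index 0 <= "critical" index 3)
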
    def _generate_incidents(
        self,
        summary: Dict,
        alerts: List[Dict],
        config: FireSmokeConfig,
        frame_number: Optional[int] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> Dict:
        """Generate structured events for fire and smoke detection output with frame-aware keys."""

        def get_trend_incident(data, lookback=23, prior=14):
            '''
            Determine whether the severity trend is ascending or descending by
            comparing the dominant level of an earlier window against the most
            recent entries.
            '''
            if len(data) < lookback:
                return "", 0, "", 0

            post = lookback - prior - 1
            levels_list = ["low", "medium", "significant", "critical"]
            current_dominant_incident = Counter(data[-lookback:][:-prior]).most_common(1)[0][0]  # dominant level of the first lookback - prior entries of the last `lookback`
            current_dominant_incident_index = levels_list.index(current_dominant_incident)

            potential_dominant_incident = Counter(data[-post:]).most_common(1)[0][0]  # dominant level of the last `post` entries
            potential_dominant_incident_index = levels_list.index(potential_dominant_incident)

            return current_dominant_incident, current_dominant_incident_index, potential_dominant_incident, potential_dominant_incident_index

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        incidents = []

        total = summary.get("total_objects", 0)
        by_category = summary.get("by_category", {})
        detections = summary.get("detections", [])

        total_fire = by_category.get("fire", 0)
        total_smoke = by_category.get("smoke", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)
        # Cap the severity history at the most recent 5000 entries
        self._ascending_alert_list = self._ascending_alert_list[-5000:]
        levels_list = ["low", "medium", "significant", "critical"]

        if total > 0:
            # Calculate total bbox area
            total_area = 0.0
            # Safely retrieve count thresholds. If alert_config is None (e.g., when it
            # is not provided or failed to parse) we default to an empty mapping so
            # the subsequent logic can still execute without raising an AttributeError.
            count_thresholds = {}
            if config.alert_config and hasattr(config.alert_config, "count_thresholds"):
                count_thresholds = config.alert_config.count_thresholds or {}

            for category, threshold in count_thresholds.items():
                if category in summary.get("per_category_count", {}):

                    start_timestamp = self._get_start_timestamp_str(stream_info)

                    if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                        self.current_incident_end_timestamp = 'Incident still active'
                    elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                        current_dominant_incident, current_dominant_incident_index, potential_dominant_incident, potential_dominant_incident_index = get_trend_incident(self._ascending_alert_list, lookback=23, prior=14)

                        if current_dominant_incident != potential_dominant_incident:
                            self.current_incident_end_timestamp = current_timestamp
                            self.current_incident_end_timestamp = 'Incident active'
                    elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                        self.current_incident_end_timestamp = 'N/A'

                    for det in detections:
                        bbox = det.get("bounding_box") or det.get("bbox")
                        if bbox:
                            xmin = bbox.get("xmin")
                            ymin = bbox.get("ymin")
                            xmax = bbox.get("xmax")
                            ymax = bbox.get("ymax")
                            if None not in (xmin, ymin, xmax, ymax):
                                width = xmax - xmin
                                height = ymax - ymin
                                if width > 0 and height > 0:
                                    total_area += width * height

                    threshold_area = config.threshold_area  # Same threshold as insights

                    intensity_pct = min(100.0, (total_area / threshold_area) * 100)

                    if config.alert_config and config.alert_config.count_thresholds:
                        if intensity_pct >= 30:
                            level = "critical"
                            self._ascending_alert_list.append(level)
                        elif intensity_pct >= 13:
                            level = "significant"
                            self._ascending_alert_list.append(level)
                        elif intensity_pct >= 3:
                            level = "medium"
                            self._ascending_alert_list.append(level)
                        else:
                            level = "low"
                            self._ascending_alert_list.append(level)
                    else:
                        if intensity_pct > 29:
                            level = "critical"
                            intensity = 10.0
                            self._ascending_alert_list.append(level)
                        elif intensity_pct > 12:
                            level = "significant"
                            intensity = 9.0
                            self._ascending_alert_list.append(level)
                        elif intensity_pct > 2:
                            level = "medium"
                            intensity = 7.0
                            self._ascending_alert_list.append(level)
                        else:
                            level = "low"
                            intensity = min(10.0, intensity_pct / 3.0)
                            self._ascending_alert_list.append(level)

                    # Generate human text in the new format
                    human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
                    human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
                    human_text = "\n".join(human_text_lines)

                    # Pass the last severity level value instead of a single-element list
                    last_level = level if level else self._ascending_alert_list[-1]
                    rank_ids, incident_id = self._get_alert_incident_ids(last_level, stream_info)
                    if rank_ids not in [1, 2, 3, 4, 5]:
                        incident_id = 1
                    if len(self.id_timing_list) > 0 and len(self.id_timing_list) == rank_ids:
                        start_timestamp = self.id_timing_list[-1]
                    if len(self.id_timing_list) > 0 and len(self.id_timing_list) > 4 and level == 'critical':
                        start_timestamp = self.id_timing_list[-1]

                    alert_settings = []
                    if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                        alert_settings.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                            "ascending": True,
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                              getattr(config.alert_config, 'alert_value', ['JSON']))
                                         }
                        })

                    event = self.create_incident(incident_id='incident_' + self.CASE_TYPE + '_' + str(incident_id), incident_type=self.CASE_TYPE,
                                                 severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                                 start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                                                 level_settings={"low": 3, "medium": 5, "significant": 15, "critical": 30})
                    event['duration'] = self.get_duration_seconds(start_timestamp, self.current_incident_end_timestamp)
                    incidents.append(event)

        else:
            # self._ascending_alert_list.append(level)
            incidents.append({})
        return incidents

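    # Severity mapping used above, for reference: intensity_pct is the summed
    # bbox area as a percentage of config.threshold_area, capped at 100, and
    #     >= 30% -> "critical", >= 13% -> "significant", >= 3% -> "medium", else "low".
    # For example, one 100x100 box against the default threshold_area of 250200.0
    # gives 10000 / 250200 * 100 ≈ 4.0%, i.e. a "medium" incident.
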
    def _generate_tracking_stats(
        self,
        summary: Dict,
        alerts: List,
        config: FireSmokeConfig,
        frame_number: Optional[int] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> Dict:
        """Generate structured tracking stats for fire and smoke detection with frame-based keys."""

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        tracking_stats = []
        camera_info = self.get_camera_info_from_stream(stream_info)

        total = summary.get("total_objects", 0)
        by_category = summary.get("by_category", {})
        detections = summary.get("detections", [])

        total_fire = by_category.get("fire", 0)
        total_smoke = by_category.get("smoke", 0)

        # Maintain a rolling detection history
        if frame_number is not None:
            self._fire_smoke_recent_history.append({
                "frame": frame_number,
                "fire": total_fire,
                "smoke": total_smoke,
            })
            if len(self._fire_smoke_recent_history) > 150:
                self._fire_smoke_recent_history.pop(0)

        # Generate human-readable tracking text (people-style format)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        start_timestamp = self._get_start_timestamp_str(stream_info)
        # Create high-precision timestamps for input_timestamp and reset_timestamp
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

        # Build the total_counts array in the expected format
        total_counts = []
        if total > 0:
            total_counts.append({
                "category": 'Fire/Smoke',  # TODO: Discuss and fix what to do with this
                "count": 1
            })

        # Build the current_counts array in the expected format
        current_counts = []
        if total > 0:  # Include even if 0 when there are detections
            current_counts.append({
                "category": 'Fire/Smoke',  # TODO: Discuss and fix what to do with this
                "count": 1
            })

        human_lines = [f"CURRENT FRAME @ {current_timestamp}:"]
        if total_fire > 0:
            human_lines.append(f"\t- Fire regions detected: {total_fire}")
        if total_smoke > 0:
            human_lines.append(f"\t- Smoke clouds detected: {total_smoke}")
        if total_fire == 0 and total_smoke == 0:
            human_lines.append("\t- No fire or smoke detected")

        human_lines.append("")
        human_lines.append(f"ALERTS SINCE @ {start_timestamp}:")

        recent_fire_detected = any(entry.get("fire", 0) > 0 for entry in self._fire_smoke_recent_history)
        recent_smoke_detected = any(entry.get("smoke", 0) > 0 for entry in self._fire_smoke_recent_history)

        if recent_fire_detected:
            human_lines.append("\t- Fire alert")
        if recent_smoke_detected:
            human_lines.append("\t- Smoke alert")
        if not recent_fire_detected and not recent_smoke_detected:
            human_lines.append("\t- No fire or smoke detected in recent frames")

        human_text = "\n".join(human_lines)

        # Prepare detections without confidence scores (as per eg.json)
        detections = []
        for detection in summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "Fire/Smoke")
            # Include segmentation if available (like in eg.json)
            if detection.get("masks"):
                segmentation = detection.get("masks", [])
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("segmentation"):
                segmentation = detection.get("segmentation")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("mask"):
                segmentation = detection.get("mask")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            else:
                detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)

        # Build the alert_settings array in the expected format
        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']))
                             }
            })

        reset_settings = [
            {
                "interval_type": "daily",
                "reset_time": {
                    "value": 9,
                    "time_unit": "hour"
                }
            }
        ]

        tracking_stat = self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
                                                   detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                                   reset_settings=reset_settings, start_time=high_precision_start_timestamp,
                                                   reset_time=high_precision_reset_timestamp)

        tracking_stats.append(tracking_stat)

        if len(self.id_hit_list) == 1:
            last_ending_id, incident_id = self._get_alert_incident_ids("", stream_info)

            if len(self.id_timing_list) > 0 and len(self.id_timing_list) >= 5:
                start_timestamp = self.id_timing_list[-1]
            if incident_id == self.return_id_counter:
                incident_id = incident_id - 1
            if self.return_id_counter > incident_id:
                incident_id = self.return_id_counter - incident_id
            if last_ending_id == 5:
                alert_serial = getattr(config.alert_config, 'alert_type', ['Default'])
                alert_serial = alert_serial[0]
                alerts = [{
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "alert_id": "alert_" + 'Event_Ended' + '_' + alert_serial + '_' + str(incident_id),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": 0,
                    "ascending": False,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                      getattr(config.alert_config, 'alert_value', ['JSON']))
                                 }
                }]
                tracking_stats.append(alerts)
                tracking_stats[0]['alerts'] = alerts
                tracking_stats.append(self.create_incident(incident_id='incident_' + self.CASE_TYPE + '_' + str(incident_id), incident_type=self.CASE_TYPE,
                                                           severity_level='info', human_text='Event Over', camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                                           start_time=start_timestamp, end_time='Incident still active',
                                                           level_settings={"low": 3, "medium": 5, "significant": 15, "critical": 30}))

        return tracking_stats

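    # Sample human_text produced above for a frame with one fire region and an
    # earlier fire entry in the rolling history (timestamps illustrative):
    #
    #     CURRENT FRAME @ 2025:10:27 19:31:20:
    #         - Fire regions detected: 1
    #
    #     ALERTS SINCE @ 2025:10:27 19:00:00:
    #         - Fire alert
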
    def _generate_summary(
        self, summary: dict, general_summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List
    ) -> List[str]:
        """
        Generate a human-readable text block covering the tracking stats, incidents, business analytics, and alerts.
        """
        lines = []
        lines.append("Application Name: " + self.CASE_TYPE)
        lines.append("Application Version: " + self.CASE_VERSION)
        if len(incidents) > 0:
            lines.append("Incidents: " + f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
        if len(tracking_stats) > 0:
            lines.append("Tracking Statistics: " + f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
        if len(business_analytics) > 0:
            lines.append("Business Analytics: " + f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")

        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines.append("Summary: " + "No Summary Data")

        return ["\n".join(lines)]

    def _calculate_fire_smoke_summary(
        self, data: Any, config: FireSmokeConfig
    ) -> Dict[str, Any]:
        """Calculate a summary for fire and smoke detections."""
        if isinstance(data, list):
            # Normalize the categories to lowercase for matching
            valid_categories = [cat.lower() for cat in config.fire_smoke_categories]

            detections = [
                det for det in data
                if det.get("category", "").lower() in valid_categories
            ]
            counts = {}
            for det in detections:
                cat = det.get('category', 'unknown').lower()
                counts[cat] = counts.get(cat, 0) + 1

            summary = {
                "total_objects": len(detections),
                "by_category": {},
                "detections": detections,
                "per_category_count": counts,
            }

            # Count by each category defined in the config
            for category in config.fire_smoke_categories:
                count = len([
                    det for det in detections
                    if det.get("category", "").lower() == category.lower()
                ])
                summary["by_category"][category] = count

            return summary

        return {"total_objects": 0, "by_category": {}, "detections": []}

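    # Example summary shape (a sketch) for two fire boxes and one smoke box:
    #
    #     _calculate_fire_smoke_summary([...], config)
    #     # -> {"total_objects": 3,
    #     #     "by_category": {"fire": 2, "smoke": 1},
    #     #     "detections": [...],
    #     #     "per_category_count": {"fire": 2, "smoke": 1}}
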
    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: FireSmokeConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
        """Generate standardized business analytics for the agg_summary structure."""
        if is_empty:
            return []

        # -----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
        # camera_info = self.get_camera_info_from_stream(stream_info)
        # business_analytics = self.create_business_analytics(analysis_name, statistics,
        #     human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
        #     reset_settings)
        # return business_analytics

    def _calculate_metrics(
        self,
        summary: Dict,
        config: FireSmokeConfig,
        context: ProcessingContext,
    ) -> Dict[str, Any]:
        """Calculate detailed metrics for fire and smoke analytics."""

        total = summary.get("total_objects", 0)
        by_category = summary.get("by_category", {})
        detections = summary.get("detections", [])

        total_fire = by_category.get("fire", 0)
        total_smoke = by_category.get("smoke", 0)

        metrics = {
            "total_detections": total,
            "total_fire": total_fire,
            "total_smoke": total_smoke,
            "processing_time": context.processing_time or 0.0,
            "confidence_threshold": config.confidence_threshold,
            "intensity_percentage": 0.0,
            "hazard_level": "unknown",
        }

        # Calculate the total bbox area
        total_area = 0.0
        for det in detections:
            bbox = det.get("bounding_box") or det.get("bbox")
            if bbox:
                xmin = bbox.get("xmin")
                ymin = bbox.get("ymin")
                xmax = bbox.get("xmax")
                ymax = bbox.get("ymax")
                if None not in (xmin, ymin, xmax, ymax):
                    width = xmax - xmin
                    height = ymax - ymin
                    if width > 0 and height > 0:
                        total_area += width * height

        threshold_area = 250200.0  # Same threshold as insights/alerts

        intensity_pct = min(100.0, (total_area / threshold_area) * 100)
        metrics["intensity_percentage"] = intensity_pct

        if intensity_pct < 20:
            metrics["hazard_level"] = "low"
        elif intensity_pct < 50:
            metrics["hazard_level"] = "moderate"
        elif intensity_pct < 80:
            metrics["hazard_level"] = "high"
        else:
            metrics["hazard_level"] = "critical"

        return metrics

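    # Worked example: a single 600x417 box has area 250200, exactly the
    # threshold_area, so intensity_pct = min(100.0, 250200 / 250200 * 100) = 100.0
    # and hazard_level = "critical"; a 100x100 box gives ~4.0% and "low" here.
    # Note the coarser 20/50/80 bands used for metrics versus incident severity.
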
    def _extract_predictions(
        self, data: Any, config: FireSmokeConfig
    ) -> List[Dict[str, Any]]:
        """Extract predictions from processed data for API compatibility."""
        predictions = []

        try:
            if isinstance(data, list):
                for item in data:
                    if isinstance(item, dict):
                        prediction = {
                            "category": item.get("category", item.get("class", "unknown")),
                            "confidence": item.get("confidence", item.get("score", 0.0)),
                            "bounding_box": item.get("bounding_box", item.get("bbox", {})),
                        }
                        predictions.append(prediction)

        except Exception as e:
            self.logger.warning(f"Failed to extract predictions: {str(e)}")

        return predictions

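    # Example: keys fall back from "category"/"confidence"/"bounding_box" to
    # "class"/"score"/"bbox", so either detection dialect normalizes the same way:
    #
    #     _extract_predictions([{"class": "fire", "score": 0.9, "bbox": {...}}], config)
    #     # -> [{"category": "fire", "confidence": 0.9, "bounding_box": {...}}]
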
    def get_config_schema(self) -> Dict[str, Any]:
        """Get the configuration schema for fire and smoke detection."""
        return {
            "type": "object",
            "properties": {
                "confidence_threshold": {
                    "type": "number",
                    "minimum": 0.0,
                    "maximum": 1.0,
                    "default": 0.5,
                    "description": "Minimum confidence threshold for detections",
                },
                "fire_smoke_categories": {
                    "type": "array",
                    "items": {"type": "string"},
                    "default": ["fire", "smoke"],
                    "description": "Category names that represent fire and smoke",
                },
                "index_to_category": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                    "description": "Mapping from category indices to names",
                },
                "alert_config": {
                    "type": "object",
                    "properties": {
                        "count_thresholds": {
                            "type": "object",
                            "additionalProperties": {"type": "integer", "minimum": 1},
                            "description": "Count thresholds for alerts",
                        }
                    },
                },
            },
            "required": ["confidence_threshold"],
            "additionalProperties": False,
        }

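    # A config dict that would validate against this schema (illustrative):
    #
    #     {"confidence_threshold": 0.5,
    #      "fire_smoke_categories": ["fire", "smoke"],
    #      "index_to_category": {"0": "fire", "1": "smoke"},
    #      "alert_config": {"count_thresholds": {"fire": 1}}}
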
860
|
+
def create_default_config(self, **overrides) -> FireSmokeConfig:
|
|
861
|
+
"""Create default configuration with optional overrides."""
|
|
862
|
+
defaults = {
|
|
863
|
+
"category": self.category,
|
|
864
|
+
"usecase": self.name,
|
|
865
|
+
"confidence_threshold": 0.3,
|
|
866
|
+
"fire_smoke_categories": ["fire", "smoke"],
|
|
867
|
+
}
|
|
868
|
+
defaults.update(overrides)
|
|
869
|
+
return FireSmokeConfig(**defaults)
|
|
870
|
+
|
|
871
|
+
def _count_unique_tracks(self, summary: Dict) -> Optional[int]:
|
|
872
|
+
"""Count unique track IDs from detections, if tracking info exists."""
|
|
873
|
+
detections = summary.get("detections", [])
|
|
874
|
+
if not detections:
|
|
875
|
+
return None
|
|
876
|
+
|
|
877
|
+
unique_tracks = set()
|
|
878
|
+
for detection in detections:
|
|
879
|
+
track_id = detection.get("track_id")
|
|
880
|
+
if track_id is not None:
|
|
881
|
+
unique_tracks.add(track_id)
|
|
882
|
+
|
|
883
|
+
return len(unique_tracks) if unique_tracks else None
|
|
884
|
+
|
|
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Render a Unix timestamp as 'YYYY:MM:DD HH:MM:SS' in UTC."""
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        """Render a video offset in seconds as 'HH:MM:SS.s'."""
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60), 1)
+        return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"  # zero-pad seconds to match hours/minutes
+
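The two formatters cover the two timestamp domains: wall-clock UTC for live streams and elapsed offset for video files. A quick check of both output shapes with arbitrary inputs (note the seconds field is zero-padded here, which the original `{seconds:.1f}` formatting did not guarantee):

```python
from datetime import datetime, timezone

ts = datetime(2025, 10, 27, 19, 31, 20, tzinfo=timezone.utc).timestamp()
print(datetime.fromtimestamp(ts, tz=timezone.utc).strftime('%Y:%m:%d %H:%M:%S'))
# 2025:10:27 19:31:20

offset = 3727.4  # seconds into the video
hours, minutes = int(offset // 3600), int((offset % 3600) // 60)
seconds = round(offset % 60, 1)
print(f"{hours:02d}:{minutes:02d}:{seconds:04.1f}")  # 01:02:07.4
```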
+    def _format_timestamp(self, timestamp: Any) -> str:
+        """Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.
+
+        The input can be either:
+        1. A numeric Unix timestamp (``float`` / ``int``), which is converted to a UTC datetime.
+        2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
+
+        The returned value is in the format YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).
+
+        Example
+        -------
+        >>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
+        '2025:10:27 19:31:20'
+        """
+        # Convert numeric timestamps to datetime first
+        if isinstance(timestamp, (int, float)):
+            dt = datetime.fromtimestamp(timestamp, timezone.utc)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+        # Ensure we are working with a string from here on
+        if not isinstance(timestamp, str):
+            return str(timestamp)
+
+        # Remove ' UTC' suffix if present
+        timestamp_clean = timestamp.replace(' UTC', '').strip()
+
+        # Remove fractional seconds if present (everything after the first dot)
+        if '.' in timestamp_clean:
+            timestamp_clean = timestamp_clean.split('.')[0]
+
+        # Parse the timestamp string and convert to the desired format
+        try:
+            # Handle format: YYYY-MM-DD-HH:MM:SS
+            if timestamp_clean.count('-') >= 2:
+                parts = timestamp_clean.split('-')
+                if len(parts) >= 4:
+                    # parts = ['2025', '10', '27', '19:31:20']
+                    # Rejoin the date with colons and the time with a space
+                    formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
+                    return formatted
+        except Exception:
+            pass
+
+        # If parsing fails, return the cleaned string as-is
+        return timestamp_clean
+
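The string branch is plain splitting rather than `strptime`, which is why it tolerates missing microseconds. Tracing the docstring example by hand shows each step:

```python
raw = "2025-10-27-19:31:20.187574 UTC"
clean = raw.replace(" UTC", "").strip().split(".")[0]  # '2025-10-27-19:31:20'
parts = clean.split("-")                               # ['2025', '10', '27', '19:31:20']
print(f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}")
# 2025:10:27 19:31:20
```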
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+        """Get formatted current timestamp based on stream type."""
+        if not stream_info:
+            return "00:00:00.00"
+
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                # Derive the video offset from the frame index and FPS. Note that
+                # stream_time_str is computed but unused; stream_time is returned instead.
+                if frame_id:
+                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                stream_time_str = self._format_timestamp_for_video(start_time)
+
+                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            if frame_id:
+                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+
+            stream_time_str = self._format_timestamp_for_video(start_time)
+
+            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+        else:
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except Exception:
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
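The expected shape of `stream_info` is implied by the lookups rather than documented. An illustrative payload, with field names taken directly from the `.get()` chains above (values are made up):

```python
stream_info = {
    "input_settings": {
        "start_frame": 120,        # "na" for live streams
        "original_fps": 30,
        "stream_time": "2025-10-27-19:31:20.187574 UTC",
        "stream_info": {
            "stream_time": "2025-10-27-19:31:20.187574 UTC",
        },
    },
}
# Video-file path: start_frame != "na", so input_settings.stream_time is
# reformatted to '2025:10:27 19:31:20'. Live path: the nested
# stream_info.stream_time is parsed and rendered via _format_timestamp_for_stream,
# falling back to the current wall-clock time if parsing fails.
```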
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        if not stream_info:
+            return "00:00:00"
+
+        if precision:
+            if self.start_timer is None:
+                candidate = stream_info.get("input_settings", {}).get("stream_time")
+                if not candidate or candidate == "NA":
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                self.start_timer = candidate
+                return self._format_timestamp(self.start_timer)
+            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+                candidate = stream_info.get("input_settings", {}).get("stream_time")
+                if not candidate or candidate == "NA":
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                self.start_timer = candidate
+                return self._format_timestamp(self.start_timer)
+            else:
+                return self._format_timestamp(self.start_timer)
+
+        if self.start_timer is None:
+            # Prefer direct input_settings.stream_time if available and not NA
+            candidate = stream_info.get("input_settings", {}).get("stream_time")
+            if not candidate or candidate == "NA":
+                # Fall back to the nested stream_info.stream_time used by the current-timestamp path
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                        candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                    except Exception:
+                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                else:
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+            self.start_timer = candidate
+            return self._format_timestamp(self.start_timer)
+        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+            candidate = stream_info.get("input_settings", {}).get("stream_time")
+            if not candidate or candidate == "NA":
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        ts = dt.replace(tzinfo=timezone.utc).timestamp()
+                        candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                    except Exception:
+                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                else:
+                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+            self.start_timer = candidate
+            return self._format_timestamp(self.start_timer)
+
+        else:
+            if self.start_timer is not None and self.start_timer != "NA":
+                return self._format_timestamp(self.start_timer)
+
+            if self._tracking_start_time is None:
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except Exception:
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
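Once resolved, the start time is cached on `self.start_timer`, so later frames reuse the same "TOTAL SINCE" anchor. The final fallback additionally snaps the tracking start to the top of the hour, which this small check reproduces:

```python
from datetime import datetime, timezone

# Fallback path: the tracking start is truncated to the hour boundary.
dt = datetime(2025, 10, 27, 19, 31, 20, tzinfo=timezone.utc)
dt = dt.replace(minute=0, second=0, microsecond=0)
print(dt.strftime('%Y:%m:%d %H:%M:%S'))  # 2025:10:27 19:00:00
```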
+    def get_duration_seconds(self, start_time, end_time):
+        """Compute elapsed seconds between two timestamps in either supported format."""
+        def parse_relative_time(t):
+            """Parse HH:MM:SS(.f) manually into a timedelta."""
+            try:
+                parts = t.strip().split(":")
+                if len(parts) != 3:
+                    return None
+                hours = int(parts[0])
+                minutes = int(parts[1])
+                seconds = float(parts[2])  # handles fractional seconds such as 7.4
+                return timedelta(hours=hours, minutes=minutes, seconds=seconds)
+            except Exception:
+                return None
+
+        def parse_time(t):
+            # Check for HH:MM:SS(.ms) format
+            if re.match(r'^\d{1,2}:\d{2}:\d{1,2}(\.\d+)?$', t):
+                return parse_relative_time(t)
+
+            # Check for full UTC format like 2025-08-01-14:23:45.123456 UTC
+            if "UTC" in t:
+                try:
+                    return datetime.strptime(t, "%Y-%m-%d-%H:%M:%S.%f UTC")
+                except ValueError:
+                    return None
+
+            return None
+
+        start_dt = parse_time(start_time)
+        end_dt = parse_time(end_time)
+
+        # Return 'N/A' if either input is unparseable
+        if start_dt is None or end_dt is None:
+            return 'N/A'
+
+        # Both relative (timedelta) or both absolute (datetime): subtract directly
+        if isinstance(start_dt, timedelta) and isinstance(end_dt, timedelta):
+            delta = end_dt - start_dt
+        elif isinstance(start_dt, datetime) and isinstance(end_dt, datetime):
+            delta = end_dt - start_dt
+        else:
+            # Mixed formats cannot be compared
+            return None
+
+        return delta.total_seconds()
+
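Callers should note the mixed failure modes: unparseable input yields the string `'N/A'`, while a format mismatch (one relative, one absolute) yields `None`. Both happy paths reduce to plain subtraction, as this standalone check with made-up values shows:

```python
from datetime import datetime, timedelta

# Relative clocks subtract as timedeltas...
start = timedelta(hours=0, minutes=0, seconds=7.5)
end = timedelta(hours=0, minutes=1, seconds=2.0)
print((end - start).total_seconds())  # 54.5

# ...and absolute UTC stamps subtract as datetimes.
fmt = "%Y-%m-%d-%H:%M:%S.%f UTC"
s = datetime.strptime("2025-08-01-14:23:45.123456 UTC", fmt)
e = datetime.strptime("2025-08-01-14:24:45.123456 UTC", fmt)
print((e - s).total_seconds())  # 60.0
```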
+    def _get_alert_incident_ids(self, sev_level, stream_info: Optional[Dict[str, Any]] = None):
+        """Debounce severity readings into an (escalation_level, incident_id) tuple."""
+        if sev_level != "":
+            # Consecutive hits on the head of the hit list promote that severity.
+            if sev_level == self.id_hit_list[0] and len(self.id_hit_list) >= 2:
+                self.id_hit_counter += 1
+                if self.id_hit_counter > 7:
+                    self.latest_stack = self.id_hit_list[0]
+                    self.id_hit_list.pop(0)
+                    self.id_hit_counter = 0
+                    self.id_timing_list.append(self._get_current_timestamp_str(stream_info))
+                    return (5 - len(self.id_hit_list), self.return_id_counter)
+            elif self.id_hit_counter > 0:
+                self.id_hit_counter -= 1
+            elif self.id_hit_counter < 0:
+                self.id_hit_counter = 0
+
+            if len(self.id_hit_list) > 1:
+                if sev_level == self.latest_stack:
+                    return (5 - len(self.id_hit_list), self.return_id_counter)
+                else:
+                    return (0, 0)
+            else:
+                # Only the sentinel entry remains: the incident is fully escalated.
+                if len(self.id_hit_list) == 1:
+                    self.id_hit_counter += 1
+                    if self.id_hit_counter > 130:
+                        # After a sustained run, reset the stack and roll the incident ID.
+                        self.id_hit_list = ["low", "medium", "significant", "critical", "low"]
+                        pre_return_id = self.return_id_counter
+                        self.return_id_counter += 1
+                        self.id_hit_counter = 0
+                        self.latest_stack = None
+                        self.id_timing_list.append(self._get_current_timestamp_str(stream_info))
+                        return (int(5), pre_return_id)
+                if sev_level == self.latest_stack:
+                    return (5 - len(self.id_hit_list), self.return_id_counter)
+                else:
+                    return (0, 0)
+        elif self.id_hit_counter > 0:
+            self.id_hit_counter -= 1
+        elif self.id_hit_counter < 0:
+            self.id_hit_counter = 0
+        return (1, 1)
+