matrice_analytics-0.1.60-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/flare_analysis.py
@@ -0,0 +1,836 @@
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
from datetime import datetime, timezone
import tempfile
import os
import cv2
import numpy as np
from collections import defaultdict
import time
from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    match_results_structure,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)

from ..core.config import BaseConfig, AlertConfig, ZoneConfig


@dataclass
class FlareAnalysisConfig(BaseConfig):
    """Configuration for flare analysis use case."""
    confidence_threshold: float = 0.5
    top_k_colors: int = 3
    frame_skip: int = 1
    target_categories: List[str] = field(default_factory=lambda: ["BadFlare", "GoodFlare"])
    fps: Optional[float] = None
    bbox_format: str = "auto"
    index_to_category: Dict[int, str] = field(default_factory=lambda: {0: 'BadFlare', 1: 'GoodFlare'})
    alert_config: Optional[AlertConfig] = None
    time_window_minutes: int = 60
    enable_unique_counting: bool = True
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    def validate(self) -> List[str]:
        errors = super().validate()
        if self.confidence_threshold < 0 or self.confidence_threshold > 1:
            errors.append("confidence_threshold must be between 0 and 1")
        if self.top_k_colors <= 0:
            errors.append("top_k_colors must be positive")
        if self.frame_skip <= 0:
            errors.append("frame_skip must be positive")
        if self.bbox_format not in ["auto", "xmin_ymin_xmax_ymax", "x_y_width_height"]:
            errors.append("bbox_format must be one of: auto, xmin_ymin_xmax_ymax, x_y_width_height")
        if self.smoothing_window_size <= 0:
            errors.append("smoothing_window_size must be positive")
        if self.smoothing_cooldown_frames < 0:
            errors.append("smoothing_cooldown_frames cannot be negative")
        if self.smoothing_confidence_range_factor <= 0:
            errors.append("smoothing_confidence_range_factor must be positive")
        return errors

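Usage sketch (illustrative, not part of the packaged module): the dataclass above holds every tunable knob for the use case. Assuming BaseConfig accepts the category/usecase keys the same way create_default_config passes them further below, a caller could build and validate a config like this:

from matrice_analytics.post_processing.usecases.flare_analysis import FlareAnalysisConfig

config = FlareAnalysisConfig(
    category="flare_detection",      # mirrors FlareAnalysisUseCase.category below
    usecase="flare_analysis",
    confidence_threshold=0.6,
    top_k_colors=3,
    target_categories=["BadFlare", "GoodFlare"],
    smoothing_window_size=20,
)

errors = config.validate()           # list of human-readable messages, empty when valid
if errors:
    raise ValueError(f"Invalid flare analysis config: {errors}")
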
class FlareAnalysisUseCase(BaseProcessor):
    """Flare analysis processor for detecting and analyzing flare colors in video streams."""

    CATEGORY_DISPLAY = {
        "BadFlare": "BadFlare",
        "GoodFlare": "GoodFlare"
    }

    def __init__(self):
        super().__init__("flare_analysis")
        self.category = "flare_detection"
        self.CASE_TYPE: Optional[str] = 'flare_detection'
        self.CASE_VERSION: Optional[str] = '1.2'
        self.target_categories = ['BadFlare', 'GoodFlare']
        self.tracker = None
        self.smoothing_tracker = None
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
        self._tracking_start_time = None
        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        self._track_merge_iou_threshold: float = 0.05
        self._track_merge_time_window: float = 7.0
        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"

    def reset_tracker(self) -> None:
        if self.tracker is not None:
            self.tracker.reset()
            self.logger.info("AdvancedTracker reset for new flare analysis session")

    def reset_flare_tracking(self) -> None:
        self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self._tracking_start_time = None
        self._track_aliases = {}
        self._canonical_tracks = {}
        self._ascending_alert_list = []
        self.current_incident_end_timestamp = "N/A"
        self.logger.info("Flare tracking state reset")

    def reset_all_tracking(self) -> None:
        self.reset_tracker()
        self.reset_flare_tracking()
        self.logger.info("All flare tracking state reset")

    @staticmethod
    def _compute_iou(bbox1, bbox2) -> float:
        if not bbox1 or not bbox2:
            return 0.0
        if "xmin" in bbox1:
            x1_min, y1_min, x1_max, y1_max = bbox1["xmin"], bbox1["ymin"], bbox1["xmax"], bbox1["ymax"]
            x2_min, y2_min, x2_max, y2_max = bbox2["xmin"], bbox2["ymin"], bbox2["xmax"], bbox2["ymax"]
        else:
            x1_min, y1_min = bbox1["x"], bbox1["y"]
            x1_max, y1_max = x1_min + bbox1["width"], y1_min + bbox1["height"]
            x2_min, y2_min = bbox2["x"], bbox2["y"]
            x2_max, y2_max = x2_min + bbox2["width"], y2_min + bbox2["height"]
        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)
        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h
        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area
        return inter_area / union_area if union_area > 0 else 0.0

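A quick numerical check of the IoU helper above (values are made up for illustration). Note that _compute_iou infers the box convention from bbox1 alone, so both boxes must use the same keys:

from matrice_analytics.post_processing.usecases.flare_analysis import FlareAnalysisUseCase

box_a = {"xmin": 0, "ymin": 0, "xmax": 100, "ymax": 100}
box_b = {"xmin": 50, "ymin": 50, "xmax": 150, "ymax": 150}

# intersection = 50 * 50 = 2500, union = 10000 + 10000 - 2500 = 17500
iou = FlareAnalysisUseCase._compute_iou(box_a, box_b)   # static method, callable on the class
print(round(iou, 3))  # 0.143 -- above the 0.05 merge threshold, so _merge_or_register_track
                      # (defined next) would alias these two raw track ids to one canonical id
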
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        if raw_id is None or bbox is None:
            return raw_id
        now = time.time()
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id
        for canonical_id, info in self._canonical_tracks.items():
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id

    def _update_tracking_state(self, detections: List[Dict]):
        self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
        for det in detections:
            cat = det.get("category")
            raw_track_id = det.get("track_id")
            if cat not in self.target_categories or raw_track_id is None:
                continue
            bbox = det.get("bounding_box", det.get("bbox"))
            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
            det["track_id"] = canonical_id
            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._current_frame_track_ids[cat].add(canonical_id)

    def get_total_counts(self):
        return {cat: len(ids) for cat, ids in self._per_category_total_track_ids.items()}

    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

    def _format_timestamp_for_stream(self, timestamp: float, precision: bool = False) -> str:
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime("%Y-%m-%d-%H:%M:%S.%f UTC" if precision else "%Y:%m:%d %H:%M:%S")

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get formatted current timestamp based on stream type."""
        if not stream_info:
            return "00:00:00.00"
        # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)
                return stream_time_str
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
            stream_time_str = self._format_timestamp_for_video(start_time)
            return stream_time_str
        else:
            # For streams, use stream_time from stream_info
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                # Parse the high precision timestamp string to get timestamp
                try:
                    # Remove " UTC" suffix and parse
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except:
                    # Fallback to current time if parsing fails
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                return "00:00:00"
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            # If video format, start from 00:00:00
            return "00:00:00"
        else:
            # For streams, use tracking start time or current time with minutes/seconds reset
            if self._tracking_start_time is None:
                # Try to extract timestamp from stream_time string
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        # Remove " UTC" suffix and parse
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except:
                        # Fallback to current time if parsing fails
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()

            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
        frame_track_ids = set(det.get('track_id') for det in detections if det.get('track_id') is not None)
        total_track_ids = set()
        for s in self._per_category_total_track_ids.values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": self._total_frame_counter
        }

    def get_config_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "confidence_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.5},
                "top_k_colors": {"type": "integer", "minimum": 1, "default": 3},
                "frame_skip": {"type": "integer", "minimum": 1, "default": 1},
                "target_categories": {"type": ["array", "null"], "items": {"type": "string"}, "default": ["BadFlare", "GoodFlare"]},
                "fps": {"type": ["number", "null"], "minimum": 1.0, "default": None},
                "bbox_format": {"type": "string", "enum": ["auto", "xmin_ymin_xmax_ymax", "x_y_width_height"], "default": "auto"},
                "index_to_category": {"type": ["object", "null"], "default": {0: 'BadFlare', 1: 'GoodFlare'}},
                "alert_config": {"type": ["object", "null"], "default": None},
                "time_window_minutes": {"type": "integer", "minimum": 1, "default": 60},
                "enable_unique_counting": {"type": "boolean", "default": True},
                "enable_smoothing": {"type": "boolean", "default": True},
                "smoothing_algorithm": {"type": "string", "default": "observability"},
                "smoothing_window_size": {"type": "integer", "minimum": 1, "default": 20},
                "smoothing_cooldown_frames": {"type": "integer", "minimum": 0, "default": 5},
                "smoothing_confidence_range_factor": {"type": "number", "minimum": 0, "default": 0.5}
            },
            "required": ["confidence_threshold", "top_k_colors"],
            "additionalProperties": False
        }

    def create_default_config(self, **overrides) -> FlareAnalysisConfig:
        defaults = {
            "category": self.category,
            "usecase": self.name,
            "confidence_threshold": 0.5,
            "top_k_colors": 3,
            "frame_skip": 1,
            "target_categories": ["BadFlare", "GoodFlare"],
            "fps": None,
            "bbox_format": "auto",
            "index_to_category": {0: 'BadFlare', 1: 'GoodFlare'},
            "alert_config": None,
            "time_window_minutes": 60,
            "enable_unique_counting": True,
            "enable_smoothing": True,
            "smoothing_algorithm": "observability",
            "smoothing_window_size": 20,
            "smoothing_cooldown_frames": 5,
            "smoothing_confidence_range_factor": 0.5
        }
        defaults.update(overrides)
        return FlareAnalysisConfig(**defaults)

    def process(
        self,
        data: Any,
        config: ConfigProtocol,
        input_bytes: Optional[bytes] = None,
        context: Optional[ProcessingContext] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> ProcessingResult:
        start_time = time.time()
        try:
            if not isinstance(config, FlareAnalysisConfig):
                return self.create_error_result(
                    "Invalid configuration type for flare analysis",
                    usecase=self.name,
                    category=self.category,
                    context=context
                )
            if context is None:
                context = ProcessingContext()
            if not input_bytes:
                return self.create_error_result(
                    "input_bytes (video/image) is required for flare analysis",
                    usecase=self.name,
                    category=self.category,
                    context=context
                )
            if not data:
                return self.create_error_result(
                    "Detection data is required for flare analysis",
                    usecase=self.name,
                    category=self.category,
                    context=context
                )

            input_format = match_results_structure(data)
            context.input_format = input_format
            context.confidence_threshold = config.confidence_threshold
            self.logger.info(f"Processing flare analysis with format: {input_format.value}")

            processed_data = filter_by_confidence(data, config.confidence_threshold)
            if config.index_to_category:
                processed_data = apply_category_mapping(processed_data, config.index_to_category)
            flare_processed_data = filter_by_categories(processed_data.copy(), config.target_categories)

            if config.enable_smoothing:
                if self.smoothing_tracker is None:
                    smoothing_config = BBoxSmoothingConfig(
                        smoothing_algorithm=config.smoothing_algorithm,
                        window_size=config.smoothing_window_size,
                        cooldown_frames=config.smoothing_cooldown_frames,
                        confidence_threshold=config.confidence_threshold,
                        confidence_range_factor=config.smoothing_confidence_range_factor,
                        enable_smoothing=True
                    )
                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
                flare_processed_data = bbox_smoothing(flare_processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

            try:
                from ..advanced_tracker import AdvancedTracker
                from ..advanced_tracker.config import TrackerConfig
                if self.tracker is None:
                    tracker_config = TrackerConfig()
                    self.tracker = AdvancedTracker(tracker_config)
                    self.logger.info("Initialized AdvancedTracker for flare analysis tracking")
                flare_processed_data = self.tracker.update(flare_processed_data)
            except Exception as e:
                self.logger.warning(f"AdvancedTracker failed: {e}")

            self._update_tracking_state(flare_processed_data)
            self._total_frame_counter += 1

            frame_number = None
            if stream_info:
                input_settings = stream_info.get("input_settings", {})
                start_frame = input_settings.get("start_frame")
                end_frame = input_settings.get("end_frame")
                if start_frame is not None and end_frame is not None and start_frame == end_frame:
                    frame_number = start_frame

            flare_analysis = self._analyze_flares_in_media(flare_processed_data, input_bytes, config)
            counting_summary = self._count_categories(flare_analysis, config)
            counting_summary['total_counts'] = self.get_total_counts()
            alerts = self._check_alerts(counting_summary, frame_number, config)
            incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
            tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
            business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info)
            summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

            incidents = incidents_list[0] if incidents_list else {}
            tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
            business_analytics = business_analytics_list[0] if business_analytics_list else {}
            agg_summary = {str(frame_number) if frame_number is not None else "current_frame": {
                "incidents": incidents,
                "tracking_stats": tracking_stats,
                "business_analytics": business_analytics,
                "alerts": alerts,
                "human_text": summary_list[0] if summary_list else {}
            }}

            context.mark_completed()
            result = self.create_result(
                data={"agg_summary": agg_summary},
                usecase=self.name,
                category=self.category,
                context=context
            )
            result.processing_time = context.processing_time or time.time() - start_time
            self.logger.info(f"Flare analysis completed in {result.processing_time:.2f}s")
            return result
        except Exception as e:
            self.logger.error(f"Flare analysis failed: {str(e)}", exc_info=True)
            if context:
                context.mark_completed()
            return self.create_error_result(
                str(e),
                type(e).__name__,
                usecase=self.name,
                category=self.category,
                context=context
            )

    def _is_video_bytes(self, media_bytes: bytes) -> bool:
        video_signatures = [
            b'\x00\x00\x00\x20ftypmp4', b'\x00\x00\x00\x18ftypmp4', b'RIFF', b'\x1aE\xdf\xa3', b'ftyp'
        ]
        for signature in video_signatures:
            if media_bytes.startswith(signature) or signature in media_bytes[:50]:
                return True
        return False

    def _analyze_flares_in_media(self, data: Any, media_bytes: bytes, config: FlareAnalysisConfig) -> List[Dict[str, Any]]:
        is_video = self._is_video_bytes(media_bytes)
        return self._analyze_flares_in_video(data, media_bytes, config) if is_video else self._analyze_flares_in_image(data, media_bytes, config)

    def _analyze_flares_in_image(self, data: Any, image_bytes: bytes, config: FlareAnalysisConfig) -> List[Dict[str, Any]]:
        image_array = np.frombuffer(image_bytes, np.uint8)
        image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        if image is None:
            raise RuntimeError("Failed to decode image from bytes")
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        flare_analysis = []
        detections = self._get_frame_detections(data, "0")
        for detection in detections:
            if detection.get("confidence", 1.0) < config.confidence_threshold:
                continue
            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue
            crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
            if crop.size == 0:
                continue
            major_colors = []  # extract_major_colors(crop, k=config.top_k_colors) is commented out in this release
            main_color = major_colors[0][0] if major_colors else "unknown"
            flare_record = {
                "frame_id": "0",
                "timestamp": 0.0,
                "category": detection.get("category", "unknown"),
                "confidence": round(detection.get("confidence", 0.0), 3),
                "main_color": main_color,
                "major_colors": major_colors,
                "bounding_box": bbox,
                "detection_id": detection.get("id", f"det_{len(flare_analysis)}"),
                "track_id": detection.get("track_id")
            }
            flare_analysis.append(flare_record)
        return flare_analysis

    def _get_frame_detections(self, data: Any, frame_key: str) -> List[Dict[str, Any]]:
        if isinstance(data, dict):
            return data.get(frame_key, [])
        elif isinstance(data, list):
            return data
        return []

    def _crop_bbox(self, image: np.ndarray, bbox: Dict[str, Any], bbox_format: str) -> np.ndarray:
        h, w = image.shape[:2]
        if bbox_format == "auto":
            bbox_format = "xmin_ymin_xmax_ymax" if "xmin" in bbox else "x_y_width_height"
        if bbox_format == "xmin_ymin_xmax_ymax":
            xmin = max(0, int(bbox["xmin"]))
            ymin = max(0, int(bbox["ymin"]))
            xmax = min(w, int(bbox["xmax"]))
            ymax = min(h, int(bbox["ymax"]))
            x_center = (xmin + xmax) / 2
            x_offset = (xmax - xmin) / 4
            y_center = (ymin + ymax) / 2
            y_offset = (ymax - ymin) / 4
            new_xmin = max(0, int(x_center - x_offset))
            new_xmax = min(w, int(x_center + x_offset))
            new_ymin = max(0, int(y_center - y_offset))
            new_ymax = min(h, int(y_center + y_offset))
        elif bbox_format == "x_y_width_height":
            x = max(0, int(bbox["x"]))
            y = max(0, int(bbox["y"]))
            width = int(bbox["width"])
            height = int(bbox["height"])
            xmax = min(w, x + width)
            ymax = min(h, y + height)
            x_center = (x + xmax) / 2
            x_offset = (xmax - x) / 4
            y_center = (y + ymax) / 2
            y_offset = (ymax - y) / 4
            new_xmin = max(0, int(x_center - x_offset))
            new_xmax = min(w, int(x_center + x_offset))
            new_ymin = max(0, int(y_center - y_offset))
            new_ymax = min(h, int(y_center + y_offset))
        else:
            return np.zeros((0, 0, 3), dtype=np.uint8)
        return image[new_ymin:new_ymax, new_xmin:new_xmax]

    def _count_categories(self, detections: List[Dict], config: FlareAnalysisConfig) -> Dict[str, Any]:
        counts = {}
        detections_list = []
        category_colors = defaultdict(lambda: defaultdict(int))
        for det in detections:
            cat = det.get("category", "unknown")
            counts[cat] = counts.get(cat, 0) + 1
            main_color = det.get("main_color", "unknown")
            category_colors[cat][main_color] += 1
            detections_list.append({
                "bounding_box": det.get("bounding_box"),
                "category": cat,
                "confidence": det.get("confidence"),
                "track_id": det.get("track_id"),
                "frame_id": det.get("frame_id"),
                "main_color": main_color
            })
        return {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": detections_list,
            "color_distribution": {cat: dict(colors) for cat, colors in category_colors.items()}
        }

    def _check_alerts(self, summary: Dict, frame_number: Optional[int], config: FlareAnalysisConfig) -> List[Dict]:
        def get_trend(data, lookback=900, threshold=0.8):
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            return increasing / total >= threshold

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)
        total_counts_dict = summary.get("total_counts", {})
        per_category_count = summary.get("per_category_count", {})

        if not config.alert_config:
            return alerts

        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total_detections > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                    })
                elif category in per_category_count:
                    count = per_category_count[category]
                    if count > threshold:
                        alerts.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                            "alert_id": f"alert_{category}_{frame_key}",
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": threshold,
                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                              getattr(config.alert_config, 'alert_value', ['JSON']))}
                        })
        return alerts

    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: FlareAnalysisConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)
        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)
                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
            for cat, count in counting_summary.get("per_category_count", {}).items():
                if count > 0:
                    human_text_lines.append(f"\t{cat}: {count}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds,
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
                })

            event = self.create_incident(
                incident_id=f"{self.CASE_TYPE}_{frame_number if frame_number is not None else 'current_frame'}",
                incident_type=self.CASE_TYPE,
                severity_level=level,
                human_text=human_text,
                camera_info=camera_info,
                alerts=alerts,
                alert_settings=alert_settings,
                start_time=start_timestamp,
                end_time=self.current_incident_end_timestamp,
                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
            )
            incidents.append(event)
        else:
            self._ascending_alert_list.append(0)
            incidents.append({})
        return incidents

    def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: FlareAnalysisConfig, frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        tracking_stats = []
        total_detections = counting_summary.get("total_count", 0)
        total_counts_dict = counting_summary.get("total_counts", {})
        per_category_count = counting_summary.get("per_category_count", {})
        color_distribution = counting_summary.get("color_distribution", {})
        current_timestamp = self._get_current_timestamp_str(stream_info)
        start_timestamp = self._get_start_timestamp_str(stream_info)
        high_precision_start_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
        camera_info = self.get_camera_info_from_stream(stream_info)

        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]

        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "unknown")
            detection_obj = self.create_detection_object(category, bbox)  # Remove main_color parameter
            detection_obj["main_color"] = detection.get("main_color", "unknown")  # Add main_color afterward
            detections.append(detection_obj)

        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds,
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
            })

        human_text_lines = [f"Tracking Statistics:"]
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
        for cat, count in per_category_count.items():
            if count > 0:
                colors = color_distribution.get(cat, {})
                dominant_color = max(colors.items(), key=lambda x: x[1])[0] if colors else "unknown"
                human_text_lines.append(f"\t{cat}: {count}, Dominant Color: {dominant_color}")
        if total_detections == 0:
            human_text_lines.append("\tNo Flares detected")
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
        for cat, count in total_counts_dict.items():
            if count > 0:
                colors = color_distribution.get(cat, {})
                dominant_color = max(colors.items(), key=lambda x: x[1])[0] if colors else "unknown"
                human_text_lines.append(f"\t{cat}: {count}, Dominant Color: {dominant_color}")
        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")
        human_text = "\n".join(human_text_lines)

        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]

        tracking_stat = self.create_tracking_stats(
            total_counts=total_counts,
            current_counts=current_counts,
            detections=detections,
            human_text=human_text,
            camera_info=camera_info,
            alerts=alerts,
            alert_settings=alert_settings,
            reset_settings=reset_settings,
            start_time=high_precision_start_timestamp,
            reset_time=high_precision_reset_timestamp
        )
        tracking_stats.append(tracking_stat)
        return tracking_stats

    def _generate_business_analytics(self, counting_summary: Dict, alerts: List, config: FlareAnalysisConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty: bool = False) -> List[Dict]:
        if is_empty:
            return []
        camera_info = self.get_camera_info_from_stream(stream_info)
        total_detections = counting_summary.get("total_count", 0)
        color_distribution = counting_summary.get("color_distribution", {})
        human_text_lines = ["Business Analytics:"]
        if total_detections > 0:
            unique_colors = sum(len(colors) for colors in color_distribution.values())
            human_text_lines.append(f"Total Flares: {total_detections}")
            human_text_lines.append(f"Unique Colors: {unique_colors}")
            for cat, colors in color_distribution.items():
                if colors:
                    dominant_color = max(colors.items(), key=lambda x: x[1])[0]
                    human_text_lines.append(f"{cat}: {sum(colors.values())}, Dominant Color: {dominant_color}")
        else:
            human_text_lines.append("No Flares detected")
        human_text = "\n".join(human_text_lines)
        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds,
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
            })
        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
        return [self.create_business_analytics(
            analysis_name="flare_color_analysis",
            statistics={"total_detections": total_detections, "unique_colors": sum(len(colors) for colors in color_distribution.values())},
            human_text=human_text,
            camera_info=camera_info,
            alerts=alerts,
            alert_settings=alert_settings,
            reset_settings=reset_settings
        )]

    def _generate_summary(self, counting_summary: Dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[Dict]:
        lines = {}
        lines["Application Name"] = self.CASE_TYPE
        lines["Application Version"] = self.CASE_VERSION
        if incidents and incidents[0]:
            lines["Incidents"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
        if tracking_stats and tracking_stats[0]:
            lines["Tracking Statistics"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
        if business_analytics and business_analytics[0]:
            lines["Business Analytics"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
        if not lines.get("Incidents") and not lines.get("Tracking Statistics") and not lines.get("Business Analytics"):
            lines["Summary"] = "No Summary Data"
        return [lines]

    def _format_timestamp(self, timestamp: float) -> str:
        return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')

    def _get_tracking_start_time(self) -> str:
        if self._tracking_start_time is None:
            return "N/A"
        return self._format_timestamp(self._tracking_start_time)

    def _set_tracking_start_time(self) -> None:
        self._tracking_start_time = time.time()
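
End-to-end sketch (hypothetical, under stated assumptions): process() expects raw detections plus the encoded frame. The detection-dict keys ("category", "confidence", "bounding_box") mirror what _analyze_flares_in_image reads, the integer-to-name mapping is assumed to be handled by apply_category_mapping via index_to_category, and reading the aggregated output from result.data is assumed from the create_result call above.

from matrice_analytics.post_processing.usecases.flare_analysis import FlareAnalysisUseCase

usecase = FlareAnalysisUseCase()
config = usecase.create_default_config(confidence_threshold=0.6)

# Model output for one frame; class index 1 corresponds to "GoodFlare" in index_to_category.
detections = [
    {
        "category": 1,
        "confidence": 0.91,
        "bounding_box": {"xmin": 120, "ymin": 40, "xmax": 260, "ymax": 210},
    }
]

with open("flare_frame.jpg", "rb") as f:  # illustrative path to any JPEG/PNG frame
    image_bytes = f.read()

result = usecase.process(detections, config, input_bytes=image_bytes)
print(result.data["agg_summary"])  # per-frame incidents, tracking stats, alerts, human text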