matrice-analytics 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-analytics might be problematic.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +142 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3188 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +681 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +1870 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +283 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +248 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +271 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1153 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1043 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +232 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1835 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +930 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1112 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +891 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +914 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1194 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +1728 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +950 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.2.dist-info/METADATA +481 -0
- matrice_analytics-0.1.2.dist-info/RECORD +160 -0
- matrice_analytics-0.1.2.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.2.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.2.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/color_detection.py
@@ -0,0 +1,1835 @@
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
from datetime import datetime, timezone
import tempfile
import os
import cv2
import copy
import numpy as np
from collections import defaultdict
import time
from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
from ..core.config import BaseConfig, AlertConfig, ZoneConfig
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    match_results_structure,
    extract_major_colors,
    count_objects_by_category,
    calculate_counting_summary,
    count_objects_in_zones,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)
from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
from ..usecases.color.clip import ClipProcessor
#from turbojpeg import TurboJPEG, TJPF_RGB

@dataclass
class ColorDetectionConfig(BaseConfig):
    """Configuration for color detection use case."""
    confidence_threshold: float = 0.9
    top_k_colors: int = 3
    frame_skip: int = 1
    usecase: str = "color_detection"
    usecase_categories: List[str] = field(
        default_factory=lambda: [
            "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
            "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog",
            "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
            "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
            "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
            "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
            "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
            "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
            "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book",
            "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
        ]
    )
    target_categories: List[str] = field(
        default_factory=lambda: ["car", "bicycle", "bus", "motorcycle"]
    )
    fps: Optional[float] = None
    bbox_format: str = "auto"
    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "person", 1: "bicycle", 2: "car", 3: "motorcycle", 4: "airplane", 5: "bus",
            6: "train", 7: "truck", 8: "boat", 9: "traffic light", 10: "fire hydrant",
            11: "stop sign", 12: "parking meter", 13: "bench", 14: "bird", 15: "cat",
            16: "dog", 17: "horse", 18: "sheep", 19: "cow", 20: "elephant", 21: "bear",
            22: "zebra", 23: "giraffe", 24: "backpack", 25: "umbrella", 26: "handbag",
            27: "tie", 28: "suitcase", 29: "frisbee", 30: "skis", 31: "snowboard",
            32: "sports ball", 33: "kite", 34: "baseball bat", 35: "baseball glove",
            36: "skateboard", 37: "surfboard", 38: "tennis racket", 39: "bottle",
            40: "wine glass", 41: "cup", 42: "fork", 43: "knife", 44: "spoon", 45: "bowl",
            46: "banana", 47: "apple", 48: "sandwich", 49: "orange", 50: "broccoli",
            51: "carrot", 52: "hot dog", 53: "pizza", 54: "donut", 55: "cake", 56: "chair",
            57: "couch", 58: "potted plant", 59: "bed", 60: "dining table", 61: "toilet",
            62: "tv", 63: "laptop", 64: "mouse", 65: "remote", 66: "keyboard",
            67: "cell phone", 68: "microwave", 69: "oven", 70: "toaster", 71: "sink",
            72: "refrigerator", 73: "book", 74: "clock", 75: "vase", 76: "scissors",
            77: "teddy bear", 78: "hair drier", 79: "toothbrush"
        }
    )
    alert_config: Optional[AlertConfig] = None
    time_window_minutes: int = 60
    enable_unique_counting: bool = True
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    #JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
    zone_config: Optional[Dict[str, Dict[str, List[List[float]]]]] = field(
        default_factory=lambda: {
            "zones": {
                "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
            }
        }
    )
    true_import: bool = False

    def validate(self) -> List[str]:
        errors = super().validate()
        if self.confidence_threshold < 0 or self.confidence_threshold > 1:
            errors.append("confidence_threshold must be between 0 and 1")
        if self.top_k_colors <= 0:
            errors.append("top_k_colors must be positive")
        if self.frame_skip <= 0:
            errors.append("frame_skip must be positive")
        if self.bbox_format not in ["auto", "xmin_ymin_xmax_ymax", "x_y_width_height"]:
            errors.append("bbox_format must be one of: auto, xmin_ymin_xmax_ymax, x_y_width_height")
        if self.smoothing_window_size <= 0:
            errors.append("smoothing_window_size must be positive")
        if self.smoothing_cooldown_frames < 0:
            errors.append("smoothing_cooldown_frames cannot be negative")
        if self.smoothing_confidence_range_factor <= 0:
            errors.append("smoothing_confidence_range_factor must be positive")
        return errors

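
# Editor's sketch (not part of the released file): typical construction and
# validation of the config above. Assumes only names defined in this module and
# that BaseConfig.validate() returns a list of error strings.
def _example_build_color_config() -> ColorDetectionConfig:
    cfg = ColorDetectionConfig(confidence_threshold=0.5, top_k_colors=3, frame_skip=2)
    problems = cfg.validate()
    if problems:
        raise ValueError(f"Invalid ColorDetectionConfig: {problems}")
    return cfg
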
class ColorDetectionUseCase(BaseProcessor):
    """Color detection processor for analyzing object colors in video streams with tracking."""

    CATEGORY_DISPLAY = {
        "bicycle": "Bicycle", "car": "Car", "motorbike": "Motorbike", "auto rickshaw": "Auto Rickshaw",
        "bus": "Bus", "garbagevan": "Garbage Van", "truck": "Truck", "minibus": "Minibus",
        "army vehicle": "Army Vehicle", "pickup": "Pickup", "policecar": "Police Car",
        "rickshaw": "Rickshaw", "scooter": "Scooter", "suv": "SUV", "taxi": "Taxi",
        "three wheelers -CNG-": "Three Wheelers (CNG)", "human hauler": "Human Hauler",
        "van": "Van", "wheelbarrow": "Wheelbarrow"
    }

    def __init__(self):
        super().__init__("color_detection")
        self.category = "visual_appearance"

        self.target_categories = ["car", "bicycle", "bus", "motorcycle"]

        self.CASE_TYPE: Optional[str] = 'color_detection'
        self.CASE_VERSION: Optional[str] = '1.3'

        self.tracker = None  # AdvancedTracker instance
        self.smoothing_tracker = None  # BBoxSmoothingTracker instance
        self._total_frame_counter = 0  # Total frames processed
        self._global_frame_offset = 0  # Frame offset for new sessions
        self._color_total_track_ids = defaultdict(set)  # Cumulative track IDs per category-color
        self._color_current_frame_track_ids = defaultdict(set)  # Per-frame track IDs per category-color

        self._tracking_start_time = None

        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        # Tunable parameters – adjust if necessary for specific scenarios
        self._track_merge_iou_threshold: float = 0.05  # IoU >= 0.05 -> candidate tracks are merged
        self._track_merge_time_window: float = 7.0  # seconds within which to merge

        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"
        self.color_det_dict = {}
        self.start_timer = None
        # Zone-based tracking storage
        self._zone_current_track_ids = {}  # zone_name -> set of current track IDs in zone
        self._zone_total_track_ids = {}  # zone_name -> set of all track IDs that have been in zone
        self._zone_current_counts = {}  # zone_name -> current count in zone
        self._zone_total_counts = {}  # zone_name -> total count that have been in zone
        self.logger.info("Initialized ColorDetectionUseCase with zone tracking")
        self.detector = None  # ClipProcessor(); created lazily in process() when config.true_import is set
        self.all_color_data = {}
        self.all_color_counts = {}
        #self.jpeg = TurboJPEG()

    def process(
        self,
        data: Any,
        config: ConfigProtocol,
        input_bytes: Optional[bytes] = None,
        context: Optional[ProcessingContext] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> ProcessingResult:
        processing_start = time.time()

        try:
            if not isinstance(config, ColorDetectionConfig):
                return self.create_error_result(
                    "Invalid configuration type for color detection",
                    usecase=self.name,
                    category=self.category,
                    context=context
                )

            if config.true_import and self.detector is None:
                self.detector = ClipProcessor()
                self.logger.info("Initialized ClipProcessor for color detection")

            if context is None:
                context = ProcessingContext()

            if not input_bytes:
                self.logger.warning("input_bytes is required for color detection")

            if not data:
                self.logger.warning("Detection data is required for color detection")

            input_format = match_results_structure(data)
            context.input_format = input_format
            context.confidence_threshold = config.confidence_threshold

            self.logger.info(f"Processing color detection with format: {input_format.value}")

            # Step 1: Apply confidence filtering
            processed_data = filter_by_confidence(data, config.confidence_threshold)

            # Step 2: Apply category mapping if provided
            if config.index_to_category:
                processed_data = apply_category_mapping(processed_data, config.index_to_category)

            # Step 3: Keep only the configured target categories; without a filter,
            # fall back to all processed detections so the name is always bound
            if config.target_categories:
                color_processed_data = [d for d in processed_data if d.get('category') in config.target_categories]
                self.logger.debug("Applied category filtering")
            else:
                color_processed_data = list(processed_data)

            raw_processed_data = [copy.deepcopy(det) for det in color_processed_data]

            # Step 4: Apply bounding box smoothing if enabled
            if config.enable_smoothing:
                if self.smoothing_tracker is None:
                    smoothing_config = BBoxSmoothingConfig(
                        smoothing_algorithm=config.smoothing_algorithm,
                        window_size=config.smoothing_window_size,
                        cooldown_frames=config.smoothing_cooldown_frames,
                        confidence_threshold=config.confidence_threshold,
                        confidence_range_factor=config.smoothing_confidence_range_factor,
                        enable_smoothing=True
                    )
                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
                color_processed_data = bbox_smoothing(color_processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

            # Step 5: Apply advanced tracking
            try:
                from ..advanced_tracker import AdvancedTracker
                from ..advanced_tracker.config import TrackerConfig

                if self.tracker is None:
                    tracker_config = TrackerConfig()
                    self.tracker = AdvancedTracker(tracker_config)
                    self.logger.info("Initialized AdvancedTracker for color detection tracking")

                color_processed_data = self.tracker.update(color_processed_data)

            except Exception as e:
                self.logger.warning(f"AdvancedTracker failed: {e}")

            color_processed_data = self._attach_masks_to_detections(color_processed_data, raw_processed_data)
            self._total_frame_counter += 1

            frame_number = None
            if stream_info:
                input_settings = stream_info.get("input_settings", {})
                start_frame = input_settings.get("start_frame")
                end_frame = input_settings.get("end_frame")
                # If start and end frame are the same, it's a single frame
                if start_frame is not None and end_frame is not None and start_frame == end_frame:
                    frame_number = start_frame

            # Step 6: Analyze colors in media
            color_analysis = self._analyze_colors_in_media(
                color_processed_data,
                input_bytes,
                config
            )

            # Guard against a missing detector (true_import=False leaves it as None)
            curr_frame_color = (
                self.detector.process_color_in_frame(color_processed_data, input_bytes, config.zone_config)
                if self.detector is not None else {}
            )

            # Step 7: Update color tracking state
            self._update_color_tracking_state_from_analysis(color_analysis)

            # Step 8: Calculate summaries
            color_summary = self._calculate_color_summary(color_analysis, config)
            totals = self.get_total_color_counts()
            if not totals:
                tmp = defaultdict(set)
                for rec in color_analysis:
                    color = rec.get('main_color')
                    tid = rec.get('track_id') or rec.get('detection_id')
                    if color and tid is not None:
                        tmp[color].add(tid)
                totals = {color: len(ids) for color, ids in tmp.items()}
            total_category_counts = self.get_total_category_counts()
            color_summary['total_color_counts'] = totals
            color_summary['total_category_counts'] = total_category_counts

            general_summary = self._calculate_general_summary(processed_data, config)

            # Step 9: Zone analysis
            zone_analysis = {}
            if config.zone_config and config.zone_config['zones']:
                frame_data = color_processed_data
                zone_analysis = count_objects_in_zones(frame_data, config.zone_config['zones'], stream_info)
                if zone_analysis and config.enable_unique_counting:
                    enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, color_processed_data, config)
                    for zone_name, enhanced_data in enhanced_zone_analysis.items():
                        zone_analysis[zone_name] = enhanced_data

            # Step 10: Generate alerts, incidents, tracking stats, and summary
            alerts = self._check_alerts(color_summary, frame_number, config)

            # NOTE: incidents are currently disabled for this use case; the generated
            # list is intentionally discarded and an empty list is reported instead.
            incidents_list = self._generate_incidents(color_summary, alerts, config, frame_number, stream_info)
            incidents_list = []

            tracking_stats_list = self._generate_tracking_stats(color_summary, alerts, config, frame_number, stream_info, curr_frame_color)

            business_analytics_list = []
            summary_list = self._generate_summary(color_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

            incidents = incidents_list[0] if incidents_list else {}
            tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
            business_analytics = business_analytics_list[0] if business_analytics_list else {}
            summary = summary_list[0] if summary_list else {}
            agg_summary = {str(frame_number): {
                "incidents": incidents,
                "tracking_stats": tracking_stats,
                "business_analytics": business_analytics,
                "alerts": alerts,
                "zone_analysis": zone_analysis,
                "human_text": summary}
            }

            context.mark_completed()

            # Build result object following the new pattern
            result = self.create_result(
                data={"agg_summary": agg_summary},
                usecase=self.name,
                category=self.category,
                context=context
            )
            proc_time = time.time() - processing_start
            processing_latency_ms = proc_time * 1000.0
            processing_fps = (1.0 / proc_time) if proc_time > 0 else None
            # Log the performance metrics
            self.logger.info(
                "latency in ms: %s | Throughput fps: %s | Frame_Number: %s",
                processing_latency_ms, processing_fps, self._total_frame_counter
            )
            return result

        except Exception as e:
            self.logger.error(f"Color detection failed: {str(e)}", exc_info=True)
            if context:
                context.mark_completed()
            return self.create_error_result(
                str(e),
                type(e).__name__,
                usecase=self.name,
                category=self.category,
                context=context
            )

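    # Editor's sketch (not part of the released file): minimal single-image call.
    # `detections` is the upstream model output (dicts with "category",
    # "confidence", "bounding_box"); `image_bytes` is the encoded frame. Assumes
    # ProcessingResult exposes its payload via `.data`.
    def _example_single_image_run(self, detections: List[Dict[str, Any]], image_bytes: bytes) -> Dict[str, Any]:
        cfg = ColorDetectionConfig(confidence_threshold=0.5, true_import=False)
        result = self.process(detections, cfg, input_bytes=image_bytes)
        return result.data["agg_summary"]
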
    def color_helper(self, curr_data):
        """Merge the current frame's per-track colors into the cumulative store
        and return the running color counts."""
        for tid, data in curr_data.items():
            if tid not in self.all_color_data:
                # First time seeing this track
                self.all_color_data[tid] = {
                    "color": data.get("color"),
                    "confidence": data.get("confidence"),
                }

                # update color counts
                color = data.get("color")
                if color:
                    self.all_color_counts[color] = self.all_color_counts.get(color, 0) + 1

            else:
                # Update only if new confidence is higher
                if data.get("confidence", 0) > self.all_color_data[tid]["confidence"]:
                    old_color = self.all_color_data[tid]["color"]
                    new_color = data.get("color")

                    if new_color != old_color:
                        # decrease old color count
                        if old_color in self.all_color_counts:
                            self.all_color_counts[old_color] -= 1
                            if self.all_color_counts[old_color] <= 0:
                                del self.all_color_counts[old_color]

                        # increase new color count
                        if new_color:
                            self.all_color_counts[new_color] = self.all_color_counts.get(new_color, 0) + 1

                        # update track info
                        self.all_color_data[tid]["color"] = new_color
                        self.all_color_data[tid]["confidence"] = data.get("confidence")

        # The caller treats the return value as the cumulative color counts, so
        # return the running totals (the original body fell through and returned None)
        return self.all_color_counts

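    # Editor's sketch (not part of the released file): color_helper() keeps the
    # highest-confidence color per track and moves counts between color buckets.
    def _example_color_helper_upgrade(self) -> None:
        self.all_color_data.clear()
        self.all_color_counts.clear()
        self.color_helper({7: {"color": "red", "confidence": 0.4}})
        self.color_helper({7: {"color": "blue", "confidence": 0.9}})  # higher confidence wins
        assert self.all_color_counts == {"blue": 1}
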
    def _analyze_colors_in_media(
        self,
        data: Any,
        media_bytes: bytes,
        config: ColorDetectionConfig
    ) -> List[Dict[str, Any]]:
        """Analyze colors of detected objects in video frames or images."""

        # Determine if input is video or image
        is_video = self._is_video_bytes(media_bytes)

        if is_video:
            return self._analyze_colors_in_video(data, media_bytes, config)
        else:
            return self._analyze_colors_in_image(data, media_bytes, config)

    def _update_color_tracking_state_from_analysis(self, color_analysis: List[Dict[str, Any]]) -> None:
        """Update total tracking store using analyzed color results.
        Ensures totals are populated even if pre-analysis detections lacked colors/track_ids."""
        existing_store = getattr(self, '_color_total_track_ids', None)
        if not isinstance(existing_store, defaultdict):
            existing_store = {} if existing_store is None else dict(existing_store)
            self._color_total_track_ids = defaultdict(set, existing_store)
        else:
            self._color_total_track_ids = existing_store
        # Reset current frame tracking for this frame
        self._color_current_frame_track_ids = defaultdict(set)

        for rec in color_analysis:
            cat = rec.get('category')
            color = rec.get('main_color')
            track_id = rec.get('track_id')
            major_colors = rec.get('major_colors') or []
            # Safely extract color confidence
            if major_colors and isinstance(major_colors[0], (list, tuple)) and len(major_colors[0]) > 2:
                color_conf = major_colors[0][2]
            else:
                color_conf = 0.0
            if track_id is None:
                track_id = rec.get('detection_id')
            if cat and track_id is not None:
                # Update the color_det_dict with the actual color
                if color and track_id in self.color_det_dict:
                    existing_color, existing_conf = self.color_det_dict.get(track_id, [None, -1])
                    if color_conf > existing_conf and color != existing_color:
                        # Move this track_id from any previous color bucket(s) to the new one
                        for k in list(self._color_total_track_ids.keys()):
                            if track_id in self._color_total_track_ids[k]:
                                self._color_total_track_ids[k].discard(track_id)
                        # Update assignment
                        self.color_det_dict[track_id] = [color, color_conf]
                        new_key = f"{cat}:{color}" if color else cat
                        self._color_total_track_ids[new_key].add(track_id)
                        # Update current frame tracking
                        self._color_current_frame_track_ids[new_key].add(track_id)
                    elif color_conf > existing_conf:
                        # Confidence improved but color unchanged; update confidence only
                        self.color_det_dict[track_id] = [existing_color, color_conf]
                        same_key = f"{cat}:{existing_color}" if existing_color else cat
                        self._color_current_frame_track_ids[same_key].add(track_id)
                    else:
                        # No improvement; still reflect in current frame under existing color
                        same_key = f"{cat}:{existing_color}" if existing_color else cat
                        self._color_current_frame_track_ids[same_key].add(track_id)
                elif color and track_id not in self.color_det_dict:
                    # First assignment for this track
                    self.color_det_dict[track_id] = [color, color_conf]
                    key = f"{cat}:{color}" if color else cat
                    self._color_total_track_ids[key].add(track_id)
                    # Also update current frame tracking
                    self._color_current_frame_track_ids[key].add(track_id)

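    # Editor's note (illustration, not in the released file): the totals above are
    # keyed "category:color". A tracked car first reported as red and later, with
    # higher color confidence, as blue ends up only in the "car:blue" bucket; its
    # track id is discarded from "car:red" when it migrates.
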
    def _is_video_bytes(self, media_bytes: bytes) -> bool:
        """Determine if bytes represent a video file."""
        # Check common video file signatures
        video_signatures = [
            b'\x00\x00\x00\x20ftypmp4',  # MP4
            b'\x00\x00\x00\x18ftypmp4',  # MP4 variant
            b'RIFF',                     # AVI
            b'\x1aE\xdf\xa3',            # MKV/WebM
            b'ftyp',                     # General MP4 family
        ]

        for signature in video_signatures:
            if media_bytes.startswith(signature) or signature in media_bytes[:50]:
                return True
        return False

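    # Editor's sketch (not part of the released file): the magic-byte probe above
    # is heuristic. An MP4 ftyp header matches; JPEG bytes fall through to False.
    def _example_probe_media_kind(self) -> None:
        mp4_header = b'\x00\x00\x00\x20ftypmp4' + b'\x00' * 40
        jpeg_header = b'\xff\xd8\xff\xe0' + b'\x00' * 40
        assert self._is_video_bytes(mp4_header) is True
        assert self._is_video_bytes(jpeg_header) is False
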
    def _analyze_colors_in_video(
        self,
        data: Any,
        video_bytes: bytes,
        config: ColorDetectionConfig
    ) -> List[Dict[str, Any]]:
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video:
            temp_video.write(video_bytes)
            video_path = temp_video.name

        try:
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise RuntimeError("Failed to open video file")

            fps = config.fps or cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            color_analysis = []
            frame_id = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_id % config.frame_skip != 0:
                    frame_id += 1
                    continue

                frame_key = str(frame_id)
                timestamp = frame_id / fps
                frame_detections = self._get_frame_detections(data, frame_key)
                if not frame_detections:
                    frame_id += 1
                    continue

                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                for detection in frame_detections:
                    if detection.get("confidence", 1.0) < config.confidence_threshold:
                        continue

                    bbox = detection.get("bounding_box", detection.get("bbox"))
                    if not bbox:
                        continue

                    # Check all zones
                    zones = config.zone_config['zones'] if config.zone_config else {}
                    in_any_zone = not zones  # Process all if no zones
                    zone_name = None
                    for z_name, zone_polygon in zones.items():
                        if self._is_in_zone(bbox, zone_polygon):
                            in_any_zone = True
                            zone_name = z_name
                            break
                    if not in_any_zone:
                        continue  # Skip detections outside zones

                    crop = self._crop_bbox(rgb_frame, bbox, config.bbox_format)
                    if crop.size == 0:
                        continue

                    major_colors = extract_major_colors(crop, k=config.top_k_colors)
                    main_color = major_colors[0][0] if major_colors else "unknown"

                    color_record = {
                        "frame_id": frame_key,
                        "timestamp": round(timestamp, 2),
                        "category": detection.get("category", "unknown"),
                        "confidence": round(detection.get("confidence", 0.0), 3),
                        "main_color": main_color,
                        "major_colors": major_colors,
                        "bbox": bbox,
                        "detection_id": detection.get("id", f"det_{len(color_analysis)}"),
                        "track_id": detection.get("track_id"),
                        "zone_name": zone_name
                    }
                    color_analysis.append(color_record)

                frame_id += 1

            cap.release()
            return color_analysis

        finally:
            if os.path.exists(video_path):
                os.unlink(video_path)

    def _analyze_colors_in_image(
        self,
        data: Any,
        image_bytes: bytes,
        config: ColorDetectionConfig
    ) -> List[Dict[str, Any]]:
        image_array = np.frombuffer(image_bytes, np.uint8)
        image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        #image = self.jpeg.decode(image_bytes, pixel_format=TJPF_RGB)

        if image is None:
            raise RuntimeError("Failed to decode image from bytes")

        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        color_analysis = []
        detections = self._get_frame_detections(data, "0")

        for detection in detections:
            if detection.get("confidence", 1.0) < config.confidence_threshold:
                continue

            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue

            # Check all zones
            zones = config.zone_config['zones'] if config.zone_config else {}
            in_any_zone = not zones
            zone_name = None
            for z_name, zone_polygon in zones.items():
                if self._is_in_zone(bbox, zone_polygon):
                    in_any_zone = True
                    zone_name = z_name
                    break
            if not in_any_zone:
                continue  # Skip detections outside zones

            crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
            if crop.size == 0:
                continue

            major_colors = extract_major_colors(crop, k=config.top_k_colors)
            main_color = major_colors[0][0] if major_colors else "unknown"

            color_record = {
                "frame_id": "0",
                "timestamp": 0.0,
                "category": detection.get("category", "unknown"),
                "confidence": round(detection.get("confidence", 0.0), 3),
                "main_color": main_color,
                "major_colors": major_colors,
                "bbox": bbox,
                "detection_id": detection.get("id", f"det_{len(color_analysis)}"),
                "track_id": detection.get("track_id"),
                "zone_name": zone_name
            }
            color_analysis.append(color_record)

        return color_analysis

    def _get_frame_detections(self, data: Any, frame_key: str) -> List[Dict[str, Any]]:
        """Extract detections for a specific frame from data."""
        if isinstance(data, dict):
            # Frame-based format
            return data.get(frame_key, [])
        elif isinstance(data, list):
            # List format (single frame or all detections)
            return data
        else:
            return []

    def _crop_bbox(self, image: np.ndarray, bbox: Dict[str, Any], bbox_format: str) -> np.ndarray:
        """Crop bounding box region from image."""
        h, w = image.shape[:2]

        # Auto-detect bbox format
        if bbox_format == "auto":
            if "xmin" in bbox:
                bbox_format = "xmin_ymin_xmax_ymax"
            elif "x" in bbox:
                bbox_format = "x_y_width_height"
            else:
                return np.zeros((0, 0, 3), dtype=np.uint8)

        # Extract coordinates based on format
        if bbox_format == "xmin_ymin_xmax_ymax":
            xmin = max(0, int(bbox["xmin"]))
            ymin = max(0, int(bbox["ymin"]))
            xmax = min(w, int(bbox["xmax"]))
            ymax = min(h, int(bbox["ymax"]))
        elif bbox_format == "x_y_width_height":
            xmin = max(0, int(bbox["x"]))
            ymin = max(0, int(bbox["y"]))
            xmax = min(w, int(bbox["x"] + bbox["width"]))
            ymax = min(h, int(bbox["y"] + bbox["height"]))
        else:
            return np.zeros((0, 0, 3), dtype=np.uint8)

        return image[ymin:ymax, xmin:xmax]

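    # Editor's sketch (not part of the released file): the two bbox dialects that
    # _crop_bbox() auto-detects describe the same 100x200 pixel region.
    def _example_equivalent_bbox_crops(self) -> None:
        frame = np.zeros((480, 640, 3), dtype=np.uint8)
        corners = {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}
        xywh = {"x": 10, "y": 20, "width": 100, "height": 200}
        a = self._crop_bbox(frame, corners, "auto")  # detected via the "xmin" key
        b = self._crop_bbox(frame, xywh, "auto")     # detected via the "x" key
        assert a.shape == b.shape == (200, 100, 3)
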
    def _calculate_color_summary(self, color_analysis: List[Dict], config: ColorDetectionConfig) -> Dict[str, Any]:
        category_colors = defaultdict(lambda: defaultdict(int))
        total_detections = len(color_analysis)
        detections = []
        counts = {}
        for record in color_analysis:
            category = record["category"]
            main_color = record["main_color"]
            category_colors[category][main_color] += 1
            counts[category] = counts.get(category, 0) + 1
            detections.append({
                "bounding_box": record["bbox"],
                "category": record["category"],
                "confidence": record["confidence"],
                "track_id": record["track_id"],
                "frame_id": record["frame_id"],
                "main_color": record["main_color"]
            })

        self.logger.debug(f"Valid detections after filtering: {len(detections)}")
        summary = {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": detections,
            "dominant_colors": {},
            "zone_counts": self._zone_current_counts if config.zone_config and config.zone_config['zones'] else {}
        }

        all_colors = defaultdict(int)
        for category_data in category_colors.values():
            for color, count in category_data.items():
                all_colors[color] += count
        summary["color_distribution"] = dict(all_colors)

        # "dominant_colors" is pre-initialized above, so each non-empty category
        # can record its most frequent color directly
        for category, colors in category_colors.items():
            if colors:
                dominant_color = max(colors.items(), key=lambda x: x[1])
                summary["dominant_colors"][category] = {
                    "color": dominant_color[0],
                    "count": dominant_color[1],
                    "percentage": round((dominant_color[1] / sum(colors.values())) * 100, 1)
                }

        return summary

    def _calculate_general_summary(self, processed_data: Any, config: ColorDetectionConfig) -> Dict[str, Any]:
        """Calculate general detection summary."""

        # Count objects by category
        category_counts = defaultdict(int)
        total_objects = 0

        if isinstance(processed_data, dict):
            # Frame-based format
            for frame_data in processed_data.values():
                if isinstance(frame_data, list):
                    for detection in frame_data:
                        if detection.get("confidence", 1.0) >= config.confidence_threshold:
                            category = detection.get("category", "unknown")
                            category_counts[category] += 1
                            total_objects += 1
        elif isinstance(processed_data, list):
            # List format
            for detection in processed_data:
                if detection.get("confidence", 1.0) >= config.confidence_threshold:
                    category = detection.get("category", "unknown")
                    category_counts[category] += 1
                    total_objects += 1

        return {
            "total_objects": total_objects,
            "category_counts": dict(category_counts),
            "categories_detected": list(category_counts.keys())
        }

    def _calculate_metrics(self, color_analysis: List[Dict], color_summary: Dict, config: ColorDetectionConfig, context: ProcessingContext) -> Dict[str, Any]:
        """Calculate detailed metrics for analytics."""
        total_detections = len(color_analysis)
        unique_colors = len(color_summary.get("color_distribution", {}))

        metrics = {
            "total_detections": total_detections,
            "unique_colors": unique_colors,
            "categories_analyzed": len(color_summary.get("categories", {})),
            "processing_time": context.processing_time or 0.0,
            "input_format": context.input_format.value,
            "confidence_threshold": config.confidence_threshold,
            "color_diversity": 0.0,
            "detection_rate": 0.0,
            "average_colors_per_detection": config.top_k_colors
        }

        # Calculate color diversity
        if total_detections > 0:
            metrics["color_diversity"] = (unique_colors / total_detections) * 100

        # Calculate detection rate
        if config.time_window_minutes and config.time_window_minutes > 0:
            metrics["detection_rate"] = (total_detections / config.time_window_minutes) * 60

        # Per-category metrics
        if color_summary.get("categories"):
            category_metrics = {}
            for category, colors in color_summary["categories"].items():
                category_total = sum(colors.values())
                category_metrics[category] = {
                    "count": category_total,
                    "unique_colors": len(colors),
                    "color_diversity": (len(colors) / category_total) * 100 if category_total > 0 else 0
                }
            metrics["category_metrics"] = category_metrics

        # Processing settings
        metrics["processing_settings"] = {
            "confidence_threshold": config.confidence_threshold,
            "top_k_colors": config.top_k_colors,
            "frame_skip": config.frame_skip,
            "target_categories": config.target_categories,
            "enable_unique_counting": config.enable_unique_counting
        }

        return metrics

    def _extract_predictions(self, color_analysis: List[Dict], config: ColorDetectionConfig) -> List[Dict]:
        """Extract predictions in standard format."""

        predictions = []
        for record in color_analysis:
            prediction = {
                "category": record["category"],
                "confidence": record["confidence"],
                "bbox": record["bbox"],
                "frame_id": record["frame_id"],
                "timestamp": record["timestamp"],
                "main_color": record["main_color"],
                "major_colors": record["major_colors"]
            }
            if "detection_id" in record:
                prediction["id"] = record["detection_id"]
            predictions.append(prediction)

        return predictions

    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
        """Generate a human_text string covering tracking stats, incidents, business analytics, and alerts."""
        lines = []
        lines.append("Application Name: " + self.CASE_TYPE)
        lines.append("Application Version: " + self.CASE_VERSION)
        if len(incidents) > 0:
            lines.append("Incidents: " + f"\n\t{incidents[0].get('human_text', 'No incidents detected')}")
        if len(tracking_stats) > 0:
            lines.append("Tracking Statistics: " + f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
        if len(business_analytics) > 0:
            lines.append("Business Analytics: " + f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}")

        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines.append("Summary: No Summary Data")

        return ["\n".join(lines)]

    def _generate_events(self, color_summary: Dict, alerts: List, config: ColorDetectionConfig, frame_number: Optional[int] = None) -> List[Dict]:
        """Generate structured events with frame-based keys."""
        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        events = [{frame_key: []}]
        frame_events = events[0][frame_key]
        total_detections = color_summary.get("total_detections", 0)

        if total_detections > 0:
            level = "info"
            intensity = min(10.0, total_detections / 5.0)
            if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 20)
                intensity = min(10.0, (total_detections / threshold) * 10)
                level = "critical" if intensity >= 7 else "warning" if intensity >= 5 else "info"
            elif total_detections > 50:
                level = "critical"
                intensity = 9.0
            elif total_detections > 25:
                level = "warning"
                intensity = 7.0

            event = {
                "type": "color_detection",
                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
                "level": level,
                "intensity": round(intensity, 1),
                "config": {
                    "min_value": 0,
                    "max_value": 10,
                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
                },
                "application_name": "Color Detection System",
                "application_version": "1.2",
                "location_info": None,
                "human_text": (
                    f"Event: Color Detection\nLevel: {level.title()}\n"
                    f"Time: {datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')}\n"
                    f"Detections: {total_detections} objects analyzed\n"
                    f"Unique Colors: {len(color_summary.get('color_distribution', {}))}\n"
                    f"Intensity: {intensity:.1f}/10"
                )
            }
            frame_events.append(event)

        for alert in alerts:
            alert_event = {
                "type": alert.get("type", "color_alert"),
                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
                "level": alert.get("severity", "warning"),
                "intensity": 8.0,
                "config": {
                    "min_value": 0,
                    "max_value": 10,
                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
                },
                "application_name": "Color Detection Alert System",
                "application_version": "1.2",
                "location_info": alert.get("category"),
                "human_text": f"Event: {alert.get('type', 'Color Alert').title()}\nMessage: {alert.get('message', 'Color detection alert triggered')}"
            }
            frame_events.append(alert_event)

        return events

    def _generate_tracking_stats(
            self,
            counting_summary: Dict,
            alerts: Any,
            config: ColorDetectionConfig,
            frame_number: Optional[int] = None,
            stream_info: Optional[Dict[str, Any]] = None,
            curr_frame_color: Any = None
    ) -> List[Dict]:
        """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
        # frame_key = str(frame_number) if frame_number is not None else "current_frame"
        # tracking_stats = [{frame_key: []}]
        # frame_tracking_stats = tracking_stats[0][frame_key]
        tracking_stats = []

        total_detections = counting_summary.get("total_count", 0)
        total_color_counts_dict = counting_summary.get("total_color_counts", {})
        total_category_counts_dict = counting_summary.get("total_category_counts", {})
        cumulative_total = sum(total_color_counts_dict.values()) if total_color_counts_dict else 0
        per_category_count = counting_summary.get("per_category_count", {})

        # Compute current color counts from detections
        current_color_count: Dict[str, int] = {}
        for det in counting_summary.get("detections", []):
            color = det.get("main_color")
            if color:
                current_color_count[color] = current_color_count.get(color, 0) + 1

        track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))

        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)

        # Create high precision timestamps for input_timestamp and reset_timestamp
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

        camera_info = self.get_camera_info_from_stream(stream_info)
        total_color_data = self.color_helper(curr_frame_color)
        self.logger.debug("Current frame colors: %s", curr_frame_color)
        self.logger.debug("Total color data: %s", total_color_data)

        human_text_lines = []
        color_counts = {}

        if curr_frame_color:
            for tid, data in curr_frame_color.items():
                color = data.get("color")
                if color not in color_counts:
                    color_counts[color] = 0
                color_counts[color] += 1

            # After processing all frames, log the final counts
            self.logger.debug("Unique color counts: %s", color_counts)

        # CURRENT FRAME section
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
        if total_detections > 0:
            # Vehicle categories (current frame)
            category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
            if len(category_counts) == 1:
                detection_text = category_counts[0] + " detected"
            elif len(category_counts) == 2:
                detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
            else:
                detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
            human_text_lines.append(f"\t- {detection_text}")

            # Colors (current frame)
            if color_counts:
                color_counts_text = ", ".join([f"{count} {color}" for color, count in color_counts.items()])
                human_text_lines.append(f"\t- Colors: {color_counts_text}")
        else:
            human_text_lines.append("\t- No detections")

        human_text_lines.append("")  # spacing

        # TOTAL SINCE section
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
        human_text_lines.append(f"\t- Total Detected (by color): {cumulative_total}")

        # Add category-wise totals
        if total_category_counts_dict:
            human_text_lines.append("\t- Categories:")
            for cat, count in total_category_counts_dict.items():
                if count > 0:
                    human_text_lines.append(f"\t\t- {cat}: {count}")
        # Add color-wise totals
        if total_color_data:
            human_text_lines.append("\t- Colors:")
            for color, count in total_color_data.items():
                if count > 0:
                    human_text_lines.append(f"\t\t- {color}: {count}")

        # Build the current_counts and total_counts arrays in the expected format
        current_counts_categories = []
        for cat, count in per_category_count.items():
            if count > 0 or total_detections > 0:
                current_counts_categories.append({"category": cat, "count": count})
        current_counts_colors = []
        for color, count in current_color_count.items():
            if count > 0 or total_detections > 0:
                current_counts_colors.append({"color": color, "count": count})
        total_counts_categories = []
        for cat, count in total_category_counts_dict.items():
            if count > 0 or cumulative_total > 0:
                total_counts_categories.append({"category": cat, "count": count})
        total_counts_colors = []
        for color, count in total_color_counts_dict.items():
            if count > 0 or cumulative_total > 0:
                # The color lands under the "category" key so the entry matches
                # the schema expected by create_tracking_stats.
                total_counts_colors.append({"category": color, "count": count})

        # Include detections with masks from counting_summary
        # Prepare detections without confidence scores (as per eg.json)
        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "person")
            # Include segmentation if available (like in eg.json)
            if detection.get("masks"):
                segmentation = detection.get("masks", [])
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("segmentation"):
                segmentation = detection.get("segmentation")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("mask"):
                segmentation = detection.get("mask")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            else:
                detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)

        # Build alert_settings array in expected format
        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                             }
            })

        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")

        human_text = "\n".join(human_text_lines)
        reset_settings = [
            {
                "interval_type": "daily",
                "reset_time": {
                    "value": 9,
                    "time_unit": "hour"
                }
            }
        ]

        # Keep backward-compat: put colors into total_counts and categories into current_counts
        tracking_stat = self.create_tracking_stats(
            total_counts=total_counts_colors, current_counts=current_counts_categories,
            detections=detections, human_text=human_text, camera_info=camera_info,
            alerts=alerts, alert_settings=alert_settings,
            reset_settings=reset_settings, start_time=high_precision_start_timestamp,
            reset_time=high_precision_reset_timestamp)

        # Add explicit breakdowns for consumers who want both types
        # tracking_stat["current_category_counts"] = current_counts_categories
        # tracking_stat["current_color_counts"] = current_counts_colors
        # tracking_stat["total_category_counts"] = total_counts_categories
        # tracking_stat["total_color_counts"] = total_counts_colors

        tracking_stats.append(tracking_stat)
        return tracking_stats

    def _generate_human_text_for_tracking(self, total_detections: int, color_summary: Dict, insights: List[str], summary: str, config: ColorDetectionConfig) -> str:
        """Generate human-readable text for tracking stats."""
        from datetime import datetime, timezone

        text_parts = [
            # f"Tracking Start Time: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M')}",
            # f"Objects Analyzed: {total_detections}"
        ]

        if config.time_window_minutes:
            detection_rate_per_hour = (total_detections / config.time_window_minutes) * 60
            # text_parts.append(f"Detection Rate: {detection_rate_per_hour:.1f} objects per hour")

        # Add color statistics
        unique_colors = len(color_summary.get("color_distribution", {}))
        # text_parts.append(f"Unique Colors Detected: {unique_colors}")

        if total_detections > 0:
            color_diversity = (unique_colors / total_detections) * 100
            # text_parts.append(f"Color Diversity: {color_diversity:.1f}%")

        # Add category breakdown
        categories = color_summary.get("categories", {})
        if categories:
            # text_parts.append(f"Categories Analyzed: {len(categories)}")
            for category, colors in categories.items():
                category_total = sum(colors.values())
                if category_total > 0:
                    dominant_color = max(colors.items(), key=lambda x: x[1])[0] if colors else "unknown"
                    text_parts.append(f"  {category_total} {category.title()} detected, Color: {dominant_color}")

        # Add color distribution summary
        color_distribution = color_summary.get("color_distribution", {})
        if color_distribution:
            top_colors = sorted(color_distribution.items(), key=lambda x: x[1], reverse=True)[:3]
            # text_parts.append("Top Colors:")
            for color, count in top_colors:
                percentage = (count / total_detections) * 100 if total_detections else 0.0
                # text_parts.append(f"  {color.title()}: {count} objects ({percentage:.1f}%)")

        # Add key insights
        # if insights:
        #     text_parts.append("Key Color Insights:")
        #     for insight in insights[:3]:  # Limit to first 3 insights
        #         text_parts.append(f"  - {insight}")

        return "\n".join(text_parts)

    def reset_tracker(self) -> None:
        """Reset the advanced tracker instance."""
        if self.tracker is not None:
            self.tracker.reset()
            self.logger.info("AdvancedTracker reset for new tracking session")

    def reset_color_tracking(self) -> None:
        """Reset color tracking state."""
        self._color_total_track_ids = defaultdict(set)
        self._color_current_frame_track_ids = defaultdict(set)
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self.logger.info("Color tracking state reset")

    def reset_all_tracking(self) -> None:
        """Reset both advanced tracker and color tracking state."""
        self.reset_tracker()
        self.reset_color_tracking()
        self.logger.info("All color tracking state reset")

    def _is_in_zone(self, bbox: Dict[str, Any], zone_polygon: List[List[int]]) -> bool:
        """Check if the bottom 25% center point of a bounding box lies within the given zone polygon."""
        if not zone_polygon or not isinstance(bbox, dict):
            return True  # No zone defined, or invalid bbox: process all detections
        try:
            # Get bottom 25% center point
            center_point = get_bbox_bottom25_center(bbox)
            # Convert zone polygon to list of tuples
            polygon_points = [(point[0], point[1]) for point in zone_polygon]
            # Check if point is inside polygon
            in_zone = point_in_polygon(center_point, polygon_points)
            self.logger.debug(f"BBox center {center_point} in zone: {in_zone}")
            return in_zone
        except (KeyError, TypeError) as e:
            self.logger.warning(f"Failed to check zone for bbox {bbox}: {e}")
            return False

    @staticmethod
    def _iou(bbox1, bbox2):
        """Compute IoU between two bboxes (dicts with xmin/ymin/xmax/ymax or x/y/width/height)."""
        if "xmin" in bbox1:
            x1 = max(bbox1["xmin"], bbox2["xmin"])
            y1 = max(bbox1["ymin"], bbox2["ymin"])
            x2 = min(bbox1["xmax"], bbox2["xmax"])
            y2 = min(bbox1["ymax"], bbox2["ymax"])
            area1 = (bbox1["xmax"] - bbox1["xmin"]) * (bbox1["ymax"] - bbox1["ymin"])
            area2 = (bbox2["xmax"] - bbox2["xmin"]) * (bbox2["ymax"] - bbox2["ymin"])
        else:
            x1 = max(bbox1["x"], bbox2["x"])
            y1 = max(bbox1["y"], bbox2["y"])
            x2 = min(bbox1["x"] + bbox1["width"], bbox2["x"] + bbox2["width"])
            y2 = min(bbox1["y"] + bbox1["height"], bbox2["y"] + bbox2["height"])
            area1 = bbox1["width"] * bbox1["height"]
            area2 = bbox2["width"] * bbox2["height"]
        inter_w = max(0, x2 - x1)
        inter_h = max(0, y2 - y1)
        inter_area = inter_w * inter_h
        union = area1 + area2 - inter_area
        return inter_area / union if union > 0 else 0.0

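    # --- Usage sketch (illustrative, not part of the original source). The
    # static helper can be exercised directly; for two 10x10 boxes offset by
    # 5px on each axis, the intersection is 25 and the union 175:
    #
    #     ColorDetectionUseCase._iou(
    #         {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10},
    #         {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15},
    #     )  # -> 25 / 175 ~= 0.1429
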
    @staticmethod
    def _deduplicate_detections(detections, iou_thresh=0.7):
        """Suppress duplicate/overlapping detections with same category and high IoU."""
        filtered = []
        used = [False] * len(detections)
        for i, det in enumerate(detections):
            if used[i]:
                continue
            group = [i]
            for j in range(i + 1, len(detections)):
                if used[j]:
                    continue
                if det.get("category") == detections[j].get("category"):
                    bbox1 = det.get("bounding_box", det.get("bbox"))
                    bbox2 = detections[j].get("bounding_box", detections[j].get("bbox"))
                    if bbox1 and bbox2 and ColorDetectionUseCase._iou(bbox1, bbox2) > iou_thresh:
                        used[j] = True
                        group.append(j)
            best_idx = max(group, key=lambda idx: detections[idx].get("confidence", 0))
            filtered.append(detections[best_idx])
            used[best_idx] = True
        return filtered

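    # --- Usage sketch (illustrative): two same-category boxes whose IoU exceeds
    # the 0.7 default collapse to the higher-confidence one. The detection
    # dicts below are hypothetical inputs, not values from the original source:
    #
    #     dets = [
    #         {"category": "car", "confidence": 0.9,
    #          "bounding_box": {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10}},
    #         {"category": "car", "confidence": 0.8,
    #          "bounding_box": {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 9}},
    #     ]
    #     ColorDetectionUseCase._deduplicate_detections(dets)
    #     # -> keeps only the 0.9-confidence detection (IoU = 0.9 > 0.7)
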
    def get_config_schema(self) -> Dict[str, Any]:
        """Get JSON schema for configuration validation."""
        return {
            "type": "object",
            "properties": {
                "confidence_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.5},
                "top_k_colors": {"type": "integer", "minimum": 1, "default": 3},
                "frame_skip": {"type": "integer", "minimum": 1, "default": 1},
                "target_categories": {"type": ["array", "null"], "items": {"type": "string"}, "default": [
                    "car", "bicycle", "bus", "motorcycle"
                ]},
                "fps": {"type": ["number", "null"], "minimum": 1.0, "default": None},
                "bbox_format": {"type": "string", "enum": ["auto", "xmin_ymin_xmax_ymax", "x_y_width_height"], "default": "auto"},
                "index_to_category": {"type": ["object", "null"], "default": None},
                "alert_config": {"type": ["object", "null"], "default": None}
            },
            "required": ["confidence_threshold", "top_k_colors"],
            "additionalProperties": False
        }

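    # --- Validation sketch (assumes the third-party `jsonschema` package,
    # which is not necessarily a dependency of this module):
    #
    #     import jsonschema
    #     schema = use_case.get_config_schema()
    #     jsonschema.validate({"confidence_threshold": 0.5, "top_k_colors": 3}, schema)
    #     # raises jsonschema.ValidationError if a field violates the schema
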
    def create_default_config(self, **overrides) -> ColorDetectionConfig:
        """Create default configuration with optional overrides."""
        defaults = {
            "category": self.category,
            "usecase": self.name,
            "confidence_threshold": 0.5,
            "top_k_colors": 3,
            "frame_skip": 1,
            "target_categories": [
                "car", "bicycle", "bus", "motorcycle"
            ],
            "fps": None,
            "bbox_format": "auto",
            "index_to_category": None,
            "alert_config": None
        }
        defaults.update(overrides)
        return ColorDetectionConfig(**defaults)

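    # --- Usage sketch (illustrative): keyword overrides are merged over the
    # defaults before the config object is constructed:
    #
    #     cfg = use_case.create_default_config(confidence_threshold=0.6,
    #                                          target_categories=["car"])
    #     # cfg.top_k_colors == 3 (default), cfg.confidence_threshold == 0.6
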
    def _update_color_tracking_state(self, detections: List[Dict]):
        """Track unique track_ids per category and color for total count."""
        # Ensure storage is a defaultdict(set) to allow safe .add()
        existing_store = getattr(self, '_color_total_track_ids', None)
        if not isinstance(existing_store, defaultdict):
            existing_store = {} if existing_store is None else dict(existing_store)
            self._color_total_track_ids = defaultdict(set, existing_store)
        else:
            self._color_total_track_ids = existing_store
        self._color_current_frame_track_ids = defaultdict(set)
        for det in detections:
            cat = det.get('category')
            color = det.get('main_color')
            track_id = det.get('track_id')
            if cat and track_id is not None:
                key = f"{cat}:{color}" if color else cat
                self._color_total_track_ids[key].add(track_id)
                self._color_current_frame_track_ids[key].add(track_id)

    def get_total_color_counts(self):
        """Return total unique track_id count per color (across all categories)."""
        store = getattr(self, '_color_total_track_ids', {})
        if not isinstance(store, dict):
            return {}
        color_to_ids = defaultdict(set)
        for key, id_set in store.items():
            if isinstance(key, str) and ':' in key:
                _, color = key.split(':', 1)
            else:
                color = None
            # Support both set and iterable
            ids = id_set if isinstance(id_set, set) else set(id_set or [])
            if color:
                color_to_ids[color].update(ids)
        return {color: len(ids) for color, ids in color_to_ids.items()}

    def get_total_category_counts(self):
        """Return total unique track_id count per category (across all colors)."""
        store = getattr(self, '_color_total_track_ids', {})
        if not isinstance(store, dict):
            return {}
        category_to_ids = defaultdict(set)
        for key, id_set in store.items():
            if isinstance(key, str) and ':' in key:
                cat, _ = key.split(':', 1)
            else:
                cat = key
            ids = id_set if isinstance(id_set, set) else set(id_set or [])
            category_to_ids[cat].update(ids)
        return {cat: len(ids) for cat, ids in category_to_ids.items()}

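    # --- Worked example (illustrative): with the "category:color" key scheme
    # written by _update_color_tracking_state, a store of
    # {"car:red": {1}, "car:blue": {2}} yields
    #
    #     get_total_color_counts()    -> {"red": 1, "blue": 1}
    #     get_total_category_counts() -> {"car": 2}
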
    def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
        """Get detailed information about track IDs for color detections (per frame)."""
        frame_track_ids = set(det.get('track_id') for det in detections if det.get('track_id') is not None)
        total_track_ids = set()
        for s in getattr(self, '_color_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }

    def _attach_masks_to_detections(
        self,
        processed_detections: List[Dict[str, Any]],
        raw_detections: List[Dict[str, Any]],
        iou_threshold: float = 0.5,
    ) -> List[Dict[str, Any]]:
        """
        Attach segmentation masks from the original `raw_detections` list to the
        `processed_detections` list returned after smoothing/tracking.

        Matching between detections is performed using Intersection-over-Union
        (IoU) of the bounding boxes. For each processed detection we select the
        raw detection with the highest IoU above `iou_threshold` and copy its
        `masks` (or `mask`) field. If no suitable match is found, the detection
        falls back to the `["EMPTY"]` sentinel so the `masks` key is always
        present and the schema stays consistent.
        """

        if not processed_detections or not raw_detections:
            # Nothing to do – ensure masks key exists for downstream logic.
            for det in processed_detections:
                det.setdefault("masks", [])
            return processed_detections

        # Track which raw detections have already been matched to avoid
        # assigning the same mask to multiple processed detections.
        used_raw_indices = set()

        for det in processed_detections:
            best_iou = 0.0
            best_idx = None

            for idx, raw_det in enumerate(raw_detections):
                if idx in used_raw_indices:
                    continue

                iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
                if iou > best_iou:
                    best_iou = iou
                    best_idx = idx

            if best_idx is not None and best_iou >= iou_threshold:
                raw_det = raw_detections[best_idx]
                masks = raw_det.get("masks", raw_det.get("mask"))
                if masks is not None:
                    det["masks"] = masks
                used_raw_indices.add(best_idx)
            else:
                # No adequate match – use the "EMPTY" sentinel to keep the
                # masks key present with a consistent schema.
                det.setdefault("masks", ["EMPTY"])

        return processed_detections

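    # --- Usage sketch (illustrative): a processed detection whose box matches
    # a raw detection at IoU >= 0.5 inherits that raw detection's masks; an
    # unmatched detection receives the ["EMPTY"] sentinel instead:
    #
    #     processed = [{"bounding_box": {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10}}]
    #     raw = [{"bounding_box": {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10},
    #             "masks": [[0, 0, 10, 0, 10, 10]]}]
    #     use_case._attach_masks_to_detections(processed, raw)
    #     # -> processed[0]["masks"] == [[0, 0, 10, 0, 10, 10]]
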
    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ColorDetectionConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate structured incidents for the output format with frame-based keys."""

        # Use frame number as key, fallback to 'current_frame' if not available
        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            # Determine incident level based on thresholds
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)

                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            # Generate human text in new format
            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {self.CASE_TYPE}: {level}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                      getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                 }
                })

            event = self.create_incident(
                incident_id=self.CASE_TYPE + '_' + frame_key, incident_type=self.CASE_TYPE,
                severity_level=level, human_text=human_text, camera_info=camera_info,
                alerts=alerts, alert_settings=alert_settings,
                start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
            incidents.append(event)

        else:
            self._ascending_alert_list.append(0)
            incidents.append({})

        return incidents

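    # --- Worked example (illustrative): with count_thresholds = {"all": 20}
    # and 16 current detections, intensity = min(10.0, 16 / 20 * 10) = 8.0,
    # which falls in the [7, 9) band, so the incident level is "significant"
    # and 2 is appended to the ascending-trend history.
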
    def _check_alerts(self, summary: dict, frame_number: Any, config: ColorDetectionConfig) -> List[Dict]:
        """
        Check if any alert thresholds are exceeded and return alert dicts.
        """
        def get_trend(data, lookback=900, threshold=0.6):
            '''
            Determine if the trend is ascending or descending based on actual value progression.
            Works with severity values 0,1,2,3 (not just binary).
            '''
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True  # not enough data to determine trend
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            ratio = increasing / total
            if ratio >= threshold:
                return True
            elif ratio <= (1 - threshold):
                return False
            # Inconclusive window: fall back to the majority direction so the
            # function always returns a bool rather than implicitly None.
            return ratio >= 0.5

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)  # CURRENT combined total count of all classes
        total_counts_dict = summary.get("total_color_counts", {})  # TOTAL cumulative counts per class
        if isinstance(total_counts_dict, int):
            total_counts_dict = {}
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
        per_category_count = summary.get("per_category_count", {})  # CURRENT count per class

        if not config.alert_config:
            return alerts

        total = summary.get("total_count", 0)
        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:

            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total > threshold:

                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": "alert_" + category + '_' + frame_key,
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                     }
                    })
                elif category in summary.get("per_category_count", {}):
                    count = summary.get("per_category_count", {})[category]
                    if count > threshold:  # Fixed logic: alert when EXCEEDING threshold
                        alerts.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                            "alert_id": "alert_" + category + '_' + frame_key,
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": threshold,
                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                         }
                        })
                else:
                    pass
        return alerts

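    # --- Worked example (illustrative) for the nested get_trend helper: a
    # monotonically rising history [0, 0, 1, 1, 2, 2, 3] has 6 of 6
    # non-decreasing steps (ratio 1.0 >= 0.8 -> ascending, True), while a
    # strictly falling [3, 2, 1, 0] has 0 of 3 (ratio 0.0 <= 0.2 -> False).
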
    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        # Zero-pad seconds to two digits and keep the two decimal places
        return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"

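    # --- Worked example (illustrative): 3723.456 seconds is 1h 2m 3.456s,
    # so _format_timestamp_for_video(3723.456) -> "01:02:03.46".
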
    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get formatted current timestamp based on stream type."""

        if not stream_info:
            return "00:00:00.00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                # Derive the video-relative time; the stream_time value below
                # remains the authoritative result returned to the caller.
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)

                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)

            stream_time_str = self._format_timestamp_for_video(start_time)

            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except Exception:
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"

        if precision:
            if self.start_timer is None:
                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
                return self._format_timestamp(self.start_timer)
            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
                self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
                return self._format_timestamp(self.start_timer)
            else:
                return self._format_timestamp(self.start_timer)

        if self.start_timer is None:
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
            return self._format_timestamp(self.start_timer)

        else:
            if self.start_timer is not None:
                return self._format_timestamp(self.start_timer)

            if self._tracking_start_time is None:
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except Exception:
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()

            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _format_timestamp(self, timestamp: Any) -> str:
        """Format a timestamp so that exactly two digits follow the decimal point (milliseconds).

        The input can be either:
        1. A numeric Unix timestamp (``float`` / ``int``) – it will first be converted to a
           string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
        2. A string already following the same layout.

        The returned value preserves the overall format of the input but truncates or pads
        the fractional seconds portion to **exactly two digits**.

        Example
        -------
        >>> self._format_timestamp("2025-08-19-04:22:47.187574 UTC")
        '2025-08-19-04:22:47.18 UTC'
        """

        # Convert numeric timestamps to the expected string representation first
        if isinstance(timestamp, (int, float)):
            timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime(
                '%Y-%m-%d-%H:%M:%S.%f UTC'
            )

        # Ensure we are working with a string from here on
        if not isinstance(timestamp, str):
            return str(timestamp)

        # If there is no fractional component, simply return the original string
        if '.' not in timestamp:
            return timestamp

        # Split out the main portion (up to the decimal point)
        main_part, fractional_and_suffix = timestamp.split('.', 1)

        # Separate fractional digits from the suffix (typically ' UTC')
        if ' ' in fractional_and_suffix:
            fractional_part, suffix = fractional_and_suffix.split(' ', 1)
            suffix = ' ' + suffix  # Re-attach the space removed by split
        else:
            fractional_part, suffix = fractional_and_suffix, ''

        # Guarantee exactly two digits for the fractional part
        fractional_part = (fractional_part + '00')[:2]

        return f"{main_part}.{fractional_part}{suffix}"

    def _get_tracking_start_time(self) -> str:
        """Get the tracking start time, formatted as a string."""
        if self._tracking_start_time is None:
            return "N/A"
        return self._format_timestamp(self._tracking_start_time)

    def _set_tracking_start_time(self) -> None:
        """Set the tracking start time to the current time."""
        self._tracking_start_time = time.time()

    def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: ColorDetectionConfig) -> Dict[str, Dict[str, Any]]:
        """Update zone tracking with current frame data."""
        if not zone_analysis or not config.zone_config or not config.zone_config['zones']:
            return {}

        enhanced_zone_analysis = {}
        zones = config.zone_config['zones']

        # Initialize current frame zone tracks
        current_frame_zone_tracks = {zone_name: set() for zone_name in zones.keys()}

        # Initialize zone tracking storage
        for zone_name in zones.keys():
            if zone_name not in self._zone_current_track_ids:
                self._zone_current_track_ids[zone_name] = set()
            if zone_name not in self._zone_total_track_ids:
                self._zone_total_track_ids[zone_name] = set()

        # Check each detection against each zone
        for detection in detections:
            track_id = detection.get("track_id")
            if track_id is None:
                continue

            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue

            # Check which zone this detection is in
            for zone_name, zone_polygon in zones.items():
                if self._is_in_zone(bbox, zone_polygon):
                    current_frame_zone_tracks[zone_name].add(track_id)
                    if track_id not in self.color_det_dict:  # Use color_det_dict for consistency
                        self.color_det_dict[track_id] = [detection.get("main_color", "unknown"), detection.get("confidence", 0.0)]

        # Update zone tracking for each zone
        for zone_name, zone_counts in zone_analysis.items():
            current_tracks = current_frame_zone_tracks.get(zone_name, set())
            self._zone_current_track_ids[zone_name] = current_tracks
            self._zone_total_track_ids[zone_name].update(current_tracks)
            self._zone_current_counts[zone_name] = len(current_tracks)
            self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])

            enhanced_zone_analysis[zone_name] = {
                "current_count": self._zone_current_counts[zone_name],
                "total_count": self._zone_total_counts[zone_name],
                "current_track_ids": list(current_tracks),
                "total_track_ids": list(self._zone_total_track_ids[zone_name]),
                "original_counts": zone_counts
            }

        return enhanced_zone_analysis

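    # --- Usage sketch (illustrative): given config.zone_config = {"zones":
    # {"entrance": [[0, 0], [100, 0], [100, 100], [0, 100]]}} and one tracked
    # detection whose bottom-25% center falls inside that polygon, the method
    # returns per-zone state along the lines of:
    #
    #     {"entrance": {"current_count": 1, "total_count": 1,
    #                   "current_track_ids": [7], "total_track_ids": [7],
    #                   "original_counts": {...}}}
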
    def _compute_iou(self, box1: Any, box2: Any) -> float:
        """Compute IoU between two bounding boxes which may be dicts or lists.
        Falls back to 0 when insufficient data is available."""

        # Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
        def _bbox_to_list(bbox):
            if bbox is None:
                return []
            if isinstance(bbox, list):
                return bbox[:4] if len(bbox) >= 4 else []
            if isinstance(bbox, dict):
                if "xmin" in bbox:
                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                if "x1" in bbox:
                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
                # Fallback: first four numeric values
                values = [v for v in bbox.values() if isinstance(v, (int, float))]
                return values[:4] if len(values) >= 4 else []
            return []

        l1 = _bbox_to_list(box1)
        l2 = _bbox_to_list(box2)
        if len(l1) < 4 or len(l2) < 4:
            return 0.0
        x1_min, y1_min, x1_max, y1_max = l1
        x2_min, y2_min, x2_max, y2_max = l2

        # Ensure correct order
        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)

        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)

        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h

        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area

        return (inter_area / union_area) if union_area > 0 else 0.0

    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID, merging fragmented
        tracks when IoU and temporal constraints indicate they represent the
        same physical object."""
        if raw_id is None or bbox is None:
            # Nothing to merge
            return raw_id

        now = time.time()

        # Fast path – raw_id already mapped
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id

        # Attempt to merge with an existing canonical track
        for canonical_id, info in self._canonical_tracks.items():
            # Only consider recently updated tracks
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                # Merge
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id

        # No match – register new canonical track
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id
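
    # --- Usage sketch (illustrative): if raw tracker ID 7 is registered and a
    # new raw ID 12 arrives within _track_merge_time_window with a box whose
    # IoU against the stored box is >= _track_merge_iou_threshold, both IDs
    # resolve to the same canonical track:
    #
    #     use_case._merge_or_register_track(7, {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10})   # -> 7
    #     use_case._merge_or_register_track(12, {"xmin": 1, "ymin": 0, "xmax": 11, "ymax": 10})  # -> 7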