matrice_analytics-0.1.60-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,724 @@
from typing import Any, Dict, List, Optional
from dataclasses import asdict
import time
from datetime import datetime, timezone

from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    count_objects_by_category,
    count_objects_in_zones,
    calculate_counting_summary,
    match_results_structure,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)
from dataclasses import dataclass, field
from ..core.config import BaseConfig, AlertConfig, ZoneConfig

@dataclass
class TrafficSignMonitoringConfig(BaseConfig):
    """Configuration for traffic sign monitoring use case."""
    # Smoothing configuration
    enable_smoothing: bool = True
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5

    # Confidence thresholds
    confidence_threshold: float = 0.6

    usecase_categories: List[str] = field(
        default_factory=lambda: [
            "Green Light", "Red Light", "Speed Limit 10", "Speed Limit 100",
            "Speed Limit 110", "Speed Limit 120", "Speed Limit 20", "Speed Limit 30",
            "Speed Limit 40", "Speed Limit 50", "Speed Limit 60", "Speed Limit 70",
            "Speed Limit 80", "Speed Limit 90", "Stop"
        ]
    )

    target_categories: List[str] = field(
        default_factory=lambda: [
            "Green Light", "Red Light", "Speed Limit 10", "Speed Limit 100",
            "Speed Limit 110", "Speed Limit 120", "Speed Limit 20", "Speed Limit 30",
            "Speed Limit 40", "Speed Limit 50", "Speed Limit 60", "Speed Limit 70",
            "Speed Limit 80", "Speed Limit 90", "Stop"
        ]
    )

    alert_config: Optional[AlertConfig] = None

    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            -1: "Green Light",
            0: "Red Light",
            1: "Speed Limit 10",
            2: "Speed Limit 100",
            3: "Speed Limit 110",
            4: "Speed Limit 120",
            5: "Speed Limit 20",
            6: "Speed Limit 30",
            7: "Speed Limit 40",
            8: "Speed Limit 50",
            9: "Speed Limit 60",
            10: "Speed Limit 70",
            11: "Speed Limit 80",
            12: "Speed Limit 90",
            13: "Stop"
        }
    )

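# --- Illustrative sketch (not part of the package): building a config ---
# Field names come from TrafficSignMonitoringConfig above; the AlertConfig
# keyword arguments are assumptions based on the attributes this module later
# reads via getattr() (count_thresholds, alert_type, alert_value).
#
#     config = TrafficSignMonitoringConfig(
#         confidence_threshold=0.5,
#         enable_smoothing=True,
#         alert_config=AlertConfig(count_thresholds={"all": 15}),
#     )
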
class TrafficSignMonitoringUseCase(BaseProcessor):
    # Human-friendly display names for categories
    CATEGORY_DISPLAY = {
        "Green Light": "Green Light",
        "Red Light": "Red Light",
        "Speed Limit 10": "Speed Limit 10",
        "Speed Limit 100": "Speed Limit 100",
        "Speed Limit 110": "Speed Limit 110",
        "Speed Limit 120": "Speed Limit 120",
        "Speed Limit 20": "Speed Limit 20",
        "Speed Limit 30": "Speed Limit 30",
        "Speed Limit 40": "Speed Limit 40",
        "Speed Limit 50": "Speed Limit 50",
        "Speed Limit 60": "Speed Limit 60",
        "Speed Limit 70": "Speed Limit 70",
        "Speed Limit 80": "Speed Limit 80",
        "Speed Limit 90": "Speed Limit 90",
        "Stop": "Stop"
    }

    def __init__(self):
        super().__init__("traffic_sign_monitoring")
        self.category = "traffic"

        self.CASE_TYPE: Optional[str] = 'traffic_sign_monitoring'
        self.CASE_VERSION: Optional[str] = '1.0'

        # List of categories to track
        self.target_categories = [
            "Green Light", "Red Light", "Speed Limit 10", "Speed Limit 100",
            "Speed Limit 110", "Speed Limit 120", "Speed Limit 20", "Speed Limit 30",
            "Speed Limit 40", "Speed Limit 50", "Speed Limit 60", "Speed Limit 70",
            "Speed Limit 80", "Speed Limit 90", "Stop"
        ]

        # Initialize smoothing tracker
        self.smoothing_tracker = None

        # Initialize advanced tracker
        self.tracker = None

        # Initialize tracking state variables
        self._total_frame_counter = 0
        self._global_frame_offset = 0

        # Track start time for "TOTAL SINCE" calculation
        self._tracking_start_time = None

        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        self._track_merge_iou_threshold: float = 0.05
        self._track_merge_time_window: float = 7.0

        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"

    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
        """
        Main entry point for traffic sign post-processing.
        Applies category mapping, smoothing, counting, alerting, and summary generation.
        """
        start_time = time.time()
        if not isinstance(config, TrafficSignMonitoringConfig):
            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
                                            context=context)
        if context is None:
            context = ProcessingContext()

        # Detect input format
        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold

        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(data, config.confidence_threshold)
            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
        else:
            processed_data = data
            self.logger.debug("Did not apply confidence filtering")

        # Apply category mapping
        if config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)
            self.logger.debug("Applied category mapping")

        if config.target_categories:
            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
            self.logger.debug("Applied category filtering")

        # Apply bbox smoothing
        if config.enable_smoothing:
            if self.smoothing_tracker is None:
                smoothing_config = BBoxSmoothingConfig(
                    smoothing_algorithm=config.smoothing_algorithm,
                    window_size=config.smoothing_window_size,
                    cooldown_frames=config.smoothing_cooldown_frames,
                    confidence_threshold=config.confidence_threshold,
                    confidence_range_factor=config.smoothing_confidence_range_factor,
                    enable_smoothing=True
                )
                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

        # Advanced tracking
        try:
            from ..advanced_tracker import AdvancedTracker
            from ..advanced_tracker.config import TrackerConfig

            if self.tracker is None:
                tracker_config = TrackerConfig()
                self.tracker = AdvancedTracker(tracker_config)
                self.logger.info("Initialized AdvancedTracker for Traffic Sign Monitoring")
            processed_data = self.tracker.update(processed_data)

        except Exception as e:
            self.logger.warning(f"AdvancedTracker failed: {e}")

        # Update tracking state
        self._update_tracking_state(processed_data)
        self._total_frame_counter += 1

        # Extract frame information
        frame_number = None
        if stream_info:
            input_settings = stream_info.get("input_settings", {})
            start_frame = input_settings.get("start_frame")
            end_frame = input_settings.get("end_frame")
            if start_frame is not None and end_frame is not None and start_frame == end_frame:
                frame_number = start_frame

        # Compute summaries and alerts
        general_counting_summary = calculate_counting_summary(data)
        counting_summary = self._count_categories(processed_data, config)
        total_counts = self.get_total_counts()
        counting_summary['total_counts'] = total_counts
        alerts = self._check_alerts(counting_summary, frame_number, config)
        predictions = self._extract_predictions(processed_data)

        # Generate structured outputs
        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=True)
        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

        # Extract frame-based dictionaries
        incidents = incidents_list[0] if incidents_list else {}
        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
        business_analytics = business_analytics_list[0] if business_analytics_list else {}
        summary = summary_list[0] if summary_list else {}
        agg_summary = {str(frame_number): {
            "incidents": incidents,
            "tracking_stats": tracking_stats,
            "business_analytics": business_analytics,
            "alerts": alerts,
            "human_text": summary}
        }

        context.mark_completed()
        result = self.create_result(
            data={"agg_summary": agg_summary},
            usecase=self.name,
            category=self.category,
            context=context
        )
        return result

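    # --- Illustration (not part of the package): expected shape of `data` ---
    # Inferred from the keys read in process() and the helpers below; the exact
    # schema is defined by the package's ..utils functions, so treat this as an
    # assumption.
    #
    #     data = [
    #         {"category": 13, "confidence": 0.91,   # 13 -> "Stop" via index_to_category
    #          "bounding_box": {"xmin": 120, "ymin": 40, "xmax": 180, "ymax": 110}},
    #         {"category": 0, "confidence": 0.74,    # 0 -> "Red Light"
    #          "bounding_box": {"xmin": 300, "ymin": 60, "xmax": 340, "ymax": 140}},
    #     ]
    #
    # After apply_category_mapping() and tracking, each dict also carries a string
    # "category" and a "track_id"; "frame_id" and "masks" are optional.
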
    def _check_alerts(self, summary: dict, frame_number: Any, config: TrafficSignMonitoringConfig) -> List[Dict]:
        """
        Check if alert thresholds are exceeded for traffic signs.
        """
        def get_trend(data, lookback=900, threshold=0.6):
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            ratio = increasing / total
            return ratio >= threshold

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)
        total_counts_dict = summary.get("total_counts", {})
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
        per_category_count = summary.get("per_category_count", {})

        if not config.alert_config:
            return alerts

        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total_detections > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                    })
                elif category in per_category_count and per_category_count[category] > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": f"alert_{category}_{frame_key}",
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                    })
        return alerts

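    # --- Illustration (not part of the package): how get_trend() above behaves ---
    # It reports whether the recent severity codes are mostly non-decreasing.
    # Example: for [0, 1, 1, 2, 1] there are 4 steps, 3 of them non-decreasing,
    # so ratio = 3/4 = 0.75 and get_trend(..., threshold=0.8) returns False;
    # for [0, 1, 1, 2, 3] the ratio is 4/4 = 1.0 and it returns True.
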
    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: TrafficSignMonitoringConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate structured incidents for traffic sign monitoring."""
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)
                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            human_text_lines = [f"TRAFFIC SIGN INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
                })

            event = self.create_incident(
                incident_id=f"{self.CASE_TYPE}_{frame_number}",
                incident_type=self.CASE_TYPE,
                severity_level=level,
                human_text=human_text,
                camera_info=camera_info,
                alerts=alerts,
                alert_settings=alert_settings,
                start_time=start_timestamp,
                end_time=self.current_incident_end_timestamp,
                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
            )
            incidents.append(event)
        else:
            self._ascending_alert_list.append(0)
            incidents.append({})
        return incidents

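    # --- Illustration (not part of the package): the severity mapping above ---
    # With alert_config.count_thresholds = {"all": 15} and 12 current detections:
    # intensity = min(10.0, 12 / 15 * 10) = 8.0, which falls in the 7-9 band,
    # so the incident level is "significant" and 2 is appended to
    # self._ascending_alert_list (the series later consumed by get_trend()).
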
    def _generate_tracking_stats(
            self,
            counting_summary: Dict,
            alerts: List,
            config: TrafficSignMonitoringConfig,
            frame_number: Optional[int] = None,
            stream_info: Optional[Dict[str, Any]] = None
    ) -> List[Dict]:
        """Generate structured tracking stats for traffic sign monitoring."""
        camera_info = self.get_camera_info_from_stream(stream_info)
        tracking_stats = []

        total_detections = counting_summary.get("total_count", 0)
        total_counts_dict = counting_summary.get("total_counts", {})
        per_category_count = counting_summary.get("per_category_count", {})

        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items()]

        detections = []
        for detection in counting_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("category", "traffic_sign")
            if detection.get("masks"):
                detection_obj = self.create_detection_object(category, bbox, segmentation=detection.get("masks"))
            else:
                detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)

        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
            })

        human_text_lines = [f"Tracking Statistics:"]
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
        for cat, count in per_category_count.items():
            human_text_lines.append(f"\t{cat}: {count}")
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
        for cat, count in total_counts_dict.items():
            if count > 0:
                human_text_lines.append(f"\t{cat}: {count}")
        human_text_lines.append(f"Alerts: {alerts[0].get('settings', {})} sent @ {current_timestamp}" if alerts else "Alerts: None")
        human_text = "\n".join(human_text_lines)

        reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]

        tracking_stat = self.create_tracking_stats(
            total_counts=total_counts,
            current_counts=current_counts,
            detections=detections,
            human_text=human_text,
            camera_info=camera_info,
            alerts=alerts,
            alert_settings=alert_settings,
            reset_settings=reset_settings,
            start_time=high_precision_start_timestamp,
            reset_time=high_precision_reset_timestamp
        )
        tracking_stats.append(tracking_stat)
        return tracking_stats

    def _generate_business_analytics(self, counting_summary: Dict, alerts: List, config: TrafficSignMonitoringConfig,
                                     frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None,
                                     is_empty=False) -> List[Dict]:
        """Generate standardized business analytics for traffic sign monitoring."""
        if is_empty:
            return []
        # Implement business analytics if needed
        return []

    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
        """Generate human-readable summary for traffic sign monitoring."""
        lines = {}
        lines["Application Name"] = self.CASE_TYPE
        lines["Application Version"] = self.CASE_VERSION
        if len(incidents) > 0:
            lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
        if len(tracking_stats) > 0:
            lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
        if len(business_analytics) > 0:
            lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines["Summary"] = "No Summary Data"
        return [lines]

    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
        """Get detailed information about track IDs."""
        frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
        total_track_ids = set()
        for s in getattr(self, '_per_category_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }

    def _update_tracking_state(self, detections: list):
        """Track unique traffic sign track_ids per category."""
        if not hasattr(self, "_per_category_total_track_ids"):
            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

        for det in detections:
            cat = det.get("category")
            raw_track_id = det.get("track_id")
            if cat not in self.target_categories or raw_track_id is None:
                continue
            bbox = det.get("bounding_box", det.get("bbox"))
            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
            det["track_id"] = canonical_id
            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
            self._current_frame_track_ids[cat].add(canonical_id)

    def get_total_counts(self):
        """Return total unique track_id count for each category."""
        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}

    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        """Format timestamp for streams."""
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

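    # --- Illustration (not part of the package): _format_timestamp_for_video() ---
    # For timestamp = 3725.456 seconds: hours = 1, minutes = 2, seconds = 5.46,
    # and the f-string renders "01:02:5.5" (one decimal place, seconds not
    # zero-padded), despite the HH:MM:SS.ms wording in the docstring.
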
    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get formatted current timestamp based on stream type."""
        if not stream_info:
            return "00:00:00.00"
        # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                stream_time_str = self._format_timestamp_for_video(start_time)
                return stream_time_str
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
            stream_time_str = self._format_timestamp_for_video(start_time)
            return stream_time_str
        else:
            # For streams, use stream_time from stream_info
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                # Parse the high precision timestamp string to get timestamp
                try:
                    # Remove " UTC" suffix and parse
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except:
                    # Fallback to current time if parsing fails
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                return "00:00:00"
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            # If video format, start from 00:00:00
            return "00:00:00"
        else:
            # For streams, use tracking start time or current time with minutes/seconds reset
            if self._tracking_start_time is None:
                # Try to extract timestamp from stream_time string
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        # Remove " UTC" suffix and parse
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except:
                        # Fallback to current time if parsing fails
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()

            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _count_categories(self, detections: list, config: TrafficSignMonitoringConfig) -> dict:
        """Count detections per traffic sign category."""
        counts = {}
        for det in detections:
            cat = det.get('category', 'unknown')
            counts[cat] = counts.get(cat, 0) + 1
        return {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": [
                {
                    "bounding_box": det.get("bounding_box"),
                    "category": det.get("category"),
                    "confidence": det.get("confidence"),
                    "track_id": det.get("track_id"),
                    "frame_id": det.get("frame_id")
                }
                for det in detections
            ]
        }

    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
        """Extract prediction details for traffic signs."""
        return [
            {
                "category": det.get("category", "unknown"),
                "confidence": det.get("confidence", 0.0),
                "bounding_box": det.get("bounding_box", {})
            }
            for det in detections
        ]

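    # --- Illustration (not part of the package): _count_categories() output ---
    # For two mapped detections ("Stop" and "Red Light") the summary looks like:
    #     {"total_count": 2,
    #      "per_category_count": {"Stop": 1, "Red Light": 1},
    #      "detections": [ ...one trimmed dict per detection... ]}
    # process() then adds "total_counts" (cumulative unique track IDs per category).
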
    def _compute_iou(self, box1: Any, box2: Any) -> float:
        """Compute IoU between two bounding boxes."""
        def _bbox_to_list(bbox):
            if bbox is None:
                return []
            if isinstance(bbox, list):
                return bbox[:4] if len(bbox) >= 4 else []
            if isinstance(bbox, dict):
                if "xmin" in bbox:
                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                if "x1" in bbox:
                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
                values = [v for v in bbox.values() if isinstance(v, (int, float))]
                return values[:4] if len(values) >= 4 else []
            return []

        l1 = _bbox_to_list(box1)
        l2 = _bbox_to_list(box2)
        if len(l1) < 4 or len(l2) < 4:
            return 0.0
        x1_min, y1_min, x1_max, y1_max = l1
        x2_min, y2_min, x2_max, y2_max = l2

        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)

        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)

        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h

        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area

        return (inter_area / union_area) if union_area > 0 else 0.0

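    # --- Illustration (not part of the package): a worked IoU value ---
    # box1 = {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10} and box2 = [5, 5, 15, 15]
    # overlap on a 5 x 5 patch: inter_area = 25, union = 100 + 100 - 25 = 175,
    # so IoU = 25 / 175 ~= 0.143, well above the 0.05 merge threshold used below.
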
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID."""
        if raw_id is None or bbox is None:
            return raw_id
        now = time.time()
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id

        for canonical_id, info in self._canonical_tracks.items():
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id

        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id

    def _format_timestamp(self, timestamp: float) -> str:
        """Format timestamp for human-readable output."""
        return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')

    def _get_tracking_start_time(self) -> str:
        """Get formatted tracking start time."""
        if self._tracking_start_time is None:
            return "N/A"
        return self._format_timestamp(self._tracking_start_time)

    def _set_tracking_start_time(self) -> None:
        """Set tracking start time to current time."""
        self._tracking_start_time = time.time()
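A minimal, untested usage sketch (not taken from the package's documentation): it assumes matrice-analytics is installed, that the import path below matches the file layout listed above, that the BaseProcessor helpers (create_result, logging, the optional AdvancedTracker) work without extra setup, and that the detection schema matches the keys this module reads.

from matrice_analytics.post_processing.usecases.traffic_sign_monitoring import (
    TrafficSignMonitoringConfig,
    TrafficSignMonitoringUseCase,
)

# Raw model output: integer class indices plus confidences and boxes (assumed shape).
detections = [
    {"category": 13, "confidence": 0.91,
     "bounding_box": {"xmin": 120, "ymin": 40, "xmax": 180, "ymax": 110}},   # -> "Stop"
    {"category": 0, "confidence": 0.74,
     "bounding_box": {"xmin": 300, "ymin": 60, "xmax": 340, "ymax": 140}},   # -> "Red Light"
]

config = TrafficSignMonitoringConfig(confidence_threshold=0.5)
usecase = TrafficSignMonitoringUseCase()
result = usecase.process(detections, config)       # returns a ProcessingResult
# The per-frame incidents, tracking stats, and alerts were passed to create_result()
# as data={"agg_summary": ...}; the attribute name for reading it back is assumed here.
print(result.data["agg_summary"])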