matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1936 @@
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
from datetime import datetime, timezone
import tempfile
import os
import cv2
import copy
import numpy as np
from collections import defaultdict
import time
from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
from ..core.config import BaseConfig, AlertConfig, ZoneConfig
from ..utils import (
    filter_by_confidence,
    filter_by_categories,
    apply_category_mapping,
    match_results_structure,
    count_objects_by_category,
    calculate_counting_summary,
    count_objects_in_zones,
    bbox_smoothing,
    BBoxSmoothingConfig,
    BBoxSmoothingTracker
)
from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
from ..usecases.color.clip import ClipProcessor
import sys
from pathlib import Path
import logging
import subprocess
import shutil

@dataclass
class ColorDetectionConfig(BaseConfig):
    """Configuration for color detection use case."""
    confidence_threshold: float = 0.9
    top_k_colors: int = 3
    frame_skip: int = 1
    usecase: str = "color_detection"
    usecase_categories: List[str] = field(
        default_factory=lambda: [
            "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
            "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog",
            "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
            "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
            "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
            "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
            "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
            "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
            "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book",
            "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
        ]
    )
    target_categories: List[str] = field(
        default_factory=lambda: ["car", "bicycle", "bus", "motorcycle"]
    )
    fps: Optional[float] = None
    bbox_format: str = "auto"
    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "person", 1: "bicycle", 2: "car", 3: "motorcycle", 4: "airplane", 5: "bus",
            6: "train", 7: "truck", 8: "boat", 9: "traffic light", 10: "fire hydrant",
            11: "stop sign", 12: "parking meter", 13: "bench", 14: "bird", 15: "cat",
            16: "dog", 17: "horse", 18: "sheep", 19: "cow", 20: "elephant", 21: "bear",
            22: "zebra", 23: "giraffe", 24: "backpack", 25: "umbrella", 26: "handbag",
            27: "tie", 28: "suitcase", 29: "frisbee", 30: "skis", 31: "snowboard",
            32: "sports ball", 33: "kite", 34: "baseball bat", 35: "baseball glove",
            36: "skateboard", 37: "surfboard", 38: "tennis racket", 39: "bottle",
            40: "wine glass", 41: "cup", 42: "fork", 43: "knife", 44: "spoon", 45: "bowl",
            46: "banana", 47: "apple", 48: "sandwich", 49: "orange", 50: "broccoli",
            51: "carrot", 52: "hot dog", 53: "pizza", 54: "donut", 55: "cake", 56: "chair",
            57: "couch", 58: "potted plant", 59: "bed", 60: "dining table", 61: "toilet",
            62: "tv", 63: "laptop", 64: "mouse", 65: "remote", 66: "keyboard",
            67: "cell phone", 68: "microwave", 69: "oven", 70: "toaster", 71: "sink",
            72: "refrigerator", 73: "book", 74: "clock", 75: "vase", 76: "scissors",
            77: "teddy bear", 78: "hair drier", 79: "toothbrush"
        }
    )
    alert_config: Optional[AlertConfig] = None
    time_window_minutes: int = 60
    enable_unique_counting: bool = True
    enable_smoothing: bool = False
    smoothing_algorithm: str = "observability"
    smoothing_window_size: int = 20
    smoothing_cooldown_frames: int = 5
    smoothing_confidence_range_factor: float = 0.5
    enable_detector: bool = True

    # JBK_720_GATE POLYGON = [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]]
    zone_config: Optional[Dict[str, List[List[float]]]] = None  # field(
    #     default_factory=lambda: {
    #         "zones": {
    #             "Interest_Region": [[86, 328], [844, 317], [1277, 520], [1273, 707], [125, 713]],
    #         }
    #     }
    # )
    # true_import: bool = False

    def validate(self) -> List[str]:
        errors = super().validate()
        if self.confidence_threshold < 0 or self.confidence_threshold > 1:
            errors.append("confidence_threshold must be between 0 and 1")
        if self.top_k_colors <= 0:
            errors.append("top_k_colors must be positive")
        if self.frame_skip <= 0:
            errors.append("frame_skip must be positive")
        if self.bbox_format not in ["auto", "xmin_ymin_xmax_ymax", "x_y_width_height"]:
            errors.append("bbox_format must be one of: auto, xmin_ymin_xmax_ymax, x_y_width_height")
        if self.smoothing_window_size <= 0:
            errors.append("smoothing_window_size must be positive")
        if self.smoothing_cooldown_frames < 0:
            errors.append("smoothing_cooldown_frames cannot be negative")
        if self.smoothing_confidence_range_factor <= 0:
            errors.append("smoothing_confidence_range_factor must be positive")
        return errors

    # def __post_init__(self):
    #     # Lazy initialization: the ClipProcessor will be created once by the use case
    #     # to avoid repeated model downloads and to ensure GPU session reuse.
    #     # log_file = open("pip_jetson_bt.log", "w")
    #     # cmd = ["pip", "install", "--force-reinstall", "huggingface_hub", "regex", "safetensors"]
    #     # subprocess.Popen(
    #     #     cmd,
    #     #     stdout=log_file,
    #     #     stderr=subprocess.STDOUT,
    #     #     preexec_fn=os.setpgrp
    #     # )
    #     print("Came to post_init and libraries installed!!!")
    #     if self.detector:
    #         self.detector = ClipProcessor()
    #         print("ClipProcessor Loaded Successfully!!")
    #     else:
    #         print("Clip color detector disabled by config")
    #         self.detector = None

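# A minimal usage sketch (editorial illustration, not part of the released
# file), assuming BaseConfig.validate() returns a list of error strings, which
# the override above extends:
#
#     cfg = ColorDetectionConfig(confidence_threshold=0.5, top_k_colors=3)
#     cfg.validate()   # -> [] once the base-class checks also pass
#
#     bad = ColorDetectionConfig(confidence_threshold=1.5, frame_skip=0)
#     bad.validate()
#     # -> ["confidence_threshold must be between 0 and 1",
#     #     "frame_skip must be positive"]
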
class ColorDetectionUseCase(BaseProcessor):
    """Color detection processor for analyzing object colors in video streams with tracking."""
    CATEGORY_DISPLAY = {
        "bicycle": "Bicycle", "car": "Car", "motorbike": "Motorbike", "auto rickshaw": "Auto Rickshaw",
        "bus": "Bus", "garbagevan": "Garbage Van", "truck": "Truck", "minibus": "Minibus",
        "army vehicle": "Army Vehicle", "pickup": "Pickup", "policecar": "Police Car",
        "rickshaw": "Rickshaw", "scooter": "Scooter", "suv": "SUV", "taxi": "Taxi",
        "three wheelers -CNG-": "Three Wheelers (CNG)", "human hauler": "Human Hauler",
        "van": "Van", "wheelbarrow": "Wheelbarrow"
    }

    def __init__(self):
        super().__init__("color_detection")
        self.category = "visual_appearance"

        self.target_categories = ["car", "bicycle", "bus", "motorcycle"]

        self.CASE_TYPE: Optional[str] = 'color_detection'
        self.CASE_VERSION: Optional[str] = '1.3'

        self.tracker = None  # AdvancedTracker instance
        self.smoothing_tracker = None  # BBoxSmoothingTracker instance
        self._total_frame_counter = 0  # Total frames processed
        self._global_frame_offset = 0  # Frame offset for new sessions
        self._color_total_track_ids = defaultdict(set)  # Cumulative track IDs per category-color
        self._color_current_frame_track_ids = defaultdict(set)  # Per-frame track IDs per category-color

        self._tracking_start_time = None

        self._track_aliases: Dict[Any, Any] = {}
        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
        # Tunable parameters – adjust if necessary for specific scenarios
        self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → tracks are merge candidates
        self._track_merge_time_window: float = 7.0  # seconds within which to merge

        self._ascending_alert_list: List[int] = []
        self.current_incident_end_timestamp: str = "N/A"
        self.color_det_dict = {}
        self.start_timer = None
        # Zone-based tracking storage
        self._zone_current_track_ids = {}  # zone_name -> set of current track IDs in zone
        self._zone_total_track_ids = {}  # zone_name -> set of all track IDs that have been in zone
        self._zone_current_counts = {}  # zone_name -> current count in zone
        self._zone_total_counts = {}  # zone_name -> total count that have been in zone
        self.logger.info("Initialized ColorDetectionUseCase with tracking")
        self.detector = None  # Will be initialized on first use
        self.all_color_data = {}
        self.all_color_counts = {}
        self.total_category_count = {}
        self.category_color = {}
        self.vehicle_tracks = {}
        self.vehicle_stats = defaultdict(lambda: defaultdict(int))
        self.zone_vehicle_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
        # self.jpeg = TurboJPEG()

    # data, config, ProcessingContext(), stream_info, input_bytes
    def process(
        self,
        data: Any,
        config: ConfigProtocol,
        input_bytes: Optional[bytes] = None,
        context: Optional[ProcessingContext] = None,
        stream_info: Optional[Dict[str, Any]] = None
    ) -> ProcessingResult:
        processing_start = time.time()

        try:
            cwd = os.getcwd()
            print("Current working directory:", cwd)
            if not isinstance(config, ColorDetectionConfig):
                return self.create_error_result(
                    "Invalid configuration type for color detection",
                    usecase=self.name,
                    category=self.category,
                    context=context
                )

            # if config.true_import and self.detector is None:
            #     self.detector = ClipProcessor()
            #     self.logger.info("Initialized ClipProcessor for color detection")

            if context is None:
                context = ProcessingContext()

            if not input_bytes:
                self.logger.warning("input_bytes is required for color detection")

            if not data:
                self.logger.warning("Detection data is required for color detection")

            input_format = match_results_structure(data)
            context.input_format = input_format
            context.confidence_threshold = config.confidence_threshold

            self.logger.info(f"Processing color detection with format: {input_format.value}")

            # Step 1: Apply confidence filtering
            processed_data = filter_by_confidence(data, config.confidence_threshold)

            # Step 2: Apply category mapping if provided
            if config.index_to_category:
                color_processed_data = apply_category_mapping(processed_data, config.index_to_category)

                color_processed_data = [d for d in color_processed_data if d['category'] in self.target_categories]

            raw_processed_data = [copy.deepcopy(det) for det in color_processed_data]
            # Step 3: Apply bounding box smoothing if enabled
            if config.enable_smoothing:
                if self.smoothing_tracker is None:
                    smoothing_config = BBoxSmoothingConfig(
                        smoothing_algorithm=config.smoothing_algorithm,
                        window_size=config.smoothing_window_size,
                        cooldown_frames=config.smoothing_cooldown_frames,
                        confidence_threshold=config.confidence_threshold,
                        confidence_range_factor=config.smoothing_confidence_range_factor,
                        enable_smoothing=True
                    )
                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
                color_processed_data = bbox_smoothing(color_processed_data, self.smoothing_tracker.config, self.smoothing_tracker)

            # Step 4: Apply advanced tracking
            try:
                from ..advanced_tracker import AdvancedTracker
                from ..advanced_tracker.config import TrackerConfig

                if self.tracker is None:
                    tracker_config = TrackerConfig()
                    self.tracker = AdvancedTracker(tracker_config)
                    self.logger.info("Initialized AdvancedTracker for color detection tracking")

                color_processed_data = self.tracker.update(color_processed_data)

            except Exception as e:
                self.logger.warning(f"AdvancedTracker failed: {e}")

            color_processed_data = self._attach_masks_to_detections(color_processed_data, raw_processed_data)
            self._total_frame_counter += 1

            frame_number = None
            if stream_info:
                input_settings = stream_info.get("input_settings", {})
                start_frame = input_settings.get("start_frame")
                end_frame = input_settings.get("end_frame")
                # If start and end frame are the same, it's a single frame
                if start_frame is not None and end_frame is not None and start_frame == end_frame:
                    frame_number = start_frame

            # Step 7: Analyze colors in media
            color_analysis = self._analyze_colors_in_media(
                color_processed_data,
                input_bytes,
                config
            )
            if config.zone_config:
                color_processed_data = self._is_in_zone_robust(color_processed_data, config.zone_config)
                print(color_processed_data)

            # Initialize detector lazily on first use if enabled
            try:
                self.logger.debug("About to call process_color_in_frame...")

                if config.enable_detector and self.detector is None:
                    self.logger.info("Initializing ClipProcessor for color detection...")
                    try:
                        self.detector = ClipProcessor()
                        self.logger.info("ClipProcessor loaded successfully!")
                    except Exception as init_error:
                        self.logger.error(f"Failed to initialize ClipProcessor: {init_error}")
                        self.detector = None

                if self.detector is None:
                    self.logger.warning("Detector is disabled or failed to initialize, skipping color detection")
                    curr_frame_color = {}
                else:
                    self.logger.debug(f"Processing {len(color_processed_data)} detections for color classification")
                    curr_frame_color = self.detector.process_color_in_frame(
                        color_processed_data,
                        input_bytes,
                        config.zone_config,
                        stream_info,
                    )
                    self.logger.debug("process_color_in_frame completed successfully")
            except Exception as e:
                self.logger.error(f"ERROR in process_color_in_frame: {e}", exc_info=True)
                curr_frame_color = {}

            self.update_vehicle_stats(curr_frame_color)
            self._update_color_tracking_state_from_analysis(color_analysis)

            # Step 9: Calculate summaries
            color_summary = self._calculate_color_summary(color_analysis, config)
            totals = self.get_total_color_counts()
            if not totals:
                tmp = defaultdict(set)
                for rec in color_analysis:
                    color = rec.get('main_color')
                    tid = rec.get('track_id') or rec.get('detection_id')
                    if color and tid is not None:
                        tmp[color].add(tid)
                totals = {color: len(ids) for color, ids in tmp.items()}
            total_category_counts = self.get_total_category_counts(color_processed_data)
            color_summary['total_color_counts'] = totals
            color_summary['total_category_counts'] = total_category_counts

            general_summary = self._calculate_general_summary(processed_data, config)
            new_color_summary = self.merge_color_summary(color_processed_data, curr_frame_color)

            # Step 10: Zone analysis
            self.color_helper(curr_frame_color)

            zone_analysis = {}
            if config.zone_config and config.zone_config['zones']:
                frame_data = color_processed_data
                zone_analysis = count_objects_in_zones(frame_data, config.zone_config['zones'], stream_info)
                if zone_analysis and config.enable_unique_counting:
                    enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, color_processed_data, config)
                    for zone_name, enhanced_data in enhanced_zone_analysis.items():
                        zone_analysis[zone_name] = enhanced_data

            # Step 11: Generate alerts, incidents, tracking stats, and summary
            alerts = self._check_alerts(color_summary, frame_number, config)

            incidents_list = self._generate_incidents(color_summary, alerts, config, frame_number, stream_info)
            incidents_list = []

            tracking_stats_list = self._generate_tracking_stats(new_color_summary, color_summary, alerts, config, curr_frame_color, frame_number, stream_info)

            business_analytics_list = []
            summary_list = self._generate_summary(color_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

            incidents = incidents_list[0] if incidents_list else {}
            tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
            business_analytics = business_analytics_list[0] if business_analytics_list else {}
            summary = summary_list[0] if summary_list else {}
            agg_summary = {str(frame_number): {
                "incidents": incidents,
                "tracking_stats": tracking_stats,
                "business_analytics": business_analytics,
                "alerts": alerts,
                "zone_analysis": zone_analysis,
                "human_text": summary}
            }

            context.mark_completed()

            # Build result object following the new pattern
            result = self.create_result(
                data={"agg_summary": agg_summary},
                usecase=self.name,
                category=self.category,
                context=context
            )
            proc_time = time.time() - processing_start
            processing_latency_ms = proc_time * 1000.0
            processing_fps = (1.0 / proc_time) if proc_time > 0 else None
            print("latency in ms:", processing_latency_ms, "| Throughput fps:", processing_fps, "| Frame_Number:", self._total_frame_counter)
            return result

        except Exception as e:
            self.logger.error(f"Color detection failed: {str(e)}", exc_info=True)
            if context:
                context.mark_completed()
            return self.create_error_result(
                str(e),
                type(e).__name__,
                usecase=self.name,
                category=self.category,
                context=context
            )

    def update_vehicle_stats(self, frame_detections: dict):
        """
        Update global vehicle statistics ensuring uniqueness per track_id and per zone.
        If the same vehicle (track_id) is seen again:
        - Ignore if confidence is lower.
        - Update its color if confidence is higher.
        """

        # Ensure zone-level data structures exist
        if not hasattr(self, "zone_vehicle_stats"):
            self.zone_vehicle_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))

        for _, det in frame_detections.items():
            track_id = det.get('track_id')
            if track_id is None:
                continue

            vehicle_type = det.get('object_label', 'unknown').lower()
            color = det.get('color', 'unknown').lower()
            conf = det.get('confidence', 0.0)
            zone = det.get('zone_name', 'Unknown_Zone')

            # If this track_id is new → add and count
            if track_id not in self.vehicle_tracks:
                self.vehicle_tracks[track_id] = {
                    'object_label': vehicle_type,
                    'color': color,
                    'confidence': conf,
                    'zone': zone
                }
                self.vehicle_stats[vehicle_type][color] += 1
                self.zone_vehicle_stats[zone][vehicle_type][color] += 1

            else:
                existing = self.vehicle_tracks[track_id]
                if conf > existing['confidence']:
                    old_color = existing['color']
                    old_zone = existing.get('zone', zone)
                    old_type = existing.get('object_label', vehicle_type)

                    # Decrease old counts
                    self.vehicle_stats[old_type][old_color] -= 1
                    if self.vehicle_stats[old_type][old_color] <= 0:
                        del self.vehicle_stats[old_type][old_color]

                    self.zone_vehicle_stats[old_zone][old_type][old_color] -= 1
                    if self.zone_vehicle_stats[old_zone][old_type][old_color] <= 0:
                        del self.zone_vehicle_stats[old_zone][old_type][old_color]

                    # Update track info
                    self.vehicle_tracks[track_id].update({
                        'color': color,
                        'confidence': conf,
                        'zone': zone,
                    })

                    # Increase new counts
                    self.vehicle_stats[vehicle_type][color] += 1
                    self.zone_vehicle_stats[zone][vehicle_type][color] += 1

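    # Illustrative sketch (editorial example, not in the released file): the
    # highest-confidence observation wins for a repeated track_id. It assumes
    # the per-frame dict shape produced by process_color_in_frame (keyed by
    # detection index, values carrying track_id/object_label/color/confidence):
    #
    #     uc = ColorDetectionUseCase()
    #     uc.update_vehicle_stats({0: {'track_id': 7, 'object_label': 'Car',
    #                                  'color': 'Blue', 'confidence': 0.6,
    #                                  'zone_name': 'Gate'}})
    #     uc.update_vehicle_stats({0: {'track_id': 7, 'object_label': 'Car',
    #                                  'color': 'White', 'confidence': 0.9,
    #                                  'zone_name': 'Gate'}})
    #     uc.get_vehicle_stats()   # -> {'car': {'white': 1}}; the 0.6 'blue'
    #                              #    entry is replaced, not double-counted
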
    def merge_color_summary(self, detections_data: List[Dict[str, Any]], curr_frame_color: Dict[int, Dict[str, Any]]) -> Dict[str, Any]:
        """
        Combine base detections with current frame color information and produce a color summary.
        Returns a structure similar to _calculate_color_summary().
        """

        category_colors = defaultdict(lambda: defaultdict(int))
        detections = []
        counts = {}

        # Merge detections with color info
        for record in detections_data:
            track_id = record.get("track_id")
            category = record.get("category", "unknown")
            conf = record.get("confidence", 0.0)
            bbox = record.get("bounding_box", {})
            frame_id = record.get("frame_id")
            zone_name = record.get("zone_name", "Unknown")

            # Get color from curr_frame_color
            main_color = "unknown"
            if track_id in curr_frame_color:
                main_color = curr_frame_color[track_id].get("color", "unknown")

            category_colors[category][main_color] += 1
            counts[category] = counts.get(category, 0) + 1

            detections.append({
                "bounding_box": bbox,
                "category": category,
                "confidence": conf,
                "track_id": track_id,
                "frame_id": frame_id,
                "main_color": main_color,
                "zone_name": zone_name
            })

        # Flatten color distribution
        all_colors = defaultdict(int)
        for category_data in category_colors.values():
            for color, count in category_data.items():
                all_colors[color] += count

        # Find dominant color per category
        dominant_colors = {}
        for category, colors in category_colors.items():
            if colors:
                color, count = max(colors.items(), key=lambda x: x[1])
                dominant_colors[category] = {
                    "color": color,
                    "count": count,
                    "percentage": round((count / sum(colors.values())) * 100, 1)
                }

        # Final summary dict
        summary = {
            "total_count": sum(counts.values()),
            "per_category_count": counts,
            "detections": detections,
            "color_distribution": dict(all_colors),
            "dominant_colors": dominant_colors
        }

        return summary

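    # Illustrative sketch (editorial example, not in the released file):
    # merging tracker output with per-track colors, assuming the bounding_box
    # and curr_frame_color shapes used elsewhere in this module and an
    # instance uc = ColorDetectionUseCase():
    #
    #     dets = [{"track_id": 1, "category": "car", "confidence": 0.95,
    #              "bounding_box": {"xmin": 10, "ymin": 20,
    #                               "xmax": 110, "ymax": 90},
    #              "frame_id": "0"}]
    #     colors = {1: {"color": "red", "confidence": 0.8}}
    #     s = uc.merge_color_summary(dets, colors)
    #     s["dominant_colors"]
    #     # -> {"car": {"color": "red", "count": 1, "percentage": 100.0}}
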
    def get_vehicle_stats(self):
        """Return the current global vehicle statistics as a normal dictionary."""
        return {vtype: dict(colors) for vtype, colors in self.vehicle_stats.items()}

    def _is_in_zone_robust(self, detections, zones):
        if not detections:
            return []
        new_data = []
        for det in detections:
            bbox = det.get('bounding_box')
            cx, cy = get_bbox_bottom25_center(bbox)
            for zone, region in zones.items():
                for reg, poly in region.items():
                    if point_in_polygon((cx, cy), poly):
                        det['zone_name'] = reg
                        new_data.append(det)
        return new_data

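    # Illustrative sketch (editorial example, not in the released file): the
    # nested zone_config shape _is_in_zone_robust expects, taken from the
    # commented-out default in ColorDetectionConfig above:
    #
    #     zone_config = {"zones": {"Interest_Region": [[86, 328], [844, 317],
    #                                                  [1277, 520], [1273, 707],
    #                                                  [125, 713]]}}
    #     kept = uc._is_in_zone_robust(detections, zone_config)
    #     # each kept detection gains det['zone_name'] = "Interest_Region";
    #     # detections whose bottom-25% center falls in no polygon are dropped
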
    def color_helper(self, curr_data):
        if curr_data is None:
            return
        for tid, data in curr_data.items():
            if tid not in self.all_color_data:
                # First time seeing this track
                self.all_color_data[tid] = {
                    "color": data.get("color"),
                    "confidence": data.get("confidence"),
                }

                # update color counts
                color = data.get("color")
                if color:
                    self.all_color_counts[color] = self.all_color_counts.get(color, 0) + 1

            else:
                # Update only if new confidence is higher
                if data.get("confidence", 0) > self.all_color_data[tid]["confidence"]:
                    old_color = self.all_color_data[tid]["color"]
                    new_color = data.get("color")

                    if new_color != old_color:
                        # decrease old color count
                        if old_color in self.all_color_counts:
                            self.all_color_counts[old_color] -= 1
                            if self.all_color_counts[old_color] <= 0:
                                del self.all_color_counts[old_color]

                        # increase new color count
                        if new_color:
                            self.all_color_counts[new_color] = self.all_color_counts.get(new_color, 0) + 1

                    # update track info
                    self.all_color_data[tid]["color"] = new_color
                    self.all_color_data[tid]["confidence"] = data.get("confidence")
        # return self.all_color_data

    def _analyze_colors_in_media(
        self,
        data: Any,
        media_bytes: bytes,
        config: ColorDetectionConfig
    ) -> List[Dict[str, Any]]:
        """Analyze colors of detected objects in video frames or images."""
        return self._analyze_colors_in_image(data, media_bytes, config)

    def _update_color_tracking_state_from_analysis(self, color_analysis: List[Dict[str, Any]]) -> None:
        """Update total tracking store using analyzed color results.

        Ensures totals are populated even if pre-analysis detections lacked colors/track_ids."""
        existing_store = getattr(self, '_color_total_track_ids', None)
        if not isinstance(existing_store, defaultdict):
            existing_store = {} if existing_store is None else dict(existing_store)
            self._color_total_track_ids = defaultdict(set, existing_store)
        else:
            self._color_total_track_ids = existing_store
        # Reset current frame tracking for this frame
        self._color_current_frame_track_ids = defaultdict(set)

        for rec in color_analysis:
            cat = rec.get('category')
            color = rec.get('main_color')
            track_id = rec.get('track_id')
            major_colors = rec.get('major_colors') or []
            # Safely extract color confidence
            if major_colors and isinstance(major_colors[0], (list, tuple)) and len(major_colors[0]) > 2:
                color_conf = major_colors[0][2]
            else:
                color_conf = 0.0
            if track_id is None:
                track_id = rec.get('detection_id')
            if cat and track_id is not None:
                # Update the color_det_dict with the actual color
                if color and track_id in self.color_det_dict:
                    existing_color, existing_conf = self.color_det_dict.get(track_id, [None, -1])
                    if color_conf > existing_conf and color != existing_color:
                        # Move this track_id from any previous color bucket(s) to the new one
                        for k in list(self._color_total_track_ids.keys()):
                            if track_id in self._color_total_track_ids[k]:
                                self._color_total_track_ids[k].discard(track_id)
                        # Update assignment
                        self.color_det_dict[track_id] = [color, color_conf]
                        new_key = f"{cat}:{color}" if color else cat
                        self._color_total_track_ids[new_key].add(track_id)
                        # Update current frame tracking
                        self._color_current_frame_track_ids[new_key].add(track_id)
                    elif color_conf > existing_conf:
                        # Confidence improved but color unchanged; update confidence only
                        self.color_det_dict[track_id] = [existing_color, color_conf]
                        same_key = f"{cat}:{existing_color}" if existing_color else cat
                        self._color_current_frame_track_ids[same_key].add(track_id)
                    else:
                        # No improvement; still reflect in current frame under existing color
                        same_key = f"{cat}:{existing_color}" if existing_color else cat
                        self._color_current_frame_track_ids[same_key].add(track_id)
                elif color and track_id not in self.color_det_dict:
                    # First assignment for this track
                    self.color_det_dict[track_id] = [color, color_conf]
                    key = f"{cat}:{color}" if color else cat
                    self._color_total_track_ids[key].add(track_id)
                    # Also update current frame tracking
                    self._color_current_frame_track_ids[key].add(track_id)

    def _is_video_bytes(self, media_bytes: bytes) -> bool:
        """Determine if bytes represent a video file."""
        # Check common video file signatures
        video_signatures = [
            b'\x00\x00\x00\x20ftypmp4',  # MP4
            b'\x00\x00\x00\x18ftypmp4',  # MP4 variant
            b'RIFF',  # AVI
            b'\x1aE\xdf\xa3',  # MKV/WebM
            b'ftyp',  # General MP4 family
        ]

        for signature in video_signatures:
            if media_bytes.startswith(signature) or signature in media_bytes[:50]:
                return True
        return False

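    # Illustrative sketch (editorial example, not in the released file): the
    # magic-byte check above applied to sample headers; the signature list is
    # a heuristic over container magics, not an exhaustive sniffer:
    #
    #     uc._is_video_bytes(b'\x00\x00\x00\x20ftypmp4' + b'\x00' * 64)  # True
    #     uc._is_video_bytes(b'RIFF\x00\x00\x00\x00AVI LIST')            # True
    #     uc._is_video_bytes(b'\xff\xd8\xff\xe0' + b'\x00' * 64)         # False (JPEG)
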
    def _analyze_colors_in_image(
        self,
        data: Any,
        image_bytes: bytes,
        config: ColorDetectionConfig
    ) -> List[Dict[str, Any]]:
        image_array = np.frombuffer(image_bytes, np.uint8)
        image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        # image = self.jpeg.decode(image_bytes, pixel_format=TJPF_RGB)

        if image is None:
            raise RuntimeError("Failed to decode image from bytes")

        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        color_analysis = []
        detections = self._get_frame_detections(data, "0")

        for detection in detections:
            if detection.get("confidence", 1.0) < config.confidence_threshold:
                continue

            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue

            # Check all zones
            zones = config.zone_config['zones'] if config.zone_config else {}
            in_any_zone = not zones
            zone_name = None
            for z_name, zone_polygon in zones.items():
                if self._is_in_zone(bbox, zone_polygon):
                    in_any_zone = True
                    zone_name = z_name
                    break
            if not in_any_zone:
                continue  # Skip detections outside zones

            # crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
            # if crop.size == 0:
            #     continue

            # major_colors = extract_major_colors(crop, k=config.top_k_colors)
            # main_color = major_colors[0][0] if major_colors else "unknown"
            main_color = "unknown"
            major_colors = []

            color_record = {
                "frame_id": "0",
                "timestamp": 0.0,
                "category": detection.get("category", "unknown"),
                "confidence": round(detection.get("confidence", 0.0), 3),
                "main_color": main_color,
                "major_colors": major_colors,
                "bbox": bbox,
                "detection_id": detection.get("id", f"det_{len(color_analysis)}"),
                "track_id": detection.get("track_id"),
                "zone_name": zone_name
            }
            color_analysis.append(color_record)

        return color_analysis

    def _get_frame_detections(self, data: Any, frame_key: str) -> List[Dict[str, Any]]:
        """Extract detections for a specific frame from data."""
        if isinstance(data, dict):
            # Frame-based format
            return data.get(frame_key, [])
        elif isinstance(data, list):
            # List format (single frame or all detections)
            return data
        else:
            return []

    def _crop_bbox(self, image: np.ndarray, bbox: Dict[str, Any], bbox_format: str) -> np.ndarray:
        """Crop bounding box region from image."""
        h, w = image.shape[:2]

        # Auto-detect bbox format
        if bbox_format == "auto":
            if "xmin" in bbox:
                bbox_format = "xmin_ymin_xmax_ymax"
            elif "x" in bbox:
                bbox_format = "x_y_width_height"
            else:
                return np.zeros((0, 0, 3), dtype=np.uint8)

        # Extract coordinates based on format
        if bbox_format == "xmin_ymin_xmax_ymax":
            xmin = max(0, int(bbox["xmin"]))
            ymin = max(0, int(bbox["ymin"]))
            xmax = min(w, int(bbox["xmax"]))
            ymax = min(h, int(bbox["ymax"]))
        elif bbox_format == "x_y_width_height":
            xmin = max(0, int(bbox["x"]))
            ymin = max(0, int(bbox["y"]))
            xmax = min(w, int(bbox["x"] + bbox["width"]))
            ymax = min(h, int(bbox["y"] + bbox["height"]))
        else:
            return np.zeros((0, 0, 3), dtype=np.uint8)

        return image[ymin:ymax, xmin:xmax]

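    # Illustrative sketch (editorial example, not in the released file): the
    # two bbox layouts the auto-detection above distinguishes; both describe
    # the same region and yield the same crop:
    #
    #     img = np.zeros((480, 640, 3), dtype=np.uint8)
    #     uc._crop_bbox(img, {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 90},
    #                   "auto").shape   # -> (70, 100, 3)
    #     uc._crop_bbox(img, {"x": 10, "y": 20, "width": 100, "height": 70},
    #                   "auto").shape   # -> (70, 100, 3)
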
def _calculate_color_summary(self, color_analysis: List[Dict], config: ColorDetectionConfig) -> Dict[str, Any]:
|
|
781
|
+
category_colors = defaultdict(lambda: defaultdict(int))
|
|
782
|
+
total_detections = len(color_analysis)
|
|
783
|
+
detections = []
|
|
784
|
+
counts = {}
|
|
785
|
+
for record in color_analysis:
|
|
786
|
+
category = record["category"]
|
|
787
|
+
main_color = record["main_color"]
|
|
788
|
+
category_colors[category][main_color] += 1
|
|
789
|
+
counts[category] = counts.get(category, 0) + 1
|
|
790
|
+
detections.append({
|
|
791
|
+
"bounding_box": record["bbox"],
|
|
792
|
+
"category": record["category"],
|
|
793
|
+
"confidence": record["confidence"],
|
|
794
|
+
"track_id": record["track_id"],
|
|
795
|
+
"frame_id": record["frame_id"],
|
|
796
|
+
"main_color": record["main_color"]
|
|
797
|
+
})
|
|
798
|
+
|
|
799
|
+
|
|
800
|
+
self.logger.debug(f"Valid detections after filtering: {len(detections)}")
|
|
801
|
+
summary = {
|
|
802
|
+
"total_count": sum(counts.values()),
|
|
803
|
+
"per_category_count": counts,
|
|
804
|
+
"detections": detections,
|
|
805
|
+
"dominant_colors": {},
|
|
806
|
+
"zone_counts": self._zone_current_counts if config.zone_config and config.zone_config['zones'] else {}
|
|
807
|
+
}
|
|
808
|
+
|
|
809
|
+
|
|
810
|
+
all_colors = defaultdict(int)
|
|
811
|
+
for category_data in category_colors.values():
|
|
812
|
+
for color, count in category_data.items():
|
|
813
|
+
all_colors[color] += count
|
|
814
|
+
summary["color_distribution"] = dict(all_colors)
|
|
815
|
+
|
|
816
|
+
|
|
817
|
+
for category, colors in category_colors.items():
|
|
818
|
+
if colors:
|
|
819
|
+
if "dominant_colors" not in summary:
|
|
820
|
+
summary["dominant_colors"] = {}
|
|
821
|
+
else:
|
|
822
|
+
dominant_color = max(colors.items(), key=lambda x: x[1])
|
|
823
|
+
summary["dominant_colors"][category] = {
|
|
824
|
+
"color": dominant_color[0],
|
|
825
|
+
"count": dominant_color[1],
|
|
826
|
+
"percentage": round((dominant_color[1] / sum(colors.values())) * 100, 1)
|
|
827
|
+
}
|
|
828
|
+
|
|
829
|
+
|
|
830
|
+
return summary
|
|
831
|
+
|
|
832
|
+
def _calculate_general_summary(self, processed_data: Any, config: ColorDetectionConfig) -> Dict[str, Any]:
|
|
833
|
+
"""Calculate general detection summary."""
|
|
834
|
+
|
|
835
|
+
# Count objects by category
|
|
836
|
+
category_counts = defaultdict(int)
|
|
837
|
+
total_objects = 0
|
|
838
|
+
|
|
839
|
+
if isinstance(processed_data, dict):
|
|
840
|
+
# Frame-based format
|
|
841
|
+
for frame_data in processed_data.values():
|
|
842
|
+
if isinstance(frame_data, list):
|
|
843
|
+
for detection in frame_data:
|
|
844
|
+
if detection.get("confidence", 1.0) >= config.confidence_threshold:
|
|
845
|
+
category = detection.get("category", "unknown")
|
|
846
|
+
category_counts[category] += 1
|
|
847
|
+
total_objects += 1
|
|
848
|
+
elif isinstance(processed_data, list):
|
|
849
|
+
# List format
|
|
850
|
+
for detection in processed_data:
|
|
851
|
+
if detection.get("confidence", 1.0) >= config.confidence_threshold:
|
|
852
|
+
category = detection.get("category", "unknown")
|
|
853
|
+
category_counts[category] += 1
|
|
854
|
+
total_objects += 1
|
|
855
|
+
|
|
856
|
+
return {
|
|
857
|
+
"total_objects": total_objects,
|
|
858
|
+
"category_counts": dict(category_counts),
|
|
859
|
+
"categories_detected": list(category_counts.keys())
|
|
860
|
+
}
|
|
861
|
+
|
|
862
|
+
    def _calculate_metrics(self, color_analysis: List[Dict], color_summary: Dict, config: ColorDetectionConfig, context: ProcessingContext) -> Dict[str, Any]:
        """Calculate detailed metrics for analytics."""
        total_detections = len(color_analysis)
        unique_colors = len(color_summary.get("color_distribution", {}))

        metrics = {
            "total_detections": total_detections,
            "unique_colors": unique_colors,
            "categories_analyzed": len(color_summary.get("categories", {})),
            "processing_time": context.processing_time or 0.0,
            "input_format": context.input_format.value,
            "confidence_threshold": config.confidence_threshold,
            "color_diversity": 0.0,
            "detection_rate": 0.0,
            "average_colors_per_detection": config.top_k_colors
        }

        # Calculate color diversity (unique colors per 100 detections)
        if total_detections > 0:
            metrics["color_diversity"] = (unique_colors / total_detections) * 100

        # Calculate detection rate (detections per hour over the configured window)
        if config.time_window_minutes and config.time_window_minutes > 0:
            metrics["detection_rate"] = (total_detections / config.time_window_minutes) * 60

        # Per-category metrics
        if color_summary.get("categories"):
            category_metrics = {}
            for category, colors in color_summary["categories"].items():
                category_total = sum(colors.values())
                category_metrics[category] = {
                    "count": category_total,
                    "unique_colors": len(colors),
                    "color_diversity": (len(colors) / category_total) * 100 if category_total > 0 else 0
                }
            metrics["category_metrics"] = category_metrics

        # Processing settings
        metrics["processing_settings"] = {
            "confidence_threshold": config.confidence_threshold,
            "top_k_colors": config.top_k_colors,
            "frame_skip": config.frame_skip,
            "target_categories": config.target_categories,
            "enable_unique_counting": config.enable_unique_counting
        }

        return metrics

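    # Worked example of the two derived metrics above: 40 detections spanning
    # 8 unique colors over a 30-minute window give
    #   color_diversity = (8 / 40) * 100 = 20.0   (unique colors per 100 detections)
    #   detection_rate  = (40 / 30) * 60 = 80.0   (detections per hour)
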
    def _extract_predictions(self, color_analysis: List[Dict], config: ColorDetectionConfig) -> List[Dict]:
        """Extract predictions in standard format."""
        predictions = []
        for record in color_analysis:
            prediction = {
                "category": record["category"],
                "confidence": record["confidence"],
                "bbox": record["bbox"],
                "frame_id": record["frame_id"],
                "timestamp": record["timestamp"],
                "main_color": record["main_color"],
                "major_colors": record["major_colors"]
            }
            if "detection_id" in record:
                prediction["id"] = record["detection_id"]
            predictions.append(prediction)

        return predictions

    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
        """
        Generate a human-readable summary string from the incidents, tracking stats,
        business analytics and alerts.
        """
        lines = []
        lines.append("Application Name: " + self.CASE_TYPE)
        lines.append("Application Version: " + self.CASE_VERSION)
        if len(incidents) > 0:
            lines.append(f"Incidents: \n\t{incidents[0].get('human_text', 'No incidents detected')}")
        if len(tracking_stats) > 0:
            lines.append(f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
        if len(business_analytics) > 0:
            lines.append(f"Business Analytics: \t{business_analytics[0].get('human_text', 'No business analytics detected')}")

        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
            lines.append("Summary: No Summary Data")

        return ["\n".join(lines)]

    def _generate_events(self, color_summary: Dict, alerts: List, config: ColorDetectionConfig, frame_number: Optional[int] = None) -> List[Dict]:
        """Generate structured events with frame-based keys."""
        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        events = [{frame_key: []}]
        frame_events = events[0][frame_key]
        total_detections = color_summary.get("total_detections", 0)

        if total_detections > 0:
            level = "info"
            intensity = min(10.0, total_detections / 5.0)
            if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 20)
                intensity = min(10.0, (total_detections / threshold) * 10)
                level = "critical" if intensity >= 7 else "warning" if intensity >= 5 else "info"
            elif total_detections > 50:
                level = "critical"
                intensity = 9.0
            elif total_detections > 25:
                level = "warning"
                intensity = 7.0

            event = {
                "type": "color_detection",
                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
                "level": level,
                "intensity": round(intensity, 1),
                "config": {
                    "min_value": 0,
                    "max_value": 10,
                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
                },
                "application_name": "Color Detection System",
                "application_version": "1.2",
                "location_info": None,
                "human_text": (
                    f"Event: Color Detection\nLevel: {level.title()}\n"
                    f"Time: {datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')}\n"
                    f"Detections: {total_detections} objects analyzed\n"
                    f"Unique Colors: {len(color_summary.get('color_distribution', {}))}\n"
                    f"Intensity: {intensity:.1f}/10"
                )
            }
            frame_events.append(event)

        for alert in alerts:
            alert_event = {
                "type": alert.get("type", "color_alert"),
                "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
                "level": alert.get("severity", "warning"),
                "intensity": 8.0,
                "config": {
                    "min_value": 0,
                    "max_value": 10,
                    "level_settings": {"info": 2, "warning": 5, "critical": 7}
                },
                "application_name": "Color Detection Alert System",
                "application_version": "1.2",
                "location_info": alert.get("category"),
                "human_text": f"Event: {alert.get('type', 'Color Alert').title()}\nMessage: {alert.get('message', 'Color detection alert triggered')}"
            }
            frame_events.append(alert_event)

        return events

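    # Worked example of the intensity scaling above: with a configured
    # count_thresholds of {"all": 20} and 12 detections,
    #   intensity = min(10.0, (12 / 20) * 10) = 6.0
    # which falls in the [5, 7) band and is therefore reported as "warning".
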
    def _generate_tracking_stats(
        self,
        new_color_summary: Dict,
        counting_summary: Dict,
        alerts: Any,
        config: ColorDetectionConfig,
        curr_frame_color: Any,
        total_color_data: Any,
        frame_number: Optional[int] = None,
        stream_info: Optional[Dict[str, Any]] = None,
    ) -> List[Dict]:
        """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
        # frame_key = str(frame_number) if frame_number is not None else "current_frame"
        # tracking_stats = [{frame_key: []}]
        # frame_tracking_stats = tracking_stats[0][frame_key]
        tracking_stats = []

        total_detections = counting_summary.get("total_count", 0)
        total_color_counts_dict = counting_summary.get("total_color_counts", {})
        total_category_counts_dict = counting_summary.get("total_category_counts", {})
        # cumulative_total = sum(total_color_counts_dict.values()) if total_color_counts_dict else 0
        per_category_count = counting_summary.get("per_category_count", {})

        # Compute current color counts from detections
        current_color_count: Dict[str, int] = {}
        for det in counting_summary.get("detections", []):
            color = det.get("main_color")
            if color:
                current_color_count[color] = current_color_count.get(color, 0) + 1

        track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))

        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)

        # Create high-precision timestamps for input_timestamp and reset_timestamp
        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

        camera_info = self.get_camera_info_from_stream(stream_info)
        # total_color_data = self.color_helper(curr_frame_color)

        human_text_lines = []
        color_counts = {}

        if curr_frame_color:
            for tid, data in curr_frame_color.items():
                color = data.get("color")
                if color not in color_counts:
                    color_counts[color] = 0
                color_counts[color] += 1

        zone_frame_data = {}
        if curr_frame_color:
            for tid, data in curr_frame_color.items():
                zone = data.get("zone_name", "Unknown_Zone")
                color = data.get("color", "unknown")
                category = data.get("object_label", "unknown")

                if zone not in zone_frame_data:
                    zone_frame_data[zone] = {
                        "color_counts": {},
                        "category_counts": {}
                    }

                # Count colors
                zone_frame_data[zone]["color_counts"][color] = (
                    zone_frame_data[zone]["color_counts"].get(color, 0) + 1
                )

                # Count vehicle types
                zone_frame_data[zone]["category_counts"][category] = (
                    zone_frame_data[zone]["category_counts"].get(category, 0) + 1
                )

        # CURRENT FRAME section
        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
        if not curr_frame_color or total_detections == 0:
            human_text_lines.append("\t- No detections")
        else:
            for zone_name, stats in zone_frame_data.items():
                color_counts = stats["color_counts"]
                per_category_count = stats["category_counts"]
                if config.zone_config:
                    human_text_lines.append(f"\t{zone_name}:")
                if per_category_count:
                    category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
                    if len(category_counts) == 1:
                        detection_text = category_counts[0] + " detected"
                    elif len(category_counts) == 2:
                        detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
                    else:
                        detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
                    human_text_lines.append(f"\t\t- {detection_text}")

                if color_counts:
                    color_counts_text = ", ".join([f"{count} {color}" for color, count in color_counts.items()])
                    human_text_lines.append(f"\t\t- Colors: {color_counts_text}")

        human_text_lines.append("")  # spacing

        cumulative_total = sum(self.all_color_counts.values())
        stats = self.zone_vehicle_stats

        # TOTAL SINCE section
        human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
        for zone_name, vehicles in stats.items():
            total_in_zone = sum(sum(colors.values()) for colors in vehicles.values())
            if config.zone_config:
                human_text_lines.append(f"\t{zone_name}:")
            human_text_lines.append(f"\t\t- Total Detected: {total_in_zone}")

            for vehicle_type, colors in vehicles.items():
                total_type_count = sum(colors.values())
                human_text_lines.append(f"\t\t- {vehicle_type}: {total_type_count}")
                for color, count in colors.items():
                    human_text_lines.append(f"\t\t\t- {color}: {count}")

        current_counts_categories = []
        for cat, count in per_category_count.items():
            if count > 0 or total_detections > 0:
                current_counts_categories.append({"category": cat, "count": count})
        current_counts_colors = []
        for color, count in current_color_count.items():
            if count > 0 or total_detections > 0:
                current_counts_colors.append({"color": color, "count": count})
        total_counts_categories = []
        for cat, count in total_category_counts_dict.items():
            if count > 0 or cumulative_total > 0:
                total_counts_categories.append({"category": cat, "count": count})
        total_counts_colors = []
        for color, count in total_color_counts_dict.items():
            if count > 0 or cumulative_total > 0:
                # Note: the color is stored under the "category" key so the
                # downstream total_counts consumer sees a uniform schema.
                total_counts_colors.append({"category": color, "count": count})

        human_text = "\n".join(human_text_lines)

        # Include detections with masks from counting_summary.
        # Prepare detections without confidence scores (as per eg.json).
        detections = []
        for detection in new_color_summary.get("detections", []):
            bbox = detection.get("bounding_box", {})
            category = detection.get("main_color", "No_color")
            # Include segmentation if available (like in eg.json)
            if detection.get("masks"):
                segmentation = detection.get("masks", [])
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("segmentation"):
                segmentation = detection.get("segmentation")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            elif detection.get("mask"):
                segmentation = detection.get("mask")
                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
            else:
                detection_obj = self.create_detection_object(category, bbox)
            detections.append(detection_obj)

        # Build the alert_settings array in the expected format
        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
            })

        if alerts:
            for alert in alerts:
                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
        else:
            human_text_lines.append("Alerts: None")

        human_text = "\n".join(human_text_lines)
        reset_settings = [
            {
                "interval_type": "daily",
                "reset_time": {
                    "value": 9,
                    "time_unit": "hour"
                }
            }
        ]

        # Keep backward-compat: put colors into total_counts and categories into
        # current_counts. start_time carries the high-precision current-frame
        # timestamp and reset_time the high-precision start timestamp, as
        # computed above.
        tracking_stat = self.create_tracking_stats(
            total_counts=total_counts_colors, current_counts=current_counts_categories,
            detections=detections, human_text=human_text, camera_info=camera_info,
            alerts=alerts, alert_settings=alert_settings, reset_settings=reset_settings,
            start_time=high_precision_start_timestamp,
            reset_time=high_precision_reset_timestamp)

        tracking_stats.append(tracking_stat)
        return tracking_stats

    def reset_tracker(self) -> None:
        """Reset the advanced tracker instance."""
        if self.tracker is not None:
            self.tracker.reset()
            self.logger.info("AdvancedTracker reset for new tracking session")

    def reset_color_tracking(self) -> None:
        """Reset color tracking state."""
        self._color_total_track_ids = defaultdict(set)
        self._color_current_frame_track_ids = defaultdict(set)
        self._total_frame_counter = 0
        self._global_frame_offset = 0
        self.logger.info("Color tracking state reset")

    def reset_all_tracking(self) -> None:
        """Reset both the advanced tracker and the color tracking state."""
        self.reset_tracker()
        self.reset_color_tracking()
        self.logger.info("All color tracking state reset")

    def _is_in_zone(self, bbox: Dict[str, Any], zone_polygon: List[List[int]]) -> bool:
        """Check if the bottom-25% center point of a bounding box lies within the given zone polygon."""
        if not zone_polygon or not isinstance(bbox, dict):
            return True  # No zone defined or invalid bbox; process all detections
        try:
            # Get the bottom-25% center point
            center_point = get_bbox_bottom25_center(bbox)
            # Convert the zone polygon to a list of tuples
            polygon_points = [(point[0], point[1]) for point in zone_polygon]
            # Check whether the point is inside the polygon
            in_zone = point_in_polygon(center_point, polygon_points)
            self.logger.debug(f"BBox center {center_point} in zone: {in_zone}")
            return in_zone
        except (KeyError, TypeError) as e:
            self.logger.warning(f"Failed to check zone for bbox {bbox}: {e}")
            return False

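    # Usage sketch (assumes get_bbox_bottom25_center returns the horizontal
    # center of the box at a point inside its bottom quarter): for a square
    # zone [(0, 0), (100, 0), (100, 100), (0, 100)], a bbox whose bottom edge
    # lies inside the square counts as in-zone even if its top extends beyond
    # it, which suits ground-plane membership tests for vehicles.
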
    @staticmethod
    def _iou(bbox1, bbox2):
        """Compute IoU between two bboxes (dicts with xmin/ymin/xmax/ymax or x/y/width/height)."""
        if "xmin" in bbox1:
            x1 = max(bbox1["xmin"], bbox2["xmin"])
            y1 = max(bbox1["ymin"], bbox2["ymin"])
            x2 = min(bbox1["xmax"], bbox2["xmax"])
            y2 = min(bbox1["ymax"], bbox2["ymax"])
            area1 = (bbox1["xmax"] - bbox1["xmin"]) * (bbox1["ymax"] - bbox1["ymin"])
            area2 = (bbox2["xmax"] - bbox2["xmin"]) * (bbox2["ymax"] - bbox2["ymin"])
        else:
            x1 = max(bbox1["x"], bbox2["x"])
            y1 = max(bbox1["y"], bbox2["y"])
            x2 = min(bbox1["x"] + bbox1["width"], bbox2["x"] + bbox2["width"])
            y2 = min(bbox1["y"] + bbox1["height"], bbox2["y"] + bbox2["height"])
            area1 = bbox1["width"] * bbox1["height"]
            area2 = bbox2["width"] * bbox2["height"]
        inter_w = max(0, x2 - x1)
        inter_h = max(0, y2 - y1)
        inter_area = inter_w * inter_h
        union = area1 + area2 - inter_area
        return inter_area / union if union > 0 else 0.0

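    # Worked example:
    #   b1 = {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10}
    #   b2 = {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15}
    # intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175,
    # so _iou(b1, b2) = 25 / 175, approximately 0.143.
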
    @staticmethod
    def _deduplicate_detections(detections, iou_thresh=0.7):
        """Suppress duplicate/overlapping detections of the same category with high IoU."""
        filtered = []
        used = [False] * len(detections)
        for i, det in enumerate(detections):
            if used[i]:
                continue
            group = [i]
            for j in range(i + 1, len(detections)):
                if used[j]:
                    continue
                if det.get("category") == detections[j].get("category"):
                    bbox1 = det.get("bounding_box", det.get("bbox"))
                    bbox2 = detections[j].get("bounding_box", detections[j].get("bbox"))
                    if bbox1 and bbox2 and ColorDetectionUseCase._iou(bbox1, bbox2) > iou_thresh:
                        used[j] = True
                        group.append(j)
            best_idx = max(group, key=lambda idx: detections[idx].get("confidence", 0))
            filtered.append(detections[best_idx])
            used[best_idx] = True
        return filtered

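    # Example: two "car" boxes with IoU 0.8 and confidences 0.9 and 0.6 are
    # grouped (0.8 > the default iou_thresh of 0.7) and only the 0.9 detection
    # survives; a "bus" box at the same location is kept regardless, because
    # detections are only grouped within the same category.
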
    def get_config_schema(self) -> Dict[str, Any]:
        """Get the JSON schema for configuration validation."""
        return {
            "type": "object",
            "properties": {
                "confidence_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.5},
                "top_k_colors": {"type": "integer", "minimum": 1, "default": 3},
                "frame_skip": {"type": "integer", "minimum": 1, "default": 1},
                "target_categories": {"type": ["array", "null"], "items": {"type": "string"}, "default": [
                    "car", "bicycle", "bus", "motorcycle"
                ]},
                "fps": {"type": ["number", "null"], "minimum": 1.0, "default": None},
                "bbox_format": {"type": "string", "enum": ["auto", "xmin_ymin_xmax_ymax", "x_y_width_height"], "default": "auto"},
                "index_to_category": {"type": ["object", "null"], "default": None},
                "alert_config": {"type": ["object", "null"], "default": None}
            },
            "required": ["confidence_threshold", "top_k_colors"],
            "additionalProperties": False
        }

    def create_default_config(self, **overrides) -> ColorDetectionConfig:
        """Create a default configuration with optional overrides."""
        defaults = {
            "category": self.category,
            "usecase": self.name,
            "confidence_threshold": 0.5,
            "top_k_colors": 3,
            "frame_skip": 1,
            "target_categories": [
                "car", "bicycle", "bus", "motorcycle"
            ],
            "fps": None,
            "bbox_format": "auto",
            "index_to_category": None,
            "alert_config": None
        }
        defaults.update(overrides)
        return ColorDetectionConfig(**defaults)

    def _update_color_tracking_state(self, detections: List[Dict]):
        """Track unique track_ids per category and color for the total count."""
        # Ensure storage is a defaultdict(set) to allow safe .add()
        existing_store = getattr(self, '_color_total_track_ids', None)
        if not isinstance(existing_store, defaultdict):
            existing_store = {} if existing_store is None else dict(existing_store)
            self._color_total_track_ids = defaultdict(set, existing_store)
        else:
            self._color_total_track_ids = existing_store
        self._color_current_frame_track_ids = defaultdict(set)
        for det in detections:
            cat = det.get('category')
            color = det.get('main_color')
            track_id = det.get('track_id')
            if cat and track_id is not None:
                key = f"{cat}:{color}" if color else cat
                self._color_total_track_ids[key].add(track_id)
                self._color_current_frame_track_ids[key].add(track_id)

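    # Example of the key scheme above: a red car with track_id 7 is stored
    # under the composite key "car:red", so
    #   self._color_total_track_ids["car:red"] == {7}
    # Re-seeing track 7 in later frames does not inflate the total because
    # set.add() is idempotent; only genuinely new track_ids grow the
    # cumulative count.
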
    def get_total_color_counts(self):
        """Return the total unique track_id count per color (across all categories)."""
        store = getattr(self, '_color_total_track_ids', {})
        if not isinstance(store, dict):
            return {}
        color_to_ids = defaultdict(set)
        for key, id_set in store.items():
            if isinstance(key, str) and ':' in key:
                _, color = key.split(':', 1)
            else:
                color = None
            # Support both sets and other iterables
            ids = id_set if isinstance(id_set, set) else set(id_set or [])
            if color:
                color_to_ids[color].update(ids)
        return {color: len(ids) for color, ids in color_to_ids.items()}

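    # Example: with
    #   _color_total_track_ids = {"car:red": {1, 2}, "bus:red": {2, 3}, "car:blue": {4}}
    # track_id 2 appears under two categories but one color, so the union per
    # color gives {"red": 3, "blue": 1}.
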
    def get_total_category_counts(self, data):
        """Return the total unique track_id count per category (across all colors)."""
        for det in data:
            track_id = det.get("track_id")
            category = det.get("category")
            if track_id and category:
                if category not in self.total_category_count:
                    self.total_category_count[category] = set()
                self.total_category_count[category].add(track_id)

        # Convert sets to counts
        return {cat: len(track_ids) for cat, track_ids in self.total_category_count.items()}

    def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
        """Get detailed information about track IDs for color detections (per frame)."""
        frame_track_ids = set(det.get('track_id') for det in detections if det.get('track_id') is not None)
        total_track_ids = set()
        for s in getattr(self, '_color_total_track_ids', {}).values():
            total_track_ids.update(s)
        return {
            "total_count": len(total_track_ids),
            "current_frame_count": len(frame_track_ids),
            "total_unique_track_ids": len(total_track_ids),
            "current_frame_track_ids": list(frame_track_ids),
            "last_update_time": time.time(),
            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
        }

    def _attach_masks_to_detections(
        self,
        processed_detections: List[Dict[str, Any]],
        raw_detections: List[Dict[str, Any]],
        iou_threshold: float = 0.5,
    ) -> List[Dict[str, Any]]:
        """
        Attach segmentation masks from the original `raw_detections` list to the
        `processed_detections` list returned after smoothing/tracking.

        Matching between detections is performed using Intersection-over-Union
        (IoU) of the bounding boxes. For each processed detection we select the
        raw detection with the highest IoU above `iou_threshold` and copy its
        `masks` (or `mask`) field. If no suitable match is found, the detection
        receives the `["EMPTY"]` sentinel for `masks` to maintain a consistent
        schema.
        """
        if not processed_detections or not raw_detections:
            # Nothing to do; ensure the masks key exists for downstream logic.
            for det in processed_detections:
                det.setdefault("masks", [])
            return processed_detections

        # Track which raw detections have already been matched to avoid
        # assigning the same mask to multiple processed detections.
        used_raw_indices = set()

        for det in processed_detections:
            best_iou = 0.0
            best_idx = None

            for idx, raw_det in enumerate(raw_detections):
                if idx in used_raw_indices:
                    continue

                iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
                if iou > best_iou:
                    best_iou = iou
                    best_idx = idx

            if best_idx is not None and best_iou >= iou_threshold:
                raw_det = raw_detections[best_idx]
                masks = raw_det.get("masks", raw_det.get("mask"))
                if masks is not None:
                    det["masks"] = masks
                used_raw_indices.add(best_idx)
            else:
                # No adequate match; fall back to the "EMPTY" sentinel so the
                # schema stays consistent downstream.
                det.setdefault("masks", ["EMPTY"])

        return processed_detections

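    # Matching sketch: each processed detection greedily claims the unused raw
    # detection with the highest IoU. With iou_threshold = 0.5, a processed box
    # overlapping raw boxes at IoU 0.9 and 0.6 takes the 0.9 match and marks it
    # used, leaving the 0.6 box available for the next processed detection.
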
    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ColorDetectionConfig,
                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate structured incidents for the output format with frame-based keys."""
        # Use the frame number as key, falling back to 'current_frame' if not available
        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        incidents = []
        total_detections = counting_summary.get("total_count", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info)
        camera_info = self.get_camera_info_from_stream(stream_info)

        # Cap the rolling alert-level history at the most recent 900 entries
        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        if total_detections > 0:
            # Determine the incident level based on thresholds
            level = "low"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
                threshold = config.alert_config.count_thresholds.get("all", 15)
                intensity = min(10.0, (total_detections / threshold) * 10)

                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                if total_detections > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_detections > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_detections > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_detections / 3.0)
                    self._ascending_alert_list.append(0)

            # Generate human text in the new format
            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {self.CASE_TYPE}: {level}")
            human_text = "\n".join(human_text_lines)

            alert_settings = []
            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                alert_settings.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                    "ascending": True,
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
                })

            event = self.create_incident(
                incident_id=self.CASE_TYPE + '_' + str(frame_number), incident_type=self.CASE_TYPE,
                severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts,
                alert_settings=alert_settings, start_time=start_timestamp,
                end_time=self.current_incident_end_timestamp,
                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
            incidents.append(event)

        else:
            self._ascending_alert_list.append(0)
            incidents.append({})

        return incidents

    def _check_alerts(self, summary: dict, frame_number: Any, config: ColorDetectionConfig) -> List[Dict]:
        """
        Check whether any alert thresholds are exceeded and return alert dicts.
        """
        def get_trend(data, lookback=900, threshold=0.6):
            '''
            Determine whether the trend is ascending or descending based on the
            actual value progression. Works with values 0, 1, 2, 3 (not just binary).
            '''
            window = data[-lookback:] if len(data) >= lookback else data
            if len(window) < 2:
                return True  # not enough data to determine a trend
            increasing = 0
            total = 0
            for i in range(1, len(window)):
                if window[i] >= window[i - 1]:
                    increasing += 1
                total += 1
            ratio = increasing / total
            if ratio >= threshold:
                return True
            elif ratio <= (1 - threshold):
                return False
            # Ambiguous trend (ratio between the two cutoffs): default to
            # ascending so borderline alerts are not silently suppressed.
            return True

        frame_key = str(frame_number) if frame_number is not None else "current_frame"
        alerts = []
        total_detections = summary.get("total_count", 0)  # CURRENT combined total count of all classes
        total_counts_dict = summary.get("total_color_counts", {})  # TOTAL cumulative counts per class
        if isinstance(total_counts_dict, int):
            total_counts_dict = {}
        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
        per_category_count = summary.get("per_category_count", {})  # CURRENT count per class

        if not config.alert_config:
            return alerts

        total = summary.get("total_count", 0)
        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
            for category, threshold in config.alert_config.count_thresholds.items():
                if category == "all" and total > threshold:
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                        "alert_id": "alert_" + category + '_' + frame_key,
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                    })
                elif category in summary.get("per_category_count", {}):
                    count = summary.get("per_category_count", {})[category]
                    if count > threshold:  # Alert only when the count EXCEEDS the threshold
                        alerts.append({
                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                            "alert_id": "alert_" + category + '_' + frame_key,
                            "incident_category": self.CASE_TYPE,
                            "threshold_level": threshold,
                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
                                                              getattr(config.alert_config, 'alert_value', ['JSON']))}
                        })
        return alerts

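    # Worked example for get_trend with its default threshold of 0.6: for
    # [0, 1, 1, 2, 3] all four consecutive transitions are non-decreasing, so
    # ratio = 4/4 = 1.0 >= 0.6 and the trend is ascending (True); for
    # [3, 2, 2, 1, 0] only one transition is non-decreasing, so
    # ratio = 1/4 = 0.25 <= 0.4 and the trend is descending (False).
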
    def _format_timestamp_for_stream(self, timestamp: float) -> str:
        """Format a timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
        dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
        return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _format_timestamp_for_video(self, timestamp: float) -> str:
        """Format a timestamp for video chunks (HH:MM:SS.ss format)."""
        hours = int(timestamp // 3600)
        minutes = int((timestamp % 3600) // 60)
        seconds = round(float(timestamp % 60), 2)
        return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"

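    # Worked example: 3725.5 seconds into a video chunk gives
    #   hours = 1, minutes = 2, seconds = 5.5  ->  "01:02:05.50"
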
    def _format_timestamp(self, timestamp: Any) -> str:
        """Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.

        The input can be either:
        1. A numeric Unix timestamp (``float`` / ``int``), which is converted to a datetime.
        2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.

        The returned value is in the format YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).

        Example
        -------
        >>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
        '2025:10:27 19:31:20'
        """
        # Convert numeric timestamps to datetime first
        if isinstance(timestamp, (int, float)):
            dt = datetime.fromtimestamp(timestamp, timezone.utc)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

        # Ensure we are working with a string from here on
        if not isinstance(timestamp, str):
            return str(timestamp)

        # Remove the ' UTC' suffix if present
        timestamp_clean = timestamp.replace(' UTC', '').strip()

        # Remove milliseconds if present (everything after the last dot)
        if '.' in timestamp_clean:
            timestamp_clean = timestamp_clean.split('.')[0]

        # Parse the timestamp string and convert it to the desired format
        try:
            # Handle the format YYYY-MM-DD-HH:MM:SS
            if timestamp_clean.count('-') >= 2:
                # Turn the date dashes into colons and separate the time part with a space
                parts = timestamp_clean.split('-')
                if len(parts) >= 4:
                    # parts = ['2025', '10', '27', '19:31:20']
                    formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
                    return formatted
        except Exception:
            pass

        # If parsing fails, return the cleaned string as-is
        return timestamp_clean

    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
        """Get the formatted current timestamp based on stream type."""
        if not stream_info:
            return "00:00:00.00"
        if precision:
            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                if frame_id:
                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                else:
                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                # NOTE: the frame-derived stream_time_str is computed, but the
                # formatted input_settings.stream_time is what is returned here.
                stream_time_str = self._format_timestamp_for_video(start_time)
                return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
            else:
                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
            if frame_id:
                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
            else:
                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
            stream_time_str = self._format_timestamp_for_video(start_time)
            return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
        else:
            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
            if stream_time_str:
                try:
                    timestamp_str = stream_time_str.replace(" UTC", "")
                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                    return self._format_timestamp_for_stream(timestamp)
                except Exception:
                    return self._format_timestamp_for_stream(time.time())
            else:
                return self._format_timestamp_for_stream(time.time())

    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
        """Get the formatted start timestamp for 'TOTAL SINCE' based on stream type."""
        if not stream_info:
            return "00:00:00"

        if precision:
            if self.start_timer is None:
                candidate = stream_info.get("input_settings", {}).get("stream_time")
                if not candidate or candidate == "NA":
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                self.start_timer = candidate
                return self._format_timestamp(self.start_timer)
            elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
                candidate = stream_info.get("input_settings", {}).get("stream_time")
                if not candidate or candidate == "NA":
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                self.start_timer = candidate
                return self._format_timestamp(self.start_timer)
            else:
                return self._format_timestamp(self.start_timer)

        if self.start_timer is None:
            # Prefer the direct input_settings.stream_time if available and not NA
            candidate = stream_info.get("input_settings", {}).get("stream_time")
            if not candidate or candidate == "NA":
                # Fall back to the nested stream_info.stream_time used by the current-timestamp path
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                        candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                    except Exception:
                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                else:
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            self.start_timer = candidate
            return self._format_timestamp(self.start_timer)
        elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
            candidate = stream_info.get("input_settings", {}).get("stream_time")
            if not candidate or candidate == "NA":
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        ts = dt.replace(tzinfo=timezone.utc).timestamp()
                        candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                    except Exception:
                        candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
                else:
                    candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
            self.start_timer = candidate
            return self._format_timestamp(self.start_timer)

        else:
            if self.start_timer is not None and self.start_timer != "NA":
                return self._format_timestamp(self.start_timer)

            if self._tracking_start_time is None:
                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                if stream_time_str:
                    try:
                        timestamp_str = stream_time_str.replace(" UTC", "")
                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                    except Exception:
                        self._tracking_start_time = time.time()
                else:
                    self._tracking_start_time = time.time()

            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
            dt = dt.replace(minute=0, second=0, microsecond=0)
            return dt.strftime('%Y:%m:%d %H:%M:%S')

    def _get_tracking_start_time(self) -> str:
        """Get the tracking start time, formatted as a string."""
        if self._tracking_start_time is None:
            return "N/A"
        return self._format_timestamp(self._tracking_start_time)

    def _set_tracking_start_time(self) -> None:
        """Set the tracking start time to the current time."""
        self._tracking_start_time = time.time()

    def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: ColorDetectionConfig) -> Dict[str, Dict[str, Any]]:
        """Update zone tracking with current frame data."""
        if not zone_analysis or not config.zone_config or not config.zone_config['zones']:
            return {}

        enhanced_zone_analysis = {}
        zones = config.zone_config['zones']

        # Initialize current frame zone tracks
        current_frame_zone_tracks = {zone_name: set() for zone_name in zones.keys()}

        # Initialize zone tracking storage
        for zone_name in zones.keys():
            if zone_name not in self._zone_current_track_ids:
                self._zone_current_track_ids[zone_name] = set()
            if zone_name not in self._zone_total_track_ids:
                self._zone_total_track_ids[zone_name] = set()

        # Check each detection against each zone
        for detection in detections:
            track_id = detection.get("track_id")
            if track_id is None:
                continue

            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue

            # Check which zone this detection is in
            for zone_name, zone_polygon in zones.items():
                if self._is_in_zone(bbox, zone_polygon):
                    current_frame_zone_tracks[zone_name].add(track_id)
                    if track_id not in self.color_det_dict:  # Use color_det_dict for consistency
                        self.color_det_dict[track_id] = [detection.get("main_color", "unknown"), detection.get("confidence", 0.0)]

        # Update zone tracking for each zone
        for zone_name, zone_counts in zone_analysis.items():
            current_tracks = current_frame_zone_tracks.get(zone_name, set())
            self._zone_current_track_ids[zone_name] = current_tracks
            self._zone_total_track_ids[zone_name].update(current_tracks)
            self._zone_current_counts[zone_name] = len(current_tracks)
            self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])

            enhanced_zone_analysis[zone_name] = {
                "current_count": self._zone_current_counts[zone_name],
                "total_count": self._zone_total_counts[zone_name],
                "current_track_ids": list(current_tracks),
                "total_track_ids": list(self._zone_total_track_ids[zone_name]),
                "original_counts": zone_counts
            }

        return enhanced_zone_analysis

    def _compute_iou(self, box1: Any, box2: Any) -> float:
        """Compute IoU between two bounding boxes, which may be dicts or lists.

        Falls back to 0 when insufficient data is available."""
        # Helper to convert a bbox (dict or list) to [x1, y1, x2, y2]
        def _bbox_to_list(bbox):
            if bbox is None:
                return []
            if isinstance(bbox, list):
                return bbox[:4] if len(bbox) >= 4 else []
            if isinstance(bbox, dict):
                if "xmin" in bbox:
                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                if "x1" in bbox:
                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
                # Fallback: the first four numeric values
                values = [v for v in bbox.values() if isinstance(v, (int, float))]
                return values[:4] if len(values) >= 4 else []
            return []

        l1 = _bbox_to_list(box1)
        l2 = _bbox_to_list(box2)
        if len(l1) < 4 or len(l2) < 4:
            return 0.0
        x1_min, y1_min, x1_max, y1_max = l1
        x2_min, y2_min, x2_max, y2_max = l2

        # Ensure correct coordinate ordering
        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)

        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)

        inter_w = max(0.0, inter_x_max - inter_x_min)
        inter_h = max(0.0, inter_y_max - inter_y_min)
        inter_area = inter_w * inter_h

        area1 = (x1_max - x1_min) * (y1_max - y1_min)
        area2 = (x2_max - x2_min) * (y2_max - y2_min)
        union_area = area1 + area2 - inter_area

        return (inter_area / union_area) if union_area > 0 else 0.0

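    # Example of the format tolerance above: both
    #   [0, 0, 10, 10]  and  {"x1": 0, "y1": 0, "x2": 10, "y2": 10}
    # normalize to the same [x1, y1, x2, y2] list, so comparing one of each
    # yields an IoU of 1.0.
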
    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
        """Return a stable canonical ID for a raw tracker ID, merging fragmented
        tracks when IoU and temporal constraints indicate they represent the
        same physical object."""
        if raw_id is None or bbox is None:
            # Nothing to merge
            return raw_id

        now = time.time()

        # Fast path: raw_id is already mapped
        if raw_id in self._track_aliases:
            canonical_id = self._track_aliases[raw_id]
            track_info = self._canonical_tracks.get(canonical_id)
            if track_info is not None:
                track_info["last_bbox"] = bbox
                track_info["last_update"] = now
                track_info["raw_ids"].add(raw_id)
            return canonical_id

        # Attempt to merge with an existing canonical track
        for canonical_id, info in self._canonical_tracks.items():
            # Only consider recently updated tracks
            if now - info["last_update"] > self._track_merge_time_window:
                continue
            iou = self._compute_iou(bbox, info["last_bbox"])
            if iou >= self._track_merge_iou_threshold:
                # Merge
                self._track_aliases[raw_id] = canonical_id
                info["last_bbox"] = bbox
                info["last_update"] = now
                info["raw_ids"].add(raw_id)
                return canonical_id

        # No match: register a new canonical track
        canonical_id = raw_id
        self._track_aliases[raw_id] = canonical_id
        self._canonical_tracks[canonical_id] = {
            "last_bbox": bbox,
            "last_update": now,
            "raw_ids": {raw_id},
        }
        return canonical_id