matrice_analytics-0.1.60-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/proximity_detection.py
@@ -0,0 +1,1901 @@
+ """
+ Proximity Detection use case implementation.
+
+ This module provides a clean implementation of proximity detection functionality
+ with zone-based analysis, tracking, and alerting capabilities.
+ """
+
+ from typing import Any, Dict, List, Optional, Set
+ import time
+ from datetime import datetime, timezone
+ import math
+
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+ from ..core.config import ProximityConfig, ZoneConfig, AlertConfig
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker,
+     calculate_iou
+ )
+ from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
+
+
+ class ProximityUseCase(BaseProcessor):
+     """Proximity Detection use case with zone analysis and alerting."""
+
+     def __init__(self):
+         """Initialize Proximity Detection use case."""
+         super().__init__("proximity_detection")
+         self.category = "security"
+         self.CASE_TYPE: Optional[str] = 'proximity_detection'
+         self.CASE_VERSION: Optional[str] = '1.3'
+
+         # Track ID storage for total count calculation
+         self._total_track_ids = set()  # Store all unique track IDs seen across calls
+         self._current_frame_track_ids = set()  # Store track IDs from current frame
+         self._total_count = 0  # Cached total count
+         self._last_update_time = time.time()  # Track when last updated
+
+         # Zone-based tracking storage
+         self._zone_current_track_ids = {}  # zone_name -> set of current track IDs in zone
+         self._zone_total_track_ids = {}  # zone_name -> set of all track IDs that have been in zone
+         self._zone_current_counts = {}  # zone_name -> current count in zone
+         self._zone_total_counts = {}  # zone_name -> total count that have been in zone
+
+         # Frame counter for tracking total frames processed
+         self._total_frame_counter = 0  # Total frames processed across all calls
+
+         # Global frame offset for video chunk processing
+         self._global_frame_offset = 0  # Offset to add to local frame IDs for global frame numbering
+         self._frames_in_current_chunk = 0  # Number of frames in current chunk
+
+         # Initialize smoothing tracker
+         self.smoothing_tracker = None
+
+         # Track start time for "TOTAL SINCE" calculation
+         self._tracking_start_time = None
+
+         # Proximity counting
+         self._total_proximity_count = 0  # Total proximity events across all calls
+         self._observed_proximity_pairs: Set[frozenset] = set()  # Unique canonical ID pairs seen across frames
+         self._last_frame_proximity_pairs: Set[tuple] = set()  # Pairs detected in the most recent frame (track-id based)
+
+         # --------------------------------------------------------------------- #
+         # Tracking aliasing structures to merge fragmented IDs                   #
+         # --------------------------------------------------------------------- #
+         # Maps raw tracker IDs generated by ByteTrack to a stable canonical ID
+         # that represents a real-world person. This helps avoid double counting
+         # when the tracker loses a target temporarily and assigns a new ID.
+         self._track_aliases: Dict[Any, Any] = {}
+
+         # Stores metadata about each canonical track such as its last seen
+         # bounding box, last update timestamp and all raw IDs that have been
+         # merged into it.
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+
+         # IoU threshold above which two bounding boxes are considered to belong
+         # to the same person (empirically chosen; adjust in production if
+         # needed).
+         self._track_merge_iou_threshold: float = 0.04
+
+         # Only merge with canonical tracks that were updated within this time
+         # window (in seconds). This prevents accidentally merging tracks that
+         # left the scene long ago.
+         self._track_merge_time_window: float = 10.0
+
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"
+
+
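The aliasing thresholds above (`_track_merge_iou_threshold = 0.04`, `_track_merge_time_window = 10.0` s) imply a simple merge rule. A minimal sketch of that decision follows; it is not part of the package, and the inline `iou` helper merely stands in for the `calculate_iou` utility imported from `..utils`.

```python
import time

def iou(a, b):
    """Plain IoU for [x1, y1, x2, y2] boxes (stand-in for utils.calculate_iou)."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.0

def should_merge(raw_box, canonical, iou_threshold=0.04, time_window=10.0):
    """True if a raw tracker box likely belongs to an existing canonical track."""
    recent = (time.time() - canonical["last_update"]) <= time_window
    return recent and iou(raw_box, canonical["last_bbox"]) >= iou_threshold

# A track last seen 2 s ago, with a heavily overlapping new box, keeps its ID.
canonical_track = {"last_bbox": [100, 100, 160, 260], "last_update": time.time() - 2.0}
print(should_merge([104, 102, 166, 258], canonical_track))  # True -> reuse the canonical ID
```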
99
+     def process(self, data: Any, config: ConfigProtocol,
+                 context: Optional[ProcessingContext] = None, stream_info: Optional[Any] = None) -> ProcessingResult:
+         """
+         Process proximity detection use case - automatically detects single or multi-frame structure.
+
+         Args:
+             data: Raw model output (detection or tracking format)
+             config: Proximity detection configuration
+             context: Processing context
+             stream_info: Stream information containing frame details (optional)
+
+         Returns:
+             ProcessingResult: Processing result with standardized agg_summary structure
+         """
+         # start_time = time.time()
+
+         try:
+             # Ensure we have the right config type
+             if not isinstance(config, ProximityConfig):
+                 return self.create_error_result(
+                     "Invalid configuration type for proximity detection",
+                     usecase=self.name,
+                     category=self.category,
+                     context=context
+                 )
+
+             # Initialize processing context if not provided
+             if context is None:
+                 context = ProcessingContext()
+
+             # Detect input format and frame structure
+             input_format = match_results_structure(data)
+             context.input_format = input_format
+             context.confidence_threshold = config.confidence_threshold
+
+             is_multi_frame = self.detect_frame_structure(data)
+
+             # self.logger.info(f"Processing people counting - Format: {input_format.value}, Multi-frame: {is_multi_frame}")
+
+             # Apply smoothing if enabled
+             if config.enable_smoothing and input_format == ResultFormat.OBJECT_TRACKING:
+                 data = self._apply_smoothing(data, config)
+
+             # Process based on frame structure
+             if is_multi_frame:
+                 return self._process_multi_frame(data, config, context, stream_info)
+             else:
+                 return self._process_single_frame(data, config, context, stream_info)
+
+         except Exception as e:  # noqa: BLE001
+             self.logger.error("Proximity detection failed: %s", str(e), exc_info=True)
+
+             if context:
+                 context.mark_completed()
+
+             return self.create_error_result(
+                 str(e),
+                 type(e).__name__,
+                 usecase=self.name,
+                 category=self.category,
+                 context=context
+             )
+
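As a rough usage sketch of this entry point (not part of the diff): the class is instantiated with no arguments and `process()` is handed raw detections plus a `ProximityConfig`. The `ProximityConfig` keyword arguments and the detection dict schema below are assumptions inferred from the attributes this file reads (`confidence_threshold`, `person_categories`, `proximity_threshold_meters`, `bounding_box`, `track_id`), not a documented API; `result.data` is likewise assumed to carry the `agg_summary` payload passed to `create_result()`.

```python
# Hypothetical usage sketch; field names mirror attributes read in this module.
from matrice_analytics.post_processing.core.config import ProximityConfig
from matrice_analytics.post_processing.usecases.proximity_detection import ProximityUseCase

usecase = ProximityUseCase()
config = ProximityConfig(                 # constructor arguments are an assumption
    confidence_threshold=0.4,
    person_categories=["person"],
    proximity_threshold_meters=1.0,
)

# Single-frame detection output in the shape the filtering steps expect.
detections = [
    {"category": "person", "confidence": 0.91, "track_id": 1,
     "bounding_box": {"xmin": 100, "ymin": 120, "xmax": 180, "ymax": 360}},
    {"category": "person", "confidence": 0.87, "track_id": 2,
     "bounding_box": {"xmin": 210, "ymin": 110, "xmax": 290, "ymax": 350}},
]

result = usecase.process(detections, config)
print(result.data["agg_summary"])         # assumed accessor for the standardized summary
```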
163
+     def _process_multi_frame(self, data: Dict, config: ProximityConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         """Process multi-frame data to generate frame-wise agg_summary."""
+
+         frame_incidents = {}
+         frame_tracking_stats = {}
+         frame_business_analytics = {}
+         frame_human_text = {}
+         frame_alerts = {}
+
+         # Increment total frame counter
+         frames_in_this_call = len(data)
+         self._total_frame_counter += frames_in_this_call
+
+         # Process each frame individually
+         for frame_key, frame_detections in data.items():
+             # Extract frame ID from tracking data
+             frame_id = self._extract_frame_id_from_tracking(frame_detections, frame_key)
+             global_frame_id = self.get_global_frame_id(frame_id)
+
+             # Process this single frame's detections
+             alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
+                 frame_detections, config, global_frame_id, stream_info
+             )
+             incidents = incidents_list[0] if incidents_list else {}
+             tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+             business_analytics = business_analytics_list[0] if business_analytics_list else {}
+             summary = summary_list[0] if summary_list else {}
+
+             # Store frame-wise results
+             if incidents:
+                 frame_incidents[global_frame_id] = incidents
+             if tracking_stats:
+                 frame_tracking_stats[global_frame_id] = tracking_stats
+             if business_analytics:
+                 frame_business_analytics[global_frame_id] = business_analytics
+             if summary:
+                 frame_human_text[global_frame_id] = summary
+             if alerts:
+                 frame_alerts[global_frame_id] = alerts
+
+         # Update global frame offset after processing this chunk
+         self.update_global_frame_offset(frames_in_this_call)
+
+         # Create frame-wise agg_summary
+         agg_summary = self.create_frame_wise_agg_summary(
+             frame_incidents, frame_tracking_stats, frame_business_analytics, frame_alerts,
+             frame_human_text=frame_human_text
+         )
+
+         # Mark processing as completed
+         context.mark_completed()
+
+         # Create result with standardized agg_summary
+         return self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+
+     def _process_single_frame(self, data: Any, config: ProximityConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         """Process single frame data and return standardized agg_summary."""
+
+         current_frame = stream_info.get("input_settings", {}).get("start_frame", "current_frame")
+         # Process frame data
+         alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
+             data, config, current_frame, stream_info
+         )
+         incidents = incidents_list[0] if incidents_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = business_analytics_list[0] if business_analytics_list else {}
+         summary = summary_list[0] if summary_list else {}
+
+         # Create single-frame agg_summary
+         agg_summary = self.create_agg_summary(
+             current_frame, incidents, tracking_stats, business_analytics, alerts, human_text=summary
+         )
+
+         # Mark processing as completed
+         context.mark_completed()
+
+         # Create result with standardized agg_summary
+         return self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+
+
253
+     def _process_frame_detections(self, frame_data: Any, config: ProximityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> tuple:
+         """Process detections from a single frame and return standardized components."""
+
+         # Convert frame_data to list if it's not already
+         if isinstance(frame_data, list):
+             frame_detections = frame_data
+         else:
+             # Handle other formats as needed
+             frame_detections = []
+
+         # Step 1: Apply confidence filtering to this frame
+         if config.confidence_threshold is not None:
+             frame_detections = [d for d in frame_detections if d.get("confidence", 0) >= config.confidence_threshold]
+
+         # Step 2: Apply category mapping if provided
+         if config.index_to_category:
+             frame_detections = apply_category_mapping(frame_detections, config.index_to_category)
+
+         # Step 3: Filter to person categories
+         if config.person_categories:
+             frame_detections = [d for d in frame_detections if d.get("category") in config.person_categories]
+
+         # Step 4: Create counting summary for this frame
+         counting_summary = {
+             "total_objects": len(frame_detections),
+             "detections": frame_detections,
+             "categories": {}
+         }
+
+         # Count by category
+         for detection in frame_detections:
+             category = detection.get("category", "unknown")
+             current_count = counting_summary["categories"].get(category, 0)
+             counting_summary["categories"][category] = current_count + 1
+
+         # Update tracking state BEFORE proximity calculation so we have canonical IDs
+         self._update_tracking_state(counting_summary)
+
+         # Calculate unique proximity events for this frame using expanded bbox method
+         proximity_count = self._count_proximity_events_by_expanded_bbox(counting_summary["detections"], config, stream_info)
+         counting_summary["proximity_events"] = proximity_count
+         counting_summary["total_proximity_count"] = self._total_proximity_count
+
+         # Step 5: Zone analysis for this frame
+         zone_analysis = {}
+         if config.zone_config and config.zone_config.zones:
+             # Convert single frame to format expected by count_objects_in_zones
+             frame_data = frame_detections  # [frame_detections]
+             zone_analysis = count_objects_in_zones(frame_data, config.zone_config.zones)
+
+         # Update zone tracking with current frame data
+         if zone_analysis and config.enable_tracking:
+             enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, frame_detections, config)
+             # Merge enhanced zone analysis with original zone analysis
+             for zone_name, enhanced_data in enhanced_zone_analysis.items():
+                 zone_analysis[zone_name] = enhanced_data
+
+         # Step 6: Generate insights and alerts for this frame
+         alerts = self._check_alerts(counting_summary, zone_analysis, config, frame_id)
+
+         # Step 7: Generate summary and standardized agg_summary components for this frame
+         incidents = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_id, stream_info)
+         tracking_stats = self._generate_tracking_stats(counting_summary, zone_analysis, config, frame_id=frame_id, alerts=alerts, stream_info=stream_info)
+         business_analytics = self._generate_business_analytics(counting_summary, zone_analysis, config, frame_id, stream_info, is_empty=True)
+         summary = self._generate_summary(counting_summary, incidents, tracking_stats, business_analytics, alerts)
+
+         # Return standardized components as tuple
+         return alerts, incidents, tracking_stats, business_analytics, summary
+
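Steps 1-3 above can be traced on toy data. A minimal sketch (not part of the diff) follows; the index-to-category mapping and detection schema are assumed from the fields this method reads, and the inline mapping loop merely stands in for `apply_category_mapping`.

```python
# Minimal sketch of steps 1-3 on toy detections (not part of the wheel).
detections = [
    {"category": 0, "confidence": 0.92, "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 60, "ymax": 180}},
    {"category": 0, "confidence": 0.31, "bounding_box": {"xmin": 300, "ymin": 40, "xmax": 350, "ymax": 200}},
    {"category": 2, "confidence": 0.88, "bounding_box": {"xmin": 500, "ymin": 30, "xmax": 590, "ymax": 210}},
]
confidence_threshold = 0.5
index_to_category = {0: "person", 2: "car"}   # assumed mapping
person_categories = ["person"]

# Step 1: confidence filter
detections = [d for d in detections if d["confidence"] >= confidence_threshold]
# Step 2: index -> label mapping (stand-in for apply_category_mapping)
for d in detections:
    d["category"] = index_to_category.get(d["category"], d["category"])
# Step 3: keep person categories only
detections = [d for d in detections if d["category"] in person_categories]

print(len(detections))  # 1 -> only the high-confidence person survives
```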
322
+     def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: ProximityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         """Generate standardized incidents for the agg_summary structure."""
+
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         incidents = []
+         total_people = counting_summary.get("total_objects", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
+         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_settings.append({
+                 "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                 "ascending": True,
+                 "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                   getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                 }
+             })
+
+         if total_people > 0:
+             # Determine event level based on thresholds
+             level = "info"
+             intensity = 5.0
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                 self.current_incident_end_timestamp = 'Incident still active'
+             elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                 if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                     self.current_incident_end_timestamp = current_timestamp
+             elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                 self.current_incident_end_timestamp = 'N/A'
+
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 10)
+                 intensity = min(10.0, (total_people / threshold) * 10)
+
+                 if intensity >= 9:
+                     level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
+                 elif intensity >= 5:
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     self._ascending_alert_list.append(0)
+             else:
+                 if total_people > 30:
+                     level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_people > 25:
+                     level = "significant"
+                     intensity = 9.0
+                     self._ascending_alert_list.append(2)
+                 elif total_people > 15:
+                     level = "medium"
+                     intensity = 7.0
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     intensity = min(10.0, total_people / 3.0)
+                     self._ascending_alert_list.append(0)
+
+             # Generate human text in new format
+             human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+             human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
+             human_text = "\n".join(human_text_lines)
+
+             # Main proximity detection incident
+             event = self.create_incident(incident_id=self.CASE_TYPE + '_' + str(frame_id), incident_type=self.CASE_TYPE,
+                                          severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                          start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                                          level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+             incidents.append(event)
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})
+
+         # Add zone-specific events if applicable
+         if zone_analysis:
+             human_text_lines.append("\t- ZONE EVENTS:")
+             for zone_name, zone_count in zone_analysis.items():
+                 zone_total = self._robust_zone_total(zone_count)
+                 if zone_total > 0:
+                     zone_level = "info"
+                     if intensity >= 9:
+                         level = "critical"
+                         self._ascending_alert_list.append(3)
+                     elif intensity >= 7:
+                         level = "significant"
+                         self._ascending_alert_list.append(2)
+                     elif intensity >= 5:
+                         level = "medium"
+                         self._ascending_alert_list.append(1)
+                     else:
+                         level = "low"
+                         self._ascending_alert_list.append(0)
+
+                 if zone_total > 0:
+                     human_text_lines.append(f"\t\t- Zone name: {zone_name}")
+                     human_text_lines.append(f"\t\t\t- Total people in zone: {zone_total}")
+                     # Zone-specific incident
+                     event = self.create_incident(incident_id=self.CASE_TYPE + '_' + 'zone_' + zone_name + str(frame_id), incident_type=self.CASE_TYPE,
+                                                  severity_level=zone_level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                                  start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                                                  level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+                     incidents.append(event)
+         return incidents
+
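The intensity computed above is a linear scaling of the people count against the configured "all" threshold, capped at 10. A worked example of the arithmetic (not part of the diff), with an assumed threshold of 10:

```python
# Worked example of the intensity/level mapping above (threshold value assumed).
count_threshold_all = 10          # e.g. config.alert_config.count_thresholds["all"]
total_people = 8

intensity = min(10.0, (total_people / count_threshold_all) * 10)  # = 8.0
if intensity >= 9:
    level = "critical"
elif intensity >= 7:
    level = "significant"
elif intensity >= 5:
    level = "medium"
else:
    level = "low"

print(intensity, level)  # 8.0 significant
```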
437
+     def _generate_tracking_stats(self, counting_summary: Dict, zone_analysis: Dict, config: ProximityConfig, frame_id: str, alerts: Any = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         """Generate tracking stats using standardized methods."""
+
+         total_people = counting_summary.get("total_objects", 0)
+
+         # Get total count from cached tracking state
+         total_unique_count = self.get_total_count()
+         # current_frame_count = self.get_current_frame_count()
+
+         # Get camera info using standardized method
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         # Build total_counts using standardized method
+         total_counts = []
+         per_category_total = {}
+
+         for category in config.person_categories or ["person"]:
+             # Get count for this category from zone analysis or counting summary
+             category_total_count = 0
+             if zone_analysis:
+                 for zone_data in zone_analysis.values():
+                     if isinstance(zone_data, dict) and "total_count" in zone_data:
+                         category_total_count += zone_data.get("total_count", 0)
+                     elif isinstance(zone_data, dict):
+                         # Sum up zone counts
+                         for v in zone_data.values():
+                             if isinstance(v, int):
+                                 category_total_count += v
+                             elif isinstance(v, list):
+                                 category_total_count += len(v)
+                     elif isinstance(zone_data, (int, list)):
+                         category_total_count += len(zone_data) if isinstance(zone_data, list) else zone_data
+             else:
+                 # Use total unique count from tracking state
+                 category_total_count = total_unique_count
+
+             if category_total_count > 0:
+                 total_counts.append(self.create_count_object(category, category_total_count))
+                 per_category_total[category] = category_total_count
+
+         # Build current_counts using standardized method
+         current_counts = []
+         per_category_current = {}
+
+         for category in config.person_categories or ["person"]:
+             # Get current count for this category
+             category_current_count = 0
+             if zone_analysis:
+                 for zone_data in zone_analysis.values():
+                     if isinstance(zone_data, dict) and "current_count" in zone_data:
+                         category_current_count += zone_data.get("current_count", 0)
+                     elif isinstance(zone_data, dict):
+                         # For current frame, look at detections count
+                         for v in zone_data.values():
+                             if isinstance(v, int):
+                                 category_current_count += v
+                             elif isinstance(v, list):
+                                 category_current_count += len(v)
+                     elif isinstance(zone_data, (int, list)):
+                         category_current_count += len(zone_data) if isinstance(zone_data, list) else zone_data
+             else:
+                 # Count detections in current frame for this category
+                 detections = counting_summary.get("detections", [])
+                 category_current_count = sum(1 for d in detections if d.get("category") == category)
+
+             if category_current_count > 0 or total_people > 0:  # Include even if 0 when there are people
+                 current_counts.append(self.create_count_object(category, category_current_count))
+                 per_category_current[category] = category_current_count
+
+         # Prepare detections using standardized method (without confidence and track_id)
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             bbox = detection.get("bounding_box", {})
+             category = detection.get("category", "person")
+             # Include segmentation if available (like in eg.json)
+             if detection.get("masks"):
+                 segmentation = detection.get("masks", [])
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("segmentation"):
+                 segmentation = detection.get("segmentation")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("mask"):
+                 segmentation = detection.get("mask")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             else:
+                 detection_obj = self.create_detection_object(category, bbox)
+             detections.append(detection_obj)
+
+         # detections prepared above are used only for output formatting
+         # Build alerts and alert_settings arrays
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_settings.append({
+                 "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                 "ascending": True,
+                 "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                   getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                 }
+             })
+
+         if zone_analysis:
+             human_text_lines = []
+             current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+
+             def robust_zone_total(zone_count):
+                 if isinstance(zone_count, dict):
+                     total = 0
+                     for v in zone_count.values():
+                         if isinstance(v, int):
+                             total += v
+                         elif isinstance(v, list) and total == 0:
+                             total += len(v)
+                     return total
+                 elif isinstance(zone_count, list):
+                     return len(zone_count)
+                 elif isinstance(zone_count, int):
+                     return zone_count
+                 else:
+                     return 0
+
+             human_text_lines.append(f"\t- People Detected: {total_people}")
+             human_text_lines.append("")
+             human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
+
+             for zone_name, zone_count in zone_analysis.items():
+                 zone_total = robust_zone_total(zone_count)
+                 human_text_lines.append(f"\t- Zone name: {zone_name}")
+                 human_text_lines.append(f"\t\t- Total count in zone: {zone_total}")
+
+             if total_unique_count > 0:
+                 human_text_lines.append(f"\t- Total unique people in the scene: {total_unique_count}")
+             if alerts:
+                 for alert in alerts:
+                     human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+             else:
+                 human_text_lines.append("Alerts: None")
+             human_text = "\n".join(human_text_lines)
+         else:
+             human_text = self._generate_human_text_for_tracking(total_people, detections, total_unique_count, config, frame_id, alerts, stream_info)
+
+         # Create high precision timestamps for input_timestamp and reset_timestamp
+         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
+         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+         # Create tracking_stat using standardized method
+         tracking_stat = self.create_tracking_stats(
+             total_counts, current_counts, detections, human_text, camera_info, alerts, alert_settings, start_time=high_precision_start_timestamp, reset_time=high_precision_reset_timestamp
+         )
+         return [tracking_stat]
+
589
+     def _count_proximity_events(self, detections: List[Dict[str, Any]], config: ProximityConfig, stream_info: Optional[Dict[str, Any]] = None) -> int:
+         """Count UNIQUE proximity events between detections in a frame.
+
+         Rules:
+         - Use IoU-NMS to deduplicate overlapping boxes (highest confidence kept).
+         - Use track IDs when available to build stable (id1, id2) pairs.
+         - Count each pair once (i < j) using Euclidean distance between box centers.
+         - Distance is evaluated in meters when calibration is available; otherwise, fall back to a pixel threshold.
+         - Maintain a running set of unique canonical-ID pairs across frames to compute total unique proximity events.
+         """
+         if not detections:
+             return 0
+
+         # Determine threshold strategy
+         meters_per_pixel = self._get_meters_per_pixel(config, stream_info)
+         threshold_meters = getattr(config, "proximity_threshold_meters", 1.0)
+         threshold_pixels_fallback = getattr(config, "proximity_threshold_pixels", 400.0)
+
+         overlap_iou_threshold = getattr(self, "_proximity_iou_duplicate_threshold", 0.5)
+
+         # Helper: convert bbox to xyxy list
+         def _to_xyxy(bbox: Any) -> List[float]:
+             if isinstance(bbox, list):
+                 if len(bbox) >= 4:
+                     return [float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3])]
+                 return []
+             if isinstance(bbox, dict):
+                 if all(k in bbox for k in ("xmin", "ymin", "xmax", "ymax")):
+                     return [float(bbox["xmin"]), float(bbox["ymin"]), float(bbox["xmax"]), float(bbox["ymax"])]
+                 if all(k in bbox for k in ("x1", "y1", "x2", "y2")):
+                     return [float(bbox["x1"]), float(bbox["y1"]), float(bbox["x2"]), float(bbox["y2"])]
+                 # Fallback: take first four values
+                 vals = list(bbox.values())
+                 if len(vals) >= 4:
+                     return [float(vals[0]), float(vals[1]), float(vals[2]), float(vals[3])]
+                 return []
+             return []
+
+         # Prepare tracked detections (track_id, bbox_xyxy, conf)
+         tracked_detections: List[Dict[str, Any]] = []
+         for det in detections:
+             bbox = _to_xyxy(det.get("bounding_box", det.get("bbox", {})))
+             if not bbox:
+                 continue
+             tracked_detections.append({
+                 "track_id": det.get("track_id"),
+                 "bbox": bbox,
+                 "confidence": float(det.get("confidence", 1.0))
+             })
+
+         # IoU-NMS to remove overlapping boxes, keep highest confidence
+         kept: List[Dict[str, Any]] = self._nms_by_iou(tracked_detections, overlap_iou_threshold)
+
+         # Compute centroids and keep alignment arrays for IDs
+         centroids: List[tuple] = []
+         track_ids: List[Any] = []
+         for td in kept:
+             x1, y1, x2, y2 = map(float, td["bbox"])
+             # Use box center; switch to bottom-center if needed
+             cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0
+             centroids.append((cx, cy))
+             track_ids.append(td.get("track_id"))
+
+         n = len(centroids)
+         current_pairs_by_ids: Set[tuple] = set()
+         current_pairs_all: Set[tuple] = set()
+
+         # Build current frame proximity pairs for all detections (even without IDs)
+         for i in range(n):
+             cx1, cy1 = centroids[i]
+             for j in range(i + 1, n):
+                 cx2, cy2 = centroids[j]
+                 pixel_distance = math.hypot(cx1 - cx2, cy1 - cy2)
+
+                 if meters_per_pixel:
+                     meters_distance = pixel_distance * float(meters_per_pixel)
+                     is_close = meters_distance < float(threshold_meters)
+                 else:
+                     is_close = pixel_distance < float(threshold_pixels_fallback)
+
+                 if not is_close:
+                     continue
+
+                 # For the per-frame count, include every close pair
+                 current_pairs_all.add((i, j))
+
+                 # For the global unique count, require both IDs
+                 id_i = track_ids[i]
+                 id_j = track_ids[j]
+                 if id_i is not None and id_j is not None:
+                     pair_ids = (id_i, id_j) if id_i <= id_j else (id_j, id_i)
+                     current_pairs_by_ids.add(pair_ids)
+
+         # Update global unique proximity pairs using ID pairs only
+         new_unique_pairs = {frozenset(p) for p in current_pairs_by_ids} - self._observed_proximity_pairs
+         if new_unique_pairs:
+             self._total_proximity_count += len(new_unique_pairs)
+             self._observed_proximity_pairs.update(new_unique_pairs)
+
+         # Store last frame pairs (ID pairs if available, else index pairs as fallback)
+         self._last_frame_proximity_pairs = current_pairs_by_ids if current_pairs_by_ids else current_pairs_all
+
+         # Return count of pairs detected in the current frame
+         return len(current_pairs_by_ids) if current_pairs_by_ids else len(current_pairs_all)
+
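A minimal sketch (not part of the diff) of the frozenset bookkeeping above: a close pair of track IDs is added to the running set once and is never recounted in later frames, which is what makes the total a count of unique proximity events.

```python
# Sketch of the cross-frame unique-pair accounting used above.
observed_pairs = set()          # analogue of self._observed_proximity_pairs
total_proximity_count = 0       # analogue of self._total_proximity_count

frames = [
    {(1, 2), (2, 3)},   # frame 1: IDs 1-2 and 2-3 are close
    {(1, 2)},           # frame 2: 1-2 still close -> not recounted
    {(3, 4)},           # frame 3: new pair 3-4
]

for close_id_pairs in frames:
    new_pairs = {frozenset(p) for p in close_id_pairs} - observed_pairs
    total_proximity_count += len(new_pairs)
    observed_pairs.update(new_pairs)

print(total_proximity_count)  # 3 unique proximity events (1-2, 2-3, 3-4)
```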
694
+     def _nms_by_iou(self, detections: List[Dict[str, Any]], iou_threshold: float) -> List[Dict[str, Any]]:
+         """Perform simple IoU-based NMS on a list of detections.
+
+         Each detection is a dict with keys: 'bbox' as [x1, y1, x2, y2], 'confidence' (float), and optional 'track_id'.
+         Keeps highest-confidence detections when overlap exceeds threshold.
+         """
+         if not detections:
+             return []
+         # Sort by confidence descending
+         dets = sorted(detections, key=lambda d: float(d.get("confidence", 1.0)), reverse=True)
+         kept: List[Dict[str, Any]] = []
+         for det in dets:
+             should_keep = True
+             for kept_det in kept:
+                 if self._compute_iou(det["bbox"], kept_det["bbox"]) >= iou_threshold:
+                     should_keep = False
+                     break
+             if should_keep:
+                 kept.append(det)
+         return kept
+
+     def _get_meters_per_pixel(self, config: ProximityConfig, stream_info: Optional[Dict[str, Any]] = None) -> Optional[float]:
+         """Compute meters-per-pixel scale using config and optional stream_info.
+
+         Priority:
+         1) config.meters_per_pixel (direct override)
+         2) config.scene_width_meters + frame width in pixels
+         3) config.scene_height_meters + frame height in pixels
+         Returns None if insufficient information.
+         """
+         # Direct override
+         if hasattr(config, "meters_per_pixel") and getattr(config, "meters_per_pixel"):
+             try:
+                 return float(getattr(config, "meters_per_pixel"))
+             except Exception:  # noqa: BLE001
+                 pass
+
+         width_px = None
+         height_px = None
+         if stream_info and isinstance(stream_info, dict):
+             input_settings = stream_info.get("input_settings", {}) or {}
+             resolution = input_settings.get("resolution", {}) or {}
+             width_px = resolution.get("width") or input_settings.get("frame_width")
+             height_px = resolution.get("height") or input_settings.get("frame_height")
+
+         # Derive from scene real-world width
+         if hasattr(config, "scene_width_meters") and getattr(config, "scene_width_meters") and width_px:
+             try:
+                 return float(getattr(config, "scene_width_meters")) / float(width_px)
+             except Exception:  # noqa: BLE001
+                 pass
+
+         # Derive from scene real-world height
+         if hasattr(config, "scene_height_meters") and getattr(config, "scene_height_meters") and height_px:
+             try:
+                 return float(getattr(config, "scene_height_meters")) / float(height_px)
+             except Exception:  # noqa: BLE001
+                 pass
+
+         return None
+
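A worked example (not part of the diff) of the calibration chain above, assuming a 1920 px wide frame that spans roughly 12 m of real-world width; without any calibration the proximity check falls back to the raw pixel threshold.

```python
# Illustrative calibration arithmetic matching _get_meters_per_pixel's priority order.
scene_width_meters = 12.0        # assumed real-world span of the camera view
frame_width_px = 1920
meters_per_pixel = scene_width_meters / frame_width_px   # = 0.00625 m/px

pixel_distance = 140.0                                   # centroid distance between two people
meters_distance = pixel_distance * meters_per_pixel      # = 0.875 m
threshold_meters = 1.0
print(meters_distance < threshold_meters)                # True -> counts as a proximity pair

# Uncalibrated fallback: compare raw pixels against proximity_threshold_pixels.
threshold_pixels_fallback = 400.0
print(pixel_distance < threshold_pixels_fallback)        # True
```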
755
+     def _generate_human_text_for_tracking(
+             self,
+             total_people: int,
+             detections,
+             total_unique_count: int,
+             config: ProximityConfig,
+             frame_id: str,
+             alerts: Any = None,
+             stream_info: Optional[Dict[str, Any]] = None) -> str:
+         """Generate human-readable text for tracking stats in old format."""
+
+         human_text_lines = []
+         current_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+
+         # Add proximity count to human text (expanded bbox method)
+         proximity_count = self._count_proximity_events_by_expanded_bbox(detections, config, stream_info)
+         if proximity_count > 0:
+             human_text_lines.append(f"\t- Current Frame Proximity: {proximity_count//2}")
+         else:
+             human_text_lines.append("\t- No Proximity Events Detected")
+
+         human_text_lines.append("")
+         human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
+         human_text_lines.append(f"\t- Total Proximity Count: {self._total_proximity_count//2}")
+
+         if alerts:
+             for alert in alerts:
+                 human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+         else:
+             human_text_lines.append("Alerts: None")
+
+         return "\n".join(human_text_lines)
+
+     def _check_alerts(self, counting_summary: Dict, zone_analysis: Dict,
+                       config: ProximityConfig, frame_id: str) -> List[Dict]:
+         """Check for alert conditions and generate alerts."""
+         def get_trend(data, lookback=900, threshold=0.6):
+             '''
+             Determine if the trend is ascending or descending based on actual value progression.
+             Now works with values 0,1,2,3 (not just binary).
+             '''
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True  # not enough data to determine trend
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             if ratio >= threshold:
+                 return True
+             elif ratio <= (1 - threshold):
+                 return False
+
+         alerts = []
+
+         if not config.alert_config:
+             return alerts
+
+         total_people = counting_summary.get("total_objects", 0)
+
+         # Count threshold alerts
+         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total_people >= threshold:
+                     alerts.append({
+                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                         "alert_id": "alert_" + category + '_' + frame_id,
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                           getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                         }
+                     })
+                 elif category in counting_summary.get("by_category", {}):
+                     count = counting_summary["by_category"][category]
+                     if count >= threshold:
+                         alerts.append({
+                             "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                             "alert_id": "alert_" + category + '_' + frame_id,
+                             "incident_category": self.CASE_TYPE,
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                               getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                             }
+                         })
+                 else:
+                     pass
+
+         # Zone occupancy threshold alerts
+         if config.alert_config.occupancy_thresholds:
+             for zone_name, threshold in config.alert_config.occupancy_thresholds.items():
+                 if zone_name in zone_analysis:
+                     # Calculate zone_count robustly (supports int, list, dict values)
+                     zone_count = self._robust_zone_total(zone_analysis[zone_name])
+                     if zone_count >= threshold:
+                         alerts.append({
+                             "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                             "alert_id": f"alert_zone_{zone_name}_{frame_id}",
+                             "incident_category": f"{self.CASE_TYPE}_{zone_name}",
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                               getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                             }
+                         })
+
+         return alerts
+
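The nested get_trend helper above classifies the recent severity history (values 0-3 appended by _generate_incidents) as ascending or descending. A standalone sketch (not part of the diff), with the helper's implicit fall-through made explicit as None:

```python
# Standalone version of the trend classification used for the "ascending" alert flag.
def get_trend(data, lookback=900, threshold=0.6):
    window = data[-lookback:] if len(data) >= lookback else data
    if len(window) < 2:
        return True
    increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
    ratio = increasing / (len(window) - 1)
    if ratio >= threshold:
        return True
    if ratio <= (1 - threshold):
        return False
    return None  # mixed history: neither clearly ascending nor descending

print(get_trend([0, 1, 1, 2, 3]))  # True  -> severity is trending upward
print(get_trend([3, 2, 2, 1, 0]))  # False -> severity is trending downward
```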
873
+ def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: ProximityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
874
+ """Generate standardized business analytics for the agg_summary structure."""
875
+ if is_empty:
876
+ return []
877
+ business_analytics = []
878
+
879
+ total_people = counting_summary.get("total_objects", 0)
880
+
881
+ # Get camera info using standardized method
882
+ camera_info = self.get_camera_info_from_stream(stream_info)
883
+
884
+ if total_people > 0 or config.enable_analytics:
885
+ # Calculate analytics statistics
886
+ analytics_stats = {
887
+ "people_count": total_people,
888
+ "unique_people_count": self.get_total_count(),
889
+ "current_frame_count": self.get_current_frame_count()
890
+ }
891
+
892
+ # Add zone analytics if available
893
+ if zone_analysis:
894
+ zone_stats = {}
895
+ for zone_name, zone_count in zone_analysis.items():
896
+ zone_total = self._robust_zone_total(zone_count)
897
+ zone_stats[f"{zone_name}_occupancy"] = zone_total
898
+ analytics_stats.update(zone_stats)
899
+
900
+ # Generate human text for analytics
901
+ current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
902
+ start_timestamp = self._get_start_timestamp_str(stream_info)
903
+
904
+ analytics_human_text = self.generate_analytics_human_text(
905
+ "people_counting_analytics", analytics_stats, current_timestamp, start_timestamp
906
+ )
907
+
908
+ # Create business analytics using standardized method
909
+ analytics = self.create_business_analytics(
910
+ "people_counting_analytics", analytics_stats, analytics_human_text, camera_info
911
+ )
912
+ business_analytics.append(analytics)
913
+
914
+ return business_analytics
915
+
916
+ def _generate_summary(self, _summary: dict, incidents: List, tracking_stats: List, business_analytics: List, _alerts: List) -> List[Dict[str, str]]:
917
+ """
918
+ Generate a human-readable summary mapping for the incidents, tracking statistics, business analytics, and alerts.
919
+ """
920
+ lines = {}
921
+ lines["Application Name"] = self.CASE_TYPE
922
+ lines["Application Version"] = self.CASE_VERSION
923
+ if len(incidents) > 0:
924
+ lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
925
+ if len(tracking_stats) > 0:
926
+ lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
927
+ if len(business_analytics) > 0:
928
+ lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
929
+
930
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
931
+ lines["Summary"] = "No Summary Data"
932
+
933
+ return [lines]
934
+
935
+ def _calculate_metrics(self, counting_summary: Dict, zone_analysis: Dict,
936
+ config: ProximityConfig, context: ProcessingContext) -> Dict[str, Any]:
937
+ """Calculate detailed metrics for analytics."""
938
+ total_people = counting_summary.get("total_objects", 0)
939
+
940
+ metrics = {
941
+ "total_people": total_people,
942
+ "processing_time": context.processing_time or 0.0,
943
+ "input_format": context.input_format.value,
944
+ "confidence_threshold": config.confidence_threshold,
945
+ "zones_analyzed": len(zone_analysis),
946
+ "detection_rate": 0.0,
947
+ "coverage_percentage": 0.0
948
+ }
949
+
950
+ # Calculate detection rate
951
+ if config.time_window_minutes and config.time_window_minutes > 0:
952
+ metrics["detection_rate"] = (total_people / config.time_window_minutes) * 60
953
+
954
+ # Calculate zone coverage
955
+ if zone_analysis and total_people > 0:
956
+ people_in_zones = 0
957
+ for zone_counts in zone_analysis.values():
958
+ if isinstance(zone_counts, dict):
959
+ for v in zone_counts.values():
960
+ if isinstance(v, int):
961
+ people_in_zones += v
962
+ elif isinstance(v, list):
963
+ people_in_zones += len(v)
964
+ elif isinstance(zone_counts, list):
965
+ people_in_zones += len(zone_counts)
966
+ elif isinstance(zone_counts, int):
967
+ people_in_zones += zone_counts
968
+ metrics["coverage_percentage"] = (people_in_zones / total_people) * 100
969
+
970
+ # Unique tracking metrics
971
+ if config.enable_unique_counting:
972
+ unique_count = self._count_unique_tracks(counting_summary, config)
973
+ if unique_count is not None:
974
+ metrics["unique_people"] = unique_count
975
+ metrics["tracking_efficiency"] = (unique_count / total_people) * 100 if total_people > 0 else 0
976
+
977
+ # Per-zone metrics
978
+ if zone_analysis:
979
+ zone_metrics = {}
980
+ for zone_name, zone_counts in zone_analysis.items():
981
+ # Robustly sum counts, handling dicts with int or list values
982
+ if isinstance(zone_counts, dict):
983
+ zone_total = 0
984
+ for v in zone_counts.values():
985
+ if isinstance(v, int):
986
+ zone_total += v
987
+ elif isinstance(v, list):
988
+ zone_total += len(v)
989
+ elif isinstance(zone_counts, list):
990
+ zone_total = len(zone_counts)
991
+ elif isinstance(zone_counts, int):
992
+ zone_total = zone_counts
993
+ else:
994
+ zone_total = 0
995
+ zone_metrics[zone_name] = {
996
+ "count": zone_total,
997
+ "percentage": (zone_total / total_people) * 100 if total_people > 0 else 0
998
+ }
999
+ metrics["zone_metrics"] = zone_metrics
1000
+
1001
+ return metrics
1002
+
1003
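As a worked example of the rates computed above (illustrative numbers, not from the source): with 12 people detected and `time_window_minutes=60`, `detection_rate` is (12 / 60) * 60 = 12 people per hour; if 9 of those 12 fall inside zones, `coverage_percentage` is (9 / 12) * 100 = 75%.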
+ def _extract_predictions(self, data: Any) -> List[Dict[str, Any]]:
1004
+ """Extract predictions from processed data for API compatibility."""
1005
+ predictions = []
1006
+
1007
+ try:
1008
+ if isinstance(data, list):
1009
+ # Detection format
1010
+ for item in data:
1011
+ prediction = self._normalize_prediction(item)
1012
+ if prediction:
1013
+ predictions.append(prediction)
1014
+
1015
+ elif isinstance(data, dict):
1016
+ # Frame-based or tracking format
1017
+ for frame_id, items in data.items():
1018
+ if isinstance(items, list):
1019
+ for item in items:
1020
+ prediction = self._normalize_prediction(item)
1021
+ if prediction:
1022
+ prediction["frame_id"] = frame_id
1023
+ predictions.append(prediction)
1024
+
1025
+ except Exception as e: # noqa: BLE001
1026
+ self.logger.warning("Failed to extract predictions: %s", str(e))
1027
+
1028
+ return predictions
1029
+
1030
+ def _normalize_prediction(self, item: Dict[str, Any]) -> Dict[str, Any]:
1031
+ """Normalize a single prediction item."""
1032
+ if not isinstance(item, dict):
1033
+ return {}
1034
+
1035
+ return {
1036
+ "category": item.get("category", item.get("class", "unknown")),
1037
+ "confidence": item.get("confidence", item.get("score", 0.0)),
1038
+ "bounding_box": item.get("bounding_box", item.get("bbox", {})),
1039
+ "track_id": item.get("track_id")
1040
+ }
1041
+
1042
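An illustrative input showing the key fallbacks `_normalize_prediction` applies (the sample values are made up):

```python
# Raw detection using the alternate key names ("class", "score", "bbox"):
raw = {"class": "person", "score": 0.87, "bbox": {"xmin": 10, "ymin": 20, "xmax": 50, "ymax": 120}}

normalized = {
    "category": raw.get("category", raw.get("class", "unknown")),   # -> "person"
    "confidence": raw.get("confidence", raw.get("score", 0.0)),     # -> 0.87
    "bounding_box": raw.get("bounding_box", raw.get("bbox", {})),   # -> the bbox dict
    "track_id": raw.get("track_id"),                                # -> None (absent)
}
```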
+ def _get_detections_with_confidence(self, counting_summary: Dict) -> List[Dict]:
1043
+ """Extract detection items with confidence scores."""
1044
+ return counting_summary.get("detections", [])
1045
+
1046
+ def _count_unique_tracks(self, counting_summary: Dict, config: ProximityConfig = None) -> Optional[int]:
1047
+ """Count unique tracks if tracking is enabled."""
1048
+ # Always update tracking state regardless of enable_unique_counting setting
1049
+ self._update_tracking_state(counting_summary)
1050
+
1051
+ # Only return the count if unique counting is enabled
1052
+ if config and config.enable_unique_counting:
1053
+ return self._total_count if self._total_count > 0 else None
1054
+ else:
1055
+ return None
1056
+
1057
+ def _update_tracking_state(self, counting_summary: Dict) -> None:
1058
+ """Update tracking state with current frame data (always called)."""
1059
+ detections = self._get_detections_with_confidence(counting_summary)
1060
+
1061
+ if not detections:
1062
+ return
1063
+
1064
+ # Map raw tracker IDs to canonical IDs to avoid duplicate counting
1065
+ current_frame_tracks: Set[Any] = set()
1066
+
1067
+ for detection in detections:
1068
+ raw_track_id = detection.get("track_id")
1069
+ if raw_track_id is None:
1070
+ continue
1071
+
1072
+ bbox = detection.get("bounding_box", detection.get("bbox"))
1073
+ if not bbox:
1074
+ continue
1075
+
1076
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
1077
+
1078
+ # Propagate canonical ID so that downstream logic (including zone
1079
+ # tracking and event generation) operates on the de-duplicated ID.
1080
+ detection["track_id"] = canonical_id
1081
+ current_frame_tracks.add(canonical_id)
1082
+
1083
+ # Determine which canonical IDs are genuinely new before merging them into the
1084
+ # running set, so the debug logging below reports only newly added tracks.
1085
+ new_tracks = current_frame_tracks - self._total_track_ids
1086
+ # Update total track IDs with new canonical IDs from current frame
1087
+ self._total_track_ids.update(current_frame_tracks)
1088
+ self._current_frame_track_ids = current_frame_tracks
1089
+
1090
+ # Update total count
1091
+ self._total_count = len(self._total_track_ids)
1092
+ self._last_update_time = time.time()
1093
+
1094
+ # Log tracking state updates
1095
+ if len(current_frame_tracks) > 0:
1096
+ # new_tracks was computed above, before self._total_track_ids was updated
1097
+ if new_tracks:
1098
+ self.logger.debug(
1099
+ "Tracking state updated: %s new canonical track IDs added, total unique tracks: %s",
1100
+ len(new_tracks), self._total_count)
1101
+ else:
1102
+ self.logger.debug(
1103
+ "Tracking state updated: %s current frame canonical tracks, total unique tracks: %s",
1104
+ len(current_frame_tracks), self._total_count)
1105
+
1106
+ def get_total_count(self) -> int:
1107
+ """Get the total count of unique people tracked across all calls."""
1108
+ return self._total_count
1109
+
1110
+ def get_current_frame_count(self) -> int:
1111
+ """Get the count of people in the current frame."""
1112
+ return len(self._current_frame_track_ids)
1113
+
1114
+ def get_total_frames_processed(self) -> int:
1115
+ """Get the total number of frames processed across all calls."""
1116
+ return self._total_frame_counter
1117
+
1118
+ def set_global_frame_offset(self, offset: int) -> None:
1119
+ """Set the global frame offset for video chunk processing."""
1120
+ self._global_frame_offset = offset
1121
+ self.logger.info("Global frame offset set to: %s", offset)
1122
+
1123
+ def get_global_frame_offset(self) -> int:
1124
+ """Get the current global frame offset."""
1125
+ return self._global_frame_offset
1126
+
1127
+ def update_global_frame_offset(self, frames_in_chunk: int) -> None:
1128
+ """Update global frame offset after processing a chunk."""
1129
+ old_offset = self._global_frame_offset
1130
+ self._global_frame_offset += frames_in_chunk
1131
+ self.logger.info("Global frame offset updated: %s -> %s (added %s frames)", old_offset, self._global_frame_offset, frames_in_chunk)
1132
+
1133
+ def get_global_frame_id(self, local_frame_id: str) -> str:
1134
+ """Convert local frame ID to global frame ID."""
1135
+ try:
1136
+ # Try to convert local_frame_id to integer
1137
+ local_frame_num = int(local_frame_id)
1138
+ global_frame_num = local_frame_num  # NOTE: adding self._global_frame_offset is currently disabled
1139
+ return str(global_frame_num)
1140
+ except (ValueError, TypeError):
1141
+ # If local_frame_id is not a number (e.g., timestamp), return as is
1142
+ return local_frame_id
1143
+
1144
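A hedged sketch of how the frame-offset helpers above are meant to be used for chunked video; `usecase` is a placeholder instance of this class and the chunk sizes are illustrative:

```python
# Hypothetical chunked-video flow ("usecase" stands in for an instance of this class).
usecase.set_global_frame_offset(0)
for frames_in_chunk in (300, 300, 240):        # three chunks of a longer video
    # ... process the chunk, passing local frame ids "0".."N-1" ...
    usecase.update_global_frame_offset(frames_in_chunk)

print(usecase.get_global_frame_offset())       # 840
# Note: get_global_frame_id("10") currently returns "10" unchanged while the
# offset addition in that method stays disabled.
```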
+ def get_track_ids_info(self) -> Dict[str, Any]:
1145
+ """Get detailed information about track IDs."""
1146
+ return {
1147
+ "total_count": self._total_count,
1148
+ "current_frame_count": len(self._current_frame_track_ids),
1149
+ "total_unique_track_ids": len(self._total_track_ids),
1150
+ "current_frame_track_ids": list(self._current_frame_track_ids),
1151
+ "last_update_time": self._last_update_time,
1152
+ "total_frames_processed": self._total_frame_counter
1153
+ }
1154
+
1155
+ def get_tracking_debug_info(self) -> Dict[str, Any]:
1156
+ """Get detailed debugging information about tracking state."""
1157
+ return {
1158
+ "total_track_ids": list(self._total_track_ids),
1159
+ "current_frame_track_ids": list(self._current_frame_track_ids),
1160
+ "total_count": self._total_count,
1161
+ "current_frame_count": len(self._current_frame_track_ids),
1162
+ "total_frames_processed": self._total_frame_counter,
1163
+ "last_update_time": self._last_update_time,
1164
+ "zone_current_track_ids": {zone: list(tracks) for zone, tracks in self._zone_current_track_ids.items()},
1165
+ "zone_total_track_ids": {zone: list(tracks) for zone, tracks in self._zone_total_track_ids.items()},
1166
+ "zone_current_counts": self._zone_current_counts.copy(),
1167
+ "zone_total_counts": self._zone_total_counts.copy(),
1168
+ "global_frame_offset": self._global_frame_offset,
1169
+ "frames_in_current_chunk": self._frames_in_current_chunk
1170
+ }
1171
+
1172
+ def get_frame_info(self) -> Dict[str, Any]:
1173
+ """Get detailed information about frame processing and global frame offset."""
1174
+ return {
1175
+ "global_frame_offset": self._global_frame_offset,
1176
+ "total_frames_processed": self._total_frame_counter,
1177
+ "frames_in_current_chunk": self._frames_in_current_chunk,
1178
+ "next_global_frame": self._global_frame_offset + self._frames_in_current_chunk
1179
+ }
1180
+
1181
+ def reset_tracking_state(self) -> None:
1182
+ """
1183
+ WARNING: This completely resets ALL tracking data including cumulative totals!
1184
+
1185
+ This should ONLY be used when:
1186
+ - Starting a completely new tracking session
1187
+ - Switching to a different video/stream
1188
+ - Manual reset requested by user
1189
+
1190
+ For clearing expired/stale tracks, use clear_current_frame_tracking() instead.
1191
+ """
1192
+ self._total_track_ids.clear()
1193
+ self._current_frame_track_ids.clear()
1194
+ self._total_count = 0
1195
+ self._last_update_time = time.time()
1196
+
1197
+ # Clear zone tracking data
1198
+ self._zone_current_track_ids.clear()
1199
+ self._zone_total_track_ids.clear()
1200
+ self._zone_current_counts.clear()
1201
+ self._zone_total_counts.clear()
1202
+
1203
+ # Reset frame counter and global frame offset
1204
+ self._total_frame_counter = 0
1205
+ self._global_frame_offset = 0
1206
+ self._frames_in_current_chunk = 0
1207
+
1208
+ # Clear aliasing information
1209
+ self._canonical_tracks.clear()
1210
+ self._track_aliases.clear()
1211
+ self._tracking_start_time = None
1212
+
1213
+ self.logger.warning("FULL tracking state reset - all track IDs, zone data, frame counter, and global frame offset cleared. Cumulative totals lost!")
1214
+
1215
+ def clear_current_frame_tracking(self) -> int:
1216
+ """
1217
+ MANUAL USE ONLY: Clear only current frame tracking data while preserving cumulative totals.
1218
+
1219
+ This method is NOT called automatically anywhere in the code.
1220
+
1221
+ This is the SAFE method to use for manual clearing of stale/expired current frame data.
1222
+ The cumulative total (self._total_count) is always preserved.
1223
+
1224
+ In streaming scenarios, you typically don't need to call this at all.
1225
+
1226
+ Returns:
1227
+ Number of current frame tracks cleared
1228
+ """
1229
+ old_current_count = len(self._current_frame_track_ids)
1230
+ self._current_frame_track_ids.clear()
1231
+
1232
+ # Clear current zone tracking (but keep total zone tracking)
1233
+ cleared_zone_tracks = 0
1234
+ for zone_name in list(self._zone_current_track_ids.keys()):
1235
+ cleared_zone_tracks += len(self._zone_current_track_ids[zone_name])
1236
+ self._zone_current_track_ids[zone_name].clear()
1237
+ self._zone_current_counts[zone_name] = 0
1238
+
1239
+ # Update timestamp
1240
+ self._last_update_time = time.time()
1241
+
1242
+ self.logger.info("Cleared %s current frame tracks and %s zone current tracks. Cumulative total preserved: %s", old_current_count, cleared_zone_tracks, self._total_count)
1243
+ return old_current_count
1244
+
1245
+ def reset_frame_counter(self) -> None:
1246
+ """Reset only the frame counter."""
1247
+ old_count = self._total_frame_counter
1248
+ self._total_frame_counter = 0
1249
+ self.logger.info("Frame counter reset from %s to 0", old_count)
1250
+
1251
+ def clear_expired_tracks(self, max_age_seconds: float = 300.0) -> int:
1252
+ """
1253
+ MANUAL USE ONLY: Clear current frame tracking data if no updates for a while.
1254
+
1255
+ This method is NOT called automatically anywhere in the code.
1256
+ It's provided as a utility function for manual cleanup if needed.
1257
+
1258
+ In streaming scenarios, you typically don't need to call this at all.
1259
+ The cumulative total should keep growing as new unique people are detected.
1260
+
1261
+ This method only clears current frame tracking data while preserving
1262
+ the cumulative total count. The cumulative total should never decrease.
1263
+
1264
+ Args:
1265
+ max_age_seconds: Maximum age in seconds before clearing current frame tracks
1266
+
1267
+ Returns:
1268
+ Number of current frame tracks cleared
1269
+ """
1270
+ current_time = time.time()
1271
+ if current_time - self._last_update_time > max_age_seconds:
1272
+ # Use the safe method that preserves cumulative totals
1273
+ cleared_count = self.clear_current_frame_tracking()
1274
+ self.logger.info("Manual cleanup: cleared %s expired current frame tracks (age > %ss)", cleared_count, max_age_seconds)
1275
+ return cleared_count
1276
+ return 0
1277
+
1278
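A short usage sketch contrasting the three maintenance methods documented above (`usecase` is a placeholder instance of this class):

```python
# Safe: drops only current-frame tracks; cumulative totals and zone totals survive.
usecase.clear_current_frame_tracking()

# Optional manual expiry: same effect, but only if nothing was updated for 5 minutes.
usecase.clear_expired_tracks(max_age_seconds=300.0)

# Destructive: wipes track ids, zone data, frame counters and the global offset.
# Use only when starting a new session or switching to a different video/stream.
usecase.reset_tracking_state()
```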
+ def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: ProximityConfig) -> Dict[str, Dict[str, Any]]:
1279
+ """
1280
+ Update zone tracking with current frame data.
1281
+
1282
+ Args:
1283
+ zone_analysis: Current zone analysis results
1284
+ detections: List of detections with track IDs
1285
+ config: proximity detection configuration with zone polygons
1286
+
1287
+ Returns:
1288
+ Enhanced zone analysis with tracking information
1289
+ """
1290
+ if not zone_analysis or not config.zone_config or not config.zone_config.zones:
1291
+ return {}
1292
+
1293
+ enhanced_zone_analysis = {}
1294
+ zones = config.zone_config.zones
1295
+
1296
+ # Get current frame track IDs in each zone
1297
+ current_frame_zone_tracks = {}
1298
+
1299
+ # Initialize zone tracking for all zones
1300
+ for zone_name in zones.keys():
1301
+ current_frame_zone_tracks[zone_name] = set()
1302
+ if zone_name not in self._zone_current_track_ids:
1303
+ self._zone_current_track_ids[zone_name] = set()
1304
+ if zone_name not in self._zone_total_track_ids:
1305
+ self._zone_total_track_ids[zone_name] = set()
1306
+
1307
+ # Check each detection against each zone
1308
+ for detection in detections:
1309
+ track_id = detection.get("track_id")
1310
+ if track_id is None:
1311
+ continue
1312
+
1313
+ # Get detection bbox
1314
+ bbox = detection.get("bounding_box", detection.get("bbox"))
1315
+ if not bbox:
1316
+ continue
1317
+
1318
+ # Get detection center point
1319
+ center_point = get_bbox_bottom25_center(bbox)  # alternative: get_bbox_center(bbox)
1320
+
1321
+ # Check which zone this detection is in using actual zone polygons
1322
+ for zone_name, zone_polygon in zones.items():
1323
+ # Convert polygon points to tuples for point_in_polygon function
1324
+ # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
1325
+ polygon_points = [(point[0], point[1]) for point in zone_polygon]
1326
+
1327
+ # Check if detection center is inside the zone polygon using ray casting algorithm
1328
+ if point_in_polygon(center_point, polygon_points):
1329
+ current_frame_zone_tracks[zone_name].add(track_id)
1330
+
1331
+ # Update zone tracking for each zone
1332
+ for zone_name, zone_counts in zone_analysis.items():
1333
+ # Get current frame tracks for this zone
1334
+ current_tracks = current_frame_zone_tracks.get(zone_name, set())
1335
+
1336
+ # Update current zone tracks
1337
+ self._zone_current_track_ids[zone_name] = current_tracks
1338
+
1339
+ # Update total zone tracks (accumulate all track IDs that have been in this zone)
1340
+ self._zone_total_track_ids[zone_name].update(current_tracks)
1341
+
1342
+ # Update counts
1343
+ self._zone_current_counts[zone_name] = len(current_tracks)
1344
+ self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
1345
+
1346
+ # Create enhanced zone analysis
1347
+ enhanced_zone_analysis[zone_name] = {
1348
+ "current_count": self._zone_current_counts[zone_name],
1349
+ "total_count": self._zone_total_counts[zone_name],
1350
+ "current_track_ids": list(current_tracks),
1351
+ "total_track_ids": list(self._zone_total_track_ids[zone_name]),
1352
+ "original_counts": zone_counts # Preserve original zone counts
1353
+ }
1354
+
1355
+ return enhanced_zone_analysis
1356
+
1357
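For reference, an illustrative zone definition in the polygon format `_update_zone_tracking` consumes (zone names and coordinates are made up; the same shape is declared in `get_config_schema` further below):

```python
# zone name -> list of [x, y] vertices (at least 3 points per polygon)
zones = {
    "entrance": [[0, 0], [400, 0], [400, 300], [0, 300]],
    "checkout": [[400, 0], [800, 0], [800, 300], [400, 300]],
}

# A detection is attributed to a zone when the bottom-25% center of its bounding
# box lies inside the polygon (ray-casting point-in-polygon test on these vertices).
polygon_points = [(x, y) for x, y in zones["entrance"]]
```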
+ def get_zone_tracking_info(self) -> Dict[str, Dict[str, Any]]:
1358
+ """Get detailed zone tracking information."""
1359
+ return {
1360
+ zone_name: {
1361
+ "current_count": self._zone_current_counts.get(zone_name, 0),
1362
+ "total_count": self._zone_total_counts.get(zone_name, 0),
1363
+ "current_track_ids": list(self._zone_current_track_ids.get(zone_name, set())),
1364
+ "total_track_ids": list(self._zone_total_track_ids.get(zone_name, set()))
1365
+ }
1366
+ for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
1367
+ }
1368
+
1369
+ def get_zone_current_count(self, zone_name: str) -> int:
1370
+ """Get current count of people in a specific zone."""
1371
+ return self._zone_current_counts.get(zone_name, 0)
1372
+
1373
+ def get_zone_total_count(self, zone_name: str) -> int:
1374
+ """Get total count of people who have been in a specific zone."""
1375
+ return self._zone_total_counts.get(zone_name, 0)
1376
+
1377
+ def get_all_zone_counts(self) -> Dict[str, Dict[str, int]]:
1378
+ """Get current and total counts for all zones."""
1379
+ return {
1380
+ zone_name: {
1381
+ "current": self._zone_current_counts.get(zone_name, 0),
1382
+ "total": self._zone_total_counts.get(zone_name, 0)
1383
+ }
1384
+ for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
1385
+ }
1386
+
1387
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
1388
+ """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
1389
+ dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
1390
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1391
+
1392
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
1393
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
1394
+ hours = int(timestamp // 3600)
1395
+ minutes = int((timestamp % 3600) // 60)
1396
+ seconds = float(timestamp % 60)
1397
+ return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
1398
+
1399
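A small, self-contained illustration of the two timestamp formats produced above (the epoch value and offset are arbitrary):

```python
from datetime import datetime, timezone

# Stream format (YYYY:MM:DD HH:MM:SS) for a UTC epoch timestamp:
dt = datetime.fromtimestamp(1700000000.0, tz=timezone.utc)
print(dt.strftime('%Y:%m:%d %H:%M:%S'))   # 2023:11:14 22:13:20

# Video-chunk format (HH:MM:SS.ss) for an offset 3723.5 seconds into the file:
t = 3723.5
print(f"{int(t // 3600):02d}:{int((t % 3600) // 60):02d}:{t % 60:05.2f}")   # 01:02:03.50
```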
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: str=None) -> str:
1400
+ """Get formatted current timestamp based on stream type."""
1401
+ if not stream_info:
1402
+ return "00:00:00.00"
1403
+ # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
1404
+ if precision:
1405
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1406
+ if frame_id:
1407
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1408
+ else:
1409
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1410
+ stream_time_str = self._format_timestamp_for_video(start_time)
1411
+ return stream_time_str
1412
+ else:
1413
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1414
+
1415
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1416
+ if frame_id:
1417
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1418
+ else:
1419
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1420
+ stream_time_str = self._format_timestamp_for_video(start_time)
1421
+ return stream_time_str
1422
+ else:
1423
+ # For streams, use stream_time from stream_info
1424
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1425
+ if stream_time_str:
1426
+ # Parse the high precision timestamp string to get timestamp
1427
+ try:
1428
+ # Remove " UTC" suffix and parse
1429
+ timestamp_str = stream_time_str.replace(" UTC", "")
1430
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1431
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
1432
+ return self._format_timestamp_for_stream(timestamp)
1433
+ except Exception: # noqa: BLE001
1434
+ # Fallback to current time if parsing fails
1435
+ return self._format_timestamp_for_stream(time.time())
1436
+ else:
1437
+ return self._format_timestamp_for_stream(time.time())
1438
+
1439
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
1440
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
1441
+ if not stream_info:
1442
+ return "00:00:00"
1443
+
1444
+ if precision:
1445
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1446
+ return "00:00:00"
1447
+ else:
1448
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1449
+
1450
+
1451
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1452
+ # If video format, start from 00:00:00
1453
+ return "00:00:00"
1454
+ else:
1455
+ # For streams, use tracking start time or current time with minutes/seconds reset
1456
+ if self._tracking_start_time is None:
1457
+ # Try to extract timestamp from stream_time string
1458
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1459
+ if stream_time_str:
1460
+ try:
1461
+ # Remove " UTC" suffix and parse
1462
+ timestamp_str = stream_time_str.replace(" UTC", "")
1463
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1464
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
1465
+ except Exception: # noqa: BLE001
1466
+ # Fallback to current time if parsing fails
1467
+ self._tracking_start_time = time.time()
1468
+ else:
1469
+ self._tracking_start_time = time.time()
1470
+
1471
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
1472
+ # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
1473
+ dt = dt.replace(minute=0, second=0, microsecond=0)
1474
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1475
+
1476
+ def _extract_frame_id_from_tracking(self, frame_detections: List[Dict], frame_key: str) -> str:
1477
+ """Extract frame ID from tracking data."""
1478
+ # Priority 1: Check if detections have frame information
1479
+ if frame_detections and len(frame_detections) > 0:
1480
+ first_detection = frame_detections[0]
1481
+ if "frame" in first_detection:
1482
+ return str(first_detection["frame"])
1483
+ elif "frame_id" in first_detection:
1484
+ return str(first_detection["frame_id"])
1485
+ # Priority 2: Use frame_key from input data
1486
+ return str(frame_key)
1487
+
1488
+ def _robust_zone_total(self, zone_count):
1489
+ """Helper method to robustly calculate zone total."""
1490
+ if isinstance(zone_count, dict):
1491
+ total = 0
1492
+ for v in zone_count.values():
1493
+ if isinstance(v, int):
1494
+ total += v
1495
+ elif isinstance(v, list):
1496
+ total += len(v)
1497
+ return total
1498
+ elif isinstance(zone_count, list):
1499
+ return len(zone_count)
1500
+ elif isinstance(zone_count, int):
1501
+ return zone_count
1502
+ else:
1503
+ return 0
1504
+
1505
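Illustrative inputs (made up) showing the three value shapes `_robust_zone_total` accepts and what each yields:

```python
examples = [
    4,                                           # plain int            -> 4
    [{"track_id": 1}, {"track_id": 2}],          # list of detections   -> 2 (its length)
    {"person": 3, "staff": [{"track_id": 7}]},   # dict of mixed values -> 3 + 1 = 4
]
```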
+ # --------------------------------------------------------------------- #
1506
+ # Private helpers for canonical track aliasing #
1507
+ # --------------------------------------------------------------------- #
1508
+
1509
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
1510
+ """Compute IoU between two bounding boxes that may be either list or dict.
1511
+ Falls back to geometry_utils.calculate_iou when both boxes are dicts.
1512
+ """
1513
+ # Handle dict format directly with calculate_iou (supports many keys)
1514
+ if isinstance(box1, dict) and isinstance(box2, dict):
1515
+ return calculate_iou(box1, box2)
1516
+
1517
+ # Helper to convert bbox (dict or list) to a list [x1,y1,x2,y2]
1518
+ def _bbox_to_list(bbox):
1519
+ if bbox is None:
1520
+ return []
1521
+ if isinstance(bbox, list):
1522
+ return bbox[:4] if len(bbox) >= 4 else []
1523
+ if isinstance(bbox, dict):
1524
+ if "xmin" in bbox:
1525
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
1526
+ if "x1" in bbox:
1527
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
1528
+ # Fallback: take first four values in insertion order
1529
+ values = list(bbox.values())
1530
+ return values[:4] if len(values) >= 4 else []
1531
+ # Unsupported type
1532
+ return []
1533
+
1534
+ list1 = _bbox_to_list(box1)
1535
+ list2 = _bbox_to_list(box2)
1536
+
1537
+ if len(list1) < 4 or len(list2) < 4:
1538
+ return 0.0
1539
+
1540
+ x1_min, y1_min, x1_max, y1_max = list1
1541
+ x2_min, y2_min, x2_max, y2_max = list2
1542
+
1543
+ # Ensure correct ordering of coordinates
1544
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
1545
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
1546
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
1547
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
1548
+
1549
+ inter_x_min = max(x1_min, x2_min)
1550
+ inter_y_min = max(y1_min, y2_min)
1551
+ inter_x_max = min(x1_max, x2_max)
1552
+ inter_y_max = min(y1_max, y2_max)
1553
+
1554
+ inter_w = max(0.0, inter_x_max - inter_x_min)
1555
+ inter_h = max(0.0, inter_y_max - inter_y_min)
1556
+ inter_area = inter_w * inter_h
1557
+
1558
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
1559
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
1560
+ union_area = area1 + area2 - inter_area
1561
+
1562
+ return (inter_area / union_area) if union_area > 0 else 0.0
1563
+
1564
+ def _get_canonical_id(self, raw_id: Any) -> Any:
1565
+ """Return the canonical ID for a raw tracker-generated ID."""
1566
+ return self._track_aliases.get(raw_id, raw_id)
1567
+
1568
+ def _merge_or_register_track(self, raw_id: Any, bbox: List[float]) -> Any:
1569
+ """Merge the raw track into an existing canonical track if possible,
1570
+ otherwise register it as a new canonical track. Returns the canonical
1571
+ ID to use for counting.
1572
+ """
1573
+ now = time.time()
1574
+
1575
+ # Fast path: raw_id already mapped
1576
+ if raw_id in self._track_aliases:
1577
+ canonical_id = self._track_aliases[raw_id]
1578
+ track_info = self._canonical_tracks.get(canonical_id)
1579
+ if track_info is not None:
1580
+ track_info["last_bbox"] = bbox
1581
+ track_info["last_update"] = now
1582
+ track_info["raw_ids"].add(raw_id)
1583
+ return canonical_id
1584
+
1585
+ # Attempt to merge with an existing canonical track
1586
+ for canonical_id, info in self._canonical_tracks.items():
1587
+ # Only consider recently updated tracks to avoid stale matches
1588
+ if now - info["last_update"] > self._track_merge_time_window:
1589
+ continue
1590
+
1591
+ iou = self._compute_iou(bbox, info["last_bbox"])
1592
+ if iou >= self._track_merge_iou_threshold:
1593
+ # Merge raw_id into canonical track
1594
+ self._track_aliases[raw_id] = canonical_id
1595
+ info["last_bbox"] = bbox
1596
+ info["last_update"] = now
1597
+ info["raw_ids"].add(raw_id)
1598
+ self.logger.debug(
1599
+ "Merged raw track %s into canonical track %s (IoU=%.2f)", raw_id, canonical_id, iou)
1600
+ return canonical_id
1601
+
1602
+ # No match found – create a new canonical track
1603
+ canonical_id = raw_id
1604
+ self._track_aliases[raw_id] = canonical_id
1605
+ self._canonical_tracks[canonical_id] = {
1606
+ "last_bbox": bbox,
1607
+ "last_update": now,
1608
+ "raw_ids": {raw_id},
1609
+ }
1610
+ self.logger.debug("Registered new canonical track %s", canonical_id)
1611
+ return canonical_id
1612
+
1613
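A hedged sketch of the aliasing behaviour `_merge_or_register_track` implements: when a tracker drops and re-issues an ID for the same person, the new raw ID is folded into the existing canonical track if the boxes overlap enough within the merge window. `usecase` is a placeholder instance; the outcome assumes the instance's `_track_merge_iou_threshold` and `_track_merge_time_window` (initialized elsewhere) are satisfied:

```python
# Frame N: tracker emits raw id 7.
cid = usecase._merge_or_register_track(7, [100, 100, 160, 260])
# cid == 7: first sighting becomes the canonical id.

# Frame N+1: the tracker loses the person and re-issues id 12 for nearly the same box.
cid = usecase._merge_or_register_track(12, [102, 101, 161, 259])
# cid == 7 (assuming the IoU clears the merge threshold within the time window),
# so unique-person counting sees one canonical id instead of two raw ids.
```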
+ def _format_timestamp(self, timestamp: float) -> str:
1614
+ """Format a timestamp for human-readable output."""
1615
+ return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
1616
+
1617
+ def _get_tracking_start_time(self) -> str:
1618
+ """Get the tracking start time, formatted as a string."""
1619
+ if self._tracking_start_time is None:
1620
+ return "N/A"
1621
+ return self._format_timestamp(self._tracking_start_time)
1622
+
1623
+ def _set_tracking_start_time(self) -> None:
1624
+ """Set the tracking start time to the current time."""
1625
+ self._tracking_start_time = time.time()
1626
+
1627
+ def get_config_schema(self) -> Dict[str, Any]:
1628
+ """Get configuration schema for proximity detection."""
1629
+ return {
1630
+ "type": "object",
1631
+ "properties": {
1632
+ "confidence_threshold": {
1633
+ "type": "number",
1634
+ "minimum": 0.0,
1635
+ "maximum": 1.0,
1636
+ "default": 0.5,
1637
+ "description": "Minimum confidence threshold for detections"
1638
+ },
1639
+ "enable_tracking": {
1640
+ "type": "boolean",
1641
+ "default": False,
1642
+ "description": "Enable tracking for unique counting"
1643
+ },
1644
+ "zone_config": {
1645
+ "type": "object",
1646
+ "properties": {
1647
+ "zones": {
1648
+ "type": "object",
1649
+ "additionalProperties": {
1650
+ "type": "array",
1651
+ "items": {
1652
+ "type": "array",
1653
+ "items": {"type": "number"},
1654
+ "minItems": 2,
1655
+ "maxItems": 2
1656
+ },
1657
+ "minItems": 3
1658
+ },
1659
+ "description": "Zone definitions as polygons"
1660
+ },
1661
+ "zone_confidence_thresholds": {
1662
+ "type": "object",
1663
+ "additionalProperties": {"type": "number", "minimum": 0.0, "maximum": 1.0},
1664
+ "description": "Per-zone confidence thresholds"
1665
+ }
1666
+ }
1667
+ },
1668
+ "person_categories": {
1669
+ "type": "array",
1670
+ "items": {"type": "string"},
1671
+ "default": ["person"],
1672
+ "description": "Category names that represent people"
1673
+ },
1674
+ "enable_unique_counting": {
1675
+ "type": "boolean",
1676
+ "default": True,
1677
+ "description": "Enable unique proximity detection using tracking"
1678
+ },
1679
+ "proximity_threshold_meters": {
1680
+ "type": "number",
1681
+ "minimum": 0.1,
1682
+ "default": 1.0,
1683
+ "description": "Distance threshold in meters to consider two people in proximity"
1684
+ },
1685
+ "meters_per_pixel": {
1686
+ "type": "number",
1687
+ "minimum": 0,
1688
+ "description": "Direct meters-per-pixel calibration override. If set, used for distance conversion."
1689
+ },
1690
+ "scene_width_meters": {
1691
+ "type": "number",
1692
+ "minimum": 0,
1693
+ "description": "Real-world width of the scene captured by the frame (meters). Used to derive meters-per-pixel with frame width."
1694
+ },
1695
+ "scene_height_meters": {
1696
+ "type": "number",
1697
+ "minimum": 0,
1698
+ "description": "Real-world height of the scene captured by the frame (meters). Used to derive meters-per-pixel with frame height."
1699
+ },
1700
+ "proximity_threshold_pixels": {
1701
+ "type": "number",
1702
+ "minimum": 1,
1703
+ "default": 400,
1704
+ "description": "Fallback pixel threshold if no calibration is available"
1705
+ },
1706
+ "proximity_iou_threshold": {
1707
+ "type": "number",
1708
+ "minimum": 0.0,
1709
+ "maximum": 1.0,
1710
+ "default": 0.1,
1711
+ "description": "IoU threshold for proximity detection using expanded bounding boxes"
1712
+ },
1713
+ "time_window_minutes": {
1714
+ "type": "integer",
1715
+ "minimum": 1,
1716
+ "default": 60,
1717
+ "description": "Time window for counting analysis in minutes"
1718
+ },
1719
+ "alert_config": {
1720
+ "type": "object",
1721
+ "properties": {
1722
+ "count_thresholds": {
1723
+ "type": "object",
1724
+ "additionalProperties": {"type": "integer", "minimum": 1},
1725
+ "description": "Count thresholds for alerts"
1726
+ },
1727
+ "occupancy_thresholds": {
1728
+ "type": "object",
1729
+ "additionalProperties": {"type": "integer", "minimum": 1},
1730
+ "description": "Zone occupancy thresholds for alerts"
1731
+ },
1732
+ "alert_type": {
1733
+ "type": "array",
1734
+ "items": {"type": "string"},
1735
+ "default": ["Default"],
1736
+ "description": "To pass the type of alert. EG: email, sms, etc."
1737
+ },
1738
+ "alert_value": {
1739
+ "type": "array",
1740
+ "items": {"type": "string"},
1741
+ "default": ["JSON"],
1742
+ "description": "Alert value to pass the value based on type. EG: email id if type is email."
1743
+ },
1744
+ "alert_incident_category": {
1745
+ "type": "array",
1746
+ "items": {"type": "string"},
1747
+ "default": ["Incident Detection Alert"],
1748
+ "description": "Group and name the Alert category Type"
1749
+ },
1750
+ }
1751
+ }
1752
+ },
1753
+ "required": ["confidence_threshold"],
1754
+ "additionalProperties": False
1755
+ }
1756
+
1757
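An illustrative configuration payload that should satisfy the schema returned above (all values are made up):

```python
config_payload = {
    "confidence_threshold": 0.5,
    "enable_tracking": True,
    "enable_unique_counting": True,
    "person_categories": ["person"],
    "proximity_threshold_meters": 1.0,
    "proximity_iou_threshold": 0.1,
    "time_window_minutes": 60,
    "zone_config": {
        "zones": {"entrance": [[0, 0], [400, 0], [400, 300], [0, 300]]},
        "zone_confidence_thresholds": {"entrance": 0.6},
    },
    "alert_config": {
        "count_thresholds": {"all": 10},
        "occupancy_thresholds": {"entrance": 3},
        "alert_type": ["Default"],
        "alert_value": ["JSON"],
    },
}
```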
+ def create_default_config(self, **overrides) -> ProximityConfig:
1758
+ """Create default configuration with optional overrides."""
1759
+ defaults = {
1760
+ "category": self.category,
1761
+ "usecase": self.name,
1762
+ "confidence_threshold": 0.5,
1763
+ "enable_tracking": False,
1764
+ "enable_analytics": True,
1765
+ "enable_unique_counting": True,
1766
+ "time_window_minutes": 60,
1767
+ "person_categories": ["person"],
1768
+ }
1769
+ defaults.update(overrides)
1770
+ return ProximityConfig(**defaults)
1771
+
1772
+ def _apply_smoothing(self, data: Any, config: ProximityConfig) -> Any:
1773
+ """Apply smoothing to tracking data if enabled."""
1774
+ if self.smoothing_tracker is None:
1775
+ smoothing_config = BBoxSmoothingConfig(
1776
+ smoothing_algorithm=config.smoothing_algorithm,
1777
+ window_size=config.smoothing_window_size,
1778
+ cooldown_frames=config.smoothing_cooldown_frames,
1779
+ confidence_threshold=config.confidence_threshold or 0.5,
1780
+ confidence_range_factor=config.smoothing_confidence_range_factor,
1781
+ enable_smoothing=True
1782
+ )
1783
+ self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
1784
+
1785
+ smoothed_data = bbox_smoothing(data, self.smoothing_tracker.config, self.smoothing_tracker)
1786
+ self.logger.debug("Applied bbox smoothing to tracking results")
1787
+ return smoothed_data
1788
+
1789
+ def _count_proximity_events_by_expanded_bbox(self, detections: List[Dict[str, Any]], config: ProximityConfig, stream_info: Optional[Dict[str, Any]] = None) -> int:
1790
+ """Count UNIQUE proximity events using expanded bounding boxes and IoU.
1791
+
1792
+ Rules:
1793
+ - Expand each bbox by 20% width and 10% height
1794
+ - Use IoU threshold to determine proximity between expanded boxes
1795
+ - Use track IDs when available to build stable (id1,id2) pairs
1796
+ - Count each pair once (i < j) using IoU between expanded boxes
1797
+ - Maintain a running set of unique canonical-ID pairs across frames to compute total unique proximity events
1798
+ """
1799
+ if not detections:
1800
+ return 0
1801
+
1802
+ # IoU threshold for proximity detection (configurable)
1803
+ proximity_iou_threshold = getattr(config, "proximity_iou_threshold", 0.1)
1804
+ overlap_iou_threshold = getattr(self, "_proximity_iou_duplicate_threshold", 0.5)
1805
+
1806
+ # Helper: convert bbox to xyxy list
1807
+ def _to_xyxy(bbox: Any) -> List[float]:
1808
+ if isinstance(bbox, list):
1809
+ if len(bbox) >= 4:
1810
+ return [float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3])]
1811
+ return []
1812
+ if isinstance(bbox, dict):
1813
+ if all(k in bbox for k in ("xmin", "ymin", "xmax", "ymax")):
1814
+ return [float(bbox["xmin"]), float(bbox["ymin"]), float(bbox["xmax"]), float(bbox["ymax"])]
1815
+ if all(k in bbox for k in ("x1", "y1", "x2", "y2")):
1816
+ return [float(bbox["x1"]), float(bbox["y1"]), float(bbox["x2"]), float(bbox["y2"])]
1817
+ # Fallback: take first four values
1818
+ vals = list(bbox.values())
1819
+ if len(vals) >= 4:
1820
+ return [float(vals[0]), float(vals[1]), float(vals[2]), float(vals[3])]
1821
+ return []
1822
+ return []
1823
+
1824
+ # Helper: expand bbox by given percentages
1825
+ def _expand_bbox(bbox_xyxy: List[float], width_expand: float = 0.2, height_expand: float = 0.1) -> List[float]:
1826
+ """Expand bbox by width_expand% width and height_expand% height."""
1827
+ if len(bbox_xyxy) < 4:
1828
+ return bbox_xyxy
1829
+
1830
+ x1, y1, x2, y2 = bbox_xyxy
1831
+ width = x2 - x1
1832
+ height = y2 - y1
1833
+
1834
+ # Calculate expansion amounts
1835
+ width_expansion = width * width_expand
1836
+ height_expansion = height * height_expand
1837
+
1838
+ # Expand bbox (expand outward from center)
1839
+ expanded_x1 = x1 - width_expansion / 2
1840
+ expanded_y1 = y1 - height_expansion / 2
1841
+ expanded_x2 = x2 + width_expansion / 2
1842
+ expanded_y2 = y2 + height_expansion / 2
1843
+
1844
+ return [expanded_x1, expanded_y1, expanded_x2, expanded_y2]
1845
+
1846
+ # Prepare tracked detections with expanded bboxes
1847
+ tracked_detections: List[Dict[str, Any]] = []
1848
+ for det in detections:
1849
+ bbox = _to_xyxy(det.get("bounding_box", det.get("bbox", {})))
1850
+ if not bbox:
1851
+ continue
1852
+
1853
+ # Expand the bbox
1854
+ expanded_bbox = _expand_bbox(bbox)
1855
+
1856
+ tracked_detections.append({
1857
+ "track_id": det.get("track_id"),
1858
+ "original_bbox": bbox,
1859
+ "expanded_bbox": expanded_bbox,
1860
+ "confidence": float(det.get("confidence", 1.0))
1861
+ })
1862
+
1863
+ # IoU-NMS to remove overlapping original boxes, keep highest confidence
1864
+ kept: List[Dict[str, Any]] = self._nms_by_iou(tracked_detections, overlap_iou_threshold)
1865
+
1866
+ n = len(kept)
1867
+ current_pairs_by_ids: Set[tuple] = set()
1868
+ current_pairs_all: Set[tuple] = set()
1869
+
1870
+ # Build current frame proximity pairs using expanded bbox IoU
1871
+ for i in range(n):
1872
+ expanded_bbox_i = kept[i]["expanded_bbox"]
1873
+ for j in range(i + 1, n):
1874
+ expanded_bbox_j = kept[j]["expanded_bbox"]
1875
+
1876
+ # Calculate IoU between expanded bboxes
1877
+ iou = self._compute_iou(expanded_bbox_i, expanded_bbox_j)
1878
+
1879
+ # Check if IoU exceeds proximity threshold
1880
+ if iou >= proximity_iou_threshold:
1881
+ # For per-frame count, include every close pair
1882
+ current_pairs_all.add((i, j))
1883
+
1884
+ # For global unique, require both IDs
1885
+ id_i = kept[i].get("track_id")
1886
+ id_j = kept[j].get("track_id")
1887
+ if id_i is not None and id_j is not None:
1888
+ pair_ids = (id_i, id_j) if id_i <= id_j else (id_j, id_i)
1889
+ current_pairs_by_ids.add(pair_ids)
1890
+
1891
+ # Update global unique proximity pairs using ID pairs only
1892
+ new_unique_pairs = {frozenset(p) for p in current_pairs_by_ids} - self._observed_proximity_pairs
1893
+ if new_unique_pairs:
1894
+ self._total_proximity_count += len(new_unique_pairs)
1895
+ self._observed_proximity_pairs.update(new_unique_pairs)
1896
+
1897
+ # Store last frame pairs (ID pairs if available, else index pairs as fallback)
1898
+ self._last_frame_proximity_pairs = current_pairs_by_ids if current_pairs_by_ids else current_pairs_all
1899
+
1900
+ # Return count of pairs detected in the current frame
1901
+ return len(current_pairs_by_ids) if current_pairs_by_ids else len(current_pairs_all)
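A worked example of the expansion rule described in the docstring above (the box is illustrative): width grows by 20% and height by 10%, split evenly on both sides, as `_expand_bbox` does, and two people count as a proximity pair when the IoU of their expanded boxes reaches `proximity_iou_threshold` (default 0.1), with each unordered pair of canonical track IDs counted once across frames:

```python
x1, y1, x2, y2 = 100.0, 100.0, 160.0, 300.0   # width 60, height 200
dw, dh = 60 * 0.2, 200 * 0.1                  # 12.0, 20.0
expanded = [x1 - dw / 2, y1 - dh / 2, x2 + dw / 2, y2 + dh / 2]
print(expanded)                               # [94.0, 90.0, 166.0, 310.0]
```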