matrice_analytics-0.1.60-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/people_tracking.py
@@ -0,0 +1,1842 @@
+ """
+ People counting use case implementation.
+
+ This module provides a clean implementation of people counting functionality
+ with zone-based analysis, tracking, and alerting capabilities.
+ """
+
+ from typing import Any, Dict, List, Optional, Set
+ from dataclasses import asdict
+ import time
+ from datetime import datetime, timezone
+
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+ from ..core.config import PeopleTrackingConfig, ZoneConfig, AlertConfig, LineConfig
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker,
+     calculate_iou
+ )
+ from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
+
+
+ class PeopleTrackingUseCase(BaseProcessor):
+     """People counting use case with zone analysis and alerting."""
+
+     def __init__(self):
+         """Initialize people counting use case."""
+         super().__init__("people_tracking")
+         self.category = "general"
+         self.CASE_TYPE: Optional[str] = 'People_Tracking'
+         self.CASE_VERSION: Optional[str] = '1.3'
+
+         # Track ID storage for total count calculation
+         self._total_track_ids = set()  # Store all unique track IDs seen across calls
+         self._current_frame_track_ids = set()  # Store track IDs from current frame
+         self._total_count = 0  # Cached total count
+         self._last_update_time = time.time()  # Track when last updated
+
+         # Zone-based tracking storage
+         self._zone_current_track_ids = {}  # zone_name -> set of current track IDs in zone
+         self._zone_total_track_ids = {}  # zone_name -> set of all track IDs that have been in zone
+         self._zone_current_counts = {}  # zone_name -> current count in zone
+         self._zone_total_counts = {}  # zone_name -> total count that have been in zone
+
+         # Frame counter for tracking total frames processed
+         self._total_frame_counter = 0  # Total frames processed across all calls
+
+         # Global frame offset for video chunk processing
+         self._global_frame_offset = 0  # Offset to add to local frame IDs for global frame numbering
+         self._frames_in_current_chunk = 0  # Number of frames in current chunk
+
+         # Initialize smoothing tracker
+         self.smoothing_tracker = None
+
+         # Track start time for "TOTAL SINCE" calculation
+         self._tracking_start_time = None
+
+         # --------------------------------------------------------------- #
+         # Tracking aliasing structures to merge fragmented IDs            #
+         # --------------------------------------------------------------- #
+         # Maps raw tracker IDs generated by ByteTrack to a stable canonical ID
+         # that represents a real-world person. This helps avoid double counting
+         # when the tracker loses a target temporarily and assigns a new ID.
+         self._track_aliases: Dict[Any, Any] = {}
+
+         # Stores metadata about each canonical track such as its last seen
+         # bounding box, last update timestamp and all raw IDs that have been
+         # merged into it.
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+
+         # IoU threshold above which two bounding boxes are considered to belong
+         # to the same person (empirically chosen; adjust in production if
+         # needed).
+         self._track_merge_iou_threshold: float = 0.04
+
+         # Only merge with canonical tracks that were updated within this time
+         # window (in seconds). This prevents accidentally merging tracks that
+         # left the scene long ago.
+         self._track_merge_time_window: float = 10.0
+
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"
+
+         self.start_timer = None
+
+         # Line crossing tracking storage
+         self._line_crossed_tracks: Dict[str, Set[Any]] = {}  # "side1_to_side2": set of track_ids that crossed
+         self._side1_label: str = "Side A"
+         self._side2_label: str = "Side B"
+
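The aliasing structures above are easier to follow with a runnable sketch. The following is a minimal illustration of IoU-based merging of fragmented tracker IDs, assuming the same dict-based bbox format used elsewhere in this file; the class and helper names here are illustrative, not the package's actual internals (`_merge_or_register_track` is referenced later in this hunk but its body is not shown).

```python
import time
from typing import Any, Dict

def iou(a: Dict[str, float], b: Dict[str, float]) -> float:
    """Intersection-over-union of two boxes given as {xmin, ymin, xmax, ymax}."""
    ix1, iy1 = max(a["xmin"], b["xmin"]), max(a["ymin"], b["ymin"])
    ix2, iy2 = min(a["xmax"], b["xmax"]), min(a["ymax"], b["ymax"])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a["xmax"] - a["xmin"]) * (a["ymax"] - a["ymin"])
    area_b = (b["xmax"] - b["xmin"]) * (b["ymax"] - b["ymin"])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

class TrackAliasing:
    """Illustrative sketch: merge fragmented raw tracker IDs into canonical IDs."""

    def __init__(self, iou_threshold: float = 0.04, time_window: float = 10.0):
        self.track_aliases: Dict[Any, Any] = {}      # raw ID -> canonical ID
        self.canonical_tracks: Dict[Any, Dict] = {}  # canonical ID -> metadata
        self.iou_threshold = iou_threshold
        self.time_window = time_window

    def merge_or_register(self, raw_id: Any, bbox: Dict[str, float]) -> Any:
        now = time.time()
        # Known raw ID: refresh its canonical record and return the alias.
        if raw_id in self.track_aliases:
            canonical = self.track_aliases[raw_id]
            self.canonical_tracks[canonical].update(bbox=bbox, last_seen=now)
            return canonical
        # New raw ID: try to merge with a recently seen canonical track by IoU.
        for canonical, meta in self.canonical_tracks.items():
            if now - meta["last_seen"] <= self.time_window and iou(bbox, meta["bbox"]) >= self.iou_threshold:
                self.track_aliases[raw_id] = canonical
                meta.update(bbox=bbox, last_seen=now)
                return canonical
        # No match: the raw ID becomes its own canonical track.
        self.track_aliases[raw_id] = raw_id
        self.canonical_tracks[raw_id] = {"bbox": bbox, "last_seen": now}
        return raw_id
```

The very low IoU threshold (0.04) matches the constant in the diff: even a slight overlap between a new raw ID and a recently lost track is taken as evidence of the same person.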
+     def process(self, data: Any, config: ConfigProtocol,
+                 context: Optional[ProcessingContext] = None, stream_info: Optional[Any] = None) -> ProcessingResult:
+         """
+         Process people counting use case - automatically detects single or multi-frame structure.
+
+         Args:
+             data: Raw model output (detection or tracking format)
+             config: People counting configuration
+             context: Processing context
+             stream_info: Stream information containing frame details (optional)
+
+         Returns:
+             ProcessingResult: Processing result with standardized agg_summary structure
+         """
+         start_time = time.time()
+
+         try:
+             # Ensure we have the right config type
+             if not isinstance(config, PeopleTrackingConfig):
+                 return self.create_error_result(
+                     "Invalid configuration type for people counting",
+                     usecase=self.name,
+                     category=self.category,
+                     context=context
+                 )
+
+             # Initialize processing context if not provided
+             if context is None:
+                 context = ProcessingContext()
+
+             # Detect input format and frame structure
+             input_format = match_results_structure(data)
+             context.input_format = input_format
+             context.confidence_threshold = config.confidence_threshold
+
+             is_multi_frame = self.detect_frame_structure(data)
+             self.logger.debug("alert_config=%s, config=%s, multi_frame=%s",
+                               config.alert_config, config, is_multi_frame)
+
+             # Apply smoothing if enabled
+             if config.enable_smoothing and input_format == ResultFormat.OBJECT_TRACKING:
+                 data = self._apply_smoothing(data, config)
+
+             # Process based on frame structure
+             if is_multi_frame:
+                 return self._process_multi_frame(data, config, context, stream_info)
+             else:
+                 return self._process_single_frame(data, config, context, stream_info)
+
+         except Exception as e:
+             self.logger.error(f"People counting failed: {str(e)}", exc_info=True)
+
+             if context:
+                 context.mark_completed()
+
+             return self.create_error_result(
+                 str(e),
+                 type(e).__name__,
+                 usecase=self.name,
+                 category=self.category,
+                 context=context
+             )
+
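For orientation, here is a hypothetical invocation of this use case. `PeopleTrackingUseCase` and `PeopleTrackingConfig` come from the files listed above; the config's constructor arguments beyond `confidence_threshold` and the `ProcessingResult.data` attribute are assumptions inferred from how they are used in this diff, not a documented API.

```python
# Assumed usage sketch; field names other than confidence_threshold are guesses.
from matrice_analytics.post_processing.core.config import PeopleTrackingConfig
from matrice_analytics.post_processing.usecases.people_tracking import PeopleTrackingUseCase

usecase = PeopleTrackingUseCase()
config = PeopleTrackingConfig(confidence_threshold=0.5)

# Multi-frame tracking payload: {frame_key: [detections...]}
data = {
    "0": [{"category": "person", "confidence": 0.91, "track_id": 7,
           "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}}],
    "1": [{"category": "person", "confidence": 0.88, "track_id": 7,
           "bounding_box": {"xmin": 12, "ymin": 21, "xmax": 112, "ymax": 222}}],
}

result = usecase.process(data, config)
print(result.data["agg_summary"])  # assumed result shape, per create_result above
```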
+     def _process_multi_frame(self, data: Dict, config: PeopleTrackingConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         """Process multi-frame data to generate frame-wise agg_summary."""
+
+         frame_incidents = {}
+         frame_tracking_stats = {}
+         frame_business_analytics = {}
+         frame_human_text = {}
+         frame_alerts = {}
+
+         # Increment total frame counter
+         frames_in_this_call = len(data)
+         self._total_frame_counter += frames_in_this_call
+
+         # Process each frame individually
+         for frame_key, frame_detections in data.items():
+             # Extract frame ID from tracking data
+             frame_id = self._extract_frame_id_from_tracking(frame_detections, frame_key)
+             global_frame_id = self.get_global_frame_id(frame_id)
+
+             # Process this single frame's detections
+             alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
+                 frame_detections, config, global_frame_id, stream_info
+             )
+             incidents = incidents_list[0] if incidents_list else {}
+             tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+             business_analytics = business_analytics_list[0] if business_analytics_list else {}
+             summary = summary_list[0] if summary_list else {}
+
+             # Store frame-wise results
+             if incidents:
+                 frame_incidents[global_frame_id] = incidents
+             if tracking_stats:
+                 frame_tracking_stats[global_frame_id] = tracking_stats
+             if business_analytics:
+                 frame_business_analytics[global_frame_id] = business_analytics
+             if summary:
+                 frame_human_text[global_frame_id] = summary
+             if alerts:
+                 frame_alerts[global_frame_id] = alerts
+
+         # Update global frame offset after processing this chunk
+         self.update_global_frame_offset(frames_in_this_call)
+
+         # Create frame-wise agg_summary
+         agg_summary = self.create_frame_wise_agg_summary(
+             frame_incidents, frame_tracking_stats, frame_business_analytics, frame_alerts,
+             frame_human_text=frame_human_text
+         )
+
+         # Mark processing as completed
+         context.mark_completed()
+
+         # Create result with standardized agg_summary
+         return self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+
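`get_global_frame_id` and `update_global_frame_offset` are not shown in this hunk; from their usage above, the intended arithmetic appears to be a running offset that turns chunk-local frame indices into a continuous global numbering. A minimal sketch under that assumption (the real methods may handle string frame IDs and other details):

```python
class FrameOffsetDemo:
    """Sketch of global frame numbering across video chunks (assumed semantics)."""

    def __init__(self):
        self._global_frame_offset = 0

    def get_global_frame_id(self, local_frame_id: int) -> int:
        # A chunk-local frame index becomes global by adding the running offset.
        return self._global_frame_offset + int(local_frame_id)

    def update_global_frame_offset(self, frames_in_chunk: int) -> None:
        self._global_frame_offset += frames_in_chunk

demo = FrameOffsetDemo()
for chunk in ([0, 1, 2], [0, 1]):  # two chunks of 3 and 2 frames
    print([demo.get_global_frame_id(f) for f in chunk])
    demo.update_global_frame_offset(len(chunk))
# -> [0, 1, 2] then [3, 4]
```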
+     def _process_single_frame(self, data: Any, config: PeopleTrackingConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         """Process single frame data and return standardized agg_summary."""
+
+         # Guard against a missing stream_info so a None value cannot raise here
+         current_frame = (stream_info or {}).get("input_settings", {}).get("start_frame", "current_frame")
+
+         # Process frame data
+         alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
+             data, config, current_frame, stream_info
+         )
+         incidents = incidents_list[0] if incidents_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = business_analytics_list[0] if business_analytics_list else {}
+         summary = summary_list[0] if summary_list else {}
+
+         # Create single-frame agg_summary
+         agg_summary = self.create_agg_summary(
+             current_frame, incidents, tracking_stats, business_analytics, alerts, human_text=summary
+         )
+
+         # Mark processing as completed
+         context.mark_completed()
+
+         # Create result with standardized agg_summary
+         return self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+
+     def _process_frame_detections(self, frame_data: Any, config: PeopleTrackingConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> tuple:
+         """Process detections from a single frame and return standardized components."""
+
+         # Convert frame_data to a list of detections if it is not one already
+         if isinstance(frame_data, list):
+             frame_detections = frame_data
+         else:
+             # Handle other formats as needed
+             frame_detections = []
+
+         # Step 1: Apply confidence filtering to this frame
+         if config.confidence_threshold is not None:
+             frame_detections = [d for d in frame_detections if d.get("confidence", 0) >= config.confidence_threshold]
+
+         # Step 2: Apply category mapping if provided
+         if config.index_to_category:
+             frame_detections = apply_category_mapping(frame_detections, config.index_to_category)
+
+         # Step 3: Filter to person categories
+         if config.person_categories:
+             frame_detections = [d for d in frame_detections if d.get("category") in config.person_categories]
+
+         # Step 4: Create counting summary for this frame
+         counting_summary = {
+             "total_objects": len(frame_detections),
+             "detections": frame_detections,
+             "categories": {}
+         }
+
+         # Count by category
+         for detection in frame_detections:
+             category = detection.get("category", "unknown")
+             counting_summary["categories"][category] = counting_summary["categories"].get(category, 0) + 1
+
+         # Step 4.5: Always update tracking state BEFORE zone enhancements so detections have track_ids
+         self._update_tracking_state(counting_summary)
+
+         # Step 5: Zone analysis for this frame
+         zone_analysis = {}
+         if config.zone_config and config.zone_config.zones:
+             zone_analysis = count_objects_in_zones(frame_detections, config.zone_config.zones)
+
+         # Update zone tracking with current frame data (now detections have canonical track_ids)
+         if zone_analysis and config.enable_tracking:
+             enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, frame_detections, config)
+             # Merge enhanced zone analysis with original zone analysis
+             for zone_name, enhanced_data in enhanced_zone_analysis.items():
+                 zone_analysis[zone_name] = enhanced_data
+
+         # Step 5.5: Line crossing analysis for this frame
+         line_analysis = {}
+         if config.line_config:
+             line_analysis = self._update_line_crossings(frame_detections, config.line_config)
+
+         # Step 6: Generate insights and alerts for this frame
+         alerts = self._check_alerts(counting_summary, zone_analysis, config, frame_id, line_analysis=line_analysis)
+
+         # Step 7: Generate summary and standardized agg_summary components for this frame.
+         # NOTE: the incident output is suppressed below; the call is kept because it
+         # updates self._ascending_alert_list, which the alert-trend logic relies on.
+         incidents = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_id, stream_info, line_analysis=line_analysis)
+         incidents = []
+         tracking_stats = self._generate_tracking_stats(counting_summary, zone_analysis, config, frame_id=frame_id, alerts=alerts, stream_info=stream_info, line_analysis=line_analysis)
+         business_analytics = self._generate_business_analytics(counting_summary, zone_analysis, config, frame_id, stream_info, is_empty=True)
+         summary = self._generate_summary(counting_summary, incidents, tracking_stats, business_analytics, alerts)
+
+         # Return standardized components as tuple
+         return alerts, incidents, tracking_stats, business_analytics, summary
+
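Steps 1-4 above are plain list filtering; the following stand-alone sketch reproduces them with a simplified stand-in for `apply_category_mapping` (the real utility lives under `matrice_analytics/post_processing/utils` and is not shown in this hunk):

```python
# Stand-alone sketch of steps 1-4; apply_mapping is a simplified stand-in
# for the package's apply_category_mapping utility.
from typing import Dict, List

def apply_mapping(dets: List[Dict], index_to_category: Dict) -> List[Dict]:
    # Map integer class indices to category names.
    return [{**d, "category": index_to_category.get(d.get("category"), d.get("category"))}
            for d in dets]

detections = [
    {"category": 0, "confidence": 0.92},
    {"category": 0, "confidence": 0.40},  # dropped: below threshold
    {"category": 2, "confidence": 0.88},  # dropped: not a person category
]

confidence_threshold = 0.5
index_to_category = {0: "person", 2: "car"}
person_categories = {"person"}

dets = [d for d in detections if d.get("confidence", 0) >= confidence_threshold]
dets = apply_mapping(dets, index_to_category)
dets = [d for d in dets if d.get("category") in person_categories]

summary = {"total_objects": len(dets), "detections": dets, "categories": {}}
for d in dets:
    summary["categories"][d["category"]] = summary["categories"].get(d["category"], 0) + 1
print(summary["total_objects"], summary["categories"])  # 1 {'person': 1}
```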
+     def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: PeopleTrackingConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None, line_analysis: Optional[Dict] = None) -> List[Dict]:
+         """Generate standardized incidents for the agg_summary structure."""
+
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         incidents = []
+         total_people = counting_summary.get("total_objects", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
+         start_timestamp = self._get_start_timestamp_str(stream_info)
+         # Keep the alert-trend window bounded to the most recent 900 entries
+         self._ascending_alert_list = self._ascending_alert_list[-900:]
+         human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_types = getattr(config.alert_config, 'alert_type', ['Default'])
+             alert_values = getattr(config.alert_config, 'alert_value', ['JSON'])
+             alert_settings.append({
+                 "alert_type": alert_types,
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": getattr(config.alert_config, 'count_thresholds', {}),
+                 "ascending": True,
+                 "settings": dict(zip(alert_types, alert_values))
+             })
+
+         if total_people > 0:
+             # Determine event level based on thresholds
+             level = "info"
+             intensity = 5.0
+             if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                 self.current_incident_end_timestamp = 'Incident still active'
+             elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                 if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                     self.current_incident_end_timestamp = current_timestamp
+             elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                 self.current_incident_end_timestamp = 'N/A'
+
+             if config.alert_config and getattr(config.alert_config, 'count_thresholds', None):
+                 threshold = config.alert_config.count_thresholds.get("all", 10)
+                 intensity = min(10.0, (total_people / threshold) * 10)
+
+                 if intensity >= 9:
+                     level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
+                 elif intensity >= 5:
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     self._ascending_alert_list.append(0)
+             else:
+                 if total_people > 30:
+                     level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_people > 25:
+                     level = "significant"
+                     intensity = 9.0
+                     self._ascending_alert_list.append(2)
+                 elif total_people > 15:
+                     level = "medium"
+                     intensity = 7.0
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     intensity = min(10.0, total_people / 3.0)
+                     self._ascending_alert_list.append(0)
+
+             # Generate human text in new format
+             human_text_lines.append(f"\tSeverity Level: {self.CASE_TYPE}: {level}")
+
+             # Main people counting incident
+             event = self.create_incident(
+                 incident_id=self.CASE_TYPE + '_' + str(frame_id), incident_type=self.CASE_TYPE,
+                 severity_level=level, human_text="\n".join(human_text_lines), camera_info=camera_info,
+                 alerts=alerts, alert_settings=alert_settings,
+                 start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                 level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+             incidents.append(event)
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})
+
+         # Add zone-specific events if applicable
+         if zone_analysis:
+             human_text_lines.append("\t- ZONE EVENTS:")
+             for zone_name, zone_count in zone_analysis.items():
+                 zone_total = self._robust_zone_total(zone_count)
+                 if zone_total > 0:
+                     # Zone severity is derived from the zone's own intensity
+                     zone_intensity = min(10.0, zone_total / 5.0)
+                     if zone_intensity >= 9:
+                         zone_level = "critical"
+                         self._ascending_alert_list.append(3)
+                     elif zone_intensity >= 7:
+                         zone_level = "significant"
+                         self._ascending_alert_list.append(2)
+                     elif zone_intensity >= 5:
+                         zone_level = "medium"
+                         self._ascending_alert_list.append(1)
+                     else:
+                         zone_level = "low"
+                         self._ascending_alert_list.append(0)
+
+                     human_text_lines.append(f"\t\t- Zone name: {zone_name}")
+                     human_text_lines.append(f"\t\t\t- Total people in zone: {zone_total}")
+                     # Zone-level people counting incident
+                     event = self.create_incident(
+                         incident_id=self.CASE_TYPE + '_' + 'zone_' + zone_name + str(frame_id), incident_type=self.CASE_TYPE,
+                         severity_level=zone_level, human_text="\n".join(human_text_lines), camera_info=camera_info,
+                         alerts=alerts, alert_settings=alert_settings,
+                         start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                         level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+                     incidents.append(event)
+
+         # Add line crossing-specific events if applicable
+         if line_analysis and line_analysis.get("total_crossings"):
+             human_text_lines.append("\t- LINE CROSSING EVENTS:")
+             for direction, count in line_analysis["total_crossings"].items():
+                 if count > 0:
+                     from_label, to_label = direction.split("_to_")
+                     from_label = self._side1_label if from_label == "side1" else self._side2_label
+                     to_label = self._side1_label if to_label == "side1" else self._side2_label
+                     human_text_lines.append(f"\t\t- Crossings from {from_label} to {to_label}: {count}")
+                     # Create incident for line crossing
+                     line_level = "info"  # Could be computed from an intensity, as above
+                     event = self.create_incident(
+                         incident_id=self.CASE_TYPE + '_' + 'line_cross_' + direction + str(frame_id),
+                         incident_type=self.CASE_TYPE + '_Line_Crossing',
+                         severity_level=line_level,
+                         human_text="\n".join(human_text_lines),
+                         camera_info=camera_info,
+                         alerts=alerts,
+                         alert_settings=alert_settings,
+                         start_time=start_timestamp,
+                         end_time=self.current_incident_end_timestamp,
+                         level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+                     )
+                     incidents.append(event)
+
+         return incidents
+
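The threshold branch above reduces to a small arithmetic rule. A compact sketch for sanity-checking threshold choices, mirroring the diff's mapping with the default "all" threshold of 10:

```python
def severity(total_people: int, threshold: int = 10) -> tuple:
    """Map a people count to (level, intensity) using the rule from _generate_incidents."""
    intensity = min(10.0, (total_people / threshold) * 10)
    if intensity >= 9:
        return "critical", intensity
    if intensity >= 7:
        return "significant", intensity
    if intensity >= 5:
        return "medium", intensity
    return "low", intensity

for n in (3, 6, 8, 12):
    print(n, severity(n))
# 3 ('low', 3.0), 6 ('medium', 6.0), 8 ('significant', 8.0), 12 ('critical', 10.0)
```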
+
+     def _generate_tracking_stats(self, counting_summary: Dict, zone_analysis: Dict, config: PeopleTrackingConfig, frame_id: str, alerts: Any = [], stream_info: Optional[Dict[str, Any]] = None, line_analysis: Optional[Dict] = None) -> List[Dict]:
+         """Generate tracking stats using standardized methods."""
+
+         total_people = counting_summary.get("total_objects", 0)
+
+         # Get total count from cached tracking state
+         total_unique_count = self.get_total_count()
+         current_frame_count = self.get_current_frame_count()
+
+         # Get camera info using standardized method
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         # Build total_counts using standardized method
+         total_counts = []
+         per_category_total = {}
+
+         for category in config.person_categories or ["person"]:
+             # Get count for this category from zone analysis or counting summary
+             category_total_count = 0
+             if zone_analysis:
+                 for zone_data in zone_analysis.values():
+                     if isinstance(zone_data, dict) and "total_count" in zone_data:
+                         category_total_count += zone_data.get("total_count", 0)
+                     elif isinstance(zone_data, dict):
+                         # Sum up zone counts
+                         for v in zone_data.values():
+                             if isinstance(v, int):
+                                 category_total_count += v
+                             elif isinstance(v, list):
+                                 category_total_count += len(v)
+                     elif isinstance(zone_data, (int, list)):
+                         category_total_count += len(zone_data) if isinstance(zone_data, list) else zone_data
+             else:
+                 # Use total unique count from tracking state
+                 category_total_count = total_unique_count
+
+             if category_total_count > 0:
+                 total_counts.append(self.create_count_object(category, category_total_count))
+                 per_category_total[category] = category_total_count
+
+         # Build current_counts using standardized method
+         current_counts = []
+         per_category_current = {}
+
+         for category in config.person_categories or ["person"]:
+             # Get current count for this category
+             category_current_count = 0
+             if zone_analysis:
+                 for zone_data in zone_analysis.values():
+                     if isinstance(zone_data, dict) and "current_count" in zone_data:
+                         category_current_count += zone_data.get("current_count", 0)
+                     elif isinstance(zone_data, dict):
+                         # For current frame, look at detections count
+                         for v in zone_data.values():
+                             if isinstance(v, int):
+                                 category_current_count += v
+                             elif isinstance(v, list):
+                                 category_current_count += len(v)
+                     elif isinstance(zone_data, (int, list)):
+                         category_current_count += len(zone_data) if isinstance(zone_data, list) else zone_data
+             else:
+                 # Count detections in current frame for this category
+                 detections = counting_summary.get("detections", [])
+                 category_current_count = sum(1 for d in detections if d.get("category") == category)
+
+             if category_current_count > 0 or total_people > 0:  # Include even if 0 when there are people
+                 current_counts.append(self.create_count_object(category, category_current_count))
+                 per_category_current[category] = category_current_count
+
+         # Prepare detections using standardized method (without confidence and track_id)
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             bbox = detection.get("bounding_box", {})
+             category = detection.get("category", "person")
+             # Include segmentation if available (like in eg.json)
+             if detection.get("masks"):
+                 segmentation = detection.get("masks", [])
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("segmentation"):
+                 segmentation = detection.get("segmentation")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("mask"):
+                 segmentation = detection.get("mask")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             else:
+                 detection_obj = self.create_detection_object(category, bbox)
+             detections.append(detection_obj)
+
+         # Build alerts and alert_settings arrays
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_types = getattr(config.alert_config, 'alert_type', ['Default'])
+             alert_values = getattr(config.alert_config, 'alert_value', ['JSON'])
+             alert_settings.append({
+                 "alert_type": alert_types,
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": getattr(config.alert_config, 'count_thresholds', {}),
+                 "ascending": True,
+                 "settings": dict(zip(alert_types, alert_values))
+             })
+
+         if zone_analysis:
+             human_text_lines = []
+             current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+
+             def robust_zone_total(zone_count):
+                 if isinstance(zone_count, dict):
+                     total = 0
+                     for v in zone_count.values():
+                         if isinstance(v, int):
+                             total += v
+                         elif isinstance(v, list) and total == 0:
+                             total += len(v)
+                     return total
+                 elif isinstance(zone_count, list):
+                     return len(zone_count)
+                 elif isinstance(zone_count, int):
+                     return zone_count
+                 else:
+                     return 0
+
+             human_text_lines.append(f"\t- People Detected: {total_people}")
+             human_text_lines.append("")
+             human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
+
+             for zone_name, zone_count in zone_analysis.items():
+                 zone_total = robust_zone_total(zone_count)
+                 human_text_lines.append(f"\t- Zone name: {zone_name}")
+                 human_text_lines.append(f"\t\t- Total count in zone: {zone_total - 1}")
+
+             if total_unique_count > 0:
+                 human_text_lines.append(f"\t- Total unique people in the scene: {total_unique_count}")
+             if alerts:
+                 for alert in alerts:
+                     human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+             else:
+                 human_text_lines.append("Alerts: None")
+
+             # Add line crossing to human_text if available
+             if line_analysis and line_analysis.get("total_crossings"):
+                 human_text_lines.append("")
+                 human_text_lines.append("\t- Line Crossings:")
+                 for direction, count in line_analysis["total_crossings"].items():
+                     from_side, to_side = direction.split("_to_")
+                     from_label = self._side1_label if from_side == "side1" else self._side2_label
+                     to_label = self._side1_label if to_side == "side1" else self._side2_label
+                     human_text_lines.append(f"\t\t- From {from_label} to {to_label}: {count}")
+
+             human_text = "\n".join(human_text_lines)
+         else:
+             human_text = self._generate_human_text_for_tracking(total_people, total_unique_count, config, frame_id, alerts, stream_info)
+
+         # Create high precision timestamps for input_timestamp and reset_timestamp
+         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
+         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+         # Create tracking_stat using standardized method
+         tracking_stat = self.create_tracking_stats(
+             total_counts, current_counts, detections, human_text, camera_info, alerts, alert_settings,
+             start_time=high_precision_start_timestamp, reset_time=high_precision_reset_timestamp
+         )
+
+         return [tracking_stat]
+
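Zone counters in this file may arrive as ints, lists of track IDs, or dicts of either. This demo mirrors the inline `robust_zone_total` helper above, including its quirk of only counting list values while the running total is still zero; the separate `self._robust_zone_total` method is not shown in this hunk and may differ:

```python
def robust_zone_total(zone_count):
    """Sum a zone counter that may be an int, a list of track IDs, or a dict of either."""
    if isinstance(zone_count, dict):
        total = 0
        for v in zone_count.values():
            if isinstance(v, int):
                total += v
            elif isinstance(v, list) and total == 0:
                total += len(v)
        return total
    if isinstance(zone_count, list):
        return len(zone_count)
    if isinstance(zone_count, int):
        return zone_count
    return 0

print(robust_zone_total(4))                          # 4
print(robust_zone_total([101, 102, 107]))            # 3 (list of track IDs)
print(robust_zone_total({"person": 2, "child": 1}))  # 3
```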
+
+     def _generate_human_text_for_tracking(self, total_people: int, total_unique_count: int, config: PeopleTrackingConfig, frame_id: str, alerts: Any = [], stream_info: Optional[Dict[str, Any]] = None) -> str:
+         """Generate human-readable text for tracking stats in old format."""
+         human_text_lines = []
+         current_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+         human_text_lines.append(f"\t- People Detected: {total_people}")
+
+         human_text_lines.append("")
+         if total_unique_count > 0:
+             human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
+             human_text_lines.append(f"\t- Total unique people count: {total_unique_count}")
+
+         if alerts:
+             for alert in alerts:
+                 human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+         else:
+             human_text_lines.append("Alerts: None")
+
+         return "\n".join(human_text_lines)
+
+     def _check_alerts(self, counting_summary: Dict, zone_analysis: Dict,
+                       config: PeopleTrackingConfig, frame_id: str, line_analysis: Optional[Dict] = None) -> List[Dict]:
+         """Check for alert conditions and generate alerts."""
+
+         def get_trend(data, lookback=900, threshold=0.6):
+             """
+             Determine if the trend is ascending or descending based on actual value progression.
+             Works with severity values 0-3 (not just binary).
+             """
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True  # not enough data to determine trend
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             if ratio >= threshold:
+                 return True
+             elif ratio <= (1 - threshold):
+                 return False
+             return None  # ambiguous: neither clearly ascending nor descending
+
+         alerts = []
+
+         if not config.alert_config:
+             return alerts
+
+         total_people = counting_summary.get("total_objects", 0)
+         alert_types = getattr(config.alert_config, 'alert_type', ['Default'])
+         alert_values = getattr(config.alert_config, 'alert_value', ['JSON'])
+         alert_settings_map = dict(zip(alert_types, alert_values))
+         # The per-frame summary built in this module stores per-category counts
+         # under "categories"; fall back to "by_category" for other callers.
+         by_category = counting_summary.get("categories") or counting_summary.get("by_category", {})
+
+         # Count threshold alerts
+         if getattr(config.alert_config, 'count_thresholds', None):
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total_people >= threshold:
+                     alerts.append({
+                         "alert_type": alert_types,
+                         "alert_id": "alert_" + category + '_' + frame_id,
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": alert_settings_map
+                     })
+                 elif category in by_category:
+                     count = by_category[category]
+                     if count >= threshold:
+                         alerts.append({
+                             "alert_type": alert_types,
+                             "alert_id": "alert_" + category + '_' + frame_id,
+                             "incident_category": self.CASE_TYPE,
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": alert_settings_map
+                         })
+
+         # Zone occupancy threshold alerts
+         if getattr(config.alert_config, 'occupancy_thresholds', None):
+             for zone_name, threshold in config.alert_config.occupancy_thresholds.items():
+                 if zone_name in zone_analysis:
+                     # Calculate zone_count robustly (supports int, list, dict values)
+                     zone_count = self._robust_zone_total(zone_analysis[zone_name])
+                     if zone_count >= threshold:
+                         alerts.append({
+                             "alert_type": alert_types,
+                             "alert_id": f"alert_zone_{zone_name}_{frame_id}",
+                             "incident_category": f"{self.CASE_TYPE}_{zone_name}",
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": alert_settings_map
+                         })
+
+         # Line crossing threshold alerts
+         if getattr(config.alert_config, 'crossing_thresholds', None) and line_analysis:
+             for direction_key, threshold in config.alert_config.crossing_thresholds.items():
+                 if direction_key in line_analysis.get("total_crossings", {}):
+                     crossing_count = line_analysis["total_crossings"][direction_key]
+                     if crossing_count >= threshold:
+                         alerts.append({
+                             "alert_type": alert_types,
+                             "alert_id": f"alert_crossing_{direction_key}_{frame_id}",
+                             "incident_category": f"{self.CASE_TYPE}_Crossing_{direction_key}",
+                             "threshold_level": threshold,
+                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                             "settings": alert_settings_map
+                         })
+
+         return alerts
+
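A quick demonstration of the trend heuristic on synthetic severity sequences (same logic as `get_trend` above, with the increasing/total ratio written as a comprehension; note that ties count as non-decreasing):

```python
def get_trend(data, lookback=900, threshold=0.6):
    """Return True (ascending), False (descending), or None (ambiguous)."""
    window = data[-lookback:] if len(data) >= lookback else data
    if len(window) < 2:
        return True
    increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
    ratio = increasing / (len(window) - 1)
    if ratio >= threshold:
        return True
    if ratio <= (1 - threshold):
        return False
    return None

print(get_trend([0, 0, 1, 1, 2, 3]))  # True: every step is non-decreasing
print(get_trend([3, 2, 2, 1, 0, 0]))  # False: only 2 of 5 steps are non-decreasing
```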
+
+     def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: PeopleTrackingConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+         """Generate standardized business analytics for the agg_summary structure."""
+         if is_empty:
+             return []
+         business_analytics = []
+
+         total_people = counting_summary.get("total_objects", 0)
+
+         # Get camera info using standardized method
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         if total_people > 0 or config.enable_analytics:
+             # Calculate analytics statistics
+             analytics_stats = {
+                 "people_count": total_people,
+                 "unique_people_count": self.get_total_count(),
+                 "current_frame_count": self.get_current_frame_count()
+             }
+
+             # Add zone analytics if available
+             if zone_analysis:
+                 zone_stats = {}
+                 for zone_name, zone_count in zone_analysis.items():
+                     zone_total = self._robust_zone_total(zone_count)
+                     zone_stats[f"{zone_name}_occupancy"] = zone_total
+                 analytics_stats.update(zone_stats)
+
+             # Generate human text for analytics
+             current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+
+             analytics_human_text = self.generate_analytics_human_text(
+                 "people_counting_analytics", analytics_stats, current_timestamp, start_timestamp
+             )
+
+             # Create business analytics using standardized method
+             analytics = self.create_business_analytics(
+                 "people_counting_analytics", analytics_stats, analytics_human_text, camera_info
+             )
+             business_analytics.append(analytics)
+
+         return business_analytics
+
+     def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+         """
+         Generate a human_text string summarizing the tracking stats, incidents, business analytics and alerts.
+         """
+         lines = []
+         lines.append(f"Application Name: {self.CASE_TYPE}")
+         lines.append(f"Application Version: {self.CASE_VERSION}")
+         if len(incidents) > 0:
+             lines.append(f"Incidents: \n\t{incidents[0].get('human_text', 'No incidents detected')}")
+         if len(tracking_stats) > 0:
+             lines.append(f"Tracking Statistics: \t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}")
+         if len(business_analytics) > 0:
+             lines.append(f"Business Analytics: \t{business_analytics[0].get('human_text', 'No business analytics detected')}")
+
+         if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+             lines.append("Summary: No Summary Data")
+
+         return ["\n".join(lines)]
+
+     def _calculate_metrics(self, counting_summary: Dict, zone_analysis: Dict,
+                            config: PeopleTrackingConfig, context: ProcessingContext) -> Dict[str, Any]:
+         """Calculate detailed metrics for analytics."""
+         total_people = counting_summary.get("total_objects", 0)
+
+         metrics = {
+             "total_people": total_people,
+             "processing_time": context.processing_time or 0.0,
+             "input_format": context.input_format.value,
+             "confidence_threshold": config.confidence_threshold,
+             "zones_analyzed": len(zone_analysis),
+             "detection_rate": 0.0,
+             "coverage_percentage": 0.0
+         }
+
+         # Calculate detection rate
+         if config.time_window_minutes and config.time_window_minutes > 0:
+             metrics["detection_rate"] = (total_people / config.time_window_minutes) * 60
+
+         # Calculate zone coverage
+         if zone_analysis and total_people > 0:
+             people_in_zones = 0
+             for zone_counts in zone_analysis.values():
+                 if isinstance(zone_counts, dict):
+                     for v in zone_counts.values():
+                         if isinstance(v, int):
+                             people_in_zones += v
+                         elif isinstance(v, list):
+                             people_in_zones += len(v)
+                 elif isinstance(zone_counts, list):
+                     people_in_zones += len(zone_counts)
+                 elif isinstance(zone_counts, int):
+                     people_in_zones += zone_counts
+             metrics["coverage_percentage"] = (people_in_zones / total_people) * 100
+
+         # Unique tracking metrics
+         if config.enable_unique_counting:
+             unique_count = self._count_unique_tracks(counting_summary, config)
+             if unique_count is not None:
+                 metrics["unique_people"] = unique_count
+                 metrics["tracking_efficiency"] = (unique_count / total_people) * 100 if total_people > 0 else 0
+
+         # Per-zone metrics
+         if zone_analysis:
+             zone_metrics = {}
+             for zone_name, zone_counts in zone_analysis.items():
+                 # Robustly sum counts, handling dicts with int or list values
+                 if isinstance(zone_counts, dict):
+                     zone_total = 0
+                     for v in zone_counts.values():
+                         if isinstance(v, int):
+                             zone_total += v
+                         elif isinstance(v, list):
+                             zone_total += len(v)
+                 elif isinstance(zone_counts, list):
+                     zone_total = len(zone_counts)
+                 elif isinstance(zone_counts, int):
+                     zone_total = zone_counts
+                 else:
+                     zone_total = 0
+                 zone_metrics[zone_name] = {
+                     "count": zone_total,
+                     "percentage": (zone_total / total_people) * 100 if total_people > 0 else 0
+                 }
+             metrics["zone_metrics"] = zone_metrics
+
+         return metrics
+
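The two derived metrics are simple ratios; a worked example:

```python
# Worked example of detection_rate and coverage_percentage as computed above.
total_people = 12
time_window_minutes = 30
people_in_zones = 9

detection_rate = (total_people / time_window_minutes) * 60    # 24.0 people per hour
coverage_percentage = (people_in_zones / total_people) * 100  # 75.0% of people inside zones
print(detection_rate, coverage_percentage)
```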
+     def _extract_predictions(self, data: Any) -> List[Dict[str, Any]]:
+         """Extract predictions from processed data for API compatibility."""
+         predictions = []
+
+         try:
+             if isinstance(data, list):
+                 # Detection format
+                 for item in data:
+                     prediction = self._normalize_prediction(item)
+                     if prediction:
+                         predictions.append(prediction)
+
+             elif isinstance(data, dict):
+                 # Frame-based or tracking format
+                 for frame_id, items in data.items():
+                     if isinstance(items, list):
+                         for item in items:
+                             prediction = self._normalize_prediction(item)
+                             if prediction:
+                                 prediction["frame_id"] = frame_id
+                                 predictions.append(prediction)
+
+         except Exception as e:
+             self.logger.warning(f"Failed to extract predictions: {str(e)}")
+
+         return predictions
911
+
912
+ def _normalize_prediction(self, item: Dict[str, Any]) -> Dict[str, Any]:
913
+ """Normalize a single prediction item."""
914
+ if not isinstance(item, dict):
915
+ return {}
916
+
917
+ return {
918
+ "category": item.get("category", item.get("class", "unknown")),
919
+ "confidence": item.get("confidence", item.get("score", 0.0)),
920
+ "bounding_box": item.get("bounding_box", item.get("bbox", {})),
921
+ "track_id": item.get("track_id")
922
+ }
923
+
924
+ def _get_detections_with_confidence(self, counting_summary: Dict) -> List[Dict]:
925
+ """Extract detection items with confidence scores."""
926
+ return counting_summary.get("detections", [])
927
+
928
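+     # --- Editor's illustrative sketch (hypothetical input, not original) -- #
+     # _normalize_prediction tolerates both naming conventions, e.g.:
+     #
+     #     raw = {"class": "person", "score": 0.91,
+     #            "bbox": {"xmin": 10, "ymin": 20, "xmax": 50, "ymax": 120}}
+     #     self._normalize_prediction(raw)
+     #     # -> {"category": "person", "confidence": 0.91,
+     #     #     "bounding_box": {"xmin": 10, ...}, "track_id": None}
+ 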
+     def _count_unique_tracks(self, counting_summary: Dict, config: Optional[PeopleTrackingConfig] = None) -> Optional[int]:
+         """Count unique tracks if tracking is enabled."""
+         # Always update tracking state regardless of the enable_unique_counting setting
+         self._update_tracking_state(counting_summary)
+ 
+         # Only return the count if unique counting is enabled
+         if config and config.enable_unique_counting:
+             return self._total_count if self._total_count > 0 else None
+         return None
+ 
+     def _update_tracking_state(self, counting_summary: Dict) -> None:
+         """Update tracking state with current frame data (always called)."""
+         detections = self._get_detections_with_confidence(counting_summary)
+ 
+         if not detections:
+             return
+ 
+         # Map raw tracker IDs to canonical IDs to avoid duplicate counting
+         current_frame_tracks: Set[Any] = set()
+ 
+         # Local sequence to make ephemeral IDs unique within the same call
+         ephemeral_seq = 0
+ 
+         for detection in detections:
+             # Prefer an explicit tracker-provided ID when available
+             raw_track_id = detection.get("track_id")
+ 
+             # Always require a bbox to perform IoU-based merging
+             bbox = detection.get("bounding_box", detection.get("bbox"))
+             if not bbox:
+                 continue
+ 
+             # For single-frame detectors (e.g., plain YOLO) there is no track_id.
+             # Generate a short-lived ID and merge by IoU against recent canonical tracks.
+             if raw_track_id is None:
+                 raw_track_id = self._generate_ephemeral_track_id(bbox, ephemeral_seq)
+                 ephemeral_seq += 1
+ 
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+ 
+             # Propagate the canonical ID so that downstream logic (including zone
+             # tracking and event generation) operates on the de-duplicated ID.
+             detection["track_id"] = canonical_id
+             current_frame_tracks.add(canonical_id)
+ 
+         # Determine genuinely new canonical IDs before merging them into the
+         # total set (computing this afterwards would always yield the full
+         # current-frame set)
+         new_tracks = current_frame_tracks - self._total_track_ids
+         self._total_track_ids.update(current_frame_tracks)
+         self._current_frame_track_ids = current_frame_tracks
+ 
+         # Update total count
+         self._total_count = len(self._total_track_ids)
+         self._last_update_time = time.time()
+ 
+         # Log tracking state updates
+         if current_frame_tracks:
+             if new_tracks:
+                 self.logger.debug(
+                     f"Tracking state updated: {len(new_tracks)} new canonical track IDs added, total unique tracks: {self._total_count}")
+             else:
+                 self.logger.debug(
+                     f"Tracking state updated: {len(current_frame_tracks)} current frame canonical tracks, total unique tracks: {self._total_count}")
+ 
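+     # --- Editor's illustrative sketch (hypothetical data, not original) --- #
+     # Two raw tracker IDs whose boxes overlap heavily end up counted once:
+     #
+     #     summary = {"detections": [
+     #         {"track_id": 7,  "bounding_box": {"x1": 0, "y1": 0, "x2": 10, "y2": 10}},
+     #         {"track_id": 99, "bounding_box": {"x1": 1, "y1": 1, "x2": 10, "y2": 10}},
+     #     ]}
+     #     self._update_tracking_state(summary)
+     #     # If their IoU (~0.81 here) reaches self._track_merge_iou_threshold
+     #     # within the merge time window, raw ID 99 is aliased onto canonical
+     #     # ID 7 and self._total_count grows by 1, not 2.
+ 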
+     def _generate_ephemeral_track_id(self, bbox: Any, seq: int) -> str:
+         """Create a short-lived raw track ID for detections without a track_id.
+ 
+         Combines a coarse hash of the bbox geometry with a per-call sequence and
+         a millisecond timestamp, so the same person across adjacent frames will
+         still be merged to the same canonical track via IoU and time window,
+         while avoiding long-lived ID collisions across distant calls.
+         """
+         try:
+             # Normalize bbox to an xyxy list for hashing
+             if isinstance(bbox, dict):
+                 if "x1" in bbox:
+                     xyxy = [bbox.get("x1"), bbox.get("y1"), bbox.get("x2"), bbox.get("y2")]
+                 elif "xmin" in bbox:
+                     xyxy = [bbox.get("xmin"), bbox.get("ymin"), bbox.get("xmax"), bbox.get("ymax")]
+                 else:
+                     values = list(bbox.values())
+                     xyxy = values[:4] if len(values) >= 4 else []
+             elif isinstance(bbox, list):
+                 xyxy = bbox[:4]
+             else:
+                 xyxy = []
+ 
+             if len(xyxy) < 4:
+                 xyxy = [0, 0, 0, 0]
+ 
+             x1, y1, x2, y2 = xyxy
+             # Coarse-quantize geometry to stabilize the hash across minor jitter
+             cx = int(round((float(x1) + float(x2)) / 2.0))
+             cy = int(round((float(y1) + float(y2)) / 2.0))
+             w = int(round(abs(float(x2) - float(x1))))
+             h = int(round(abs(float(y2) - float(y1))))
+             geom_token = f"{cx}_{cy}_{w}_{h}"
+         except Exception:
+             geom_token = "0_0_0_0"
+ 
+         ms = int(time.time() * 1000)
+         return f"tmp_{ms}_{seq}_{abs(hash(geom_token)) % 1000003}"
+ 
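+     # --- Editor's illustrative sketch (example values, not original) ------ #
+     # For a box centred at (30, 60) with size 20x120, the returned ID has the
+     # shape "tmp_<ms>_<seq>_<hash % 1000003>", e.g. something like
+     # "tmp_1723456789123_0_417001" (the hash component varies per process,
+     # since Python's built-in hash of strings is salted per interpreter run).
+ 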
+     def get_total_count(self) -> int:
+         """Get the total count of unique people tracked across all calls."""
+         return self._total_count
+ 
+     def get_current_frame_count(self) -> int:
+         """Get the count of people in the current frame."""
+         return len(self._current_frame_track_ids)
+ 
+     def get_total_frames_processed(self) -> int:
+         """Get the total number of frames processed across all calls."""
+         return self._total_frame_counter
+ 
+     def set_global_frame_offset(self, offset: int) -> None:
+         """Set the global frame offset for video chunk processing."""
+         self._global_frame_offset = offset
+         self.logger.info(f"Global frame offset set to: {offset}")
+ 
+     def get_global_frame_offset(self) -> int:
+         """Get the current global frame offset."""
+         return self._global_frame_offset
+ 
+     def update_global_frame_offset(self, frames_in_chunk: int) -> None:
+         """Update the global frame offset after processing a chunk."""
+         old_offset = self._global_frame_offset
+         self._global_frame_offset += frames_in_chunk
+         self.logger.info(f"Global frame offset updated: {old_offset} -> {self._global_frame_offset} (added {frames_in_chunk} frames)")
+ 
+     def get_global_frame_id(self, local_frame_id: str) -> str:
+         """Convert a local frame ID to a global frame ID."""
+         try:
+             # Try to convert local_frame_id to an integer
+             local_frame_num = int(local_frame_id)
+             # NOTE: offset addition is currently disabled; the local frame
+             # number is returned unchanged (was: local_frame_num + self._global_frame_offset)
+             global_frame_num = local_frame_num
+             return str(global_frame_num)
+         except (ValueError, TypeError):
+             # If local_frame_id is not a number (e.g., a timestamp), return it as-is
+             return local_frame_id
+ 
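+     # --- Editor's illustrative sketch (hypothetical driver, not original) - #
+     # Chunked-video processing would drive the offset like this (the
+     # `video_chunks` iterable and `process()` call are assumed names):
+     #
+     #     usecase.set_global_frame_offset(0)
+     #     for chunk_frames in video_chunks:
+     #         process(chunk_frames)
+     #         usecase.update_global_frame_offset(len(chunk_frames))
+     #     usecase.get_global_frame_id("42")
+     #     # -> "42": with the offset addition disabled (see above), the
+     #     # local frame number comes back unchanged.
+ 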
+     def get_track_ids_info(self) -> Dict[str, Any]:
+         """Get detailed information about track IDs."""
+         return {
+             "total_count": self._total_count,
+             "current_frame_count": len(self._current_frame_track_ids),
+             "total_unique_track_ids": len(self._total_track_ids),
+             "current_frame_track_ids": list(self._current_frame_track_ids),
+             "last_update_time": self._last_update_time,
+             "total_frames_processed": self._total_frame_counter
+         }
+ 
+     def get_tracking_debug_info(self) -> Dict[str, Any]:
+         """Get detailed debugging information about tracking state."""
+         return {
+             "total_track_ids": list(self._total_track_ids),
+             "current_frame_track_ids": list(self._current_frame_track_ids),
+             "total_count": self._total_count,
+             "current_frame_count": len(self._current_frame_track_ids),
+             "total_frames_processed": self._total_frame_counter,
+             "last_update_time": self._last_update_time,
+             "zone_current_track_ids": {zone: list(tracks) for zone, tracks in self._zone_current_track_ids.items()},
+             "zone_total_track_ids": {zone: list(tracks) for zone, tracks in self._zone_total_track_ids.items()},
+             "zone_current_counts": self._zone_current_counts.copy(),
+             "zone_total_counts": self._zone_total_counts.copy(),
+             "global_frame_offset": self._global_frame_offset,
+             "frames_in_current_chunk": self._frames_in_current_chunk,
+             "line_crossed_tracks": {direction: list(tracks) for direction, tracks in self._line_crossed_tracks.items()}
+         }
+ 
+     def get_frame_info(self) -> Dict[str, Any]:
+         """Get detailed information about frame processing and the global frame offset."""
+         return {
+             "global_frame_offset": self._global_frame_offset,
+             "total_frames_processed": self._total_frame_counter,
+             "frames_in_current_chunk": self._frames_in_current_chunk,
+             "next_global_frame": self._global_frame_offset + self._frames_in_current_chunk
+         }
+ 
+     def reset_tracking_state(self) -> None:
+         """
+         WARNING: This completely resets ALL tracking data including cumulative totals!
+ 
+         This should ONLY be used when:
+         - Starting a completely new tracking session
+         - Switching to a different video/stream
+         - A manual reset is requested by the user
+ 
+         For clearing expired/stale tracks, use clear_current_frame_tracking() instead.
+         """
+         self._total_track_ids.clear()
+         self._current_frame_track_ids.clear()
+         self._total_count = 0
+         self._last_update_time = time.time()
+ 
+         # Clear zone tracking data
+         self._zone_current_track_ids.clear()
+         self._zone_total_track_ids.clear()
+         self._zone_current_counts.clear()
+         self._zone_total_counts.clear()
+ 
+         # Reset frame counter and global frame offset
+         self._total_frame_counter = 0
+         self._global_frame_offset = 0
+         self._frames_in_current_chunk = 0
+ 
+         # Clear aliasing information
+         self._canonical_tracks.clear()
+         self._track_aliases.clear()
+         self._tracking_start_time = None
+ 
+         # Clear line crossing data
+         self._line_crossed_tracks.clear()
+ 
+         self.logger.warning("FULL tracking state reset - all track IDs, zone data, line crossing data, frame counter, and global frame offset cleared. Cumulative totals lost!")
+ 
+     def clear_current_frame_tracking(self) -> int:
+         """
+         MANUAL USE ONLY: Clear only current frame tracking data while preserving cumulative totals.
+ 
+         This method is NOT called automatically anywhere in the code.
+ 
+         This is the SAFE method to use for manual clearing of stale/expired current frame data.
+         The cumulative total (self._total_count) is always preserved.
+ 
+         In streaming scenarios, you typically don't need to call this at all.
+ 
+         Returns:
+             Number of current frame tracks cleared
+         """
+         old_current_count = len(self._current_frame_track_ids)
+         self._current_frame_track_ids.clear()
+ 
+         # Clear current zone tracking (but keep total zone tracking)
+         cleared_zone_tracks = 0
+         for zone_name in list(self._zone_current_track_ids.keys()):
+             cleared_zone_tracks += len(self._zone_current_track_ids[zone_name])
+             self._zone_current_track_ids[zone_name].clear()
+             self._zone_current_counts[zone_name] = 0
+ 
+         # Update timestamp
+         self._last_update_time = time.time()
+ 
+         self.logger.info(f"Cleared {old_current_count} current frame tracks and {cleared_zone_tracks} zone current tracks. Cumulative total preserved: {self._total_count}")
+         return old_current_count
+ 
+     def reset_frame_counter(self) -> None:
+         """Reset only the frame counter."""
+         old_count = self._total_frame_counter
+         self._total_frame_counter = 0
+         self.logger.info(f"Frame counter reset from {old_count} to 0")
+ 
+     def clear_expired_tracks(self, max_age_seconds: float = 300.0) -> int:
+         """
+         MANUAL USE ONLY: Clear current frame tracking data if there have been no updates for a while.
+ 
+         This method is NOT called automatically anywhere in the code.
+         It's provided as a utility function for manual cleanup if needed.
+ 
+         In streaming scenarios, you typically don't need to call this at all.
+         The cumulative total should keep growing as new unique people are detected.
+ 
+         This method only clears current frame tracking data while preserving
+         the cumulative total count. The cumulative total should never decrease.
+ 
+         Args:
+             max_age_seconds: Maximum age in seconds before clearing current frame tracks
+ 
+         Returns:
+             Number of current frame tracks cleared
+         """
+         current_time = time.time()
+         if current_time - self._last_update_time > max_age_seconds:
+             # Use the safe method that preserves cumulative totals
+             cleared_count = self.clear_current_frame_tracking()
+             self.logger.info(f"Manual cleanup: cleared {cleared_count} expired current frame tracks (age > {max_age_seconds}s)")
+             return cleared_count
+         return 0
+ 
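+     # --- Editor's illustrative sketch (hypothetical maintenance hook) ----- #
+     # A long-lived service wanting periodic manual cleanup might do:
+     #
+     #     cleared = usecase.clear_expired_tracks(max_age_seconds=300.0)
+     #     # If anything was cleared, only the current-frame sets were
+     #     # emptied; usecase.get_total_count() is unchanged by design.
+ 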
+     def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: PeopleTrackingConfig) -> Dict[str, Dict[str, Any]]:
+         """
+         Update zone tracking with current frame data.
+ 
+         Args:
+             zone_analysis: Current zone analysis results
+             detections: List of detections with track IDs
+             config: People tracking configuration with zone polygons
+ 
+         Returns:
+             Enhanced zone analysis with tracking information
+         """
+         if not zone_analysis or not config.zone_config or not config.zone_config.zones:
+             return {}
+ 
+         enhanced_zone_analysis = {}
+         zones = config.zone_config.zones
+ 
+         # Current frame track IDs in each zone
+         current_frame_zone_tracks = {}
+ 
+         # Initialize zone tracking for all zones
+         for zone_name in zones.keys():
+             current_frame_zone_tracks[zone_name] = set()
+             if zone_name not in self._zone_current_track_ids:
+                 self._zone_current_track_ids[zone_name] = set()
+             if zone_name not in self._zone_total_track_ids:
+                 self._zone_total_track_ids[zone_name] = set()
+ 
+         # Check each detection against each zone
+         for detection in detections:
+             track_id = detection.get("track_id")
+             if track_id is None:
+                 continue
+ 
+             # Get detection bbox
+             bbox = detection.get("bounding_box", detection.get("bbox"))
+             if not bbox:
+                 continue
+ 
+             # Use the bottom-25% center as the anchor point (previously get_bbox_center)
+             center_point = get_bbox_bottom25_center(bbox)
+ 
+             # Check which zone this detection is in using the actual zone polygons
+             for zone_name, zone_polygon in zones.items():
+                 # Convert polygon points to tuples for the point_in_polygon function;
+                 # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
+                 polygon_points = [(point[0], point[1]) for point in zone_polygon]
+ 
+                 # Ray-casting test: is the detection anchor inside the zone polygon?
+                 if point_in_polygon(center_point, polygon_points):
+                     current_frame_zone_tracks[zone_name].add(track_id)
+ 
+         # Update zone tracking for each zone
+         for zone_name, zone_counts in zone_analysis.items():
+             # Get current frame tracks for this zone
+             current_tracks = current_frame_zone_tracks.get(zone_name, set())
+ 
+             # Update current zone tracks
+             self._zone_current_track_ids[zone_name] = current_tracks
+ 
+             # Update total zone tracks (accumulate all track IDs that have been in this zone)
+             self._zone_total_track_ids[zone_name].update(current_tracks)
+ 
+             # Update counts
+             self._zone_current_counts[zone_name] = len(current_tracks)
+             self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
+ 
+             # Create enhanced zone analysis
+             enhanced_zone_analysis[zone_name] = {
+                 "current_count": self._zone_current_counts[zone_name],
+                 "total_count": self._zone_total_counts[zone_name],
+                 "current_track_ids": list(current_tracks),
+                 "total_track_ids": list(self._zone_total_track_ids[zone_name]),
+                 "original_counts": zone_counts  # Preserve original zone counts
+             }
+ 
+         return enhanced_zone_analysis
+ 
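+     # --- Editor's illustrative sketch (hypothetical zone, not original) --- #
+     # Assuming the ray-casting helper returns True for an interior point:
+     #
+     #     zones = {"entrance": [[0, 0], [100, 0], [100, 100], [0, 100]]}
+     #     center = get_bbox_bottom25_center({"x1": 40, "y1": 10, "x2": 60, "y2": 90})
+     #     point_in_polygon(center, [(p[0], p[1]) for p in zones["entrance"]])
+     #     # -> True: the detection's track ID joins both the current and the
+     #     # cumulative sets for "entrance".
+ 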
+     def get_zone_tracking_info(self) -> Dict[str, Dict[str, Any]]:
+         """Get detailed zone tracking information."""
+         return {
+             zone_name: {
+                 "current_count": self._zone_current_counts.get(zone_name, 0),
+                 "total_count": self._zone_total_counts.get(zone_name, 0),
+                 "current_track_ids": list(self._zone_current_track_ids.get(zone_name, set())),
+                 "total_track_ids": list(self._zone_total_track_ids.get(zone_name, set()))
+             }
+             for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
+         }
+ 
+     def get_zone_current_count(self, zone_name: str) -> int:
+         """Get the current count of people in a specific zone."""
+         return self._zone_current_counts.get(zone_name, 0)
+ 
+     def get_zone_total_count(self, zone_name: str) -> int:
+         """Get the total count of people who have been in a specific zone."""
+         return self._zone_total_counts.get(zone_name, 0)
+ 
+     def get_all_zone_counts(self) -> Dict[str, Dict[str, int]]:
+         """Get current and total counts for all zones."""
+         return {
+             zone_name: {
+                 "current": self._zone_current_counts.get(zone_name, 0),
+                 "total": self._zone_total_counts.get(zone_name, 0)
+             }
+             for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
+         }
+ 
+     def _update_line_crossings(self, detections: List[Dict], line_config: LineConfig) -> Dict[str, Any]:
+         """Update line crossing tracking with current frame data and detect crossings."""
+         if not line_config or not line_config.points or len(line_config.points) != 2:
+             return {}
+ 
+         # Record side labels and make sure a crossing set exists for each direction
+         self._side1_label = line_config.side1_label
+         self._side2_label = line_config.side2_label
+         direction1 = "side1_to_side2"
+         direction2 = "side2_to_side1"
+         if direction1 not in self._line_crossed_tracks:
+             self._line_crossed_tracks[direction1] = set()
+         if direction2 not in self._line_crossed_tracks:
+             self._line_crossed_tracks[direction2] = set()
+ 
+         crossings_this_frame = {direction1: 0, direction2: 0}
+ 
+         for detection in detections:
+             canonical_id = detection.get("track_id")
+             if canonical_id not in self._canonical_tracks:
+                 continue
+ 
+             info = self._canonical_tracks[canonical_id]
+             if "last_side" not in info:
+                 info["last_side"] = None
+ 
+             bbox = detection.get("bounding_box", detection.get("bbox"))
+             if not bbox:
+                 continue
+ 
+             center = get_bbox_bottom25_center(bbox)
+             side = self._compute_side(center, line_config.points)
+ 
+             if info["last_side"] is None:
+                 if side != "on_line":
+                     info["last_side"] = side
+                 continue
+ 
+             if side != info["last_side"] and side != "on_line":
+                 direction = f"{info['last_side']}_to_{side}"
+                 if canonical_id not in self._line_crossed_tracks.get(direction, set()):
+                     self._line_crossed_tracks[direction].add(canonical_id)
+                     crossings_this_frame[direction] += 1
+                 info["last_side"] = side
+             elif side == "on_line":
+                 # Keep the previous side while the point sits exactly on the line
+                 pass
+             else:
+                 info["last_side"] = side
+ 
+         total_crossings = {direction: len(tracks) for direction, tracks in self._line_crossed_tracks.items()}
+ 
+         return {
+             "crossings_this_frame": crossings_this_frame,
+             "total_crossings": total_crossings
+         }
+ 
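+     # --- Editor's illustrative sketch (hypothetical frames, not original) - #
+     # For a horizontal line [[0, 240], [640, 240]] and one canonical track
+     # whose anchor moves from (100, 100) to (100, 300) between frames,
+     # _compute_side yields "side1" then "side2" (the cross product changes
+     # sign), so one "side1_to_side2" crossing is recorded; the set membership
+     # check stops the same track from being counted twice in that direction.
+ 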
+     def _compute_side(self, point: tuple, line_points: List[List[float]]) -> str:
+         """Compute which side of the line the point is on."""
+         if not line_points or len(line_points) != 2:
+             return "on_line"
+ 
+         (x1, y1), (x2, y2) = line_points
+         dx = x2 - x1
+         dy = y2 - y1
+         px, py = point
+         cross = (px - x1) * dy - (py - y1) * dx
+ 
+         if cross > 0:
+             return "side1"
+         elif cross < 0:
+             return "side2"
+         else:
+             return "on_line"
+ 
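+     # --- Editor's worked example ------------------------------------------ #
+     # For the vertical line [(0, 0), (0, 10)] (dx = 0, dy = 10), the point
+     # (5, 3) gives cross = (5 - 0) * 10 - (3 - 0) * 0 = 50 > 0 -> "side1";
+     # (-5, 3) gives -50 -> "side2"; (0, 7) gives 0 -> "on_line".
+ 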
+     def _format_timestamp_for_stream(self, timestamp: float) -> str:
+         """Format a timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+         dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')
+ 
+     def _format_timestamp_for_video(self, timestamp: float) -> str:
+         """Format a timestamp for video chunks (HH:MM:SS.ss format)."""
+         hours = int(timestamp // 3600)
+         minutes = int((timestamp % 3600) // 60)
+         seconds = round(float(timestamp % 60), 2)
+         # Zero-pad seconds and keep two fractional digits, matching the
+         # "00:00:00.00" fallback used elsewhere (the original ":.1f" format
+         # dropped the second digit the preceding round() preserved)
+         return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"
+ 
+     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+         """Get a formatted current timestamp based on stream type."""
+         if not stream_info:
+             return "00:00:00.00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 if frame_id:
+                     start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                 else:
+                     start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                 # NOTE: computed but currently unused; the stream wall-clock time is returned instead
+                 stream_time_str = self._format_timestamp_for_video(start_time)
+                 return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+             else:
+                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+ 
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             if frame_id:
+                 start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+             else:
+                 start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+             # NOTE: computed but currently unused; the stream wall-clock time is returned instead
+             stream_time_str = self._format_timestamp_for_video(start_time)
+             return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
+         else:
+             stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+             if stream_time_str:
+                 try:
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                     return self._format_timestamp_for_stream(timestamp)
+                 except Exception:
+                     return self._format_timestamp_for_stream(time.time())
+             else:
+                 return self._format_timestamp_for_stream(time.time())
+ 
+     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+         """Get a formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+         if not stream_info:
+             return "00:00:00"
+ 
+         if precision:
+             if self.start_timer is None:
+                 self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+                 return self._format_timestamp(self.start_timer)
+             elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+                 self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+                 return self._format_timestamp(self.start_timer)
+             else:
+                 return self._format_timestamp(self.start_timer)
+ 
+         if self.start_timer is None:
+             self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+             return self._format_timestamp(self.start_timer)
+         elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
+             self.start_timer = stream_info.get("input_settings", {}).get("stream_time", "NA")
+             return self._format_timestamp(self.start_timer)
+         else:
+             if self.start_timer is not None:
+                 return self._format_timestamp(self.start_timer)
+ 
+             if self._tracking_start_time is None:
+                 stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                 if stream_time_str:
+                     try:
+                         timestamp_str = stream_time_str.replace(" UTC", "")
+                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                         self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                     except Exception:
+                         self._tracking_start_time = time.time()
+                 else:
+                     self._tracking_start_time = time.time()
+ 
+             dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+             dt = dt.replace(minute=0, second=0, microsecond=0)
+             return dt.strftime('%Y:%m:%d %H:%M:%S')
+ 
+     def _extract_frame_id_from_tracking(self, frame_detections: List[Dict], frame_key: str) -> str:
+         """Extract the frame ID from tracking data."""
+         # Priority 1: Check if detections carry frame information
+         if frame_detections and len(frame_detections) > 0:
+             first_detection = frame_detections[0]
+             if "frame" in first_detection:
+                 return str(first_detection["frame"])
+             elif "frame_id" in first_detection:
+                 return str(first_detection["frame_id"])
+         # Priority 2: Use the frame_key from the input data
+         return str(frame_key)
+ 
+     def _robust_zone_total(self, zone_count):
+         """Robustly compute a zone total from an int, list, or dict of ints/lists."""
+         if isinstance(zone_count, dict):
+             total = 0
+             for v in zone_count.values():
+                 if isinstance(v, int):
+                     total += v
+                 elif isinstance(v, list):
+                     total += len(v)
+             return total
+         elif isinstance(zone_count, list):
+             return len(zone_count)
+         elif isinstance(zone_count, int):
+             return zone_count
+         else:
+             return 0
+ 
+     # --------------------------------------------------------------------- #
+     #            Private helpers for canonical track aliasing                #
+     # --------------------------------------------------------------------- #
+ 
+     def _compute_iou(self, box1: Any, box2: Any) -> float:
+         """Compute IoU between two bounding boxes that may be either list or dict.
+ 
+         Falls back to geometry_utils.calculate_iou when both boxes are dicts.
+         """
+         # Handle dict format directly with calculate_iou (supports many key layouts)
+         if isinstance(box1, dict) and isinstance(box2, dict):
+             return calculate_iou(box1, box2)
+ 
+         # Helper to convert a bbox (dict or list) to a list [x1, y1, x2, y2]
+         def _bbox_to_list(bbox):
+             if bbox is None:
+                 return []
+             if isinstance(bbox, list):
+                 return bbox[:4] if len(bbox) >= 4 else []
+             if isinstance(bbox, dict):
+                 if "xmin" in bbox:
+                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                 if "x1" in bbox:
+                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                 # Fallback: take the first four values in insertion order
+                 values = list(bbox.values())
+                 return values[:4] if len(values) >= 4 else []
+             # Unsupported type
+             return []
+ 
+         list1 = _bbox_to_list(box1)
+         list2 = _bbox_to_list(box2)
+ 
+         if len(list1) < 4 or len(list2) < 4:
+             return 0.0
+ 
+         x1_min, y1_min, x1_max, y1_max = list1
+         x2_min, y2_min, x2_max, y2_max = list2
+ 
+         # Ensure correct ordering of coordinates
+         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+ 
+         inter_x_min = max(x1_min, x2_min)
+         inter_y_min = max(y1_min, y2_min)
+         inter_x_max = min(x1_max, x2_max)
+         inter_y_max = min(y1_max, y2_max)
+ 
+         inter_w = max(0.0, inter_x_max - inter_x_min)
+         inter_h = max(0.0, inter_y_max - inter_y_min)
+         inter_area = inter_w * inter_h
+ 
+         area1 = (x1_max - x1_min) * (y1_max - y1_min)
+         area2 = (x2_max - x2_min) * (y2_max - y2_min)
+         union_area = area1 + area2 - inter_area
+ 
+         return (inter_area / union_area) if union_area > 0 else 0.0
+ 
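+     # --- Editor's worked example (hypothetical boxes) ---------------------- #
+     # For list-form boxes [0, 0, 10, 10] and [5, 5, 15, 15]:
+     #     intersection = 5 * 5 = 25
+     #     union        = 100 + 100 - 25 = 175
+     #     IoU          = 25 / 175 ~= 0.143
+     # Whether two such tracks would be aliased by _merge_or_register_track
+     # depends on self._track_merge_iou_threshold, whose value is defined
+     # outside this excerpt.
+ 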
+     def _get_canonical_id(self, raw_id: Any) -> Any:
+         """Return the canonical ID for a raw tracker-generated ID."""
+         return self._track_aliases.get(raw_id, raw_id)
+ 
+     def _merge_or_register_track(self, raw_id: Any, bbox: List[float]) -> Any:
+         """Merge the raw track into an existing canonical track if possible,
+         otherwise register it as a new canonical track. Returns the canonical
+         ID to use for counting.
+         """
+         now = time.time()
+ 
+         # Fast path: raw_id already mapped
+         if raw_id in self._track_aliases:
+             canonical_id = self._track_aliases[raw_id]
+             track_info = self._canonical_tracks.get(canonical_id)
+             if track_info is not None:
+                 track_info["last_bbox"] = bbox
+                 track_info["last_update"] = now
+                 track_info["raw_ids"].add(raw_id)
+             return canonical_id
+ 
+         # Attempt to merge with an existing canonical track
+         for canonical_id, info in self._canonical_tracks.items():
+             # Only consider recently updated tracks to avoid stale matches
+             if now - info["last_update"] > self._track_merge_time_window:
+                 continue
+ 
+             iou = self._compute_iou(bbox, info["last_bbox"])
+             if iou >= self._track_merge_iou_threshold:
+                 # Merge raw_id into the canonical track
+                 self._track_aliases[raw_id] = canonical_id
+                 info["last_bbox"] = bbox
+                 info["last_update"] = now
+                 info["raw_ids"].add(raw_id)
+                 self.logger.debug(
+                     f"Merged raw track {raw_id} into canonical track {canonical_id} (IoU={iou:.2f})")
+                 return canonical_id
+ 
+         # No match found - create a new canonical track
+         canonical_id = raw_id
+         self._track_aliases[raw_id] = canonical_id
+         self._canonical_tracks[canonical_id] = {
+             "last_bbox": bbox,
+             "last_update": now,
+             "raw_ids": {raw_id},
+             "last_side": None  # Initialize last_side for line crossing
+         }
+         self.logger.debug(f"Registered new canonical track {canonical_id}")
+         return canonical_id
+ 
+     def _format_timestamp(self, timestamp: Any) -> str:
+         """Format a timestamp so that exactly two digits follow the decimal point.
+ 
+         The input can be either:
+         1. A numeric Unix timestamp (``float`` / ``int``) - it will first be converted to a
+            string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
+         2. A string already following the same layout.
+ 
+         The returned value preserves the overall format of the input but truncates or pads
+         the fractional seconds portion to **exactly two digits**.
+ 
+         Example
+         -------
+         >>> self._format_timestamp("2025-08-19-04:22:47.187574 UTC")
+         '2025-08-19-04:22:47.18 UTC'
+         """
+         # Convert numeric timestamps to the expected string representation first
+         if isinstance(timestamp, (int, float)):
+             timestamp = datetime.fromtimestamp(timestamp, timezone.utc).strftime(
+                 '%Y-%m-%d-%H:%M:%S.%f UTC'
+             )
+ 
+         # Ensure we are working with a string from here on
+         if not isinstance(timestamp, str):
+             return str(timestamp)
+ 
+         # If there is no fractional component, simply return the original string
+         if '.' not in timestamp:
+             return timestamp
+ 
+         # Split out the main portion (up to the decimal point)
+         main_part, fractional_and_suffix = timestamp.split('.', 1)
+ 
+         # Separate fractional digits from the suffix (typically ' UTC')
+         if ' ' in fractional_and_suffix:
+             fractional_part, suffix = fractional_and_suffix.split(' ', 1)
+             suffix = ' ' + suffix  # Re-attach the space removed by split
+         else:
+             fractional_part, suffix = fractional_and_suffix, ''
+ 
+         # Guarantee exactly two digits for the fractional part
+         fractional_part = (fractional_part + '00')[:2]
+ 
+         return f"{main_part}.{fractional_part}{suffix}"
+ 
+     def _get_tracking_start_time(self) -> str:
+         """Get the tracking start time, formatted as a string."""
+         if self._tracking_start_time is None:
+             return "N/A"
+         return self._format_timestamp(self._tracking_start_time)
+ 
+     def _set_tracking_start_time(self) -> None:
+         """Set the tracking start time to the current time."""
+         self._tracking_start_time = time.time()
+ 
+     def get_config_schema(self) -> Dict[str, Any]:
+         """Get the configuration schema for people counting."""
+         return {
+             "type": "object",
+             "properties": {
+                 "confidence_threshold": {
+                     "type": "number",
+                     "minimum": 0.0,
+                     "maximum": 1.0,
+                     "default": 0.5,
+                     "description": "Minimum confidence threshold for detections"
+                 },
+                 "enable_tracking": {
+                     "type": "boolean",
+                     "default": False,
+                     "description": "Enable tracking for unique counting"
+                 },
+                 "zone_config": {
+                     "type": "object",
+                     "properties": {
+                         "zones": {
+                             "type": "object",
+                             "additionalProperties": {
+                                 "type": "array",
+                                 "items": {
+                                     "type": "array",
+                                     "items": {"type": "number"},
+                                     "minItems": 2,
+                                     "maxItems": 2
+                                 },
+                                 "minItems": 3
+                             },
+                             "description": "Zone definitions as polygons"
+                         },
+                         "zone_confidence_thresholds": {
+                             "type": "object",
+                             "additionalProperties": {"type": "number", "minimum": 0.0, "maximum": 1.0},
+                             "description": "Per-zone confidence thresholds"
+                         }
+                     }
+                 },
+                 "line_config": {
+                     "type": "object",
+                     "properties": {
+                         "points": {
+                             "type": "array",
+                             "items": {
+                                 "type": "array",
+                                 "items": {"type": "number"},
+                                 "minItems": 2,
+                                 "maxItems": 2
+                             },
+                             "minItems": 2,
+                             "maxItems": 2,
+                             "description": "Line defined by two points [[x1,y1],[x2,y2]]"
+                         },
+                         "side1_label": {
+                             "type": "string",
+                             "default": "Side A",
+                             "description": "Label for one side of the line"
+                         },
+                         "side2_label": {
+                             "type": "string",
+                             "default": "Side B",
+                             "description": "Label for the other side of the line"
+                         }
+                     },
+                     "description": "Configuration for line crossing detection"
+                 },
+                 "person_categories": {
+                     "type": "array",
+                     "items": {"type": "string"},
+                     "default": ["person", "people"],
+                     "description": "Category names that represent people"
+                 },
+                 "enable_unique_counting": {
+                     "type": "boolean",
+                     "default": True,
+                     "description": "Enable unique people counting using tracking"
+                 },
+                 "time_window_minutes": {
+                     "type": "integer",
+                     "minimum": 1,
+                     "default": 60,
+                     "description": "Time window for counting analysis in minutes"
+                 },
+                 "alert_config": {
+                     "type": "object",
+                     "properties": {
+                         "count_thresholds": {
+                             "type": "object",
+                             "additionalProperties": {"type": "integer", "minimum": 1},
+                             "description": "Count thresholds for alerts"
+                         },
+                         "occupancy_thresholds": {
+                             "type": "object",
+                             "additionalProperties": {"type": "integer", "minimum": 1},
+                             "description": "Zone occupancy thresholds for alerts"
+                         },
+                         "crossing_thresholds": {
+                             "type": "object",
+                             "additionalProperties": {"type": "integer", "minimum": 1},
+                             "description": "Line crossing thresholds for alerts, keyed like 'Side A_to_Side B'"
+                         },
+                         "alert_type": {
+                             "type": "array",
+                             "items": {"type": "string"},
+                             "default": ["Default"],
+                             "description": "Alert delivery channel(s), e.g. email, sms"
+                         },
+                         "alert_value": {
+                             "type": "array",
+                             "items": {"type": "string"},
+                             "default": ["JSON"],
+                             "description": "Value(s) for the chosen alert type, e.g. an email address when the type is email"
+                         },
+                         "alert_incident_category": {
+                             "type": "array",
+                             "items": {"type": "string"},
+                             "default": ["Incident Detection Alert"],
+                             "description": "Category name(s) used to group and label alerts"
+                         }
+                     }
+                 }
+             },
+             "required": ["confidence_threshold"],
+             "additionalProperties": False
+         }
+ 
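+     # --- Editor's illustrative sketch (hypothetical values) --------------- #
+     # A config document that satisfies the schema above:
+     #
+     #     {
+     #         "confidence_threshold": 0.6,
+     #         "enable_unique_counting": True,
+     #         "zone_config": {"zones": {"lobby": [[0, 0], [640, 0], [640, 480]]}},
+     #         "line_config": {"points": [[0, 240], [640, 240]],
+     #                         "side1_label": "Entry", "side2_label": "Exit"},
+     #         "alert_config": {"count_thresholds": {"all": 10}}
+     #     }
+ 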
+     def create_default_config(self, **overrides) -> PeopleTrackingConfig:
+         """Create a default configuration with optional overrides."""
+         defaults = {
+             "category": self.category,
+             "usecase": self.name,
+             "confidence_threshold": 0.5,
+             "enable_tracking": False,
+             "enable_analytics": True,
+             "enable_unique_counting": True,
+             "time_window_minutes": 60,
+             "person_categories": ["person", "people"],
+         }
+         defaults.update(overrides)
+         return PeopleTrackingConfig(**defaults)
+ 
+     def _apply_smoothing(self, data: Any, config: PeopleTrackingConfig) -> Any:
+         """Apply smoothing to tracking data if enabled."""
+         if self.smoothing_tracker is None:
+             smoothing_config = BBoxSmoothingConfig(
+                 smoothing_algorithm=config.smoothing_algorithm,
+                 window_size=config.smoothing_window_size,
+                 cooldown_frames=config.smoothing_cooldown_frames,
+                 confidence_threshold=config.confidence_threshold or 0.5,
+                 confidence_range_factor=config.smoothing_confidence_range_factor,
+                 enable_smoothing=True
+             )
+             self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+ 
+         smoothed_data = bbox_smoothing(data, self.smoothing_tracker.config, self.smoothing_tracker)
+         self.logger.debug("Applied bbox smoothing to tracking results")
+         return smoothed_data