matrice-analytics 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1672 @@
1
+ """
2
+ intrusion detection use case implementation.
3
+
4
+ This module provides a clean implementation of intrusion detection functionality
5
+ with zone-based analysis, tracking, and alerting capabilities.
6
+ """
7
+
8
+ from typing import Any, Dict, List, Optional, Set
9
+ from dataclasses import asdict
10
+ import time
11
+ from datetime import datetime, timezone
12
+
13
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
14
+ from ..core.config import IntrusionConfig, ZoneConfig, AlertConfig
15
+ from ..utils import (
16
+ filter_by_confidence,
17
+ filter_by_categories,
18
+ apply_category_mapping,
19
+ count_objects_by_category,
20
+ count_objects_in_zones,
21
+ calculate_counting_summary,
22
+ match_results_structure,
23
+ bbox_smoothing,
24
+ BBoxSmoothingConfig,
25
+ BBoxSmoothingTracker,
26
+ calculate_iou
27
+ )
28
+ from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
29
+
30
+
31
+ class IntrusionUseCase(BaseProcessor):
32
+ """Intrusion Detection use case with zone analysis and alerting."""
33
+
34
+ def __init__(self):
35
+ """Initialize intrusion detection use case."""
36
+ super().__init__("intrusion_detection")
37
+ self.category = "security"
38
+ self.CASE_TYPE: Optional[str] = 'intrusion_detection'
39
+ self.CASE_VERSION: Optional[str] = '1.3'
40
+
41
+ # Track ID storage for total count calculation
42
+ self._total_track_ids = set() # Store all unique track IDs seen across calls
43
+ self._current_frame_track_ids = set() # Store track IDs from current frame
44
+ self._total_count = 0 # Cached total count
45
+ self._last_update_time = time.time() # Track when last updated
46
+
47
+ # Zone-based tracking storage
48
+ self._zone_current_track_ids = {} # zone_name -> set of current track IDs in zone
49
+ self._zone_total_track_ids = {} # zone_name -> set of all track IDs that have been in zone
50
+ self._zone_current_counts = {} # zone_name -> current count in zone
51
+ self._zone_total_counts = {} # zone_name -> total count that have been in zone
52
+
53
+ # Global unique person tracking across all zones
54
+ self._global_unique_person_track_ids = set() # All unique track IDs that have ever been in any zone
55
+ self._global_unique_person_count = 0 # Total count of unique people across all zones
56
+
57
+ # Frame counter for tracking total frames processed
58
+ self._total_frame_counter = 0 # Total frames processed across all calls
59
+
60
+ # Global frame offset for video chunk processing
61
+ self._global_frame_offset = 0 # Offset to add to local frame IDs for global frame numbering
62
+ self._frames_in_current_chunk = 0 # Number of frames in current chunk
63
+
64
+ # Initialize smoothing tracker
65
+ self.smoothing_tracker = None
66
+
67
+ # Track start time for "TOTAL SINCE" calculation
68
+ self._tracking_start_time = None
69
+
70
+ # --------------------------------------------------------------------- #
71
+ # Tracking aliasing structures to merge fragmented IDs #
72
+ # --------------------------------------------------------------------- #
73
+ # Maps raw tracker IDs generated by ByteTrack to a stable canonical ID
74
+ # that represents a real-world person. This helps avoid double counting
75
+ # when the tracker loses a target temporarily and assigns a new ID.
76
+ self._track_aliases: Dict[Any, Any] = {}
77
+
78
+ # Stores metadata about each canonical track such as its last seen
79
+ # bounding box, last update timestamp and all raw IDs that have been
80
+ # merged into it.
81
+ self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
82
+
83
+ # IoU threshold above which two bounding boxes are considered to belong
84
+ # to the same person (empirically chosen; adjust in production if
85
+ # needed).
86
+ self._track_merge_iou_threshold: float = 0.001
87
+
88
+ # Only merge with canonical tracks that were updated within this time
89
+ # window (in seconds). This prevents accidentally merging tracks that
90
+ # left the scene long ago.
91
+ self._track_merge_time_window: float = 20.0
92
+
93
+ self._ascending_alert_list: List[int] = []
94
+ self.current_incident_end_timestamp: str = "N/A"
95
+
96
+
97
+ def process(self, data: Any, config: ConfigProtocol,
98
+ context: Optional[ProcessingContext] = None, stream_info: Optional[Any] = None) -> ProcessingResult:
99
+ """
100
+ Process intrusion detection use case - automatically detects single or multi-frame structure.
101
+
102
+ Args:
103
+ data: Raw model output (detection or tracking format)
104
+ config: intrusion detection configuration
105
+ context: Processing context
106
+ stream_info: Stream information containing frame details (optional)
107
+
108
+ Returns:
109
+ ProcessingResult: Processing result with standardized agg_summary structure
110
+ """
111
+ start_time = time.time()
112
+
113
+ try:
114
+ # Ensure we have the right config type
115
+ if not isinstance(config, IntrusionConfig):
116
+ return self.create_error_result(
117
+ "Invalid configuration type for intrusion detection",
118
+ usecase=self.name,
119
+ category=self.category,
120
+ context=context
121
+ )
122
+
123
+ # Initialize processing context if not provided
124
+ if context is None:
125
+ context = ProcessingContext()
126
+
127
+ # Detect input format and frame structure
128
+ input_format = match_results_structure(data)
129
+ context.input_format = input_format
130
+ context.confidence_threshold = config.confidence_threshold
131
+
132
+ is_multi_frame = self.detect_frame_structure(data)
133
+
134
+ #self.logger.info(f"Processing people counting - Format: {input_format.value}, Multi-frame: {is_multi_frame}")
135
+
136
+ # Apply smoothing if enabled
137
+ if config.enable_smoothing and input_format == ResultFormat.OBJECT_TRACKING:
138
+ data = self._apply_smoothing(data, config)
139
+
140
+ # Process based on frame structure
141
+ if is_multi_frame:
142
+
143
+ return self._process_multi_frame(data, config, context, stream_info)
144
+ else:
145
+ return self._process_single_frame(data, config, context, stream_info)
146
+
147
+ except Exception as e:
148
+ self.logger.error(f"intrusion detection failed: {str(e)}", exc_info=True)
149
+
150
+ if context:
151
+ context.mark_completed()
152
+
153
+ return self.create_error_result(
154
+ str(e),
155
+ type(e).__name__,
156
+ usecase=self.name,
157
+ category=self.category,
158
+ context=context
159
+ )
160
+
161
+ def _process_multi_frame(self, data: Dict, config: IntrusionConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
162
+ """Process multi-frame data to generate frame-wise agg_summary."""
163
+
164
+ frame_incidents = {}
165
+ frame_tracking_stats = {}
166
+ frame_business_analytics = {}
167
+ frame_human_text = {}
168
+ frame_alerts = {}
169
+
170
+ # Increment total frame counter
171
+ frames_in_this_call = len(data)
172
+ self._total_frame_counter += frames_in_this_call
173
+
174
+ # Process each frame individually
175
+ for frame_key, frame_detections in data.items():
176
+ # Extract frame ID from tracking data
177
+ frame_id = self._extract_frame_id_from_tracking(frame_detections, frame_key)
178
+ global_frame_id = self.get_global_frame_id(frame_id)
179
+
180
+ # Process this single frame's detections
181
+ alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
182
+ frame_detections, config, global_frame_id, stream_info
183
+ )
184
+ incidents = incidents_list[0] if incidents_list else {}
185
+ tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
186
+ business_analytics = business_analytics_list[0] if business_analytics_list else {}
187
+ summary = summary_list[0] if summary_list else {}
188
+
189
+ # Store frame-wise results
190
+ if incidents:
191
+ frame_incidents[global_frame_id] = incidents
192
+ if tracking_stats:
193
+ frame_tracking_stats[global_frame_id] = tracking_stats
194
+ if business_analytics:
195
+ frame_business_analytics[global_frame_id] = business_analytics
196
+ if summary:
197
+ frame_human_text[global_frame_id] = summary
198
+ if alerts:
199
+ frame_alerts[global_frame_id] = alerts
200
+
201
+ # Update global frame offset after processing this chunk
202
+ self.update_global_frame_offset(frames_in_this_call)
203
+
204
+ # Create frame-wise agg_summary
205
+ agg_summary = self.create_frame_wise_agg_summary(
206
+ frame_incidents, frame_tracking_stats, frame_business_analytics, frame_alerts,
207
+ frame_human_text=frame_human_text
208
+ )
209
+
210
+ # Mark processing as completed
211
+ context.mark_completed()
212
+
213
+ # Create result with standardized agg_summary
214
+ return self.create_result(
215
+ data={"agg_summary": agg_summary},
216
+ usecase=self.name,
217
+ category=self.category,
218
+ context=context
219
+ )
220
+
221
+ def _process_single_frame(self, data: Any, config: IntrusionConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
222
+ """Process single frame data and return standardized agg_summary."""
223
+
224
+ current_frame = stream_info.get("input_settings", {}).get("start_frame", "current_frame")
225
+ # Process frame data
226
+ alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
227
+ data, config, current_frame, stream_info
228
+ )
229
+ incidents = incidents_list[0] if incidents_list else {}
230
+ tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
231
+ business_analytics = business_analytics_list[0] if business_analytics_list else {}
232
+ summary = summary_list[0] if summary_list else {}
233
+
234
+ # Create single-frame agg_summary
235
+ agg_summary = self.create_agg_summary(
236
+ current_frame, incidents, tracking_stats, business_analytics, alerts, human_text=summary
237
+ )
238
+
239
+ # Mark processing as completed
240
+ context.mark_completed()
241
+
242
+ # Create result with standardized agg_summary
243
+ return self.create_result(
244
+ data={"agg_summary": agg_summary},
245
+ usecase=self.name,
246
+ category=self.category,
247
+ context=context
248
+ )
249
+
250
+
251
+ def _process_frame_detections(self, frame_data: Any, config: IntrusionConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> tuple:
252
+ """Process detections from a single frame and return standardized components."""
253
+
254
+ # Convert frame_data to list if it's not already
255
+ if isinstance(frame_data, list):
256
+ frame_detections = frame_data
257
+ else:
258
+ # Handle other formats as needed
259
+ frame_detections = []
260
+
261
+ # Step 1: Apply confidence filtering to this frame
262
+ if config.confidence_threshold is not None:
263
+ frame_detections = [d for d in frame_detections if d.get("confidence", 0) >= config.confidence_threshold]
264
+
265
+ # Step 2: Apply category mapping if provided
266
+ if config.index_to_category:
267
+ frame_detections = apply_category_mapping(frame_detections, config.index_to_category)
268
+
269
+ # Step 3: Filter to person categories
270
+ if config.person_categories:
271
+ frame_detections = [d for d in frame_detections if d.get("category") in config.person_categories]
272
+
273
+ # Step 4: Create counting summary for this frame
274
+ counting_summary = {
275
+ "total_objects": len(frame_detections),
276
+ "detections": frame_detections,
277
+ "categories": {}
278
+ }
279
+
280
+ # Count by category
281
+ for detection in frame_detections:
282
+ category = detection.get("category", "unknown")
283
+ counting_summary["categories"][category] = counting_summary["categories"].get(category, 0) + 1
284
+
285
+ # Debug logging for detections
286
+ self.logger.debug(f"Frame detections: {len(frame_detections)} detections")
287
+ for i, det in enumerate(frame_detections):
288
+ self.logger.debug(f"Detection {i}: category={det.get('category')}, bbox={det.get('bounding_box', det.get('bbox'))}, track_id={det.get('track_id')}")
289
+
290
+ # Step 5: Zone analysis for this frame
291
+ zone_analysis = {}
292
+ if config.zone_config and config.zone_config.zones:
293
+ # Debug logging for zone configuration
294
+ self.logger.debug(f"Zone config: {config.zone_config.zones}")
295
+
296
+ # Convert single frame to format expected by count_objects_in_zones
297
+ frame_data = frame_detections #[frame_detections]
298
+ zone_analysis = count_objects_in_zones(frame_data, config.zone_config.zones)
299
+
300
+ # Debug logging for zone analysis
301
+ self.logger.debug(f"Original zone_analysis: {zone_analysis}")
302
+
303
+ # Always update zone tracking for current frame counting, regardless of enable_tracking setting
304
+ if zone_analysis:
305
+ enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, frame_detections, config)
306
+ # Merge enhanced zone analysis with original zone analysis
307
+ for zone_name, enhanced_data in enhanced_zone_analysis.items():
308
+ zone_analysis[zone_name] = enhanced_data
309
+
310
+ # Debug logging for enhanced zone analysis
311
+ self.logger.debug(f"Enhanced zone_analysis: {zone_analysis}")
312
+
313
+ # Step 4.5: Always update tracking state (regardless of enable_unique_counting setting)
314
+ self._update_tracking_state(counting_summary)
315
+
316
+ # Step 5: Generate insights and alerts for this frame
317
+ alerts = self._check_alerts(counting_summary, zone_analysis, config, frame_id)
318
+
319
+ # Step 6: Generate summary and standardized agg_summary components for this frame
320
+ incidents = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_id, stream_info)
321
+ tracking_stats = self._generate_tracking_stats(counting_summary, zone_analysis, config, frame_id, alerts, stream_info)
322
+ business_analytics = self._generate_business_analytics(counting_summary, zone_analysis, config, frame_id, stream_info, is_empty=True)
323
+ summary = self._generate_summary(counting_summary, incidents, tracking_stats, business_analytics, alerts)
324
+
325
+ # Return standardized components as tuple
326
+ return alerts, incidents, tracking_stats, business_analytics, summary
327
+
328
+ def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: IntrusionConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
329
+ """Generate standardized incidents for the agg_summary structure."""
330
+
331
+ camera_info = self.get_camera_info_from_stream(stream_info)
332
+ incidents = []
333
+ total_people = counting_summary.get("total_objects", 0)
334
+ current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
335
+ self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
336
+
337
+ alert_settings=[]
338
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
339
+ alert_settings.append({
340
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
341
+ "incident_category": self.CASE_TYPE,
342
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
343
+ "ascending": True,
344
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
345
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
346
+ }
347
+ })
348
+ human_text_lines = []
349
+ if total_people > 0:
350
+ # Determine event level based on thresholds
351
+
352
+ level = "info"
353
+ intensity = 5.0
354
+ start_timestamp = self._get_start_timestamp_str(stream_info)
355
+ if start_timestamp and self.current_incident_end_timestamp=='N/A':
356
+ self.current_incident_end_timestamp = 'Incident still active'
357
+ elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
358
+ if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
359
+ self.current_incident_end_timestamp = current_timestamp
360
+ elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
361
+ self.current_incident_end_timestamp = 'N/A'
362
+
363
+ if config.alert_config and config.alert_config.count_thresholds:
364
+ threshold = config.alert_config.count_thresholds.get("all", 10)
365
+ intensity = min(10.0, (total_people / threshold) * 10)
366
+
367
+ if intensity >= 9:
368
+ level = "critical"
369
+ self._ascending_alert_list.append(3)
370
+ elif intensity >= 7:
371
+ level = "significant"
372
+ self._ascending_alert_list.append(2)
373
+ elif intensity >= 5:
374
+ level = "medium"
375
+ self._ascending_alert_list.append(1)
376
+ else:
377
+ level = "low"
378
+ self._ascending_alert_list.append(0)
379
+ else:
380
+ if total_people > 30:
381
+ level = "critical"
382
+ intensity = 10.0
383
+ self._ascending_alert_list.append(3)
384
+ elif total_people > 25:
385
+ level = "significant"
386
+ intensity = 9.0
387
+ self._ascending_alert_list.append(2)
388
+ elif total_people > 15:
389
+ level = "medium"
390
+ intensity = 7.0
391
+ self._ascending_alert_list.append(1)
392
+ else:
393
+ level = "low"
394
+ intensity = min(10.0, total_people / 3.0)
395
+ self._ascending_alert_list.append(0)
396
+
397
+ # Generate human text in new format
398
+ human_text_lines.append(f"INCIDENTS DETECTED @ {current_timestamp}:")
399
+ human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
400
+ human_text = "\n".join(human_text_lines)
401
+
402
+ # Main people counting incident
403
+ # event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_id), incident_type=self.CASE_TYPE,
404
+ # severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
405
+ # start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
406
+ # level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
407
+ # incidents.append(event)
408
+ # else:
409
+ # self._ascending_alert_list.append(0)
410
+ # incidents.append({})
411
+
412
+ # Add zone-specific events if applicable
413
+ if zone_analysis:
414
+ level = "info"
415
+ intensity = 5.0
416
+ start_timestamp = self._get_start_timestamp_str(stream_info)
417
+ if start_timestamp and self.current_incident_end_timestamp=='N/A':
418
+ self.current_incident_end_timestamp = 'Incident still active'
419
+ elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
420
+ if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
421
+ self.current_incident_end_timestamp = current_timestamp
422
+ elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
423
+ self.current_incident_end_timestamp = 'N/A'
424
+ human_text_lines.append(f"\t- ZONE EVENTS:")
425
+ for zone_name, zone_data in zone_analysis.items():
426
+ # Use enhanced zone analysis for accurate counts
427
+ if isinstance(zone_data, dict) and "current_count" in zone_data:
428
+ # Enhanced zone analysis with current_count and total_count
429
+ current_count = zone_data.get("current_count", 0)
430
+ total_count = zone_data.get("total_count", 0)
431
+ zone_total = current_count # Use current count for incident severity
432
+ else:
433
+ # Fallback to original zone analysis format
434
+ zone_total = self._robust_zone_total(zone_data)
435
+
436
+ if zone_total > 0:
437
+ intensity = min(10.0, zone_total / 5.0)
438
+ zone_level = "info"
439
+ if intensity >= 9:
440
+ level = "critical"
441
+ self._ascending_alert_list.append(3)
442
+ elif intensity >= 7:
443
+ level = "significant"
444
+ self._ascending_alert_list.append(2)
445
+ elif intensity >= 5:
446
+ level = "medium"
447
+ self._ascending_alert_list.append(1)
448
+ else:
449
+ level = "low"
450
+ self._ascending_alert_list.append(0)
451
+
452
+ if zone_total > 0:
453
+ human_text_lines.append(f"\t\t- Zone name: {zone_name}")
454
+ if isinstance(zone_data, dict) and "current_count" in zone_data:
455
+ human_text_lines.append(f"\t\t\t- Current people in zone: {current_count}")
456
+ human_text_lines.append(f"\t\t\t- Total people who have been in zone: {total_count}")
457
+ else:
458
+ human_text_lines.append(f"\t\t\t- Total people in zone: {zone_total}")
459
+ # Main people counting incident
460
+ human_text = "\n".join(human_text_lines)
461
+ event= self.create_incident(incident_id=self.CASE_TYPE+'_'+'zone_'+zone_name+str(frame_id), incident_type=self.CASE_TYPE,
462
+ severity_level=zone_level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
463
+ start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
464
+ level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
465
+ incidents.append(event)
466
+ return incidents
467
+
468
+ def _generate_tracking_stats(
469
+ self,
470
+ counting_summary: Dict,
471
+ zone_analysis: Dict,
472
+ config: IntrusionConfig,
473
+ frame_id: str,
474
+ alerts: Any=[],
475
+ stream_info: Optional[Dict[str, Any]] = None
476
+ ) -> List[Dict]:
477
+ """Generate tracking stats using standardized methods."""
478
+
479
+ total_people = counting_summary.get("total_objects", 0)
480
+
481
+ # Get total count from cached tracking state
482
+ total_unique_count = self.get_total_count()
483
+ current_frame_count = self.get_current_frame_count()
484
+
485
+ # Get camera info using standardized method
486
+ camera_info = self.get_camera_info_from_stream(stream_info)
487
+
488
+ # Build total_counts using standardized method
489
+ total_counts = []
490
+ per_category_total = {}
491
+
492
+ for category in config.person_categories or ["person"]:
493
+ # Get count for this category from zone analysis or counting summary
494
+ category_total_count = 0
495
+ if zone_analysis:
496
+ for zone_data in zone_analysis.values():
497
+ if isinstance(zone_data, dict) and "total_count" in zone_data:
498
+ category_total_count += zone_data.get("total_count", 0)
499
+ elif isinstance(zone_data, dict):
500
+ # Sum up zone counts
501
+ for v in zone_data.values():
502
+ if isinstance(v, int):
503
+ category_total_count += v
504
+ elif isinstance(v, list):
505
+ category_total_count += len(v)
506
+ elif isinstance(zone_data, (int, list)):
507
+ category_total_count += len(zone_data) if isinstance(zone_data, list) else zone_data
508
+ else:
509
+ # Use total unique count from tracking state
510
+ category_total_count = total_unique_count
511
+
512
+ if category_total_count > 0:
513
+ total_counts.append(self.create_count_object(category, category_total_count))
514
+ per_category_total[category] = category_total_count
515
+
516
+ # Build current_counts using standardized method
517
+ current_counts = []
518
+ per_category_current = {}
519
+
520
+ for category in config.person_categories or ["person"]:
521
+ # Get current count for this category
522
+ category_current_count = 0
523
+ if zone_analysis:
524
+ # Use enhanced zone analysis current_count if available
525
+ for zone_data in zone_analysis.values():
526
+ if isinstance(zone_data, dict) and "current_count" in zone_data:
527
+ # This is the enhanced zone analysis with current_count field
528
+ category_current_count += zone_data.get("current_count", 0)
529
+ elif isinstance(zone_data, dict):
530
+ # This is the original zone analysis from count_objects_in_zones
531
+ # Look for category-specific counts
532
+ if category in zone_data:
533
+ category_current_count += zone_data[category]
534
+ else:
535
+ # Fallback: sum up all values if category not found
536
+ for v in zone_data.values():
537
+ if isinstance(v, int):
538
+ category_current_count += v
539
+ elif isinstance(v, list):
540
+ category_current_count += len(v)
541
+ elif isinstance(zone_data, (int, list)):
542
+ category_current_count += len(zone_data) if isinstance(zone_data, list) else zone_data
543
+ else:
544
+ # Count detections in current frame for this category
545
+ detections = counting_summary.get("detections", [])
546
+ category_current_count = sum(1 for d in detections if d.get("category") == category)
547
+
548
+ # Debug logging for current frame count calculation
549
+ self.logger.debug(f"Category {category}: current_count={category_current_count}, total_people={total_people}")
550
+ if zone_analysis:
551
+ self.logger.debug(f"Zone analysis structure: {zone_analysis}")
552
+
553
+ # Always include current count, even if 0, when there are people detected
554
+ if category_current_count > 0 or total_people > 0:
555
+ current_counts.append(self.create_count_object(category, category_current_count))
556
+ per_category_current[category] = category_current_count
557
+
558
+ # Prepare detections using standardized method (without confidence and track_id)
559
+ detections = []
560
+ for detection in counting_summary.get("detections", []):
561
+ bbox = detection.get("bounding_box", {})
562
+ category = detection.get("category", "person")
563
+ # Include segmentation if available (like in eg.json)
564
+ if detection.get("masks"):
565
+ segmentation= detection.get("masks", [])
566
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
567
+ elif detection.get("segmentation"):
568
+ segmentation= detection.get("segmentation")
569
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
570
+ elif detection.get("mask"):
571
+ segmentation= detection.get("mask")
572
+ detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
573
+ else:
574
+ detection_obj = self.create_detection_object(category, bbox)
575
+ detections.append(detection_obj)
576
+
577
+ # Build alerts and alert_settings arrays
578
+ alert_settings = []
579
+ if config.alert_config and hasattr(config.alert_config, 'alert_type'):
580
+ alert_settings.append({
581
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
582
+ "incident_category": self.CASE_TYPE,
583
+ "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
584
+ "ascending": True,
585
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
586
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
587
+ }
588
+ })
589
+ human_text_lines=[]
590
+ if zone_analysis:
591
+ current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
592
+ start_timestamp = self._get_start_timestamp_str(stream_info)
593
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
594
+ human_text_lines.append(f"\t- People Detected in Prohibited Boarding Gate: {total_people}")
595
+ human_text_lines.append("")
596
+ human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
597
+
598
+ for zone_name, zone_data in zone_analysis.items():
599
+ if isinstance(zone_data, dict) and "total_count" in zone_data:
600
+ # Enhanced zone analysis with total_count
601
+ total_count = zone_data.get("total_count", 0)
602
+ human_text_lines.append(f"\t- Zone name: {zone_name}")
603
+ human_text_lines.append(f"\t\t- Total count in Prohibited Boarding Gate: {total_count}")
604
+ else:
605
+ # Fallback to original zone analysis format
606
+ zone_total = self._robust_zone_total(zone_data)
607
+ human_text_lines.append(f"\t- Zone name: {zone_name}")
608
+ human_text_lines.append(f"\t\t- Total count in Prohibited Boarding Gate: {zone_total}")
609
+
610
+ if total_unique_count > 0:
611
+ human_text_lines.append(f"\t- Total unique people in the scene: {total_unique_count}")
612
+ if alerts:
613
+ for alert in alerts:
614
+ human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
615
+ else:
616
+ human_text_lines.append("Alerts: None")
617
+ human_text = "\n".join(human_text_lines)
618
+ else:
619
+ human_text = self._generate_human_text_for_tracking(total_people, total_unique_count, config, frame_id, alerts, stream_info)
620
+ # Create high precision timestamps for input_timestamp and reset_timestamp
621
+ high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
622
+ high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
623
+ # Debug logging for final current counts
624
+ self.logger.debug(f"Final current_counts: {current_counts}")
625
+ self.logger.debug(f"Final total_counts: {total_counts}")
626
+
627
+ # Create tracking_stat using standardized method
628
+ tracking_stat = self.create_tracking_stats(
629
+ total_counts, current_counts, detections, human_text, camera_info, alerts, alert_settings, start_time=high_precision_start_timestamp, reset_time=high_precision_reset_timestamp
630
+ )
631
+
632
+ return [tracking_stat]
633
+
634
+ def _generate_human_text_for_tracking(self, total_people: int, total_unique_count: int, config: IntrusionConfig, frame_id: str, alerts:Any=[], stream_info: Optional[Dict[str, Any]] = None) -> str:
635
+ """Generate human-readable text for tracking stats in old format."""
636
+ from datetime import datetime, timezone
637
+
638
+ human_text_lines=[]
639
+ current_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
640
+ start_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
641
+
642
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
643
+ human_text_lines.append(f"\t- People Detected: {total_people}")
644
+
645
+ human_text_lines.append("")
646
+ human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
647
+ human_text_lines.append(f"\t- Total unique people count: {total_unique_count}")
648
+
649
+ if alerts:
650
+ for alert in alerts:
651
+ human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
652
+ else:
653
+ human_text_lines.append("Alerts: None")
654
+
655
+ return ""
656
+
657
+ def _check_alerts(self, counting_summary: Dict, zone_analysis: Dict,
658
+ config: IntrusionConfig, frame_id: str) -> List[Dict]:
659
+ """Check for alert conditions and generate alerts."""
660
+ def get_trend(data, lookback=900, threshold=0.6):
661
+ '''
662
+ Determine if the trend is ascending or descending based on actual value progression.
663
+ Now works with values 0,1,2,3 (not just binary).
664
+ '''
665
+ window = data[-lookback:] if len(data) >= lookback else data
666
+ if len(window) < 2:
667
+ return True # not enough data to determine trend
668
+ increasing = 0
669
+ total = 0
670
+ for i in range(1, len(window)):
671
+ if window[i] >= window[i - 1]:
672
+ increasing += 1
673
+ total += 1
674
+ ratio = increasing / total
675
+ if ratio >= threshold:
676
+ return True
677
+ elif ratio <= (1 - threshold):
678
+ return False
679
+ alerts = []
680
+
681
+ if not config.alert_config:
682
+ return alerts
683
+
684
+ total_people = counting_summary.get("total_objects", 0)
685
+
686
+ # Count threshold alerts
687
+ if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
688
+
689
+ for category, threshold in config.alert_config.count_thresholds.items():
690
+ if category == "all" and total_people >= threshold:
691
+
692
+ alerts.append({
693
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
694
+ "alert_id": "alert_"+category+'_'+frame_id,
695
+ "incident_category": self.CASE_TYPE,
696
+ "threshold_level": threshold,
697
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
698
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
699
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
700
+ }
701
+ })
702
+ elif category in counting_summary.get("by_category", {}):
703
+ count = counting_summary["by_category"][category]
704
+
705
+ if count >= threshold:
706
+ alerts.append({
707
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
708
+ "alert_id": "alert_"+category+'_'+frame_id,
709
+ "incident_category": self.CASE_TYPE,
710
+ "threshold_level": threshold,
711
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
712
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
713
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
714
+ }
715
+ })
716
+ else:
717
+ pass
718
+
719
+ # Zone occupancy threshold alerts
720
+ if hasattr(config.alert_config, 'occupancy_thresholds') and config.alert_config.occupancy_thresholds:
721
+ for zone_name, threshold in config.alert_config.occupancy_thresholds.items():
722
+ if zone_name in zone_analysis:
723
+ # Use enhanced zone analysis for accurate current counts
724
+ zone_data = zone_analysis[zone_name]
725
+ if isinstance(zone_data, dict) and "current_count" in zone_data:
726
+ # Enhanced zone analysis with current_count field
727
+ zone_count = zone_data.get("current_count", 0)
728
+ else:
729
+ # Fallback to original zone analysis format
730
+ zone_count = self._robust_zone_total(zone_data)
731
+
732
+ if zone_count >= threshold:
733
+ alerts.append({
734
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
735
+ "alert_id": f"alert_zone_{zone_name}_{frame_id}",
736
+ "incident_category": f"{self.CASE_TYPE}_{zone_name}",
737
+ "threshold_level": threshold,
738
+ "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
739
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
740
+ getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
741
+ }
742
+ })
743
+
744
+ return alerts
745
+
746
+ def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: IntrusionConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
747
+ """Generate standardized business analytics for the agg_summary structure."""
748
+ if is_empty:
749
+ return []
750
+ business_analytics = []
751
+
752
+ total_people = counting_summary.get("total_objects", 0)
753
+
754
+ # Get camera info using standardized method
755
+ camera_info = self.get_camera_info_from_stream(stream_info)
756
+
757
+ if total_people > 0 or config.enable_analytics:
758
+ # Calculate analytics statistics
759
+ analytics_stats = {
760
+ "people_count": total_people,
761
+ "unique_people_count": self.get_total_count(),
762
+ "current_frame_count": self.get_current_frame_count()
763
+ }
764
+
765
+ # Add zone analytics if available
766
+ if zone_analysis:
767
+ zone_stats = {}
768
+ for zone_name, zone_data in zone_analysis.items():
769
+ # Use enhanced zone analysis for accurate counts
770
+ if isinstance(zone_data, dict) and "current_count" in zone_data:
771
+ # Enhanced zone analysis with current_count and total_count
772
+ current_count = zone_data.get("current_count", 0)
773
+ total_count = zone_data.get("total_count", 0)
774
+ zone_stats[f"{zone_name}_current_occupancy"] = current_count
775
+ zone_stats[f"{zone_name}_total_occupancy"] = total_count
776
+ else:
777
+ # Fallback to original zone analysis format
778
+ zone_total = self._robust_zone_total(zone_data)
779
+ zone_stats[f"{zone_name}_occupancy"] = zone_total
780
+ analytics_stats.update(zone_stats)
781
+
782
+ # Generate human text for analytics
783
+ current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
784
+ start_timestamp = self._get_start_timestamp_str(stream_info)
785
+
786
+ analytics_human_text = self.generate_analytics_human_text(
787
+ "people_counting_analytics", analytics_stats, current_timestamp, start_timestamp
788
+ )
789
+
790
+ # Create business analytics using standardized method
791
+ analytics = self.create_business_analytics(
792
+ "people_counting_analytics", analytics_stats, analytics_human_text, camera_info
793
+ )
794
+ business_analytics.append(analytics)
795
+
796
+ return business_analytics
797
+
798
+ def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
799
+ """
800
+ Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
801
+ """
802
+ lines = {}
803
+ lines["Application Name"] = self.CASE_TYPE
804
+ lines["Application Version"] = self.CASE_VERSION
805
+ if len(incidents) > 0:
806
+ lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
807
+ if len(tracking_stats) > 0:
808
+ lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
809
+ if len(business_analytics) > 0:
810
+ lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
811
+
812
+ if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
813
+ lines["Summary"] = "No Summary Data"
814
+
815
+ return [lines]
816
+
817
+ def _calculate_metrics(self, counting_summary: Dict, zone_analysis: Dict,
818
+ config: IntrusionConfig, context: ProcessingContext) -> Dict[str, Any]:
819
+ """Calculate detailed metrics for analytics."""
820
+ total_people = counting_summary.get("total_objects", 0)
821
+
822
+ metrics = {
823
+ "total_people": total_people,
824
+ "processing_time": context.processing_time or 0.0,
825
+ "input_format": context.input_format.value,
826
+ "confidence_threshold": config.confidence_threshold,
827
+ "zones_analyzed": len(zone_analysis),
828
+ "detection_rate": 0.0,
829
+ "coverage_percentage": 0.0
830
+ }
831
+
832
+ # Calculate detection rate
833
+ if config.time_window_minutes and config.time_window_minutes > 0:
834
+ metrics["detection_rate"] = (total_people / config.time_window_minutes) * 60
835
+
836
+ # Calculate zone coverage
837
+ if zone_analysis and total_people > 0:
838
+ people_in_zones = 0
839
+ for zone_counts in zone_analysis.values():
840
+ if isinstance(zone_counts, dict):
841
+ for v in zone_counts.values():
842
+ if isinstance(v, int):
843
+ people_in_zones += v
844
+ elif isinstance(v, list):
845
+ people_in_zones += len(v)
846
+ elif isinstance(zone_counts, list):
847
+ people_in_zones += len(zone_counts)
848
+ elif isinstance(zone_counts, int):
849
+ people_in_zones += zone_counts
850
+ metrics["coverage_percentage"] = (people_in_zones / total_people) * 100
851
+
852
+ # Unique tracking metrics
853
+ if config.enable_unique_counting:
854
+ unique_count = self._count_unique_tracks(counting_summary, config)
855
+ if unique_count is not None:
856
+ metrics["unique_people"] = unique_count
857
+ metrics["tracking_efficiency"] = (unique_count / total_people) * 100 if total_people > 0 else 0
858
+
859
+ # Per-zone metrics
860
+ if zone_analysis:
861
+ zone_metrics = {}
862
+ for zone_name, zone_counts in zone_analysis.items():
863
+ # Robustly sum counts, handling dicts with int or list values
864
+ if isinstance(zone_counts, dict):
865
+ zone_total = 0
866
+ for v in zone_counts.values():
867
+ if isinstance(v, int):
868
+ zone_total += v
869
+ elif isinstance(v, list):
870
+ zone_total += len(v)
871
+ elif isinstance(zone_counts, list):
872
+ zone_total = len(zone_counts)
873
+ elif isinstance(zone_counts, int):
874
+ zone_total = zone_counts
875
+ else:
876
+ zone_total = 0
877
+ zone_metrics[zone_name] = {
878
+ "count": zone_total,
879
+ "percentage": (zone_total / total_people) * 100 if total_people > 0 else 0
880
+ }
881
+ metrics["zone_metrics"] = zone_metrics
882
+
883
+ return metrics
884
+
885
+ def _extract_predictions(self, data: Any) -> List[Dict[str, Any]]:
886
+ """Extract predictions from processed data for API compatibility."""
887
+ predictions = []
888
+
889
+ try:
890
+ if isinstance(data, list):
891
+ # Detection format
892
+ for item in data:
893
+ prediction = self._normalize_prediction(item)
894
+ if prediction:
895
+ predictions.append(prediction)
896
+
897
+ elif isinstance(data, dict):
898
+ # Frame-based or tracking format
899
+ for frame_id, items in data.items():
900
+ if isinstance(items, list):
901
+ for item in items:
902
+ prediction = self._normalize_prediction(item)
903
+ if prediction:
904
+ prediction["frame_id"] = frame_id
905
+ predictions.append(prediction)
906
+
907
+ except Exception as e:
908
+ self.logger.warning(f"Failed to extract predictions: {str(e)}")
909
+
910
+ return predictions
911
+
912
+ def _normalize_prediction(self, item: Dict[str, Any]) -> Dict[str, Any]:
913
+ """Normalize a single prediction item."""
914
+ if not isinstance(item, dict):
915
+ return {}
916
+
917
+ return {
918
+ "category": item.get("category", item.get("class", "unknown")),
919
+ "confidence": item.get("confidence", item.get("score", 0.0)),
920
+ "bounding_box": item.get("bounding_box", item.get("bbox", {})),
921
+ "track_id": item.get("track_id")
922
+ }
923
+
924
+ def _get_detections_with_confidence(self, counting_summary: Dict) -> List[Dict]:
925
+ """Extract detection items with confidence scores."""
926
+ return counting_summary.get("detections", [])
927
+
928
+ def _count_unique_tracks(self, counting_summary: Dict, config: IntrusionConfig = None) -> Optional[int]:
929
+ """Count unique tracks if tracking is enabled."""
930
+ # Always update tracking state regardless of enable_unique_counting setting
931
+ self._update_tracking_state(counting_summary)
932
+
933
+ # Only return the count if unique counting is enabled
934
+ if config and config.enable_unique_counting:
935
+ return self._total_count if self._total_count > 0 else None
936
+ else:
937
+ return None
938
+
939
+ def _update_tracking_state(self, counting_summary: Dict) -> None:
940
+ """Update tracking state with current frame data (always called)."""
941
+ detections = self._get_detections_with_confidence(counting_summary)
942
+
943
+ if not detections:
944
+ return
945
+
946
+ # Map raw tracker IDs to canonical IDs to avoid duplicate counting
947
+ current_frame_tracks: Set[Any] = set()
948
+
949
+ for detection in detections:
950
+ raw_track_id = detection.get("track_id")
951
+ if raw_track_id is None:
952
+ continue
953
+
954
+ bbox = detection.get("bounding_box", detection.get("bbox"))
955
+ if not bbox:
956
+ continue
957
+
958
+ canonical_id = self._merge_or_register_track(raw_track_id, bbox)
959
+
960
+ # Propagate canonical ID so that downstream logic (including zone
961
+ # tracking and event generation) operates on the de-duplicated ID.
962
+ detection["track_id"] = canonical_id
963
+ current_frame_tracks.add(canonical_id)
964
+
965
+ # Update total track IDs with new canonical IDs from current frame
966
+ old_total_count = len(self._total_track_ids)
967
+ self._total_track_ids.update(current_frame_tracks)
968
+ self._current_frame_track_ids = current_frame_tracks
969
+
970
+ # Update total count
971
+ self._total_count = len(self._total_track_ids)
972
+ self._last_update_time = time.time()
973
+
974
+ # Log tracking state updates
975
+ if len(current_frame_tracks) > 0:
976
+ new_tracks = current_frame_tracks - (self._total_track_ids - current_frame_tracks)
977
+ if new_tracks:
978
+ self.logger.debug(
979
+ f"Tracking state updated: {len(new_tracks)} new canonical track IDs added, total unique tracks: {self._total_count}")
980
+ else:
981
+ self.logger.debug(
982
+ f"Tracking state updated: {len(current_frame_tracks)} current frame canonical tracks, total unique tracks: {self._total_count}")
983
+
984
def get_total_count(self) -> int:
    """Cumulative number of unique people tracked across all calls."""
    cumulative = self._total_count
    return cumulative
987
+
988
def get_current_frame_count(self) -> int:
    """Number of distinct people present in the most recent frame."""
    frame_tracks = self._current_frame_track_ids
    return len(frame_tracks)
991
+
992
def get_total_frames_processed(self) -> int:
    """Total number of frames processed across all calls."""
    frames = self._total_frame_counter
    return frames
995
+
996
def set_global_frame_offset(self, offset: int) -> None:
    """Set the global frame offset used for video-chunk processing."""
    self._global_frame_offset = offset
    self.logger.info(f"Global frame offset set to: {offset}")
1000
+
1001
def get_global_frame_offset(self) -> int:
    """Current global frame offset."""
    offset = self._global_frame_offset
    return offset
1004
+
1005
def update_global_frame_offset(self, frames_in_chunk: int) -> None:
    """Advance the global frame offset after finishing a chunk."""
    old_offset = self._global_frame_offset
    self._global_frame_offset = old_offset + frames_in_chunk
    self.logger.info(f"Global frame offset updated: {old_offset} -> {self._global_frame_offset} (added {frames_in_chunk} frames)")
1010
+
1011
def get_global_frame_id(self, local_frame_id: str) -> str:
    """Convert a local frame ID to a global one.

    NOTE: the offset addition is currently disabled, so a numeric ID is
    only normalized (e.g. "007" -> "7"); non-numeric IDs (timestamps)
    pass through unchanged.
    """
    try:
        # Offset intentionally not applied: global == local for now.
        return str(int(local_frame_id))
    except (ValueError, TypeError):
        return local_frame_id
1021
+
1022
def get_track_ids_info(self) -> Dict[str, Any]:
    """Summary of the track-ID bookkeeping (counts + current-frame IDs)."""
    current_ids = self._current_frame_track_ids
    return {
        "total_count": self._total_count,
        "current_frame_count": len(current_ids),
        "total_unique_track_ids": len(self._total_track_ids),
        "current_frame_track_ids": list(current_ids),
        "last_update_time": self._last_update_time,
        "total_frames_processed": self._total_frame_counter
    }
1032
+
1033
def get_tracking_debug_info(self) -> Dict[str, Any]:
    """Full snapshot of the tracking state for debugging."""
    zone_current = {zone: list(ids) for zone, ids in self._zone_current_track_ids.items()}
    zone_total = {zone: list(ids) for zone, ids in self._zone_total_track_ids.items()}
    return {
        "total_track_ids": list(self._total_track_ids),
        "current_frame_track_ids": list(self._current_frame_track_ids),
        "total_count": self._total_count,
        "current_frame_count": len(self._current_frame_track_ids),
        "total_frames_processed": self._total_frame_counter,
        "last_update_time": self._last_update_time,
        "zone_current_track_ids": zone_current,
        "zone_total_track_ids": zone_total,
        "zone_current_counts": self._zone_current_counts.copy(),
        "zone_total_counts": self._zone_total_counts.copy(),
        "global_frame_offset": self._global_frame_offset,
        "frames_in_current_chunk": self._frames_in_current_chunk
    }
1049
+
1050
def get_frame_info(self) -> Dict[str, Any]:
    """Frame-processing counters and the projected next global frame."""
    offset = self._global_frame_offset
    in_chunk = self._frames_in_current_chunk
    return {
        "global_frame_offset": offset,
        "total_frames_processed": self._total_frame_counter,
        "frames_in_current_chunk": in_chunk,
        "next_global_frame": offset + in_chunk
    }
1058
+
1059
+ def reset_tracking_state(self) -> None:
1060
+ """
1061
+ WARNING: This completely resets ALL tracking data including cumulative totals!
1062
+
1063
+ This should ONLY be used when:
1064
+ - Starting a completely new tracking session
1065
+ - Switching to a different video/stream
1066
+ - Manual reset requested by user
1067
+
1068
+ For clearing expired/stale tracks, use clear_current_frame_tracking() instead.
1069
+ """
1070
+ self._total_track_ids.clear()
1071
+ self._current_frame_track_ids.clear()
1072
+ self._total_count = 0
1073
+ self._last_update_time = time.time()
1074
+
1075
+ # Clear zone tracking data
1076
+ self._zone_current_track_ids.clear()
1077
+ self._zone_total_track_ids.clear()
1078
+ self._zone_current_counts.clear()
1079
+ self._zone_total_counts.clear()
1080
+
1081
+ # Clear global unique person tracking
1082
+ self._global_unique_person_track_ids.clear()
1083
+ self._global_unique_person_count = 0
1084
+
1085
+ # Reset frame counter and global frame offset
1086
+ self._total_frame_counter = 0
1087
+ self._global_frame_offset = 0
1088
+ self._frames_in_current_chunk = 0
1089
+
1090
+ # Clear aliasing information
1091
+ self._canonical_tracks.clear()
1092
+ self._track_aliases.clear()
1093
+ self._tracking_start_time = None
1094
+
1095
+ self.logger.warning(" FULL tracking state reset - all track IDs, zone data, frame counter, and global frame offset cleared. Cumulative totals lost!")
1096
+
1097
+ def clear_current_frame_tracking(self) -> int:
1098
+ """
1099
+ MANUAL USE ONLY: Clear only current frame tracking data while preserving cumulative totals.
1100
+
1101
+ This method is NOT called automatically anywhere in the code.
1102
+
1103
+ This is the SAFE method to use for manual clearing of stale/expired current frame data.
1104
+ The cumulative total (self._total_count) is always preserved.
1105
+
1106
+ In streaming scenarios, you typically don't need to call this at all.
1107
+
1108
+ Returns:
1109
+ Number of current frame tracks cleared
1110
+ """
1111
+ old_current_count = len(self._current_frame_track_ids)
1112
+ self._current_frame_track_ids.clear()
1113
+
1114
+ # Clear current zone tracking (but keep total zone tracking)
1115
+ cleared_zone_tracks = 0
1116
+ for zone_name in list(self._zone_current_track_ids.keys()):
1117
+ cleared_zone_tracks += len(self._zone_current_track_ids[zone_name])
1118
+ self._zone_current_track_ids[zone_name].clear()
1119
+ self._zone_current_counts[zone_name] = 0
1120
+
1121
+ # Update timestamp
1122
+ self._last_update_time = time.time()
1123
+
1124
+ self.logger.info(f"Cleared {old_current_count} current frame tracks and {cleared_zone_tracks} zone current tracks. Cumulative total preserved: {self._total_count}")
1125
+ return old_current_count
1126
+
1127
+ def reset_frame_counter(self) -> None:
1128
+ """Reset only the frame counter."""
1129
+ old_count = self._total_frame_counter
1130
+ self._total_frame_counter = 0
1131
+ self.logger.info(f"Frame counter reset from {old_count} to 0")
1132
+
1133
+ def clear_expired_tracks(self, max_age_seconds: float = 300.0) -> int:
1134
+ """
1135
+ MANUAL USE ONLY: Clear current frame tracking data if no updates for a while.
1136
+
1137
+ This method is NOT called automatically anywhere in the code.
1138
+ It's provided as a utility function for manual cleanup if needed.
1139
+
1140
+ In streaming scenarios, you typically don't need to call this at all.
1141
+ The cumulative total should keep growing as new unique people are detected.
1142
+
1143
+ This method only clears current frame tracking data while preserving
1144
+ the cumulative total count. The cumulative total should never decrease.
1145
+
1146
+ Args:
1147
+ max_age_seconds: Maximum age in seconds before clearing current frame tracks
1148
+
1149
+ Returns:
1150
+ Number of current frame tracks cleared
1151
+ """
1152
+ current_time = time.time()
1153
+ if current_time - self._last_update_time > max_age_seconds:
1154
+ # Use the safe method that preserves cumulative totals
1155
+ cleared_count = self.clear_current_frame_tracking()
1156
+ self.logger.info(f"Manual cleanup: cleared {cleared_count} expired current frame tracks (age > {max_age_seconds}s)")
1157
+ return cleared_count
1158
+ return 0
1159
+
1160
    def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: IntrusionConfig) -> Dict[str, Dict[str, Any]]:
        """
        Update zone tracking with current frame data.

        Args:
            zone_analysis: Current zone analysis results
            detections: List of detections with track IDs
            config: People counting configuration with zone polygons

        Returns:
            Enhanced zone analysis with tracking information
        """
        # Nothing to do without zone results or configured zone polygons.
        if not zone_analysis or not config.zone_config or not config.zone_config.zones:
            return {}

        enhanced_zone_analysis = {}
        zones = config.zone_config.zones

        # Get current frame track IDs in each zone
        current_frame_zone_tracks = {}

        # Initialize zone tracking for all zones
        for zone_name in zones.keys():
            current_frame_zone_tracks[zone_name] = set()
            if zone_name not in self._zone_current_track_ids:
                self._zone_current_track_ids[zone_name] = set()
            if zone_name not in self._zone_total_track_ids:
                self._zone_total_track_ids[zone_name] = set()

        # Check each detection against each zone
        for detection in detections:
            track_id = detection.get("track_id")
            # Untracked detections cannot contribute to per-ID zone counts.
            if track_id is None:
                continue

            # Get detection bbox; either key spelling is accepted.
            bbox = detection.get("bounding_box", detection.get("bbox"))
            if not bbox:
                continue

            # Get detection anchor point. Uses the bottom-25% center of the box
            # (presumably approximating the person's ground position — confirm
            # against get_bbox_bottom25_center's definition).
            center_point = get_bbox_bottom25_center(bbox) #get_bbox_center(bbox)

            # Check which zone this detection is in using actual zone polygons
            for zone_name, zone_polygon in zones.items():
                # Convert polygon points to tuples for point_in_polygon function
                # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
                polygon_points = [(point[0], point[1]) for point in zone_polygon]

                # Check if detection center is inside the zone polygon using ray casting algorithm
                if point_in_polygon(center_point, polygon_points):
                    current_frame_zone_tracks[zone_name].add(track_id)

        # Update zone tracking for each zone
        for zone_name, zone_counts in zone_analysis.items():
            # Get current frame tracks for this zone
            current_tracks = current_frame_zone_tracks.get(zone_name, set())

            # Update current zone tracks (replaced, not merged, each frame)
            self._zone_current_track_ids[zone_name] = current_tracks

            # Update total zone tracks (accumulate all track IDs that have been in this zone)
            self._zone_total_track_ids[zone_name].update(current_tracks)

            # Update global unique person tracking
            old_global_count = len(self._global_unique_person_track_ids)
            self._global_unique_person_track_ids.update(current_tracks)
            new_global_count = len(self._global_unique_person_track_ids)

            # Update counts
            self._zone_current_counts[zone_name] = len(current_tracks)
            self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
            self._global_unique_person_count = new_global_count

            # Log if new unique person detected
            if new_global_count > old_global_count:
                # NOTE(review): the global set already contains current_tracks here,
                # so (global - current_tracks) holds only *other* IDs and this
                # expression reduces to current_tracks itself — every ID currently
                # in the zone is logged as "new", not just the newly added ones.
                # Verify whether the intent was `current_tracks - previous_global_ids`.
                new_persons = current_tracks - (self._global_unique_person_track_ids - current_tracks)
                self.logger.info(f"New unique person(s) detected in zone {zone_name}: {new_persons}. Global count: {old_global_count} -> {new_global_count}")
            else:
                # Log current state for debugging
                self.logger.debug(f"Zone {zone_name}: Current tracks: {current_tracks}, Global unique count: {new_global_count} (no change)")
                if current_tracks:
                    self.logger.debug(f"Current track IDs in zone: {current_tracks}")
                    self.logger.debug(f"Global unique track IDs: {self._global_unique_person_track_ids}")

            # Create enhanced zone analysis
            enhanced_zone_analysis[zone_name] = {
                "current_count": self._zone_current_counts[zone_name],
                "total_count": self._zone_total_counts[zone_name],
                "current_track_ids": list(current_tracks),
                "total_track_ids": list(self._zone_total_track_ids[zone_name]),
                "original_counts": zone_counts,  # Preserve original zone counts
                "global_unique_count": self._global_unique_person_count  # Add global unique count
            }

            # Debug logging for zone tracking
            self.logger.debug(f"Zone {zone_name}: current_count={self._zone_current_counts[zone_name]}, total_count={self._zone_total_counts[zone_name]}, current_tracks={len(current_tracks)}")

        return enhanced_zone_analysis
1259
+
1260
+ def get_zone_tracking_info(self) -> Dict[str, Dict[str, Any]]:
1261
+ """Get detailed zone tracking information."""
1262
+ return {
1263
+ zone_name: {
1264
+ "current_count": self._zone_current_counts.get(zone_name, 0),
1265
+ "total_count": self._zone_total_counts.get(zone_name, 0),
1266
+ "current_track_ids": list(self._zone_current_track_ids.get(zone_name, set())),
1267
+ "total_track_ids": list(self._zone_total_track_ids.get(zone_name, set())),
1268
+ "global_unique_count": self._global_unique_person_count
1269
+ }
1270
+ for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
1271
+ }
1272
+
1273
+ def get_zone_current_count(self, zone_name: str) -> int:
1274
+ """Get current count of people in a specific zone."""
1275
+ return self._zone_current_counts.get(zone_name, 0)
1276
+
1277
+ def get_zone_total_count(self, zone_name: str) -> int:
1278
+ """Get total count of people who have been in a specific zone."""
1279
+ return self._zone_total_counts.get(zone_name, 0)
1280
+
1281
+ def get_global_unique_person_count(self) -> int:
1282
+ """Get total count of unique people who have ever been in any zone."""
1283
+ return self._global_unique_person_count
1284
+
1285
+ def get_global_tracking_debug_info(self) -> Dict[str, Any]:
1286
+ """Get detailed debugging information about global unique person tracking."""
1287
+ return {
1288
+ "global_unique_person_count": self._global_unique_person_count,
1289
+ "global_unique_person_track_ids": list(self._global_unique_person_track_ids),
1290
+ "zone_current_track_ids": {zone: list(tracks) for zone, tracks in self._zone_current_track_ids.items()},
1291
+ "zone_total_track_ids": {zone: list(tracks) for zone, tracks in self._zone_total_track_ids.items()},
1292
+ "zone_current_counts": self._zone_current_counts.copy(),
1293
+ "zone_total_counts": self._zone_total_counts.copy()
1294
+ }
1295
+
1296
+ def get_all_zone_counts(self) -> Dict[str, Dict[str, int]]:
1297
+ """Get current and total counts for all zones."""
1298
+ return {
1299
+ zone_name: {
1300
+ "current": self._zone_current_counts.get(zone_name, 0),
1301
+ "total": self._zone_total_counts.get(zone_name, 0)
1302
+ }
1303
+ for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
1304
+ }
1305
+
1306
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
1307
+ """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
1308
+ dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
1309
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1310
+
1311
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
1312
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
1313
+ hours = int(timestamp // 3600)
1314
+ minutes = int((timestamp % 3600) // 60)
1315
+ seconds = round(float(timestamp % 60),2)
1316
+ return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
1317
+
1318
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: str=None) -> str:
1319
+ """Get formatted current timestamp based on stream type."""
1320
+ if not stream_info:
1321
+ return "00:00:00.00"
1322
+ # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
1323
+ if precision:
1324
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1325
+ if frame_id:
1326
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1327
+ else:
1328
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1329
+ stream_time_str = self._format_timestamp_for_video(start_time)
1330
+ return stream_time_str
1331
+ else:
1332
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1333
+
1334
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1335
+ if frame_id:
1336
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1337
+ else:
1338
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1339
+ stream_time_str = self._format_timestamp_for_video(start_time)
1340
+ return stream_time_str
1341
+ else:
1342
+ # For streams, use stream_time from stream_info
1343
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1344
+ if stream_time_str:
1345
+ # Parse the high precision timestamp string to get timestamp
1346
+ try:
1347
+ # Remove " UTC" suffix and parse
1348
+ timestamp_str = stream_time_str.replace(" UTC", "")
1349
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1350
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
1351
+ return self._format_timestamp_for_stream(timestamp)
1352
+ except:
1353
+ # Fallback to current time if parsing fails
1354
+ return self._format_timestamp_for_stream(time.time())
1355
+ else:
1356
+ return self._format_timestamp_for_stream(time.time())
1357
+
1358
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
1359
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
1360
+ if not stream_info:
1361
+ return "00:00:00"
1362
+
1363
+ if precision:
1364
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1365
+ return "00:00:00"
1366
+ else:
1367
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1368
+
1369
+
1370
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1371
+ # If video format, start from 00:00:00
1372
+ return "00:00:00"
1373
+ else:
1374
+ # For streams, use tracking start time or current time with minutes/seconds reset
1375
+ if self._tracking_start_time is None:
1376
+ # Try to extract timestamp from stream_time string
1377
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1378
+ if stream_time_str:
1379
+ try:
1380
+ # Remove " UTC" suffix and parse
1381
+ timestamp_str = stream_time_str.replace(" UTC", "")
1382
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1383
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
1384
+ except:
1385
+ # Fallback to current time if parsing fails
1386
+ self._tracking_start_time = time.time()
1387
+ else:
1388
+ self._tracking_start_time = time.time()
1389
+
1390
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
1391
+ # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
1392
+ dt = dt.replace(minute=0, second=0, microsecond=0)
1393
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1394
+
1395
+ def _extract_frame_id_from_tracking(self, frame_detections: List[Dict], frame_key: str) -> str:
1396
+ """Extract frame ID from tracking data."""
1397
+ # Priority 1: Check if detections have frame information
1398
+ if frame_detections and len(frame_detections) > 0:
1399
+ first_detection = frame_detections[0]
1400
+ if "frame" in first_detection:
1401
+ return str(first_detection["frame"])
1402
+ elif "frame_id" in first_detection:
1403
+ return str(first_detection["frame_id"])
1404
+ # Priority 2: Use frame_key from input data
1405
+ return str(frame_key)
1406
+
1407
+ def _robust_zone_total(self, zone_count):
1408
+ """Helper method to robustly calculate zone total."""
1409
+ if isinstance(zone_count, dict):
1410
+ total = 0
1411
+ for v in zone_count.values():
1412
+ if isinstance(v, int):
1413
+ total += v
1414
+ elif isinstance(v, list):
1415
+ total += len(v)
1416
+ return total
1417
+ elif isinstance(zone_count, list):
1418
+ return len(zone_count)
1419
+ elif isinstance(zone_count, int):
1420
+ return zone_count
1421
+ else:
1422
+ return 0
1423
+
1424
+ # --------------------------------------------------------------------- #
1425
+ # Private helpers for canonical track aliasing #
1426
+ # --------------------------------------------------------------------- #
1427
+
1428
+ def _compute_iou(self, box1: Any, box2: Any) -> float:
1429
+ """Compute IoU between two bounding boxes that may be either list or dict.
1430
+ Falls back to geometry_utils.calculate_iou when both boxes are dicts.
1431
+ """
1432
+ # Handle dict format directly with calculate_iou (supports many keys)
1433
+ if isinstance(box1, dict) and isinstance(box2, dict):
1434
+ return calculate_iou(box1, box2)
1435
+
1436
+ # Helper to convert bbox (dict or list) to a list [x1,y1,x2,y2]
1437
+ def _bbox_to_list(bbox):
1438
+ if bbox is None:
1439
+ return []
1440
+ if isinstance(bbox, list):
1441
+ return bbox[:4] if len(bbox) >= 4 else []
1442
+ if isinstance(bbox, dict):
1443
+ if "xmin" in bbox:
1444
+ return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
1445
+ if "x1" in bbox:
1446
+ return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
1447
+ # Fallback: take first four values in insertion order
1448
+ values = list(bbox.values())
1449
+ return values[:4] if len(values) >= 4 else []
1450
+ # Unsupported type
1451
+ return []
1452
+
1453
+ list1 = _bbox_to_list(box1)
1454
+ list2 = _bbox_to_list(box2)
1455
+
1456
+ if len(list1) < 4 or len(list2) < 4:
1457
+ return 0.0
1458
+
1459
+ x1_min, y1_min, x1_max, y1_max = list1
1460
+ x2_min, y2_min, x2_max, y2_max = list2
1461
+
1462
+ # Ensure correct ordering of coordinates
1463
+ x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
1464
+ y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
1465
+ x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
1466
+ y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
1467
+
1468
+ inter_x_min = max(x1_min, x2_min)
1469
+ inter_y_min = max(y1_min, y2_min)
1470
+ inter_x_max = min(x1_max, x2_max)
1471
+ inter_y_max = min(y1_max, y2_max)
1472
+
1473
+ inter_w = max(0.0, inter_x_max - inter_x_min)
1474
+ inter_h = max(0.0, inter_y_max - inter_y_min)
1475
+ inter_area = inter_w * inter_h
1476
+
1477
+ area1 = (x1_max - x1_min) * (y1_max - y1_min)
1478
+ area2 = (x2_max - x2_min) * (y2_max - y2_min)
1479
+ union_area = area1 + area2 - inter_area
1480
+
1481
+ return (inter_area / union_area) if union_area > 0 else 0.0
1482
+
1483
+ def _get_canonical_id(self, raw_id: Any) -> Any:
1484
+ """Return the canonical ID for a raw tracker-generated ID."""
1485
+ return self._track_aliases.get(raw_id, raw_id)
1486
+
1487
+ def _merge_or_register_track(self, raw_id: Any, bbox: List[float]) -> Any:
1488
+ """Merge the raw track into an existing canonical track if possible,
1489
+ otherwise register it as a new canonical track. Returns the canonical
1490
+ ID to use for counting.
1491
+ """
1492
+ now = time.time()
1493
+
1494
+ # Fast path: raw_id already mapped
1495
+ if raw_id in self._track_aliases:
1496
+ canonical_id = self._track_aliases[raw_id]
1497
+ track_info = self._canonical_tracks.get(canonical_id)
1498
+ if track_info is not None:
1499
+ track_info["last_bbox"] = bbox
1500
+ track_info["last_update"] = now
1501
+ track_info["raw_ids"].add(raw_id)
1502
+ return canonical_id
1503
+
1504
+ # Attempt to merge with an existing canonical track
1505
+ for canonical_id, info in self._canonical_tracks.items():
1506
+ # Only consider recently updated tracks to avoid stale matches
1507
+ if now - info["last_update"] > self._track_merge_time_window:
1508
+ continue
1509
+
1510
+ iou = self._compute_iou(bbox, info["last_bbox"])
1511
+ if iou >= self._track_merge_iou_threshold:
1512
+ # Merge raw_id into canonical track
1513
+ self._track_aliases[raw_id] = canonical_id
1514
+ info["last_bbox"] = bbox
1515
+ info["last_update"] = now
1516
+ info["raw_ids"].add(raw_id)
1517
+ self.logger.debug(
1518
+ f"Merged raw track {raw_id} into canonical track {canonical_id} (IoU={iou:.2f})")
1519
+ return canonical_id
1520
+
1521
+ # No match found – create a new canonical track
1522
+ canonical_id = raw_id
1523
+ self._track_aliases[raw_id] = canonical_id
1524
+ self._canonical_tracks[canonical_id] = {
1525
+ "last_bbox": bbox,
1526
+ "last_update": now,
1527
+ "raw_ids": {raw_id},
1528
+ }
1529
+ self.logger.debug(f"Registered new canonical track {canonical_id}")
1530
+ return canonical_id
1531
+
1532
+ def _format_timestamp(self, timestamp: float) -> str:
1533
+ """Format a timestamp for human-readable output."""
1534
+ return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
1535
+
1536
+ def _get_tracking_start_time(self) -> str:
1537
+ """Get the tracking start time, formatted as a string."""
1538
+ if self._tracking_start_time is None:
1539
+ return "N/A"
1540
+ return self._format_timestamp(self._tracking_start_time)
1541
+
1542
+ def _set_tracking_start_time(self) -> None:
1543
+ """Set the tracking start time to the current time."""
1544
+ self._tracking_start_time = time.time()
1545
+
1546
+ def get_config_schema(self) -> Dict[str, Any]:
1547
+ """Get configuration schema for intrusion detection."""
1548
+ return {
1549
+ "type": "object",
1550
+ "properties": {
1551
+ "confidence_threshold": {
1552
+ "type": "number",
1553
+ "minimum": 0.0,
1554
+ "maximum": 1.0,
1555
+ "default": 0.5,
1556
+ "description": "Minimum confidence threshold for detections"
1557
+ },
1558
+ "enable_tracking": {
1559
+ "type": "boolean",
1560
+ "default": False,
1561
+ "description": "Enable tracking for unique counting"
1562
+ },
1563
+ "zone_config": {
1564
+ "type": "object",
1565
+ "properties": {
1566
+ "zones": {
1567
+ "type": "object",
1568
+ "additionalProperties": {
1569
+ "type": "array",
1570
+ "items": {
1571
+ "type": "array",
1572
+ "items": {"type": "number"},
1573
+ "minItems": 2,
1574
+ "maxItems": 2
1575
+ },
1576
+ "minItems": 3
1577
+ },
1578
+ "description": "Zone definitions as polygons"
1579
+ },
1580
+ "zone_confidence_thresholds": {
1581
+ "type": "object",
1582
+ "additionalProperties": {"type": "number", "minimum": 0.0, "maximum": 1.0},
1583
+ "description": "Per-zone confidence thresholds"
1584
+ }
1585
+ }
1586
+ },
1587
+ "person_categories": {
1588
+ "type": "array",
1589
+ "items": {"type": "string"},
1590
+ "default": ["person"],
1591
+ "description": "Category names that represent people"
1592
+ },
1593
+ "enable_unique_counting": {
1594
+ "type": "boolean",
1595
+ "default": True,
1596
+ "description": "Enable unique people counting using tracking"
1597
+ },
1598
+ "time_window_minutes": {
1599
+ "type": "integer",
1600
+ "minimum": 1,
1601
+ "default": 60,
1602
+ "description": "Time window for counting analysis in minutes"
1603
+ },
1604
+ "alert_config": {
1605
+ "type": "object",
1606
+ "properties": {
1607
+ "count_thresholds": {
1608
+ "type": "object",
1609
+ "additionalProperties": {"type": "integer", "minimum": 1},
1610
+ "description": "Count thresholds for alerts"
1611
+ },
1612
+ "occupancy_thresholds": {
1613
+ "type": "object",
1614
+ "additionalProperties": {"type": "integer", "minimum": 1},
1615
+ "description": "Zone occupancy thresholds for alerts"
1616
+ },
1617
+ "alert_type": {
1618
+ "type": "array",
1619
+ "items": {"type": "string"},
1620
+ "default": ["Default"],
1621
+ "description": "To pass the type of alert. EG: email, sms, etc."
1622
+ },
1623
+ "alert_value": {
1624
+ "type": "array",
1625
+ "items": {"type": "string"},
1626
+ "default": ["JSON"],
1627
+ "description": "Alert value to pass the value based on type. EG: email id if type is email."
1628
+ },
1629
+ "alert_incident_category": {
1630
+ "type": "array",
1631
+ "items": {"type": "string"},
1632
+ "default": ["Incident Detection Alert"],
1633
+ "description": "Group and name the Alert category Type"
1634
+ },
1635
+ }
1636
+ }
1637
+ },
1638
+ "required": ["confidence_threshold"],
1639
+ "additionalProperties": False
1640
+ }
1641
+
1642
+ def create_default_config(self, **overrides) -> IntrusionConfig:
1643
+ """Create default configuration with optional overrides."""
1644
+ defaults = {
1645
+ "category": self.category,
1646
+ "usecase": self.name,
1647
+ "confidence_threshold": 0.5,
1648
+ "enable_tracking": False,
1649
+ "enable_analytics": True,
1650
+ "enable_unique_counting": True,
1651
+ "time_window_minutes": 60,
1652
+ "person_categories": ["person"],
1653
+ }
1654
+ defaults.update(overrides)
1655
+ return IntrusionConfig(**defaults)
1656
+
1657
+ def _apply_smoothing(self, data: Any, config: IntrusionConfig) -> Any:
1658
+ """Apply smoothing to tracking data if enabled."""
1659
+ if self.smoothing_tracker is None:
1660
+ smoothing_config = BBoxSmoothingConfig(
1661
+ smoothing_algorithm=config.smoothing_algorithm,
1662
+ window_size=config.smoothing_window_size,
1663
+ cooldown_frames=config.smoothing_cooldown_frames,
1664
+ confidence_threshold=config.confidence_threshold or 0.5,
1665
+ confidence_range_factor=config.smoothing_confidence_range_factor,
1666
+ enable_smoothing=True
1667
+ )
1668
+ self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
1669
+
1670
+ smoothed_data = bbox_smoothing(data, self.smoothing_tracker.config, self.smoothing_tracker)
1671
+ self.logger.debug(f"Applied bbox smoothing to tracking results")
1672
+ return smoothed_data