matrice-analytics 0.1.60__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/car_service.py
@@ -0,0 +1,1601 @@
1
+ import time
2
+ from datetime import datetime, timezone
3
+ """
4
+ Advanced car service use case implementation.
5
+
6
+ This module provides comprehensive car service analytics with advanced tracking,
7
+ journey analysis, queue management, and detailed business intelligence metrics.
8
+ """
9
+
10
+ from typing import Any, Dict, List, Optional, Tuple
11
+ from dataclasses import field
12
+ import time
13
+ import math
14
+ from collections import defaultdict
15
+
16
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
17
+ from ..core.config import CarServiceConfig, TrackingConfig, AlertConfig
18
+ from ..utils import (
19
+ filter_by_confidence,
20
+ apply_category_mapping,
21
+ point_in_polygon,
22
+ get_bbox_center,
23
+ calculate_distance,
24
+ match_results_structure,
25
+ bbox_smoothing,
26
+ BBoxSmoothingConfig,
27
+ BBoxSmoothingTracker,
28
+ )
29
+
30
+ def assign_person_by_area(detections, car_areas, staff_areas):
31
+ """
32
+ Assigns category detections to 'staff' or 'car' based on their location in area polygons.
33
+ Modifies the detection list in-place.
34
+ Args:
35
+ detections: List of detection dicts.
36
+ car_areas: Dict of area_name -> polygon (list of [x, y]).
37
+ staff_areas: Dict of area_name -> polygon (list of [x, y]).
38
+ """
39
+ # Only process detections with category for staff/car assignment
40
+ staff_track_ids = set()
41
+ # First pass: assign staff and remember their track_ids
42
+ for det in detections:
43
+ if det.get('category') != 'person' and det.get('category') != 'staff':
44
+ # Skip non-person, non-staff objects (e.g., chair, tie, etc.)
45
+ continue
46
+ if det.get('category') == 'person':
47
+ bbox = det.get('bbox', det.get('bounding_box', None))
48
+ if bbox and len(bbox) == 4:
49
+ center = get_bbox_center(bbox)
50
+ for polygon in staff_areas.values():
51
+ if point_in_polygon(center, polygon):
52
+ det['category'] = 'staff'
53
+ if 'track_id' in det:
54
+ staff_track_ids.add(det['track_id'])
55
+ break
56
+ elif det.get('category') == 'staff' and 'track_id' in det:
57
+ staff_track_ids.add(det['track_id'])
58
+ # Second pass: assign car only if not a known staff track_id
59
+ for det in detections:
60
+ if det.get('category') != 'person' and det.get('category') != 'car':
61
+ continue
62
+ if det.get('track_id') not in staff_track_ids:
63
+ det['category'] = 'car'
64
+ elif det.get('track_id') in staff_track_ids:
65
+ det['category'] = 'staff'
66
+
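# Usage sketch (editor's addition, not part of the packaged file): the helper
# above mutates the detection list in place, promoting any 'person' whose box
# center falls inside a staff polygon to 'staff' and the rest to 'car'.
# The import path follows the wheel layout listed above; boxes are assumed to
# be [x1, y1, x2, y2] and all coordinates below are hypothetical.
from matrice_analytics.post_processing.usecases.car_service import assign_person_by_area

detections = [
    {"category": "person", "bbox": [620, 100, 700, 300], "track_id": 7},
    {"category": "person", "bbox": [100, 120, 180, 320], "track_id": 8},
]
car_areas = {"lane_1": [[0, 0], [600, 0], [600, 400], [0, 400]]}
staff_areas = {"counter": [[600, 0], [900, 0], [900, 400], [600, 400]]}
assign_person_by_area(detections, car_areas, staff_areas)
# track 7 (center inside "counter") is now 'staff'; track 8 is now 'car'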
67
+ class CarServiceUseCase(BaseProcessor):
68
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
69
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
70
+ hours = int(timestamp // 3600)
71
+ minutes = int((timestamp % 3600) // 60)
72
+ seconds = round(float(timestamp % 60), 2)
73
+ return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"
74
+
75
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
76
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
77
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
78
+
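# Illustrative check (editor's addition): the video-chunk formatter above maps a
# float second offset to HH:MM:SS with fractional seconds. A standalone sketch of
# the same arithmetic, assuming the two-decimal rounding used above:
def _sketch_format_video_timestamp(timestamp: float) -> str:
    hours = int(timestamp // 3600)
    minutes = int((timestamp % 3600) // 60)
    seconds = round(float(timestamp % 60), 2)
    return f"{hours:02d}:{minutes:02d}:{seconds:05.2f}"

assert _sketch_format_video_timestamp(3725.46) == "01:02:05.46"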
79
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
80
+ """Get formatted current timestamp based on stream type."""
81
+ if not stream_info:
82
+ return "00:00:00.00"
83
+ input_settings = stream_info.get("input_settings", {})
84
+ if precision:
85
+ if input_settings.get("start_frame", "na") != "na":
86
+ if frame_id is not None:
87
+ start_time = int(frame_id)/input_settings.get("original_fps", 30)
88
+ else:
89
+ start_time = input_settings.get("start_frame", 30)/input_settings.get("original_fps", 30)
90
+ return self._format_timestamp_for_video(start_time)
91
+ else:
92
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
93
+
94
+ if input_settings.get("start_frame", "na") != "na":
95
+ if frame_id is not None:
96
+ start_time = int(frame_id)/input_settings.get("original_fps", 30)
97
+ else:
98
+ start_time = input_settings.get("start_frame", 30)/input_settings.get("original_fps", 30)
99
+ return self._format_timestamp_for_video(start_time)
100
+ else:
101
+ # For streams, use stream_time from stream_info
102
+ stream_time_str = input_settings.get("stream_info", {}).get("stream_time", "")
103
+ if stream_time_str:
104
+ try:
105
+ timestamp_str = stream_time_str.replace(" UTC", "")
106
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
107
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
108
+ return self._format_timestamp_for_stream(timestamp)
109
+ except Exception:
110
+ return self._format_timestamp_for_stream(time.time())
111
+ else:
112
+ return self._format_timestamp_for_stream(time.time())
113
+
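# Shape of stream_info as read by the timestamp helpers above and below
# (editor's sketch; all values are hypothetical). For video chunks,
# input_settings carries start_frame/original_fps; for live streams, the nested
# stream_info.stream_time string is parsed with "%Y-%m-%d-%H:%M:%S.%f".
example_stream_info = {
    "input_settings": {
        "start_frame": 900,          # video chunk: frame offset into the file
        "original_fps": 30,          # used to convert frames to seconds
        "stream_info": {
            "stream_time": "2024-01-01-12:00:00.000000 UTC",  # live-stream path
        },
    },
    "camera_info": {"camera_id": "cam_01"},
}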
114
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
115
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
116
+ if not stream_info:
117
+ return "00:00:00"
118
+ input_settings = stream_info.get("input_settings", {})
119
+ if precision:
120
+ if input_settings.get("start_frame", "na") != "na":
121
+ return "00:00:00"
122
+ else:
123
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
124
+
125
+ if input_settings.get("start_frame", "na") != "na":
126
+ return "00:00:00"
127
+ else:
128
+ if not hasattr(self, '_tracking_start_time') or self._tracking_start_time is None:
129
+ stream_time_str = input_settings.get("stream_info", {}).get("stream_time", "")
130
+ if stream_time_str:
131
+ try:
132
+ timestamp_str = stream_time_str.replace(" UTC", "")
133
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
134
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
135
+ except Exception:
136
+ self._tracking_start_time = time.time()
137
+ else:
138
+ self._tracking_start_time = time.time()
139
+ dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
140
+ dt = dt.replace(minute=0, second=0, microsecond=0)
141
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
142
+ def get_camera_info_from_stream(self, stream_info):
143
+ """Extract camera_info from stream_info, matching people_counting pattern."""
144
+ if not stream_info:
145
+ return {}
146
+ # Try to get camera_info directly
147
+ camera_info = stream_info.get("camera_info")
148
+ if camera_info:
149
+ return camera_info
150
+ # Fallback: try to extract from nested input_settings
151
+ input_settings = stream_info.get("input_settings", {})
152
+ for key in ["camera_info", "camera_id", "location", "site_id"]:
153
+ if key in input_settings:
154
+ return {key: input_settings[key]}
155
+ return {}
156
+ def _generate_per_frame_agg_summary(self, processed_data, analytics_results, config, context, stream_info=None):
157
+ """
158
+ Generate agg_summary dict with per-frame incidents, tracking_stats, business_analytics, alerts, human_text.
159
+ processed_data: dict of frame_id -> detections (list)
160
+ analytics_results: output of _compile_analytics_results
161
+ """
162
+ agg_summary = {}
163
+ total_frames_processed = getattr(self, '_total_frames_processed', 0)
164
+ global_frame_offset = getattr(self, 'global_frame_offset', 0)
165
+
166
+ # Try to get FPS from stream_info or config
167
+ fps = None
168
+ if stream_info:
169
+ fps = stream_info.get('fps') or stream_info.get('frame_rate')
170
+ if not fps:
171
+ fps = getattr(config, 'fps', None) or getattr(config, 'frame_rate', None)
172
+ try:
173
+ fps = float(fps)
174
+ if fps <= 0:
175
+ fps = None
176
+ except Exception:
177
+ fps = None
178
+
179
+ # If frame_ids are not sorted, sort them numerically if possible
180
+ try:
181
+ frame_ids = sorted(processed_data.keys(), key=lambda x: int(x))
182
+ except Exception:
183
+ frame_ids = list(processed_data.keys())
184
+
185
+
186
+ # For real-time fallback, record wall-clock start time
187
+ wallclock_start_time = None
188
+ if not fps:
189
+ wallclock_start_time = time.time()
190
+
191
+ for idx, frame_id in enumerate(frame_ids):
192
+ detections = processed_data[frame_id]
193
+ staff_count = sum(1 for d in detections if d.get('category') == 'staff')
194
+ car_count = sum(1 for d in detections if d.get('category') == 'car')
195
+ total_people = staff_count + car_count
196
+
197
+ queue_analytics = analytics_results.get("car_queue_analytics", {})
198
+ staff_analytics = analytics_results.get("staff_management_analytics", {})
199
+ service_analytics = analytics_results.get("service_area_analytics", {})
200
+ journey_analytics = analytics_results.get("car_journey_analytics", {})
201
+ business_metrics = analytics_results.get("business_metrics", {})
202
+
203
+ # --- Per-frame timestamp logic (robust, never default to 00:00:00.00 except first frame) ---
204
+ current_timestamp = self._get_current_timestamp_str(stream_info)
205
+ start_timestamp = self._get_start_timestamp_str(stream_info)
206
+
207
+ # --- Alert settings and alerts for each metric ---
208
+ alert_settings = []
209
+ alerts = []
210
+ # queue length alert
211
+ queue_threshold = getattr(config, "queue_length_threshold", 10)
212
+ if queue_analytics.get("cars_queuing", 0) > queue_threshold:
213
+ alert_settings.append({
214
+ "alert_type": "email",
215
+ "incident_category": "car_queue",
216
+ "threshold_level": queue_threshold,
217
+ "ascending": True,
218
+ "settings": {
219
+ "email_address": getattr(config, "email_address", "john.doe@gmail.com")
220
+ }
221
+ })
222
+ alerts.append({
223
+ "alert_type": "email",
224
+ "alert_id": "email_1",
225
+ "incident_category": "car_queue",
226
+ "threshold_value": queue_analytics.get("cars_queuing", 0),
227
+ "ascending": True,
228
+ "settings": {
229
+ "email_address": getattr(config, "email_address", "john.doe@gmail.com")
230
+ }
231
+ })
232
+ # service efficiency alert
233
+ efficiency_threshold = getattr(config, "service_efficiency_threshold", 0.1)
234
+ if business_metrics.get("service_efficiency", 0) < efficiency_threshold:
235
+ alert_settings.append({
236
+ "alert_type": "email",
237
+ "incident_category": "service_efficiency",
238
+ "threshold_level": efficiency_threshold,
239
+ "ascending": False,
240
+ "settings": {
241
+ "email_address": getattr(config, "email_address", "john.doe@gmail.com")
242
+ }
243
+ })
244
+ alerts.append({
245
+ "alert_type": "email",
246
+ "alert_id": "email_2",
247
+ "incident_category": "service_efficiency",
248
+ "threshold_value": business_metrics.get("service_efficiency", 0),
249
+ "ascending": False,
250
+ "settings": {
251
+ "email_address": getattr(config, "email_address", "john.doe@gmail.com")
252
+ }
253
+ })
254
+
255
+ human_text_lines = []
256
+ human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
257
+ human_text_lines.append(f"\t- Active Cars: {queue_analytics.get('active_cars', 0)}")
258
+ human_text_lines.append(f"\t\t- Queuing: {queue_analytics.get('cars_queuing', 0)}")
259
+ human_text_lines.append(f"\t\t- Being Served: {queue_analytics.get('cars_being_served', 0)}")
260
+ human_text_lines.append(f"\t- Active Staff: {staff_analytics.get('active_staff', 0)}")
261
+ human_text_lines.append(f"\t- Car/Staff Ratio: {business_metrics.get('car_to_staff_ratio', 0):.2f}")
262
+ human_text_lines.append(f"\t- Queue Performance: {business_metrics.get('queue_performance', 0)*100:.1f}%")
263
+ human_text_lines.append(f"\t- Service Areas: {len(service_analytics.get('service_areas_status', {}))}")
264
+ for area_name, area_info in service_analytics.get('service_areas_status', {}).items():
265
+ cars = area_info.get("cars", 0)
266
+ staff = area_info.get("staff", 0)
267
+ status = area_info.get("status", "inactive")
268
+ human_text_lines.append(f"\t\t- {area_name}: {status} with {cars} cars and {staff} staff")
269
+ human_text_lines.append("")
270
+ human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
271
+ human_text_lines.append(f"\t- Total Cars: {journey_analytics.get('total_journeys', 0)}")
272
+ completed_count = journey_analytics.get("journey_states", {}).get("completed", 0)
273
+ human_text_lines.append(f"\t\t- Completed: {completed_count}")
274
+ human_text_lines.append(f"\t- Total Staff: {staff_analytics.get('total_staff', 0)}")
275
+ human_text_lines.append(f"\t- Average Staff Count: {staff_analytics.get('avg_staff_count', 0.0):.2f}")
276
+ human_text_lines.append(f"\t- Average Wait Time: {queue_analytics.get('average_wait_time', 0):.1f}s")
277
+ avg_service_time = 0.0
278
+ if analytics_results.get("service_times"):
279
+ times = [t.get("service_time", 0.0) for t in analytics_results["service_times"]]
280
+ if times:
281
+ avg_service_time = sum(times) / len(times)
282
+ human_text_lines.append(f"\t- Average Service Time: {avg_service_time:.1f}s")
283
+ human_text_lines.append(f"\t- Business Metrics:")
284
+ human_text_lines.append(f"\t\t- Service Efficiency: {business_metrics.get('service_efficiency', 0)*100:.1f}%")
285
+ human_text_lines.append(f"\t\t- Staff Productivity: {business_metrics.get('staff_productivity', 0):.2f} services/staff")
286
+ human_text = "\n".join(human_text_lines)
287
+
288
+ # Build event in incident format
289
+ event = {
290
+ "incident_id": f"CarService_{frame_id}",
291
+ "incident_type": "CarService",
292
+ "severity_level": business_metrics.get("severity_level", "info"),
293
+ "human_text": human_text,
294
+ "start_time": start_timestamp,
295
+ "end_time": "Incident still active", # or use logic as needed
296
+ "camera_info": stream_info.get("camera_info", {}) if stream_info else {},
297
+ "level_settings": {
298
+ "low": 1,
299
+ "medium": 3,
300
+ "significant": 4,
301
+ "critical": 7
302
+ },
303
+ "alerts": alerts,
304
+ "alert_settings": alert_settings
305
+ }
306
+ # Harmonize tracking_stats fields with people_counting output
307
+ camera_info = self.get_camera_info_from_stream(stream_info)
308
+ input_timestamp = current_timestamp
309
+ reset_timestamp = start_timestamp
310
+ reset_settings = config.create_default_config() if hasattr(config, "create_default_config") else {}
311
+
312
+ # Calculate total_counts (global sum of staff and car)
313
+ total_counts = [
314
+ {"category": "staff", "count": staff_analytics.get("total_staff", 0)},
315
+ {"category": "car", "count": journey_analytics.get("total_journeys", 0)}
316
+ ]
317
+ # Optionally add more categories if needed
318
+ # Calculate current_counts (frame-wise counts)
319
+ current_counts = [
320
+ {"category": "staff", "count": staff_analytics.get("active_staff", 0)},
321
+ {"category": "car", "count": queue_analytics.get("active_cars", 0)}
322
+ ]
323
+ # Detections: include all detections for this frame
324
+ detection_objs = []
325
+ for d in detections:
326
+ bbox = d.get("bounding_box", d.get("bbox", {}))
327
+ detection_objs.append({
328
+ "category": d.get("category", "person"),
329
+ "bounding_box": bbox
330
+ })
331
+
332
+ # Harmonize reset_settings format
333
+ reset_settings = [
334
+ {
335
+ "interval_type": getattr(config, "reset_interval_type", "daily"),
336
+ "reset_time": {
337
+ "value": getattr(config, "reset_time_value", 9),
338
+ "time_unit": getattr(config, "reset_time_unit", "hour")
339
+ }
340
+ }
341
+ ]
342
+
343
+ tracking_stat = {
344
+ "input_timestamp": input_timestamp,
345
+ "reset_timestamp": reset_timestamp,
346
+ "camera_info": camera_info,
347
+ "total_counts": total_counts,
348
+ "current_counts": current_counts,
349
+ "detections": detection_objs,
350
+ "alerts": alerts,
351
+ "alert_settings": alert_settings,
352
+ "reset_settings": reset_settings,
353
+ "human_text": human_text
354
+ }
355
+ # Patch: Build real_time_occupancy with correct service_areas info (not just empty lists)
356
+ real_time_occupancy = analytics_results.get("real_time_occupancy", {}).copy()
357
+ # Overwrite service_areas with per-zone info matching service_areas_status
358
+ service_areas_status = service_analytics.get("service_areas_status", {})
359
+ real_time_occupancy["service_areas"] = {}
360
+ for area_name, area_info in service_areas_status.items():
361
+ real_time_occupancy["service_areas"][area_name] = {
362
+ "cars": area_info.get("cars", 0),
363
+ "car_ids": area_info.get("car_ids", []),
364
+ "staff": area_info.get("staff", 0),
365
+ "staff_ids": area_info.get("staff_ids", []),
366
+ "service_ratio": area_info.get("service_ratio", 0.0),
367
+ "status": area_info.get("status", "inactive"),
368
+ "service_proximity_threshold": area_info.get("service_proximity_threshold", 230)
369
+ }
370
+ business_analytics = {
371
+ "business_metrics": business_metrics,
372
+ "car_queue_analytics": queue_analytics,
373
+ "staff_management_analytics": staff_analytics,
374
+ "service_area_analytics": service_analytics,
375
+ "car_journey_analytics": journey_analytics,
376
+ "service_times": analytics_results.get("service_times", []),
377
+ "real_time_occupancy": real_time_occupancy,
378
+ "alerts": alerts,
379
+ "alert_settings": alert_settings
380
+ }
381
+
382
+ agg_summary[str(frame_id)] = {
383
+ "incidents": event,
384
+ "tracking_stats": tracking_stat,
385
+ "business_analytics": business_analytics,
386
+ "alerts": alerts,
387
+ "human_text": human_text
388
+ }
389
+ return agg_summary
390
+ # --- Chunk tracking for per-chunk analytics ---
391
+ def _init_chunk_tracking(self):
392
+ self._chunk_frame_count = 0
393
+ self._chunk_car_ids = set()
394
+ self._chunk_area_car_ids = defaultdict(set)
395
+
396
+ def _update_chunk_tracking(self, car_detections):
397
+ for car in car_detections:
398
+ track_id = car.get('track_id')
399
+ if track_id is not None:
400
+ self._chunk_car_ids.add(track_id)
401
+ # Find all areas this car is in (from current_areas or by geometry)
402
+ if 'current_areas' in car:
403
+ for area in car['current_areas']:
404
+ self._chunk_area_car_ids[area].add(track_id)
405
+ else:
406
+ # fallback: try to infer from bbox and self.car_areas
407
+ car_center = get_bbox_center(car.get('bbox', car.get('bounding_box', {})))
408
+ for area_name, polygon in getattr(self, 'car_areas', {}).items():
409
+ if point_in_polygon(car_center, polygon):
410
+ self._chunk_area_car_ids[area_name].add(track_id)
411
+
412
+ def _maybe_reset_chunk(self):
413
+ if not hasattr(self, '_chunk_frame_count'):
414
+ self._init_chunk_tracking()
415
+ self._chunk_frame_count += 1
416
+ if self._chunk_frame_count > 1:
417
+ self._init_chunk_tracking()
418
+ def __init__(self):
419
+ """Initialize car service use case."""
420
+ super().__init__("car_service")
421
+ self.category = "automobile"
422
+
423
+ # Advanced tracking structures
424
+ self.car_occupancy = {}
425
+ self.staff_occupancy = {}
426
+ self.service_occupancy = {}
427
+ self.car_queue_times = {}
428
+ self.car_service_times = {}
429
+ self.car_journey = {}
430
+ self.staff_availability = {}
431
+ self.staff_service_count = defaultdict(int)
432
+ self.staff_active_services = {}
433
+
434
+ # Persistent unique staff tracking
435
+ self.global_staff_ids = set()
436
+ self.global_staff_ids_by_area = defaultdict(set)
437
+
438
+ # Persistent unique car tracking
439
+ self.global_car_ids = set()
440
+
441
+ # Persistent staff ID memory (for cross-frame staff identity)
442
+ self.persistent_staff_ids = set()
443
+
444
+ # Analytics
445
+ self.queue_wait_times = defaultdict(list)
446
+ self.service_times = defaultdict(list)
447
+ self.staff_efficiency = defaultdict(list)
448
+ self.peak_occupancy = defaultdict(int)
449
+
450
+ # Journey states
451
+ self.JOURNEY_STATES = {
452
+ 'ENTERING': 'entering',
453
+ 'QUEUING': 'queuing',
454
+ 'BEING_SERVED': 'being_served',
455
+ 'COMPLETED': 'completed',
456
+ 'LEFT': 'left'
457
+ }
458
+
459
+ def get_config_schema(self) -> Dict[str, Any]:
460
+ """Get configuration schema for car service."""
461
+ return {
462
+ "type": "object",
463
+ "properties": {
464
+ "confidence_threshold": {
465
+ "type": "number",
466
+ "minimum": 0.0,
467
+ "maximum": 1.0,
468
+ "default": 0.5,
469
+ "description": "Minimum confidence threshold for detections"
470
+ },
471
+ "car_areas": {
472
+ "type": "object",
473
+ "additionalProperties": {
474
+ "type": "array",
475
+ "items": {
476
+ "type": "array",
477
+ "items": {"type": "number"},
478
+ "minItems": 2,
479
+ "maxItems": 2
480
+ },
481
+ "minItems": 3
482
+ },
483
+ "description": "Car area definitions as polygons"
484
+ },
485
+ "staff_areas": {
486
+ "type": "object",
487
+ "additionalProperties": {
488
+ "type": "array",
489
+ "items": {
490
+ "type": "array",
491
+ "items": {"type": "number"},
492
+ "minItems": 2,
493
+ "maxItems": 2
494
+ },
495
+ "minItems": 3
496
+ },
497
+ "description": "Staff area definitions as polygons"
498
+ },
499
+ "service_areas": {
500
+ "type": "object",
501
+ "additionalProperties": {
502
+ "type": "array",
503
+ "items": {
504
+ "type": "array",
505
+ "items": {"type": "number"},
506
+ "minItems": 2,
507
+ "maxItems": 2
508
+ },
509
+ "minItems": 3
510
+ },
511
+ "description": "Service area definitions as polygons"
512
+ },
513
+ "staff_categories": {
514
+ "type": "array",
515
+ "items": {"type": "string"},
516
+ "default": ["staff", "employee"],
517
+ "description": "Category names that represent staff"
518
+ },
519
+ "car_categories": {
520
+ "type": "array",
521
+ "items": {"type": "string"},
522
+ "default": ["car", "person"],
523
+ "description": "Category names that represent cars"
524
+ },
525
+ "service_proximity_threshold": {
526
+ "type": "number",
527
+ "minimum": 0.0,
528
+ "default": 100.0,
529
+ "description": "Distance threshold for service interactions"
530
+ },
531
+ "max_service_time": {
532
+ "type": "number",
533
+ "minimum": 0.0,
534
+ "default": 1800.0,
535
+ "description": "Maximum expected service time in seconds"
536
+ },
537
+ "buffer_time": {
538
+ "type": "number",
539
+ "minimum": 0.0,
540
+ "default": 2.0,
541
+ "description": "Buffer time for service calculations"
542
+ },
543
+ "enable_tracking": {
544
+ "type": "boolean",
545
+ "default": True,
546
+ "description": "Enable advanced tracking for analytics"
547
+ },
548
+ "enable_journey_analysis": {
549
+ "type": "boolean",
550
+ "default": True,
551
+ "description": "Enable car journey analysis"
552
+ },
553
+ "enable_queue_analytics": {
554
+ "type": "boolean",
555
+ "default": True,
556
+ "description": "Enable queue management analytics"
557
+ },
558
+ "tracking_config": {
559
+ "type": "object",
560
+ "properties": {
561
+ "tracking_method": {
562
+ "type": "string",
563
+ "enum": ["kalman", "sort", "deepsort", "bytetrack"],
564
+ "default": "kalman"
565
+ },
566
+ "max_age": {"type": "integer", "minimum": 1, "default": 30},
567
+ "min_hits": {"type": "integer", "minimum": 1, "default": 3},
568
+ "iou_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.3}
569
+ }
570
+ },
571
+ "enable_smoothing": {
572
+ "type": "boolean",
573
+ "default": True,
574
+ "description": "Enable bounding box smoothing for detections"
575
+ },
576
+ "smoothing_algorithm": {
577
+ "type": "string",
578
+ "enum": ["observability", "kalman"],
579
+ "default": "observability"
580
+ },
581
+ "smoothing_window_size": {
582
+ "type": "integer",
583
+ "minimum": 1,
584
+ "default": 20
585
+ },
586
+ "smoothing_cooldown_frames": {
587
+ "type": "integer",
588
+ "minimum": 0,
589
+ "default": 5
590
+ },
591
+ "smoothing_confidence_threshold": {
592
+ "type": "number",
593
+ "minimum": 0.0,
594
+ "maximum": 1.0,
595
+ "default": 0.5
596
+ },
597
+ "smoothing_confidence_range_factor": {
598
+ "type": "number",
599
+ "minimum": 0.0,
600
+ "default": 0.5
601
+ },
602
+ "reset_interval_type": {
603
+ "type": "string",
604
+ "default": "daily",
605
+ "description": "Interval type for resetting analytics (e.g., daily, weekly)"
606
+ },
607
+ "reset_time_value": {
608
+ "type": "integer",
609
+ "default": 9,
610
+ "description": "Time value for reset (e.g., hour of day)"
611
+ },
612
+ "reset_time_unit": {
613
+ "type": "string",
614
+ "default": "hour",
615
+ "description": "Time unit for reset (e.g., hour, minute)"
616
+ },
617
+ "alert_config": {
618
+ "type": "object",
619
+ "description": "Custom alert configuration settings"
620
+ },
621
+ "queue_length_threshold": {
622
+ "type": "integer",
623
+ "default": 10,
624
+ "description": "Threshold for queue length alerts"
625
+ },
626
+ "service_efficiency_threshold": {
627
+ "type": "number",
628
+ "default": 0.0,
629
+ "description": "Threshold for service efficiency alerts"
630
+ },
631
+ "email_address": {
632
+ "type": "string",
633
+ "default": "john.doe@gmail.com",
634
+ "description": "Email address for alert notifications"
635
+ },
636
+ },
637
+ "required": ["confidence_threshold"],
638
+ "additionalProperties": False
639
+ }
640
+
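# Example configuration payload valid under the schema above (editor's sketch;
# every polygon, threshold and address is hypothetical):
example_config = {
    "confidence_threshold": 0.5,
    "car_areas": {"lane_1": [[0, 0], [600, 0], [600, 400], [0, 400]]},
    "staff_areas": {"counter": [[600, 0], [900, 0], [900, 400], [600, 400]]},
    "service_areas": {"bay_1": [[200, 0], [600, 0], [600, 400], [200, 400]]},
    "staff_categories": ["staff", "employee"],
    "car_categories": ["car", "person"],
    "service_proximity_threshold": 100.0,
    "queue_length_threshold": 10,
    "reset_interval_type": "daily",
    "reset_time_value": 9,
    "reset_time_unit": "hour",
    "email_address": "ops@example.com",
}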
641
+ def create_default_config(self, **overrides) -> CarServiceConfig:
642
+ """Create default configuration with optional overrides."""
643
+ defaults = {
644
+ "category": self.category,
645
+ "usecase": self.name,
646
+ "confidence_threshold": 0.5,
647
+ "enable_tracking": True,
648
+ "enable_analytics": True,
649
+ "enable_journey_analysis": True,
650
+ "enable_queue_analytics": True,
651
+ "staff_categories": ["staff", "employee"],
652
+ "car_categories": ["car"],
653
+ "service_proximity_threshold": 100.0,
654
+ "max_service_time": 1800.0,
655
+ "buffer_time": 2.0,
656
+ "stream_info": {},
657
+ }
658
+ defaults.update(overrides)
659
+ return CarServiceConfig(**defaults)
660
+
661
+ def process(self, data: Any, config: ConfigProtocol,
662
+ context: Optional[ProcessingContext] = None, stream_info: Optional[dict] = None) -> ProcessingResult:
663
+ """
664
+ Process advanced car service analytics.
665
+ """
666
+ start_time = time.time()
667
+
668
+ try:
669
+ if not isinstance(config, CarServiceConfig):
670
+ return self.create_error_result(
671
+ "Invalid configuration type for advanced car service",
672
+ usecase=self.name,
673
+ category=self.category,
674
+ context=context
675
+ )
676
+
677
+ if stream_info is not None:
678
+ if context is None:
679
+ context = ProcessingContext()
680
+ context.stream_info = stream_info
681
+
682
+ if context is None:
683
+ context = ProcessingContext()
684
+
685
+ self._service_proximity_threshold = config.service_proximity_threshold
686
+
687
+ input_format = match_results_structure(data)
688
+ context.input_format = input_format
689
+ context.confidence_threshold = config.confidence_threshold
690
+ context.enable_tracking = config.enable_tracking
691
+
692
+ self.logger.info(f"Processing advanced car service with format: {input_format.value}")
693
+
694
+ self._initialize_areas(config.car_areas, config.staff_areas, config.service_areas)
695
+
696
+ processed_data = data
697
+ if config.confidence_threshold is not None:
698
+ processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
699
+ self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
700
+
701
+ if hasattr(config, 'index_to_category') and config.index_to_category:
702
+ processed_data = apply_category_mapping(processed_data, config.index_to_category)
703
+ self.logger.debug("Applied category mapping")
704
+
705
+ # --- Smoothing logic ---
706
+ if getattr(config, "enable_smoothing", False):
707
+ if not hasattr(self, "smoothing_tracker") or self.smoothing_tracker is None:
708
+ smoothing_config = BBoxSmoothingConfig(
709
+ smoothing_algorithm=getattr(config, "smoothing_algorithm", "observability"),
710
+ window_size=getattr(config, "smoothing_window_size", 20),
711
+ cooldown_frames=getattr(config, "smoothing_cooldown_frames", 5),
712
+ confidence_threshold=getattr(config, "confidence_threshold", 0.5),
713
+ confidence_range_factor=getattr(config, "smoothing_confidence_range_factor", 0.5),
714
+ enable_smoothing=True
715
+ )
716
+ self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
717
+ processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
718
+
719
+ detections = self._extract_detections(processed_data)
720
+ assign_person_by_area(
721
+ detections,
722
+ getattr(config, 'car_areas', {}),
723
+ getattr(config, 'staff_areas', {})
724
+ )
725
+ staff_detections, car_detections = self._categorize_detections(
726
+ detections, config.staff_categories, config.car_categories
727
+ )
728
+ self.logger.debug(f"Extracted {len(staff_detections)} staff and {len(car_detections)} car detections")
729
+
730
+ self._maybe_reset_chunk()
731
+ self._update_chunk_tracking(car_detections)
732
+
733
+ current_time = time.time()
734
+ analytics_results = self._process_comprehensive_analytics(
735
+ staff_detections, car_detections, config, current_time
736
+ )
737
+
738
+ # --- FIX: Ensure agg_summary is top-level and events/tracking_stats are dicts ---
739
+ if isinstance(processed_data, dict):
740
+ agg_summary = self._generate_per_frame_agg_summary(processed_data, analytics_results, config, context, stream_info)
741
+ else:
742
+ agg_summary = {"events": {}, "tracking_stats": {}}
743
+
744
+ insights = self._generate_insights(analytics_results, config)
745
+ alerts = self._check_alerts(analytics_results, config)
746
+ summary = self._generate_summary(analytics_results, alerts)
747
+ predictions = self._extract_predictions(processed_data)
748
+
749
+ context.mark_completed()
750
+
751
+ # Compose result data with harmonized agg_summary structure
752
+ result = self.create_result(
753
+ data={"agg_summary": agg_summary},
754
+ usecase=self.name,
755
+ category=self.category,
756
+ context=context
757
+ )
758
+
759
+ result.summary = summary
760
+ result.insights = insights
761
+ result.predictions = predictions
762
+ result.metrics = analytics_results.get("business_metrics", {})
763
+
764
+ if not config.car_areas and not config.staff_areas:
765
+ result.add_warning("No car or staff areas defined - using global analysis only")
766
+
767
+ if config.service_proximity_threshold > 250:
768
+ result.add_warning(f"High service proximity threshold ({config.service_proximity_threshold}) may miss interactions")
769
+
770
+ self.logger.info(f"Advanced car service analysis completed successfully in {result.processing_time:.2f}s")
771
+ return result
772
+
773
+ except Exception as e:
774
+ self.logger.error(f"Advanced car service analysis failed: {str(e)}", exc_info=True)
775
+
776
+ if context:
777
+ context.mark_completed()
778
+
779
+ return self.create_error_result(
780
+ str(e),
781
+ type(e).__name__,
782
+ usecase=self.name,
783
+ category=self.category,
784
+ context=context
785
+ )
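# End-to-end usage sketch (editor's addition, not part of the packaged file).
# Assumptions: CarServiceConfig accepts the keyword overrides shown,
# CarServiceUseCase needs no constructor arguments beyond those passed to
# BaseProcessor, and detector output is a frame_id -> detections mapping as
# handled by _extract_detections. Coordinates, confidences and track ids below
# are hypothetical.
from matrice_analytics.post_processing.usecases.car_service import CarServiceUseCase

usecase = CarServiceUseCase()
config = usecase.create_default_config(
    confidence_threshold=0.5,
    car_areas={"lane_1": [[0, 0], [600, 0], [600, 400], [0, 400]]},
    staff_areas={"counter": [[600, 0], [900, 0], [900, 400], [600, 400]]},
    service_areas={},
)
frames = {
    "0": [
        {"category": "person", "bbox": [100, 120, 180, 320], "confidence": 0.91, "track_id": 1},
        {"category": "person", "bbox": [620, 100, 700, 300], "confidence": 0.88, "track_id": 2},
    ],
}
result = usecase.process(frames, config, stream_info={"input_settings": {}})
print(result.summary)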
786
+
787
+ def _initialize_areas(self, car_areas: Dict, staff_areas: Dict, service_areas: Dict):
788
+ """Initialize area tracking structures."""
789
+ self.car_areas = car_areas or {}
790
+ self.staff_areas = staff_areas or {}
791
+ self.service_areas = service_areas or {}
792
+
793
+ # Initialize occupancy tracking
794
+ self.car_occupancy = {name: [] for name in self.car_areas}
795
+ self.staff_occupancy = {name: [] for name in self.staff_areas}
796
+ self.service_occupancy = {name: [] for name in self.service_areas}
797
+ self.staff_availability = {area: [] for area in self.staff_areas}
798
+
799
+ def _extract_detections(self, data: Any) -> List[Dict[str, Any]]:
800
+ """Extract detections from processed data."""
801
+ detections = []
802
+
803
+ try:
804
+ if isinstance(data, list):
805
+ # Direct detection list
806
+ detections = [d for d in data if isinstance(d, dict)]
807
+ elif isinstance(data, dict):
808
+ # Frame-based or structured data
809
+ for key, value in data.items():
810
+ if isinstance(value, list):
811
+ detections.extend([d for d in value if isinstance(d, dict)])
812
+ elif isinstance(value, dict) and any(k in value for k in ['bbox', 'bounding_box', 'category']):
813
+ detections.append(value)
814
+ except Exception as e:
815
+ self.logger.warning(f"Failed to extract detections: {str(e)}")
816
+
817
+ return detections
818
+
819
+ def _categorize_detections(self, detections: List[Dict], staff_categories: List[str],
820
+ car_categories: List[str]) -> Tuple[List[Dict], List[Dict]]:
821
+ """Categorize detections into staff and cars, with persistent staff ID logic. Only include detections whose category is in staff_categories or car_categories."""
822
+ staff_detections = []
823
+ car_detections = []
824
+
825
+ for detection in detections:
826
+ track_id = detection.get('track_id')
827
+ category = detection.get('category', detection.get('class', ''))
828
+
829
+ # If this track_id was ever staff, always treat as staff
830
+ if track_id is not None and track_id in self.persistent_staff_ids:
831
+ staff_detections.append(detection)
832
+ continue
833
+
834
+ # If currently detected as staff, add to persistent set
835
+ if category in staff_categories:
836
+ staff_detections.append(detection)
837
+ if track_id is not None:
838
+ self.persistent_staff_ids.add(track_id)
839
+ elif category in car_categories:
840
+ car_detections.append(detection)
841
+ # else: skip detection (do not add to either list)
842
+
843
+ return staff_detections, car_detections
844
+
845
+ def _process_comprehensive_analytics(self, staff_detections: List[Dict], car_detections: List[Dict],
846
+ config: CarServiceConfig, current_time: float) -> Dict[str, Any]:
847
+ """Process comprehensive car service analytics."""
848
+ # Reset current state
849
+ self._reset_current_state()
850
+
851
+ # Process staff and car detections
852
+ self._process_staff_detections(staff_detections, current_time)
853
+ self._process_car_detections(car_detections, current_time)
854
+
855
+ # Update service interactions
856
+ self._update_service_interactions(current_time)
857
+
858
+ # Compile comprehensive results
859
+ return self._compile_analytics_results(current_time)
860
+
861
+ def _reset_current_state(self):
862
+ """Reset current state for new processing cycle."""
863
+ # Clear current occupancy (will be repopulated)
864
+ for area_name in self.car_occupancy:
865
+ self.car_occupancy[area_name] = []
866
+ for area_name in self.staff_occupancy:
867
+ self.staff_occupancy[area_name] = []
868
+ for area_name in self.service_occupancy:
869
+ self.service_occupancy[area_name] = []
870
+
871
+ def _process_staff_detections(self, staff_detections: List[Dict], current_time: float):
872
+ """Process staff detections and update tracking."""
873
+ for staff in staff_detections:
874
+ staff_center = get_bbox_center(staff.get('bbox', staff.get('bounding_box', {})))
875
+ if not staff_center:
876
+ continue
877
+ track_id = staff.get('track_id', f"staff_{hash(str(staff_center))}")
878
+ # Update persistent global staff ids
879
+ self.global_staff_ids.add(track_id)
880
+ # Update staff area occupancy and persistent area staff ids
881
+ for area_name, polygon in self.staff_areas.items():
882
+ if point_in_polygon(staff_center, polygon):
883
+ self.staff_occupancy[area_name].append({
884
+ 'track_id': track_id,
885
+ 'center': staff_center,
886
+ 'timestamp': current_time
887
+ })
888
+ self.global_staff_ids_by_area[area_name].add(track_id)
889
+
890
+ def _process_car_detections(self, car_detections: List[Dict], current_time: float):
891
+ """Process car detections and update journey tracking."""
892
+ for car in car_detections:
893
+ car_center = get_bbox_center(car.get('bbox', car.get('bounding_box', {})))
894
+ if not car_center:
895
+ continue
896
+ track_id = car.get('track_id', f"car_{hash(str(car_center))}")
897
+ # Update persistent global car ids
898
+ self.global_car_ids.add(track_id)
899
+ # Initialize car journey if new
900
+ is_new_journey = False
901
+ if track_id not in self.car_journey:
902
+ self._initialize_car_journey(track_id, current_time)
903
+ is_new_journey = True
904
+ journey = self.car_journey[track_id]
905
+ # Update car area occupancy
906
+ current_areas = []
907
+ for area_name, polygon in self.car_areas.items():
908
+ if point_in_polygon(car_center, polygon):
909
+ current_areas.append(area_name)
910
+ self.car_occupancy[area_name].append({
911
+ 'track_id': track_id,
912
+ 'center': car_center,
913
+ 'timestamp': current_time
914
+ })
915
+ # Update journey state based on current areas
916
+ journey['current_areas'] = current_areas
917
+ journey['last_seen'] = current_time
918
+ journey['positions'].append({
919
+ 'center': car_center,
920
+ 'timestamp': current_time,
921
+ 'areas': current_areas.copy()
922
+ })
923
+ # --- Staff service count: handle BEING_SERVED at initialization ---
924
+ if is_new_journey and self._is_car_being_served(track_id, current_time):
925
+ # car starts in BEING_SERVED state, increment staff_service_count for the nearest staff
926
+ nearest_staff = self._find_nearest_staff(car_center)
927
+ if nearest_staff:
928
+ staff_id, _ = nearest_staff
929
+ self.staff_service_count[staff_id] += 1
930
+ # Update journey state logic
931
+ self._update_car_journey_state(track_id, current_areas, current_time)
932
+
933
+ def _initialize_car_journey(self, track_id: int, current_time: float):
934
+ """Initialize car journey tracking."""
935
+ self.car_journey[track_id] = {
936
+ 'state': self.JOURNEY_STATES['ENTERING'],
937
+ 'start_time': current_time,
938
+ 'last_seen': current_time,
939
+ 'current_areas': [],
940
+ 'areas_visited': set(),
941
+ 'positions': [],
942
+ 'queue_start_time': None,
943
+ 'service_start_time': None,
944
+ 'service_end_time': None,
945
+ 'total_wait_time': 0.0,
946
+ 'total_service_time': 0.0,
947
+ 'staff_interactions': []
948
+ }
949
+
950
+ def _update_car_journey_state(self, track_id: int, current_areas: List[str], current_time: float):
951
+ """Update car journey state based on current location."""
952
+ journey = self.car_journey[track_id]
953
+ # Update areas visited
954
+ journey['areas_visited'].update(current_areas)
955
+ # State transition logic
956
+ if journey['state'] == self.JOURNEY_STATES['ENTERING']:
957
+ if current_areas:
958
+ journey['state'] = self.JOURNEY_STATES['QUEUING']
959
+ journey['queue_start_time'] = current_time
960
+ elif journey['state'] == self.JOURNEY_STATES['QUEUING']:
961
+ # Check if car is being served (near staff)
962
+ if self._is_car_being_served(track_id, current_time):
963
+ journey['state'] = self.JOURNEY_STATES['BEING_SERVED']
964
+ journey['service_start_time'] = current_time
965
+ if journey['queue_start_time']:
966
+ journey['total_wait_time'] = current_time - journey['queue_start_time']
967
+ # --- Staff service count: increment only on QUEUING -> BEING_SERVED transition ---
968
+ car_center = journey['positions'][-1]['center'] if journey['positions'] else None
969
+ if car_center:
970
+ nearest_staff = self._find_nearest_staff(car_center)
971
+ if nearest_staff:
972
+ staff_id, _ = nearest_staff
973
+ self.staff_service_count[staff_id] += 1
974
+ elif journey['state'] == self.JOURNEY_STATES['BEING_SERVED']:
975
+ # Check if service is completed
976
+ if not self._is_car_being_served(track_id, current_time):
977
+ journey['state'] = self.JOURNEY_STATES['COMPLETED']
978
+ journey['service_end_time'] = current_time
979
+ if journey['service_start_time']:
980
+ journey['total_service_time'] = current_time - journey['service_start_time']
981
+ # --- Service time tracking: record in self.service_times ---
982
+ # Try to associate with staff_id if possible
983
+ car_center = journey['positions'][-1]['center'] if journey['positions'] else None
984
+ staff_id = None
985
+ if car_center:
986
+ nearest_staff = self._find_nearest_staff(car_center)
987
+ if nearest_staff:
988
+ staff_id, _ = nearest_staff
989
+ # Store as per-car service time (flat list)
990
+ self.service_times[track_id].append({
991
+ 'car_id': track_id,
992
+ 'service_time': journey['total_service_time'],
993
+ 'service_start_time': journey['service_start_time'],
994
+ 'service_end_time': journey['service_end_time'],
995
+ 'staff_id': staff_id
996
+ })
997
+
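# Journey state machine implemented above (editor's summary of the code):
#   ENTERING     -> QUEUING       when the car first appears inside any car area
#   QUEUING      -> BEING_SERVED  when _is_car_being_served() becomes True
#                                 (wait time = now - queue_start_time; the
#                                  nearest staff's service count is incremented)
#   BEING_SERVED -> COMPLETED     when _is_car_being_served() turns False
#                                 (service time recorded in self.service_times)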
998
+ def _is_car_being_served(self, car_track_id: int, current_time: float) -> bool:
999
+ """Check if car is currently being served by staff or in overlapping service/car area or proximity."""
1000
+ car_journey = self.car_journey.get(car_track_id)
1001
+ if not car_journey or not car_journey['positions']:
1002
+ return False
1003
+
1004
+ car_center = car_journey['positions'][-1]['center']
1005
+
1006
+ # Get all car areas the car is in
1007
+ car_areas_in = set()
1008
+ for area_name, polygon in self.car_areas.items():
1009
+ if point_in_polygon(car_center, polygon):
1010
+ car_areas_in.add(area_name)
1011
+
1012
+ # Get all service areas the car is in
1013
+ service_areas_in = set()
1014
+ for area_name, polygon in self.service_areas.items():
1015
+ if point_in_polygon(car_center, polygon):
1016
+ service_areas_in.add(area_name)
1017
+
1018
+ # If any area is both a car area and a service area, consider being served
1019
+ if car_areas_in & service_areas_in:
1020
+ return True
1021
+
1022
+ # If car is inside any service area (legacy logic)
1023
+ if service_areas_in:
1024
+ return True
1025
+
1026
+ # If not inside service area, check proximity to staff
1027
+ nearest_staff = self._find_nearest_staff(car_center)
1028
+ if nearest_staff:
1029
+ staff_id, distance = nearest_staff
1030
+ if distance <= self._service_proximity_threshold:
1031
+ return True
1032
+
1033
+ return False
1034
+
1035
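`_is_car_being_served` leans on the shared `point_in_polygon` helper imported elsewhere in the package; its exact implementation is not shown in this diff. A standard ray-casting version, given here only as an assumption of its behavior, looks roughly like this:

    from typing import List, Tuple

    def point_in_polygon(point: Tuple[float, float], polygon: List[Tuple[float, float]]) -> bool:
        """Ray-casting test: count how many polygon edges a horizontal ray from `point` crosses."""
        x, y = point
        inside = False
        n = len(polygon)
        for i in range(n):
            x1, y1 = polygon[i]
            x2, y2 = polygon[(i + 1) % n]
            # Edge straddles the ray's y-level and the crossing lies to the right of the point
            if (y1 > y) != (y2 > y):
                x_cross = x1 + (y - y1) * (x2 - x1) / (y2 - y1)
                if x < x_cross:
                    inside = not inside
        return inside

    print(point_in_polygon((1.0, 1.0), [(0, 0), (4, 0), (4, 4), (0, 4)]))  # True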
+    def _find_nearest_staff(self, car_center: Tuple[float, float]) -> Optional[Tuple[int, float]]:
+        """Find nearest staff member to car."""
+        nearest_staff = None
+        min_distance = float('inf')
+
+        for area_name, staff_list in self.staff_occupancy.items():
+            for staff_info in staff_list:
+                staff_center = staff_info['center']
+                distance = calculate_distance(car_center, staff_center)
+
+                if distance < min_distance:
+                    min_distance = distance
+                    nearest_staff = (staff_info['track_id'], distance)
+
+        return nearest_staff
+
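`calculate_distance` is likewise a shared utility not shown here; the nearest-staff search above presumably assumes a plain Euclidean distance between two (x, y) centers in pixel space. A sketch under that assumption:

    import math
    from typing import Tuple

    def calculate_distance(p1: Tuple[float, float], p2: Tuple[float, float]) -> float:
        """Euclidean distance between two pixel-space centers (assumed semantics)."""
        return math.hypot(p1[0] - p2[0], p1[1] - p2[1])

    print(calculate_distance((0.0, 0.0), (3.0, 4.0)))  # 5.0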
+    def _update_service_interactions(self, current_time: float):
+        """Update service interactions between staff and cars."""
+        for car_id, journey in self.car_journey.items():
+            if journey['state'] == self.JOURNEY_STATES['BEING_SERVED']:
+                if journey['positions']:
+                    car_center = journey['positions'][-1]['center']
+                    nearest_staff = self._find_nearest_staff(car_center)
+                    if nearest_staff:
+                        staff_id, distance = nearest_staff
+                        # Record interaction (do not increment staff_service_count here)
+                        interaction = {
+                            'car_id': car_id,
+                            'staff_id': staff_id,
+                            'distance': distance,
+                            'timestamp': current_time
+                        }
+                        journey['staff_interactions'].append(interaction)
+                        # Note: staff_service_count is now incremented only on state transition or at initialization
+
+    def _compile_analytics_results(self, current_time: float) -> Dict[str, Any]:
+        """Compile comprehensive analytics results."""
+        # --- Previous approach (commented out): ---
+        # real_time_occupancy = {
+        #     "car_areas": self.car_occupancy,
+        #     "staff_areas": self.staff_occupancy,
+        #     "service_areas": self.service_occupancy
+        # }
+
+        # --- New approach: Only keep the last detection per track_id per area ---
+        def get_latest_per_track(area_dict):
+            latest = {}
+            for area_name, occupants in area_dict.items():
+                track_map = {}
+                for occ in occupants:
+                    tid = occ.get('track_id')
+                    ts = occ.get('timestamp', 0)
+                    if tid is not None:
+                        if tid not in track_map or ts > track_map[tid]['timestamp']:
+                            track_map[tid] = occ
+                latest[area_name] = list(track_map.values())
+            return latest
+
+        real_time_occupancy = {
+            "car_areas": get_latest_per_track(self.car_occupancy),
+            "staff_areas": get_latest_per_track(self.staff_occupancy),
+            "service_areas": get_latest_per_track(self.service_occupancy)
+        }
+
+        # --- Service times output: flatten to a list for JSON output (per-car) ---
+        service_times_output = []
+        for car_id, records in self.service_times.items():
+            for rec in records:
+                entry = rec.copy()
+                service_times_output.append(entry)
+
+        return {
+            "car_queue_analytics": self._get_car_queue_results(),
+            "staff_management_analytics": self._get_staff_management_results(),
+            "service_area_analytics": self._get_service_area_results(),
+            "car_journey_analytics": self._get_car_journey_results(),
+            "business_metrics": self._calculate_analytics(current_time),
+            "real_time_occupancy": real_time_occupancy,
+            "service_times": service_times_output,
+            "processing_timestamp": current_time
+        }
+
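The `get_latest_per_track` helper collapses repeated detections of the same track to its most recent entry per area. On toy occupancy data (hypothetical values), it behaves like this:

    # Toy input: two detections of track 7 in the same area; only the latest survives.
    car_occupancy = {
        "lane_1": [
            {"track_id": 7, "timestamp": 10.0, "center": (100, 200)},
            {"track_id": 7, "timestamp": 12.5, "center": (110, 205)},
            {"track_id": 9, "timestamp": 11.0, "center": (300, 180)},
        ]
    }

    def get_latest_per_track(area_dict):
        latest = {}
        for area_name, occupants in area_dict.items():
            track_map = {}
            for occ in occupants:
                tid = occ.get('track_id')
                ts = occ.get('timestamp', 0)
                if tid is not None and (tid not in track_map or ts > track_map[tid]['timestamp']):
                    track_map[tid] = occ
            latest[area_name] = list(track_map.values())
        return latest

    result = get_latest_per_track(car_occupancy)
    print([occ["track_id"] for occ in result["lane_1"]])  # [7, 9] -- one entry per track, latest timestamp kept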
+    def _get_car_queue_results(self) -> Dict[str, Any]:
+        """Get car queue analytics for the current frame chunk."""
+        # Use chunk-based car ids for per-chunk analytics
+        active_cars = len(getattr(self, '_chunk_car_ids', set()))
+        queue_lengths_by_area = {}
+        for area_name in self.car_occupancy:
+            queue_lengths_by_area[area_name] = len(getattr(self, '_chunk_area_car_ids', defaultdict(set))[area_name])
+
+        # For state counts, only count journeys whose track_id is in the current chunk
+        cars_queuing = 0
+        cars_being_served = 0
+        cars_completed = 0
+        # Collect wait times for all cars that have ever queued in this chunk
+        wait_times = []
+        chunk_ids = getattr(self, '_chunk_car_ids', set())
+        now = time.time()
+        for track_id in chunk_ids:
+            journey = self.car_journey.get(track_id)
+            if not journey:
+                continue
+            # Only include if the car has ever queued (should always be true for chunk ids).
+            # Use total_wait_time if the car has left the QUEUING state, else the ongoing wait.
+            if journey['state'] == self.JOURNEY_STATES['QUEUING']:
+                cars_queuing += 1
+                if journey['queue_start_time']:
+                    wait_times.append(now - journey['queue_start_time'])
+            elif journey['state'] == self.JOURNEY_STATES['BEING_SERVED']:
+                cars_being_served += 1
+                if journey['total_wait_time']:
+                    wait_times.append(journey['total_wait_time'])
+            elif journey['state'] in [self.JOURNEY_STATES['COMPLETED'], self.JOURNEY_STATES['LEFT']]:
+                cars_completed += 1
+                if journey['total_wait_time']:
+                    wait_times.append(journey['total_wait_time'])
+
+        n_total = len(wait_times)
+        average_wait_time = sum(wait_times) / n_total if n_total > 0 else 0.0  # 0.0 when no wait times recorded
+
+        queue_analytics = {
+            "active_cars": active_cars,
+            "cars_queuing": cars_queuing,
+            "cars_being_served": cars_being_served,
+            "cars_completed": cars_completed,
+            "average_wait_time": average_wait_time,
+            "queue_lengths_by_area": queue_lengths_by_area
+        }
+        return queue_analytics
+
+    def _get_staff_management_results(self) -> Dict[str, Any]:
+        """Get staff management analytics."""
+        staff_analytics = {
+            "total_staff": len(self.global_staff_ids),
+            "staff_distribution": {area_name: len(self.global_staff_ids_by_area[area_name]) for area_name in self.staff_areas},
+            "staff_utilization": 0.0,
+            "active_staff": 0,
+            "avg_staff_count": 0.0
+        }
+
+        total_services = sum(self.staff_service_count.values())
+        chunk_staff_ids = set()
+        for area_staff in self.staff_occupancy.values():
+            for staff in area_staff:
+                tid = staff.get('track_id')
+                if tid is not None:
+                    chunk_staff_ids.add(tid)
+        staff_analytics["active_staff"] = len(chunk_staff_ids)
+
+        # Calculate overall utilization
+        total_staff_count = staff_analytics["total_staff"]
+        if total_staff_count > 0:
+            staff_analytics["staff_utilization"] = len(chunk_staff_ids) / total_staff_count
+
+        # --- Avg staff count calculation ---
+        if not hasattr(self, '_staff_presence_history'):
+            self._staff_presence_history = []  # list of (timestamp, staff_count)
+        now = time.time()
+        staff_count_now = len(chunk_staff_ids)
+        self._staff_presence_history.append((now, staff_count_now))
+        avg_staff_count = 0.0
+        total_time = 0.0
+        history = self._staff_presence_history
+        if len(history) > 1:
+            for i in range(1, len(history)):
+                t0, c0 = history[i-1]
+                t1, c1 = history[i]
+                dt = t1 - t0
+                avg_staff_count += c0 * dt
+                total_time += dt
+            if total_time > 0:
+                staff_analytics["avg_staff_count"] = avg_staff_count / total_time
+            else:
+                staff_analytics["avg_staff_count"] = staff_count_now
+        else:
+            staff_analytics["avg_staff_count"] = staff_count_now
+
+        staff_efficiency = {}
+        for staff_id in self.global_staff_ids:
+            service_count = self.staff_service_count.get(staff_id, 0)
+            staff_efficiency[staff_id] = {
+                "services_handled": service_count,
+                "efficiency_score": service_count / max(total_services, 1) if total_services > 0 else 0.0
+            }
+        self._internal_staff_efficiency = staff_efficiency
+
+        return staff_analytics
+
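The rolling "avg_staff_count" above is a time-weighted mean: each observed staff count is weighted by how long it persisted, i.e. avg = Σ cᵢ·Δtᵢ / Σ Δtᵢ over consecutive samples. A compact check of that calculation on made-up samples:

    # (timestamp, staff_count) samples; numbers are illustrative only.
    history = [(0.0, 2), (10.0, 2), (15.0, 4), (30.0, 3)]

    weighted = 0.0
    total_time = 0.0
    for (t0, c0), (t1, _) in zip(history, history[1:]):
        dt = t1 - t0
        weighted += c0 * dt      # count held over the interval [t0, t1)
        total_time += dt

    print(weighted / total_time)  # (2*10 + 2*5 + 4*15) / 30 = 3.0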
+    def _get_service_area_results(self) -> Dict[str, Any]:
+        """Get service area analytics (cars and staff counted by polygon inclusion per service area;
+        the staff proximity threshold is included in the output for reference)."""
+        service_analytics = {
+            "service_areas_status": {},
+            "total_active_services": 0
+        }
+
+        service_proximity_threshold = getattr(self, '_service_proximity_threshold', 100.0)
+
+        # Collect all cars and staff (flattened across areas)
+        all_cars = []
+        for area_list in self.car_occupancy.values():
+            all_cars.extend(area_list)
+        all_staff = []
+        for area_list in self.staff_occupancy.values():
+            all_staff.extend(area_list)
+
+        for area_name, polygon in self.service_areas.items():
+            cars_in_area = set()
+            staff_in_area = set()
+
+            # Cars: count only if inside the service area polygon
+            for occ in all_cars:
+                center = occ.get('center')
+                tid = occ.get('track_id')
+                if center is None or tid is None:
+                    continue
+                if point_in_polygon(center, polygon):
+                    cars_in_area.add(tid)
+
+            # Staff: count only if inside the service area polygon
+            for occ in all_staff:
+                center = occ.get('center')
+                tid = occ.get('track_id')
+                if center is None or tid is None:
+                    continue
+                if point_in_polygon(center, polygon):
+                    staff_in_area.add(tid)
+
+            service_analytics["service_areas_status"][area_name] = {
+                "cars": len(cars_in_area),
+                "car_ids": list(cars_in_area),
+                "staff": len(staff_in_area),
+                "staff_ids": list(staff_in_area),
+                "service_ratio": len(cars_in_area) / max(len(staff_in_area), 1),
+                "status": "active" if len(staff_in_area) > 0 else "inactive",
+                "service_proximity_threshold": service_proximity_threshold
+            }
+
+            if len(staff_in_area) > 0:
+                service_analytics["total_active_services"] += 1
+
+        return service_analytics
+
+    def _get_car_journey_results(self) -> Dict[str, Any]:
+        """Get car journey analytics."""
+        journey_analytics = {
+            "total_journeys": len(self.car_journey),
+            "journey_states": {state: 0 for state in self.JOURNEY_STATES.values()},
+            "average_journey_time": 0.0,
+            "popular_areas": {},
+            # "journey_patterns": {}
+        }
+
+        journey_times = []
+        all_areas_visited = []
+
+        for journey in self.car_journey.values():
+            # Count journey states
+            journey_analytics["journey_states"][journey['state']] += 1
+
+            # Calculate journey time
+            if journey['start_time'] and journey['last_seen']:
+                journey_time = journey['last_seen'] - journey['start_time']
+                journey_times.append(journey_time)
+
+            # Collect areas visited
+            all_areas_visited.extend(journey['areas_visited'])
+
+        # Calculate average journey time
+        if journey_times:
+            journey_analytics["average_journey_time"] = sum(journey_times) / len(journey_times)
+
+        # Calculate popular areas
+        from collections import Counter
+        area_counts = Counter(all_areas_visited)
+        journey_analytics["popular_areas"] = dict(area_counts.most_common())
+
+        return journey_analytics
+
+    def _calculate_analytics(self, current_time: float) -> Dict[str, Any]:
+        """Calculate comprehensive business metrics."""
+        total_cars = len(self.car_journey)
+        chunk_ids = getattr(self, '_chunk_car_ids', set())
+        cars_queuing = 0
+        cars_being_served = 0
+        for track_id in chunk_ids:
+            journey = self.car_journey.get(track_id)
+            if not journey:
+                continue
+            if journey['state'] == self.JOURNEY_STATES['QUEUING']:
+                cars_queuing += 1
+            elif journey['state'] == self.JOURNEY_STATES['BEING_SERVED']:
+                cars_being_served += 1
+
+        # Use global staff count (unique staff IDs)
+        # Use active_staff from staff_management_analytics for real-time ratio
+        staff_analytics = self._get_staff_management_results()
+        active_staff = staff_analytics.get("active_staff", 0)
+        total_staff = staff_analytics.get("total_staff", 0)
+
+        metrics = {
+            # Now using per-chunk car count for ratio
+            "car_to_staff_ratio": (cars_queuing + cars_being_served) / max(active_staff, 1),
+            "service_efficiency": 0.0,
+            "queue_performance": 0.0,
+            "staff_productivity": 0.0,
+            "overall_performance": 0.0
+        }
+
+        # Calculate service efficiency
+        completed_services = sum(1 for j in self.car_journey.values()
+                                 if j['state'] == self.JOURNEY_STATES['COMPLETED'])
+        metrics["service_efficiency"] = completed_services / max(total_cars, 1)
+
+        # Calculate queue performance
+        metrics["queue_performance"] = max(0, 1 - (cars_queuing / max(total_cars, 1)))
+
+        # Calculate staff productivity
+        total_services = sum(self.staff_service_count.values())
+        metrics["staff_productivity"] = total_services / max(total_staff, 1)
+
+        # Calculate overall performance
+        metrics["overall_performance"] = (
+            metrics["service_efficiency"] * 0.4 +
+            metrics["queue_performance"] * 0.3 +
+            metrics["staff_productivity"] * 0.3
+        )
+
+        return metrics
+
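`overall_performance` is a fixed 0.4 / 0.3 / 0.3 weighted blend of the three sub-metrics. For example, with a service_efficiency of 0.5, queue_performance of 0.8 and staff_productivity of 1.0 (hypothetical values), the score is 0.4·0.5 + 0.3·0.8 + 0.3·1.0 = 0.74:

    weights = {"service_efficiency": 0.4, "queue_performance": 0.3, "staff_productivity": 0.3}
    metrics = {"service_efficiency": 0.5, "queue_performance": 0.8, "staff_productivity": 1.0}

    overall = sum(weights[k] * metrics[k] for k in weights)
    print(round(overall, 2))  # 0.74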
+    def _check_alerts(self, analytics_results: Dict, config: CarServiceConfig) -> List[Dict]:
+        """Check for alert conditions in advanced car service operations."""
+        alerts = []
+
+        if not config.alert_config:
+            return alerts
+
+        # Check queue length alerts
+        queue_analytics = analytics_results.get("car_queue_analytics", {})
+        cars_queuing = queue_analytics.get("cars_queuing", 0)
+
+        if cars_queuing > 10:  # Threshold for long queues
+            alerts.append({
+                "type": "long_queue",
+                "severity": "warning",
+                "message": f"Long car queue detected ({cars_queuing} cars waiting)",
+                "queue_length": cars_queuing,
+                "recommendation": "Consider adding more staff or opening additional service points"
+            })
+
+        # Check service efficiency alerts
+        business_metrics = analytics_results.get("business_metrics", {})
+        service_efficiency = business_metrics.get("service_efficiency", 0)
+
+        if service_efficiency < 0.5:
+            alerts.append({
+                "type": "low_service_efficiency",
+                "severity": "critical" if service_efficiency < 0.3 else "warning",
+                "message": f"Low service efficiency detected ({service_efficiency:.1%})",
+                "efficiency": service_efficiency,
+                "recommendation": "Review service processes and staff allocation"
+            })
+
+        # Check staff utilization alerts
+        staff_analytics = analytics_results.get("staff_management_analytics", {})
+        staff_utilization = staff_analytics.get("staff_utilization", 0)
+
+        if staff_utilization < 0.6:
+            alerts.append({
+                "type": "low_staff_utilization",
+                "severity": "warning",
+                "message": f"Low staff utilization detected ({staff_utilization:.1%})",
+                "utilization": staff_utilization,
+                "recommendation": "Consider staff redeployment or schedule optimization"
+            })
+
+        return alerts
+
+    def _generate_insights(self, analytics_results: Dict, config: CarServiceConfig) -> List[str]:
+        """Generate actionable insights from advanced car service analysis."""
+        insights = []
+
+        # Queue insights
+        queue_analytics = analytics_results.get("car_queue_analytics", {})
+        active_cars = queue_analytics.get("active_cars", 0)
+        cars_queuing = queue_analytics.get("cars_queuing", 0)
+        cars_being_served = queue_analytics.get("cars_being_served", 0)
+
+        if active_cars == 0:
+            insights.append("No active cars detected in service areas")
+            return insights
+
+        insights.append(f"Active car analysis: {active_cars} total cars")
+
+        if cars_queuing > 0:
+            insights.append(f"📊 Queue status: {cars_queuing} cars waiting")
+
+            avg_wait_time = queue_analytics.get("average_wait_time", 0)
+            if avg_wait_time > 300:  # 5 minutes
+                insights.append(f"⚠️ Long average wait time: {avg_wait_time/60:.1f} minutes")
+            elif avg_wait_time > 0:
+                insights.append(f"⏱️ Average wait time: {avg_wait_time/60:.1f} minutes")
+
+        if cars_being_served > 0:
+            insights.append(f"🔄 Active services: {cars_being_served} cars being served")
+
+        # Staff insights
+        staff_analytics = analytics_results.get("staff_management_analytics", {})
+        total_staff = staff_analytics.get("total_staff", 0)
+        staff_utilization = staff_analytics.get("staff_utilization", 0)
+
+        if total_staff > 0:
+            insights.append(f"Staff deployment: {total_staff} staff members active")
+
+            if staff_utilization >= 0.8:
+                insights.append("✅ High staff utilization - team is actively engaged")
+            elif staff_utilization >= 0.6:
+                insights.append("📊 Good staff utilization")
+            else:
+                insights.append("⚠️ Low staff utilization - consider redeployment")
+
+        # Business performance insights
+        business_metrics = analytics_results.get("business_metrics", {})
+        overall_performance = business_metrics.get("overall_performance", 0)
+
+        if overall_performance >= 0.8:
+            insights.append("🌟 Excellent overall service performance")
+        elif overall_performance >= 0.6:
+            insights.append("✅ Good overall service performance")
+        else:
+            insights.append("📈 Service performance needs improvement")
+
+        # Journey insights
+        journey_analytics = analytics_results.get("car_journey_analytics", {})
+        avg_journey_time = journey_analytics.get("average_journey_time", 0)
+
+        if avg_journey_time > 0:
+            insights.append(f"Car journey: average time {avg_journey_time/60:.1f} minutes")
+
+            if avg_journey_time > 1800:  # 30 minutes
+                insights.append("⚠️ Long car journey times detected")
+
+        return insights
+
+    def _get_start_timestamp_str(self, stream_info: Optional[dict]) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        # For video files, use start_video_timestamp if available, else default to 00:00:00.00
+        if not stream_info:
+            return "00:00:00.00"
+        input_settings = stream_info.get("input_settings", {})
+        stream_type = input_settings.get("stream_type", "video_file")
+        if stream_type == "video_file":
+            start_video_timestamp = stream_info.get("start_video_timestamp", None)
+            if start_video_timestamp:
+                return start_video_timestamp[:8]
+            else:
+                return "00:00:00.00"
+        else:
+            # For streams, persist the first stream_time as the tracking start time
+            if not hasattr(self, "_tracking_start_time") or self._tracking_start_time is None:
+                stream_time_str = stream_info.get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        from datetime import datetime, timezone
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except Exception:
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+            from datetime import datetime, timezone
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
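For live streams, the method pins the tracking start to the first `stream_time` it sees, parsed from a "%Y-%m-%d-%H:%M:%S.%f UTC" string. A standalone illustration of that parsing and re-formatting step (the sample timestamp is made up):

    from datetime import datetime, timezone

    stream_time_str = "2024-05-01-13:45:12.250000 UTC"  # hypothetical sample value
    dt = datetime.strptime(stream_time_str.replace(" UTC", ""), "%Y-%m-%d-%H:%M:%S.%f")
    start_epoch = dt.replace(tzinfo=timezone.utc).timestamp()

    print(datetime.fromtimestamp(start_epoch, tz=timezone.utc).strftime('%Y:%m:%d %H:%M:%S'))
    # 2024:05:01 13:45:12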
+    def _generate_summary(self, analytics_results: Dict, alerts: List) -> str:
+        """Generate a human-readable summary."""
+        # Tab-indented, non-technical summary covering all major analytics sections
+        queue_analytics = analytics_results.get("car_queue_analytics", {})
+        staff_analytics = analytics_results.get("staff_management_analytics", {})
+        service_analytics = analytics_results.get("service_area_analytics", {})
+        journey_analytics = analytics_results.get("car_journey_analytics", {})
+        business_metrics = analytics_results.get("business_metrics", {})
+        service_times = analytics_results.get("service_times", [])
+        occupancy = analytics_results.get("real_time_occupancy", {})
+
+        def tabbed_section(title, dct, omit_keys=None):
+            if not dct:
+                return f"{title}: None"
+            omit_keys = omit_keys or set()
+            lines = [f"{title}:"]
+            for k, v in dct.items():
+                if k in omit_keys:
+                    continue
+                if isinstance(v, dict):
+                    lines.append(f"\t{k}:")
+                    for sk, sv in v.items():
+                        lines.append(f"\t\t{sk}: {sv}")
+                elif isinstance(v, list):
+                    lines.append(f"\t{k}: [{len(v)} items]")
+                else:
+                    lines.append(f"\t{k}: {v}")
+            return "\n".join(lines)
+
+        def tabbed_list_section(title, lst):
+            if not lst:
+                return f"{title}: None"
+            lines = [f"{title}:"]
+            for i, item in enumerate(lst):
+                lines.append(f"\t{i+1}. {item}")
+            return "\n".join(lines)
+
+        summary = []
+        summary.append(tabbed_section("car_queue_analytics", queue_analytics, omit_keys={"wait_times_completed", "wait_times_ongoing"}))
+        summary.append(tabbed_section("staff_management_analytics", staff_analytics, omit_keys={"staff_efficiency"}))
+        summary.append(tabbed_section("service_area_analytics", service_analytics))
+        summary.append(tabbed_section("car_journey_analytics", journey_analytics))
+        summary.append(tabbed_section("business_metrics", business_metrics))
+        summary.append(tabbed_section("service_times", {"service_times": service_times}))
+        summary.append(tabbed_section("real_time_occupancy", occupancy))
+
+        if alerts:
+            critical_alerts = sum(1 for alert in alerts if alert.get("severity") == "critical")
+            if critical_alerts > 0:
+                summary.append(f"ALERTS: {critical_alerts} critical alert(s)")
+            else:
+                summary.append(f"ALERTS: {len(alerts)} alert(s)")
+
+        return "\n".join(summary)
+
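`tabbed_section` renders one top-level dict as a tab-indented block, flattening nested dicts one level and abbreviating lists to their length. On a toy dict (illustrative values only, with the same helper as above) the rendered text looks like this, tabs shown as indentation:

    toy = {"active_cars": 3, "queue_lengths_by_area": {"lane_1": 2}, "wait_times": [4.2, 7.9]}
    print(tabbed_section("car_queue_analytics", toy))
    # car_queue_analytics:
    #     active_cars: 3
    #     queue_lengths_by_area:
    #         lane_1: 2
    #     wait_times: [2 items]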
+    def _extract_predictions(self, data: Any) -> Dict[str, List[Dict[str, Any]]]:
+        """Extract predictions from processed data for API compatibility, grouped by frame number if available."""
+        predictions = {}
+        try:
+            if isinstance(data, dict):
+                # Frame-based or tracking format
+                for frame_id, items in data.items():
+                    if not isinstance(items, list):
+                        continue
+                    frame_preds = []
+                    for item in items:
+                        if isinstance(item, dict):
+                            pred = {
+                                "category": item.get("category", item.get("class", "unknown")),
+                                "confidence": item.get("confidence", item.get("score", 0.0)),
+                                "bounding_box": item.get("bounding_box", item.get("bbox", {})),
+                                "track_id": item.get("track_id")
+                            }
+                            frame_preds.append(pred)
+                    if frame_preds:
+                        predictions[str(frame_id)] = frame_preds
+            elif isinstance(data, list):
+                # If not frame-based, put all predictions under a generic key
+                predictions["0"] = []
+                for item in data:
+                    if isinstance(item, dict):
+                        pred = {
+                            "category": item.get("category", item.get("class", "unknown")),
+                            "confidence": item.get("confidence", item.get("score", 0.0)),
+                            "bounding_box": item.get("bounding_box", item.get("bbox", {})),
+                            "track_id": item.get("track_id")
+                        }
+                        predictions["0"].append(pred)
+        except Exception as e:
+            self.logger.warning(f"Failed to extract predictions: {str(e)}")
+        return predictions
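The extractor normalizes either frame-keyed dicts or flat lists into a frame-keyed mapping of lightweight prediction records, falling back from "category"/"confidence"/"bounding_box" to "class"/"score"/"bbox". A self-contained sketch of that normalization on made-up detections (field values are illustrative only):

    raw = {
        "12": [{"category": "car", "confidence": 0.91,
                "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 120, "ymax": 90},
                "track_id": 7}],
        "13": [{"class": "person", "score": 0.84,
                "bbox": {"xmin": 40, "ymin": 15, "xmax": 80, "ymax": 160}}],
    }

    def extract(data):
        out = {}
        for frame_id, items in data.items():
            out[str(frame_id)] = [{
                "category": it.get("category", it.get("class", "unknown")),
                "confidence": it.get("confidence", it.get("score", 0.0)),
                "bounding_box": it.get("bounding_box", it.get("bbox", {})),
                "track_id": it.get("track_id"),
            } for it in items if isinstance(it, dict)]
        return out

    print(extract(raw)["13"][0]["category"])  # 'person' (falls back from the 'class' key)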