matrice_analytics-0.1.60-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. matrice_analytics/__init__.py +28 -0
  2. matrice_analytics/boundary_drawing_internal/README.md +305 -0
  3. matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
  4. matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
  5. matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
  6. matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
  7. matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
  8. matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
  9. matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
  10. matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
  11. matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
  12. matrice_analytics/post_processing/README.md +455 -0
  13. matrice_analytics/post_processing/__init__.py +732 -0
  14. matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
  15. matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
  16. matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
  17. matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
  18. matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
  19. matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
  20. matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
  21. matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
  22. matrice_analytics/post_processing/config.py +146 -0
  23. matrice_analytics/post_processing/core/__init__.py +63 -0
  24. matrice_analytics/post_processing/core/base.py +704 -0
  25. matrice_analytics/post_processing/core/config.py +3291 -0
  26. matrice_analytics/post_processing/core/config_utils.py +925 -0
  27. matrice_analytics/post_processing/face_reg/__init__.py +43 -0
  28. matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
  29. matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
  30. matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
  31. matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
  32. matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
  33. matrice_analytics/post_processing/ocr/__init__.py +0 -0
  34. matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
  35. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
  36. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
  37. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
  38. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
  39. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
  40. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
  41. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
  42. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
  43. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
  44. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
  45. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
  46. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
  47. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
  48. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
  49. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
  50. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
  51. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
  52. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
  53. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
  54. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
  55. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
  56. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
  57. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
  58. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
  59. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
  60. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
  61. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
  62. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
  63. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
  64. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
  65. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
  66. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
  67. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
  68. matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
  69. matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
  70. matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
  71. matrice_analytics/post_processing/post_processor.py +1175 -0
  72. matrice_analytics/post_processing/test_cases/__init__.py +1 -0
  73. matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
  74. matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
  75. matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
  76. matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
  77. matrice_analytics/post_processing/test_cases/test_config.py +852 -0
  78. matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
  79. matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
  80. matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
  81. matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
  82. matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
  83. matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
  84. matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
  85. matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
  86. matrice_analytics/post_processing/usecases/__init__.py +267 -0
  87. matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
  88. matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
  89. matrice_analytics/post_processing/usecases/age_detection.py +842 -0
  90. matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
  91. matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
  92. matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
  93. matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
  94. matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
  95. matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
  96. matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
  97. matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
  98. matrice_analytics/post_processing/usecases/car_service.py +1601 -0
  99. matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
  100. matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
  101. matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
  102. matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
  103. matrice_analytics/post_processing/usecases/color/clip.py +660 -0
  104. matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
  105. matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
  106. matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
  107. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
  108. matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
  109. matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
  110. matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
  111. matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
  112. matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
  113. matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
  114. matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
  115. matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
  116. matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
  117. matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
  118. matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
  119. matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
  120. matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
  121. matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
  122. matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
  123. matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
  124. matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
  125. matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
  126. matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
  127. matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
  128. matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
  129. matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
  130. matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
  131. matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
  132. matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
  133. matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
  134. matrice_analytics/post_processing/usecases/leaf.py +821 -0
  135. matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
  136. matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
  137. matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
  138. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
  139. matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
  140. matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
  141. matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
  142. matrice_analytics/post_processing/usecases/parking.py +787 -0
  143. matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
  144. matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
  145. matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
  146. matrice_analytics/post_processing/usecases/people_counting.py +706 -0
  147. matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
  148. matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
  149. matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
  150. matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
  151. matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
  152. matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
  153. matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
  154. matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
  155. matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
  156. matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
  157. matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
  158. matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
  159. matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
  160. matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
  161. matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
  162. matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
  163. matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
  164. matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
  165. matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
  166. matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
  167. matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
  168. matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
  169. matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
  170. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
  171. matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
  172. matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
  173. matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
  174. matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
  175. matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
  176. matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
  177. matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
  178. matrice_analytics/post_processing/utils/__init__.py +150 -0
  179. matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
  180. matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
  181. matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
  182. matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
  183. matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
  184. matrice_analytics/post_processing/utils/color_utils.py +592 -0
  185. matrice_analytics/post_processing/utils/counting_utils.py +182 -0
  186. matrice_analytics/post_processing/utils/filter_utils.py +261 -0
  187. matrice_analytics/post_processing/utils/format_utils.py +293 -0
  188. matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
  189. matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
  190. matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
  191. matrice_analytics/py.typed +0 -0
  192. matrice_analytics-0.1.60.dist-info/METADATA +481 -0
  193. matrice_analytics-0.1.60.dist-info/RECORD +196 -0
  194. matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
  195. matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
  196. matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/usecases/shelf_inventory_detection.py (new file)
@@ -0,0 +1,583 @@
+ from typing import Any, Dict, List, Optional
+ from dataclasses import asdict, dataclass, field
+ import time
+ from datetime import datetime, timezone
+
+ from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
+ from ..utils import (
+     filter_by_confidence,
+     filter_by_categories,
+     apply_category_mapping,
+     count_objects_by_category,
+     count_objects_in_zones,
+     calculate_counting_summary,
+     match_results_structure,
+     bbox_smoothing,
+     BBoxSmoothingConfig,
+     BBoxSmoothingTracker
+ )
+ from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+
+ @dataclass
+ class ShelfInventoryConfig(BaseConfig):
+     """Configuration for shelf inventory detection use case."""
+     enable_smoothing: bool = True
+     smoothing_algorithm: str = "observability"
+     smoothing_window_size: int = 20
+     smoothing_cooldown_frames: int = 5
+     smoothing_confidence_range_factor: float = 0.5
+     confidence_threshold: float = 0.6
+     usecase_categories: List[str] = field(
+         default_factory=lambda: ['Empty-Space', 'Reduced']
+     )
+     target_categories: List[str] = field(
+         default_factory=lambda: ['Empty-Space', 'Reduced']
+     )
+     alert_config: Optional[AlertConfig] = None
+     index_to_category: Optional[Dict[int, str]] = field(
+         default_factory=lambda: {
+             0: "Empty-Space",
+             1: "Reduced"
+         }
+     )
+
+ class ShelfInventoryUseCase(BaseProcessor):
+     CATEGORY_DISPLAY = {
+         "Empty-Space": "Empty Space",
+         "Reduced": "Reduced Stock"
+     }
+
+     def __init__(self):
+         super().__init__("shelf_inventory")
+         self.category = "retail"
+         self.CASE_TYPE: Optional[str] = 'shelf_inventory'
+         self.CASE_VERSION: Optional[str] = '1.0'
+         self.target_categories = ['Empty-Space', 'Reduced']
+         self.smoothing_tracker = None
+         self.tracker = None
+         self._total_frame_counter = 0
+         self._global_frame_offset = 0
+         self._tracking_start_time = None
+         self._track_aliases: Dict[Any, Any] = {}
+         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+         self._track_merge_iou_threshold: float = 0.05
+         self._track_merge_time_window: float = 7.0
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"
+
+     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+         start_time = time.time()
+         if not isinstance(config, ShelfInventoryConfig):
+             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
+         if context is None:
+             context = ProcessingContext()
+
+         input_format = match_results_structure(data)
+         context.input_format = input_format
+         context.confidence_threshold = config.confidence_threshold
+
+         if config.confidence_threshold is not None:
+             processed_data = filter_by_confidence(data, config.confidence_threshold)
+             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+         else:
+             processed_data = data
+             self.logger.debug("No confidence filtering applied")
+
+         if config.index_to_category:
+             processed_data = apply_category_mapping(processed_data, config.index_to_category)
+             self.logger.debug("Applied category mapping")
+
+         if config.target_categories:
+             processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+             self.logger.debug("Applied category filtering")
+
+         if config.enable_smoothing:
+             if self.smoothing_tracker is None:
+                 smoothing_config = BBoxSmoothingConfig(
+                     smoothing_algorithm=config.smoothing_algorithm,
+                     window_size=config.smoothing_window_size,
+                     cooldown_frames=config.smoothing_cooldown_frames,
+                     confidence_threshold=config.confidence_threshold,
+                     confidence_range_factor=config.smoothing_confidence_range_factor,
+                     enable_smoothing=True
+                 )
+                 self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+         try:
+             from ..advanced_tracker import AdvancedTracker
+             from ..advanced_tracker.config import TrackerConfig
+             if self.tracker is None:
+                 tracker_config = TrackerConfig()
+                 self.tracker = AdvancedTracker(tracker_config)
+                 self.logger.info("Initialized AdvancedTracker for Shelf Inventory")
+             processed_data = self.tracker.update(processed_data)
+         except Exception as e:
+             self.logger.warning(f"AdvancedTracker failed: {e}")
+
+         self._update_tracking_state(processed_data)
+         self._total_frame_counter += 1
+
+         frame_number = None
+         if stream_info:
+             input_settings = stream_info.get("input_settings", {})
+             start_frame = input_settings.get("start_frame")
+             end_frame = input_settings.get("end_frame")
+             if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                 frame_number = start_frame
+
+         general_counting_summary = calculate_counting_summary(data)
+         counting_summary = self._count_categories(processed_data, config)
+         total_counts = self.get_total_counts()
+         counting_summary['total_counts'] = total_counts
+         alerts = self._check_alerts(counting_summary, frame_number, config)
+         predictions = self._extract_predictions(processed_data)
+
+         incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+         tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+         business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+         summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+         incidents = incidents_list[0] if incidents_list else {}
+         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = business_analytics_list[0] if business_analytics_list else {}
+         summary = summary_list[0] if summary_list else {}
+         agg_summary = {str(frame_number): {
+             "incidents": incidents,
+             "tracking_stats": tracking_stats,
+             "business_analytics": business_analytics,
+             "alerts": alerts,
+             "human_text": summary}
+         }
+
+         context.mark_completed()
+         result = self.create_result(
+             data={"agg_summary": agg_summary},
+             usecase=self.name,
+             category=self.category,
+             context=context
+         )
+         return result
+
+     def _check_alerts(self, summary: dict, frame_number: Any, config: ShelfInventoryConfig) -> List[Dict]:
+         def get_trend(data, lookback=900, threshold=0.6):
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             return ratio >= threshold
+
+         frame_key = str(frame_number) if frame_number is not None else "current_frame"
+         alerts = []
+         total_detections = summary.get("total_count", 0)
+         total_counts_dict = summary.get("total_counts", {})
+         per_category_count = summary.get("per_category_count", {})
+
+         if not config.alert_config:
+             return alerts
+
+         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total_detections > threshold:
+                     alerts.append({
+                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                         "alert_id": f"alert_{category}_{frame_key}",
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                           getattr(config.alert_config, 'alert_value', ['JSON']))}
+                     })
+                 elif category in per_category_count and per_category_count[category] > threshold:
+                     alerts.append({
+                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                         "alert_id": f"alert_{category}_{frame_key}",
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                           getattr(config.alert_config, 'alert_value', ['JSON']))}
+                     })
+         return alerts
+
+     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ShelfInventoryConfig,
+                             frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         incidents = []
+         total_detections = counting_summary.get("total_count", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info)
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+         if total_detections > 0:
+             level = "low"
+             intensity = 5.0
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                 self.current_incident_end_timestamp = 'Incident still active'
+             elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                 if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                     self.current_incident_end_timestamp = current_timestamp
+             elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                 self.current_incident_end_timestamp = 'N/A'
+
+             if config.alert_config and config.alert_config.count_thresholds:
+                 threshold = config.alert_config.count_thresholds.get("all", 15)
+                 intensity = min(10.0, (total_detections / threshold) * 10)
+                 if intensity >= 9:
+                     level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
+                 elif intensity >= 5:
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     self._ascending_alert_list.append(0)
+             else:
+                 if total_detections > 30:
+                     level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_detections > 25:
+                     level = "significant"
+                     intensity = 9.0
+                     self._ascending_alert_list.append(2)
+                 elif total_detections > 15:
+                     level = "medium"
+                     intensity = 7.0
+                     self._ascending_alert_list.append(1)
+                 else:
+                     level = "low"
+                     intensity = min(10.0, total_detections / 3.0)
+                     self._ascending_alert_list.append(0)
+
+             human_text_lines = [f"SHELF INVENTORY INCIDENTS DETECTED @ {current_timestamp}:"]
+             human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
+             human_text = "\n".join(human_text_lines)
+
+             alert_settings = []
+             if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                 alert_settings.append({
+                     "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                     "incident_category": self.CASE_TYPE,
+                     "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                     "ascending": True,
+                     "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                       getattr(config.alert_config, 'alert_value', ['JSON']))}
+                 })
+
+             event = self.create_incident(
+                 incident_id=f"{self.CASE_TYPE}_{frame_number}",
+                 incident_type=self.CASE_TYPE,
+                 severity_level=level,
+                 human_text=human_text,
+                 camera_info=camera_info,
+                 alerts=alerts,
+                 alert_settings=alert_settings,
+                 start_time=start_timestamp,
+                 end_time=self.current_incident_end_timestamp,
+                 level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+             )
+             incidents.append(event)
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})
+
+         return incidents
+
+     def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: ShelfInventoryConfig,
+                                  frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         tracking_stats = []
+         total_detections = counting_summary.get("total_count", 0)
+         total_counts_dict = counting_summary.get("total_counts", {})
+         per_category_count = counting_summary.get("per_category_count", {})
+         current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+         total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+         current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             bbox = detection.get("bounding_box", {})
+             category = detection.get("category", "inventory")
+             if detection.get("masks"):
+                 segmentation = detection.get("masks", [])
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("segmentation"):
+                 segmentation = detection.get("segmentation")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("mask"):
+                 segmentation = detection.get("mask")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             else:
+                 detection_obj = self.create_detection_object(category, bbox)
+             detections.append(detection_obj)
+
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_settings.append({
+                 "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                 "ascending": True,
+                 "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                   getattr(config.alert_config, 'alert_value', ['JSON']))}
+             })
+
+         human_text_lines = [f"Tracking Statistics:"]
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+         for cat, count in per_category_count.items():
+             human_text_lines.append(f"\t{cat}: {count}")
+         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+         for cat, count in total_counts_dict.items():
+             if count > 0:
+                 human_text_lines.append(f"\t{cat}: {count}")
+         human_text_lines.append(f"Alerts: {alerts[0].get('settings', {})} sent @ {current_timestamp}" if alerts else "Alerts: None")
+         human_text = "\n".join(human_text_lines)
+
+         reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+         tracking_stat = self.create_tracking_stats(
+             total_counts=total_counts,
+             current_counts=current_counts,
+             detections=detections,
+             human_text=human_text,
+             camera_info=camera_info,
+             alerts=alerts,
+             alert_settings=alert_settings,
+             reset_settings=reset_settings,
+             start_time=high_precision_start_timestamp,
+             reset_time=high_precision_reset_timestamp
+         )
+         tracking_stats.append(tracking_stat)
+         return tracking_stats
+
+     def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: ShelfInventoryConfig,
+                                      stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+         if is_empty:
+             return []
+
+     def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+         lines = {}
+         lines["Application Name"] = self.CASE_TYPE
+         lines["Application Version"] = self.CASE_VERSION
+         if incidents:
+             lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+         if tracking_stats:
+             lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+         if business_analytics:
+             lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+         if not incidents and not tracking_stats and not business_analytics:
+             lines["Summary"] = "No Summary Data"
+         return [lines]
+
+     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+         frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
+         total_track_ids = set()
+         for s in getattr(self, '_per_category_total_track_ids', {}).values():
+             total_track_ids.update(s)
+         return {
+             "total_count": len(total_track_ids),
+             "current_frame_count": len(frame_track_ids),
+             "total_unique_track_ids": len(total_track_ids),
+             "current_frame_track_ids": list(frame_track_ids),
+             "last_update_time": time.time(),
+             "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+         }
+
+     def _update_tracking_state(self, detections: list):
+         if not hasattr(self, "_per_category_total_track_ids"):
+             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+             self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+         for det in detections:
+             cat = det.get("category")
+             raw_track_id = det.get("track_id")
+             if cat not in self.target_categories or raw_track_id is None:
+                 continue
+             bbox = det.get("bounding_box", det.get("bbox"))
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+             det["track_id"] = canonical_id
+             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+             self._current_frame_track_ids[cat].add(canonical_id)
+
+     def get_total_counts(self):
+         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+     def _format_timestamp_for_stream(self, timestamp: float) -> str:
+         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+     def _format_timestamp_for_video(self, timestamp: float) -> str:
+         hours = int(timestamp // 3600)
+         minutes = int((timestamp % 3600) // 60)
+         seconds = round(float(timestamp % 60), 2)
+         return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+         if not stream_info:
+             return "00:00:00.00" if precision else "00:00:00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 start_time = (int(frame_id) if frame_id else stream_info.get("input_settings", {}).get("start_frame", 30)) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                 return self._format_timestamp_for_video(start_time)
+             return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             start_time = (int(frame_id) if frame_id else stream_info.get("input_settings", {}).get("start_frame", 30)) / stream_info.get("input_settings", {}).get("original_fps", 30)
+             return self._format_timestamp_for_video(start_time)
+         stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+         if stream_time_str:
+             try:
+                 timestamp_str = stream_time_str.replace(" UTC", "")
+                 dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                 timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                 return self._format_timestamp_for_stream(timestamp)
+             except:
+                 return self._format_timestamp_for_stream(time.time())
+         return self._format_timestamp_for_stream(time.time())
+
+     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+         if not stream_info:
+             return "00:00:00" if not precision else "00:00:00.00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 return "00:00:00.00"
+             return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             return "00:00:00"
+         if self._tracking_start_time is None:
+             stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+             if stream_time_str:
+                 try:
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                 except:
+                     self._tracking_start_time = time.time()
+             else:
+                 self._tracking_start_time = time.time()
+         dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+         dt = dt.replace(minute=0, second=0, microsecond=0)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+     def _count_categories(self, detections: list, config: ShelfInventoryConfig) -> dict:
+         counts = {}
+         for det in detections:
+             cat = det.get('category', 'unknown')
+             counts[cat] = counts.get(cat, 0) + 1
+         return {
+             "total_count": sum(counts.values()),
+             "per_category_count": counts,
+             "detections": [
+                 {
+                     "bounding_box": det.get("bounding_box"),
+                     "category": det.get("category"),
+                     "confidence": det.get("confidence"),
+                     "track_id": det.get("track_id"),
+                     "frame_id": det.get("frame_id")
+                 }
+                 for det in detections
+             ]
+         }
+
+     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+         return [
+             {
+                 "category": det.get("category", "unknown"),
+                 "confidence": det.get("confidence", 0.0),
+                 "bounding_box": det.get("bounding_box", {})
+             }
+             for det in detections
+         ]
+
+     def _compute_iou(self, box1: Any, box2: Any) -> float:
+         def _bbox_to_list(bbox):
+             if bbox is None:
+                 return []
+             if isinstance(bbox, list):
+                 return bbox[:4] if len(bbox) >= 4 else []
+             if isinstance(bbox, dict):
+                 if "xmin" in bbox:
+                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                 if "x1" in bbox:
+                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                 values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                 return values[:4] if len(values) >= 4 else []
+             return []
+
+         l1 = _bbox_to_list(box1)
+         l2 = _bbox_to_list(box2)
+         if len(l1) < 4 or len(l2) < 4:
+             return 0.0
+         x1_min, y1_min, x1_max, y1_max = l1
+         x2_min, y2_min, x2_max, y2_max = l2
+         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+         y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+         inter_x_min = max(x1_min, x2_min)
+         inter_y_min = max(y1_min, y2_min)
+         inter_x_max = min(x1_max, x2_max)
+         inter_y_max = min(y1_max, y2_max)
+         inter_w = max(0.0, inter_x_max - inter_x_min)
+         inter_h = max(0.0, inter_y_max - inter_y_min)
+         inter_area = inter_w * inter_h
+         area1 = (x1_max - x1_min) * (y1_max - y1_min)
+         area2 = (x2_max - x2_min) * (y2_max - y2_min)
+         union_area = area1 + area2 - inter_area
+         return (inter_area / union_area) if union_area > 0 else 0.0
+
+     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+         if raw_id is None or bbox is None:
+             return raw_id
+         now = time.time()
+         if raw_id in self._track_aliases:
+             canonical_id = self._track_aliases[raw_id]
+             track_info = self._canonical_tracks.get(canonical_id)
+             if track_info is not None:
+                 track_info["last_bbox"] = bbox
+                 track_info["last_update"] = now
+                 track_info["raw_ids"].add(raw_id)
+             return canonical_id
+         for canonical_id, info in self._canonical_tracks.items():
+             if now - info["last_update"] > self._track_merge_time_window:
+                 continue
+             iou = self._compute_iou(bbox, info["last_bbox"])
+             if iou >= self._track_merge_iou_threshold:
+                 self._track_aliases[raw_id] = canonical_id
+                 info["last_bbox"] = bbox
+                 info["last_update"] = now
+                 info["raw_ids"].add(raw_id)
+                 return canonical_id
+         canonical_id = raw_id
+         self._track_aliases[raw_id] = canonical_id
+         self._canonical_tracks[canonical_id] = {
+             "last_bbox": bbox,
+             "last_update": now,
+             "raw_ids": {raw_id}
+         }
+         return canonical_id
+
+     def _format_timestamp(self, timestamp: float) -> str:
+         return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+     def _get_tracking_start_time(self) -> str:
+         if self._tracking_start_time is None:
+             return "N/A"
+         return self._format_timestamp(self._tracking_start_time)
+
+     def _set_tracking_start_time(self) -> None:
+         self._tracking_start_time = time.time()
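
For orientation, here is a minimal usage sketch of the ShelfInventoryUseCase added in this file. It is not taken from the package's documentation: the import path follows the module location in the file list above, the detection-dict shape (category, confidence, bounding_box with xmin/ymin/xmax/ymax keys) is inferred from how _count_categories and _compute_iou read detections, and the assumption that ProcessingResult exposes the payload as a data attribute mirrors the create_result(data=...) call. Whether BaseConfig or the stream_info argument expect additional fields is not shown in this diff.

    # Hedged usage sketch (assumptions noted above); not part of the package itself.
    from matrice_analytics.post_processing.usecases.shelf_inventory_detection import (
        ShelfInventoryConfig,
        ShelfInventoryUseCase,
    )

    # Field names come from the ShelfInventoryConfig dataclass in this diff.
    config = ShelfInventoryConfig(
        confidence_threshold=0.6,
        enable_smoothing=True,
        index_to_category={0: "Empty-Space", 1: "Reduced"},
    )

    usecase = ShelfInventoryUseCase()

    # One frame of detections, shaped the way the helper methods above read them
    # (assumed format; adapt to your model's actual output).
    detections = [
        {"category": 0, "confidence": 0.91,
         "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}},
    ]

    result = usecase.process(detections, config)
    # Attribute name assumed from create_result(data={"agg_summary": ...}).
    print(result.data["agg_summary"])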