matrice 1.0.99140__py3-none-any.whl → 1.0.99142__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,3 @@
- """
- Shelf Inventory Management Use Case for Post-Processing
-
- This module provides shelf inventory monitoring functionality with stock detection,
- zone analysis, and alert generation.
- """
-
  from typing import Any, Dict, List, Optional
  from dataclasses import asdict, dataclass, field
  import time
@@ -25,30 +18,22 @@ from ..utils import (
  )
  from ..core.config import BaseConfig, AlertConfig, ZoneConfig

-
  @dataclass
  class ShelfInventoryConfig(BaseConfig):
-     """Configuration for shelf inventory management use case."""
-     # Smoothing configuration
+     """Configuration for shelf inventory detection use case."""
      enable_smoothing: bool = True
      smoothing_algorithm: str = "observability"
      smoothing_window_size: int = 20
      smoothing_cooldown_frames: int = 5
      smoothing_confidence_range_factor: float = 0.5
-
-     # Confidence thresholds
-     confidence_threshold: float = 0.3
-
+     confidence_threshold: float = 0.6
      usecase_categories: List[str] = field(
          default_factory=lambda: ['Empty-Space', 'Reduced']
      )
-
      target_categories: List[str] = field(
          default_factory=lambda: ['Empty-Space', 'Reduced']
      )
-
      alert_config: Optional[AlertConfig] = None
-
      index_to_category: Optional[Dict[int, str]] = field(
          default_factory=lambda: {
              0: "Empty-Space",
@@ -56,108 +41,18 @@ class ShelfInventoryConfig(BaseConfig):
          }
      )

-
  class ShelfInventoryUseCase(BaseProcessor):
-     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
-         """Get detailed information about track IDs (per frame)."""
-         frame_track_ids = set()
-         for det in detections:
-             tid = det.get('track_id')
-             if tid is not None:
-                 frame_track_ids.add(tid)
-         total_track_ids = set()
-         for s in getattr(self, '_per_category_total_track_ids', {}).values():
-             total_track_ids.update(s)
-         return {
-             "total_count": len(total_track_ids),
-             "current_frame_count": len(frame_track_ids),
-             "total_unique_track_ids": len(total_track_ids),
-             "current_frame_track_ids": list(frame_track_ids),
-             "last_update_time": time.time(),
-             "total_frames_processed": getattr(self, '_total_frame_counter', 0)
-         }
-
-     def _update_tracking_state(self, detections: list):
-         """Track unique categories track_ids per category for total count after tracking."""
-         if not hasattr(self, "_per_category_total_track_ids"):
-             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
-         self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
-
-         for det in detections:
-             cat = det.get("category")
-             raw_track_id = det.get("track_id")
-             if cat not in self.target_categories or raw_track_id is None:
-                 continue
-             bbox = det.get("bounding_box", det.get("bbox"))
-             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
-             det["track_id"] = canonical_id
-             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
-             self._current_frame_track_ids[cat].add(canonical_id)
-
-     def get_total_counts(self):
-         """Return total unique track_id count for each category."""
-         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
-
-     def _format_timestamp_for_video(self, timestamp: float) -> str:
-         """Format timestamp for video chunks (HH:MM:SS.ms format)."""
-         hours = int(timestamp // 3600)
-         minutes = int((timestamp % 3600) // 60)
-         seconds = timestamp % 60
-         return f"{hours:02d}:{minutes:02d}:{seconds:06.2f}"
-
-     def _format_timestamp_for_stream(self, timestamp: float) -> str:
-         """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
-         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
-         return dt.strftime('%Y:%m:%d %H:%M:%S')
-
-     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
-         """Get formatted current timestamp based on stream type."""
-         if not stream_info:
-             return "00:00:00.00"
-         is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
-         if stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
-             stream_time_str = stream_info.get("video_timestamp", "")
-             return stream_time_str[:8]
-         else:
-             stream_time_str = stream_info.get("stream_time", "")
-             if stream_time_str:
-                 try:
-                     timestamp_str = stream_time_str.replace(" UTC", "")
-                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
-                     return self._format_timestamp_for_stream(timestamp)
-                 except:
-                     return self._format_timestamp_for_stream(time.time())
-             else:
-                 return self._format_timestamp_for_stream(time.time())
-
-     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]]) -> str:
-         """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
-         if not stream_info:
-             return "00:00:00"
-         is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
-         if is_video_chunk or stream_info.get("input_settings", {}).get("stream_type", "video_file") == "video_file":
-             return "00:00:00"
-         else:
-             if self._tracking_start_time is None:
-                 stream_time_str = stream_info.get("stream_time", "")
-                 if stream_time_str:
-                     try:
-                         timestamp_str = stream_time_str.replace(" UTC", "")
-                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                         self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
-                     except:
-                         self._tracking_start_time = time.time()
-                 else:
-                     self._tracking_start_time = time.time()
-             dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
-             dt = dt.replace(minute=0, second=0, microsecond=0)
-             return dt.strftime('%Y:%m:%d %H:%M:%S')
+     CATEGORY_DISPLAY = {
+         "Empty-Space": "Empty Space",
+         "Reduced": "Reduced Stock"
+     }

      def __init__(self):
          super().__init__("shelf_inventory")
          self.category = "retail"
-         self.target_categories = ["Empty-Space", "Reduced"]
+         self.CASE_TYPE: Optional[str] = 'shelf_inventory'
+         self.CASE_VERSION: Optional[str] = '1.0'
+         self.target_categories = ['Empty-Space', 'Reduced']
          self.smoothing_tracker = None
          self.tracker = None
          self._total_frame_counter = 0
@@ -167,16 +62,17 @@ class ShelfInventoryUseCase(BaseProcessor):
          self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
          self._track_merge_iou_threshold: float = 0.05
          self._track_merge_time_window: float = 7.0
+         self._ascending_alert_list: List[int] = []
+         self.current_incident_end_timestamp: str = "N/A"

      def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                  stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
-         """Main entry point for shelf inventory post-processing."""
          start_time = time.time()
          if not isinstance(config, ShelfInventoryConfig):
-             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
-                                             context=context)
+             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category, context=context)
          if context is None:
              context = ProcessingContext()
+
          input_format = match_results_structure(data)
          context.input_format = input_format
          context.confidence_threshold = config.confidence_threshold
@@ -215,7 +111,7 @@ class ShelfInventoryUseCase(BaseProcessor):
              if self.tracker is None:
                  tracker_config = TrackerConfig()
                  self.tracker = AdvancedTracker(tracker_config)
-                 self.logger.info("Initialized AdvancedTracker for Shelf Inventory Monitoring")
+                 self.logger.info("Initialized AdvancedTracker for Shelf Inventory")
              processed_data = self.tracker.update(processed_data)
          except Exception as e:
              self.logger.warning(f"AdvancedTracker failed: {e}")
@@ -235,191 +131,349 @@ class ShelfInventoryUseCase(BaseProcessor):
          counting_summary = self._count_categories(processed_data, config)
          total_counts = self.get_total_counts()
          counting_summary['total_counts'] = total_counts
-         insights = self._generate_insights(counting_summary, config)
-         alerts = self._check_alerts(counting_summary, config)
+         alerts = self._check_alerts(counting_summary, frame_number, config)
          predictions = self._extract_predictions(processed_data)
-         summary = self._generate_summary(counting_summary, alerts)

-         events_list = self._generate_events(counting_summary, alerts, config, frame_number, stream_info)
-         tracking_stats_list = self._generate_tracking_stats(counting_summary, insights, summary, config, frame_number,
-                                                             stream_info)
+         incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+         tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+         business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+         summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

-         events = events_list[0] if events_list else {}
+         incidents = incidents_list[0] if incidents_list else {}
          tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+         business_analytics = business_analytics_list[0] if business_analytics_list else {}
+         summary = summary_list[0] if summary_list else {}
+         agg_summary = {str(frame_number): {
+             "incidents": incidents,
+             "tracking_stats": tracking_stats,
+             "business_analytics": business_analytics,
+             "alerts": alerts,
+             "human_text": summary}
+         }

          context.mark_completed()
          result = self.create_result(
-             data={
-                 "counting_summary": counting_summary,
-                 "general_counting_summary": general_counting_summary,
-                 "alerts": alerts,
-                 "total_detections": counting_summary.get("total_count", 0),
-                 "events": events,
-                 "tracking_stats": tracking_stats,
-             },
+             data={"agg_summary": agg_summary},
              usecase=self.name,
              category=self.category,
              context=context
          )
-         result.summary = summary
-         result.insights = insights
-         result.predictions = predictions
          return result
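Editor's note: `process` now emits a single per-frame `agg_summary` payload keyed by the frame number, instead of the older flat `counting_summary`/`events`/`tracking_stats` fields. As a rough illustration only (the frame number and the placeholder values below are assumed, not taken from the package), the returned `result.data` has this shape:

    # Approximate shape of result.data after this change; all values are placeholders.
    example_result_data = {
        "agg_summary": {
            "12": {                          # str(frame_number)
                "incidents": {},             # incidents_list[0]; an empty dict when nothing was detected
                "tracking_stats": {},        # whatever create_tracking_stats returns for this frame
                "business_analytics": {},    # empty here: _generate_business_analytics is called with is_empty=True
                "alerts": [],                # list built by _check_alerts
                "human_text": {},            # per-section summary dict from _generate_summary
            }
        }
    }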
 
-     def _generate_events(self, counting_summary: Dict, alerts: List, config: ShelfInventoryConfig,
-                          frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
-         """Generate structured events for the output format with frame-based keys."""
+     def _check_alerts(self, summary: dict, frame_number: Any, config: ShelfInventoryConfig) -> List[Dict]:
+         def get_trend(data, lookback=900, threshold=0.6):
+             window = data[-lookback:] if len(data) >= lookback else data
+             if len(window) < 2:
+                 return True
+             increasing = 0
+             total = 0
+             for i in range(1, len(window)):
+                 if window[i] >= window[i - 1]:
+                     increasing += 1
+                 total += 1
+             ratio = increasing / total
+             return ratio >= threshold
+
          frame_key = str(frame_number) if frame_number is not None else "current_frame"
-         events = [{frame_key: []}]
-         frame_events = events[0][frame_key]
+         alerts = []
+         total_detections = summary.get("total_count", 0)
+         total_counts_dict = summary.get("total_counts", {})
+         per_category_count = summary.get("per_category_count", {})
+
+         if not config.alert_config:
+             return alerts
+
+         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+             for category, threshold in config.alert_config.count_thresholds.items():
+                 if category == "all" and total_detections > threshold:
+                     alerts.append({
+                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                         "alert_id": f"alert_{category}_{frame_key}",
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                           getattr(config.alert_config, 'alert_value', ['JSON']))}
+                     })
+                 elif category in per_category_count and per_category_count[category] > threshold:
+                     alerts.append({
+                         "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                         "alert_id": f"alert_{category}_{frame_key}",
+                         "incident_category": self.CASE_TYPE,
+                         "threshold_level": threshold,
+                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                         "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                           getattr(config.alert_config, 'alert_value', ['JSON']))}
+                     })
+         return alerts
+
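Editor's note: the new `_check_alerts` gates each alert's "ascending" flag on a simple trend test over the rolling `_ascending_alert_list` of per-frame severity codes. The snippet below is a condensed, standalone restatement of that nested `get_trend` helper, shown with made-up sample data only to make the behaviour concrete:

    from typing import List

    def get_trend(data: List[int], lookback: int = 900, threshold: float = 0.6) -> bool:
        """True when at least `threshold` of consecutive steps in the window are non-decreasing."""
        window = data[-lookback:] if len(data) >= lookback else data
        if len(window) < 2:
            return True  # not enough history: treated as ascending
        increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
        return increasing / (len(window) - 1) >= threshold

    # Severity codes appended per frame (0=low .. 3=critical), as _generate_incidents does.
    print(get_trend([0, 0, 1, 1, 2, 2, 3], threshold=0.8))  # True: every step is non-decreasing
    print(get_trend([3, 2, 2, 1, 0, 0, 1], threshold=0.8))  # False: only 3 of 6 steps are non-decreasing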
+     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ShelfInventoryConfig,
+                             frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         incidents = []
          total_detections = counting_summary.get("total_count", 0)
+         current_timestamp = self._get_current_timestamp_str(stream_info)
+         camera_info = self.get_camera_info_from_stream(stream_info)
+
+         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

          if total_detections > 0:
-             level = "info"
+             level = "low"
              intensity = 5.0
+             start_timestamp = self._get_start_timestamp_str(stream_info)
+             if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                 self.current_incident_end_timestamp = 'Incident still active'
+             elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                 if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                     self.current_incident_end_timestamp = current_timestamp
+             elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                 self.current_incident_end_timestamp = 'N/A'
+
              if config.alert_config and config.alert_config.count_thresholds:
                  threshold = config.alert_config.count_thresholds.get("all", 15)
                  intensity = min(10.0, (total_detections / threshold) * 10)
-                 if intensity >= 7:
+                 if intensity >= 9:
                      level = "critical"
+                     self._ascending_alert_list.append(3)
+                 elif intensity >= 7:
+                     level = "significant"
+                     self._ascending_alert_list.append(2)
                  elif intensity >= 5:
-                     level = "warning"
+                     level = "medium"
+                     self._ascending_alert_list.append(1)
                  else:
-                     level = "info"
+                     level = "low"
+                     self._ascending_alert_list.append(0)
              else:
-                 if total_detections > 25:
+                 if total_detections > 30:
                      level = "critical"
+                     intensity = 10.0
+                     self._ascending_alert_list.append(3)
+                 elif total_detections > 25:
+                     level = "significant"
                      intensity = 9.0
+                     self._ascending_alert_list.append(2)
                  elif total_detections > 15:
-                     level = "warning"
+                     level = "medium"
                      intensity = 7.0
+                     self._ascending_alert_list.append(1)
                  else:
-                     level = "info"
+                     level = "low"
                      intensity = min(10.0, total_detections / 3.0)
+                     self._ascending_alert_list.append(0)

-             human_text_lines = ["EVENTS DETECTED:"]
-             human_text_lines.append(f" - {total_detections} shelf issues detected [INFO]")
+             human_text_lines = [f"SHELF INVENTORY INCIDENTS DETECTED @ {current_timestamp}:"]
+             human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
              human_text = "\n".join(human_text_lines)

-             event = {
-                 "type": "shelf_inventory",
-                 "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
-                 "level": level,
-                 "intensity": round(intensity, 1),
-                 "config": {
-                     "min_value": 0,
-                     "max_value": 10,
-                     "level_settings": {"info": 2, "warning": 5, "critical": 7}
-                 },
-                 "application_name": "Shelf Inventory System",
-                 "application_version": "1.0",
-                 "location_info": None,
-                 "human_text": human_text
-             }
-             frame_events.append(event)
+             alert_settings = []
+             if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                 alert_settings.append({
+                     "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                     "incident_category": self.CASE_TYPE,
+                     "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                     "ascending": True,
+                     "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                       getattr(config.alert_config, 'alert_value', ['JSON']))}
+                 })
+
+             event = self.create_incident(
+                 incident_id=f"{self.CASE_TYPE}_{frame_number}",
+                 incident_type=self.CASE_TYPE,
+                 severity_level=level,
+                 human_text=human_text,
+                 camera_info=camera_info,
+                 alerts=alerts,
+                 alert_settings=alert_settings,
+                 start_time=start_timestamp,
+                 end_time=self.current_incident_end_timestamp,
+                 level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+             )
+             incidents.append(event)
+         else:
+             self._ascending_alert_list.append(0)
+             incidents.append({})

-         for alert in alerts:
-             total_detections = counting_summary.get("total_count", 0)
-             intensity_message = "ALERT: Low shelf issues in the scene"
-             if config.alert_config and config.alert_config.count_thresholds:
-                 threshold = config.alert_config.count_thresholds.get("all", 15)
-                 percentage = (total_detections / threshold) * 100 if threshold > 0 else 0
-                 if percentage < 20:
-                     intensity_message = "ALERT: Low shelf issues in the scene"
-                 elif percentage <= 50:
-                     intensity_message = "ALERT: Moderate shelf issues in the scene"
-                 elif percentage <= 70:
-                     intensity_message = "ALERT: Heavy shelf issues in the scene"
-                 else:
-                     intensity_message = "ALERT: Severe shelf issues in the scene"
+         return incidents
+
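Editor's note: `_generate_incidents` replaces the old info/warning/critical scheme with four levels and appends a numeric code (0-3) per frame that feeds the trend test above. A compact restatement of the thresholded branch (the path taken when an "all" count threshold is configured); the sample counts are hypothetical:

    def severity_for(total_detections, threshold=15):
        """Mirror of the thresholded branch: returns (level, intensity, trend code)."""
        intensity = min(10.0, (total_detections / threshold) * 10)
        if intensity >= 9:
            return "critical", intensity, 3
        elif intensity >= 7:
            return "significant", intensity, 2
        elif intensity >= 5:
            return "medium", intensity, 1
        return "low", intensity, 0

    print(severity_for(6))    # ('low', 4.0, 0)
    print(severity_for(8))    # ('medium', 5.33..., 1)
    print(severity_for(11))   # ('significant', 7.33..., 2)
    print(severity_for(14))   # ('critical', 9.33..., 3)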
+     def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: ShelfInventoryConfig,
+                                  frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+         camera_info = self.get_camera_info_from_stream(stream_info)
+         tracking_stats = []
+         total_detections = counting_summary.get("total_count", 0)
+         total_counts_dict = counting_summary.get("total_counts", {})
+         per_category_count = counting_summary.get("per_category_count", {})
+         current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+         total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items() if count > 0]
+         current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items() if count > 0 or total_detections > 0]
+
+         detections = []
+         for detection in counting_summary.get("detections", []):
+             bbox = detection.get("bounding_box", {})
+             category = detection.get("category", "inventory")
+             if detection.get("masks"):
+                 segmentation = detection.get("masks", [])
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("segmentation"):
+                 segmentation = detection.get("segmentation")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+             elif detection.get("mask"):
+                 segmentation = detection.get("mask")
+                 detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
              else:
-                 if total_detections > 15:
-                     intensity_message = "ALERT: Heavy shelf issues in the scene"
-                 elif total_detections == 1:
-                     intensity_message = "ALERT: Low shelf issues in the scene"
-                 else:
-                     intensity_message = "ALERT: Moderate shelf issues in the scene"
-
-             alert_event = {
-                 "type": alert.get("type", "shelf_alert"),
-                 "stream_time": datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S UTC"),
-                 "level": alert.get("severity", "warning"),
-                 "intensity": 8.0,
-                 "config": {
-                     "min_value": 0,
-                     "max_value": 10,
-                     "level_settings": {"info": 2, "warning": 5, "critical": 7}
-                 },
-                 "application_name": "Shelf Inventory Alert System",
-                 "application_version": "1.0",
-                 "location_info": alert.get("zone"),
-                 "human_text": f"{datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC')} : {intensity_message}"
-             }
-             frame_events.append(alert_event)
+                 detection_obj = self.create_detection_object(category, bbox)
+             detections.append(detection_obj)
+
+         alert_settings = []
+         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+             alert_settings.append({
+                 "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                 "incident_category": self.CASE_TYPE,
+                 "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                 "ascending": True,
+                 "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                   getattr(config.alert_config, 'alert_value', ['JSON']))}
+             })
+
+         human_text_lines = [f"Tracking Statistics:"]
+         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+         for cat, count in per_category_count.items():
+             human_text_lines.append(f"\t{cat}: {count}")
+         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+         for cat, count in total_counts_dict.items():
+             if count > 0:
+                 human_text_lines.append(f"\t{cat}: {count}")
+         human_text_lines.append(f"Alerts: {alerts[0].get('settings', {})} sent @ {current_timestamp}" if alerts else "Alerts: None")
+         human_text = "\n".join(human_text_lines)

-         return events
+         reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
+         tracking_stat = self.create_tracking_stats(
+             total_counts=total_counts,
+             current_counts=current_counts,
+             detections=detections,
+             human_text=human_text,
+             camera_info=camera_info,
+             alerts=alerts,
+             alert_settings=alert_settings,
+             reset_settings=reset_settings,
+             start_time=high_precision_start_timestamp,
+             reset_time=high_precision_reset_timestamp
+         )
+         tracking_stats.append(tracking_stat)
+         return tracking_stats
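Editor's note: the `human_text` block assembled above follows a fixed layout. With invented counts and timestamps (formatted as `%Y:%m:%d %H:%M:%S`, see `_format_timestamp_for_stream`), it would read roughly as follows:

    # Hypothetical sample counts; categories come from target_categories.
    per_category_count = {"Empty-Space": 2, "Reduced": 1}
    total_counts_dict = {"Empty-Space": 5, "Reduced": 3}
    # The assembled human_text would then be (category lines are tab-indented):
    #
    # Tracking Statistics:
    # CURRENT FRAME @ 2024:01:01 09:15:00
    #     Empty-Space: 2
    #     Reduced: 1
    # TOTAL SINCE 2024:01:01 09:00:00
    #     Empty-Space: 5
    #     Reduced: 3
    # Alerts: None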
 
-     def _generate_tracking_stats(self, counting_summary: Dict, insights: List[str], summary: str,
-                                  config: ShelfInventoryConfig, frame_number: Optional[int] = None,
-                                  stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
-         """Generate structured tracking stats for the output format with frame-based keys."""
-         frame_key = str(frame_number) if frame_number is not None else "current_frame"
-         tracking_stats = [{frame_key: []}]
-         frame_tracking_stats = tracking_stats[0][frame_key]
+     def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: ShelfInventoryConfig,
+                                      stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+         if is_empty:
+             return []

-         total_detections = counting_summary.get("total_count", 0)
-         total_counts = counting_summary.get("total_counts", {})
-         cumulative_total = sum(total_counts.values()) if total_counts else 0
-         per_category_count = counting_summary.get("per_category_count", {})
+     def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+         lines = {}
+         lines["Application Name"] = self.CASE_TYPE
+         lines["Application Version"] = self.CASE_VERSION
+         if incidents:
+             lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+         if tracking_stats:
+             lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+         if business_analytics:
+             lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+         if not incidents and not tracking_stats and not business_analytics:
+             lines["Summary"] = "No Summary Data"
+         return [lines]

-         track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
+     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+         frame_track_ids = {det.get('track_id') for det in detections if det.get('track_id') is not None}
+         total_track_ids = set()
+         for s in getattr(self, '_per_category_total_track_ids', {}).values():
+             total_track_ids.update(s)
+         return {
+             "total_count": len(total_track_ids),
+             "current_frame_count": len(frame_track_ids),
+             "total_unique_track_ids": len(total_track_ids),
+             "current_frame_track_ids": list(frame_track_ids),
+             "last_update_time": time.time(),
+             "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+         }

-         current_timestamp = self._get_current_timestamp_str(stream_info)
-         start_timestamp = self._get_start_timestamp_str(stream_info)
+     def _update_tracking_state(self, detections: list):
+         if not hasattr(self, "_per_category_total_track_ids"):
+             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+         self._current_frame_track_ids = {cat: set() for cat in self.target_categories}

-         human_text_lines = []
-         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
-         if total_detections > 0:
-             category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
-             if len(category_counts) == 1:
-                 detection_text = category_counts[0] + " detected"
-             elif len(category_counts) == 2:
-                 detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
-             else:
-                 detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
-             human_text_lines.append(f"\t- {detection_text}")
-         else:
-             human_text_lines.append(f"\t- No detections")
+         for det in detections:
+             cat = det.get("category")
+             raw_track_id = det.get("track_id")
+             if cat not in self.target_categories or raw_track_id is None:
+                 continue
+             bbox = det.get("bounding_box", det.get("bbox"))
+             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+             det["track_id"] = canonical_id
+             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+             self._current_frame_track_ids[cat].add(canonical_id)

-         human_text_lines.append("")
-         human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
-         human_text_lines.append(f"\t- Total Shelf Issues Detected: {cumulative_total}")
-         if total_counts:
-             for cat, count in total_counts.items():
-                 if count > 0:
-                     human_text_lines.append(f"\t- {cat}: {count}")
+     def get_total_counts(self):
+         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}

-         human_text = "\n".join(human_text_lines)
+     def _format_timestamp_for_stream(self, timestamp: float) -> str:
+         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')

-         tracking_stat = {
-             "type": "shelf_inventory",
-             "category": "retail",
-             "count": total_detections,
-             "insights": insights,
-             "summary": summary,
-             "timestamp": datetime.now(timezone.utc).strftime('%Y-%m-%d-%H:%M:%S UTC'),
-             "human_text": human_text,
-             "track_ids_info": track_ids_info,
-             "global_frame_offset": getattr(self, '_global_frame_offset', 0),
-             "local_frame_id": frame_key,
-             "detections": counting_summary.get("detections", [])
-         }
+     def _format_timestamp_for_video(self, timestamp: float) -> str:
+         hours = int(timestamp // 3600)
+         minutes = int((timestamp % 3600) // 60)
+         seconds = round(float(timestamp % 60), 2)
+         return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
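Editor's note: the rewritten video path derives the current timestamp from `start_frame / original_fps` (see `_get_current_timestamp_str` below) and formats it with `_format_timestamp_for_video`. A quick worked example, assuming a 30 fps input:

    # Assumed input: stream_info reports start_frame=450 and original_fps=30.
    start_frame, original_fps = 450, 30
    start_time = start_frame / original_fps            # 15.0 seconds into the clip
    hours = int(start_time // 3600)                    # 0
    minutes = int((start_time % 3600) // 60)           # 0
    seconds = round(float(start_time % 60), 2)         # 15.0
    print(f"{hours:02d}:{minutes:02d}:{seconds:.1f}")  # -> 00:00:15.0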
 
-         frame_tracking_stats.append(tracking_stat)
-         return tracking_stats
+     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+         if not stream_info:
+             return "00:00:00.00" if precision else "00:00:00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 start_time = (int(frame_id) if frame_id else stream_info.get("input_settings", {}).get("start_frame", 30)) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                 return self._format_timestamp_for_video(start_time)
+             return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             start_time = (int(frame_id) if frame_id else stream_info.get("input_settings", {}).get("start_frame", 30)) / stream_info.get("input_settings", {}).get("original_fps", 30)
+             return self._format_timestamp_for_video(start_time)
+         stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+         if stream_time_str:
+             try:
+                 timestamp_str = stream_time_str.replace(" UTC", "")
+                 dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                 timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                 return self._format_timestamp_for_stream(timestamp)
+             except:
+                 return self._format_timestamp_for_stream(time.time())
+         return self._format_timestamp_for_stream(time.time())
+
+     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+         if not stream_info:
+             return "00:00:00" if not precision else "00:00:00.00"
+         if precision:
+             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                 return "00:00:00.00"
+             return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+             return "00:00:00"
+         if self._tracking_start_time is None:
+             stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+             if stream_time_str:
+                 try:
+                     timestamp_str = stream_time_str.replace(" UTC", "")
+                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                     self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                 except:
+                     self._tracking_start_time = time.time()
+             else:
+                 self._tracking_start_time = time.time()
+         dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+         dt = dt.replace(minute=0, second=0, microsecond=0)
+         return dt.strftime('%Y:%m:%d %H:%M:%S')

      def _count_categories(self, detections: list, config: ShelfInventoryConfig) -> dict:
-         """Count the number of detections per category and return a summary dict."""
          counts = {}
          for det in detections:
              cat = det.get('category', 'unknown')
@@ -439,71 +493,7 @@ class ShelfInventoryUseCase(BaseProcessor):
              ]
          }

-     CATEGORY_DISPLAY = {
-         "Empty-Space": "empty-space",
-         "Reduced": "reduced"
-     }
-
-     def _generate_insights(self, summary: dict, config: ShelfInventoryConfig) -> List[str]:
-         """Generate human-readable insights for each category."""
-         insights = []
-         per_cat = summary.get("per_category_count", {})
-         total_detections = summary.get("total_count", 0)
-
-         if total_detections == 0:
-             insights.append("No shelf issues detected in the scene")
-             return insights
-         insights.append(f"EVENT: Detected {total_detections} shelf issues in the scene")
-         intensity_threshold = None
-         if config.alert_config and config.alert_config.count_thresholds and "all" in config.alert_config.count_thresholds:
-             intensity_threshold = config.alert_config.count_thresholds["all"]
-         if intensity_threshold is not None:
-             percentage = (total_detections / intensity_threshold) * 100
-             if percentage < 20:
-                 insights.append(f"INTENSITY: Low shelf issues in the scene ({percentage:.1f}% of capacity)")
-             elif percentage <= 50:
-                 insights.append(f"INTENSITY: Moderate shelf issues in the scene ({percentage:.1f}% of capacity)")
-             elif percentage <= 70:
-                 insights.append(f"INTENSITY: Heavy shelf issues in the scene ({percentage:.1f}% of capacity)")
-             else:
-                 insights.append(f"INTENSITY: Severe shelf issues in the scene ({percentage:.1f}% of capacity)")
-         for cat, count in per_cat.items():
-             display = self.CATEGORY_DISPLAY.get(cat, cat)
-             insights.append(f"{display}: {count}")
-         return insights
-
-     def _check_alerts(self, summary: dict, config: ShelfInventoryConfig) -> List[Dict]:
-         """Check if any alert thresholds are exceeded and return alert dicts."""
-         alerts = []
-         if not config.alert_config:
-             return alerts
-         total = summary.get("total_count", 0)
-         if config.alert_config.count_thresholds:
-             for category, threshold in config.alert_config.count_thresholds.items():
-                 if category == "all" and total >= threshold:
-                     alerts.append({
-                         "type": "count_threshold",
-                         "severity": "warning",
-                         "message": f"Total shelf issues ({total}) exceeds threshold ({threshold})",
-                         "category": category,
-                         "current_count": total,
-                         "threshold": threshold
-                     })
-                 elif category in summary.get("per_category_count", {}):
-                     count = summary.get("per_category_count", {})[category]
-                     if count >= threshold:
-                         alerts.append({
-                             "type": "count_threshold",
-                             "severity": "warning",
-                             "message": f"{category} count ({count}) exceeds threshold ({threshold})",
-                             "category": category,
-                             "current_count": count,
-                             "threshold": threshold
-                         })
-         return alerts
-
      def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
-         """Extract prediction details for output (category, confidence, bounding box)."""
          return [
              {
                  "category": det.get("category", "unknown"),
@@ -513,28 +503,7 @@ class ShelfInventoryUseCase(BaseProcessor):
              for det in detections
          ]

-     def _generate_summary(self, summary: dict, alerts: List) -> str:
-         """Generate a human_text string for the result, including per-category insights."""
-         total = summary.get("total_count", 0)
-         per_cat = summary.get("per_category_count", {})
-         cumulative = summary.get("total_counts", {})
-         cumulative_total = sum(cumulative.values()) if cumulative else 0
-         lines = []
-         if total > 0:
-             lines.append(f"{total} shelf issues detected")
-             if per_cat:
-                 lines.append("Shelf issues:")
-                 for cat, count in per_cat.items():
-                     lines.append(f"\t{cat}: {count}")
-         else:
-             lines.append("No shelf issues detected")
-         lines.append(f"Total shelf issues: {cumulative_total}")
-         if alerts:
-             lines.append(f"{len(alerts)} alert(s)")
-         return "\n".join(lines)
-
      def _compute_iou(self, box1: Any, box2: Any) -> float:
-         """Compute IoU between two bounding boxes."""
          def _bbox_to_list(bbox):
              if bbox is None:
                  return []
@@ -572,7 +541,6 @@ class ShelfInventoryUseCase(BaseProcessor):
          return (inter_area / union_area) if union_area > 0 else 0.0

      def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
-         """Return a stable canonical ID for a raw tracker ID."""
          if raw_id is None or bbox is None:
              return raw_id
          now = time.time()
@@ -599,20 +567,17 @@ class ShelfInventoryUseCase(BaseProcessor):
          self._canonical_tracks[canonical_id] = {
              "last_bbox": bbox,
              "last_update": now,
-             "raw_ids": {raw_id},
+             "raw_ids": {raw_id}
          }
          return canonical_id

      def _format_timestamp(self, timestamp: float) -> str:
-         """Format a timestamp for human-readable output."""
          return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')

      def _get_tracking_start_time(self) -> str:
-         """Get the tracking start time, formatted as a string."""
          if self._tracking_start_time is None:
              return "N/A"
          return self._format_timestamp(self._tracking_start_time)

      def _set_tracking_start_time(self) -> None:
-         """Set the tracking start time to the current time."""
          self._tracking_start_time = time.time()
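Editor's note: although only its docstring changes here, `_merge_or_register_track` is what keeps the cumulative counts stable: a new raw tracker ID is folded into an existing canonical track when its box overlaps that track's last box (IoU at or above `_track_merge_iou_threshold`, 0.05) within `_track_merge_time_window` (7.0 s). The following is a minimal, self-contained sketch of that idea, not the package's exact implementation; it assumes `[x1, y1, x2, y2]` boxes:

    import time

    def iou(a, b):
        """IoU of two [x1, y1, x2, y2] boxes."""
        ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
        ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
        inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
        union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
        return inter / union if union > 0 else 0.0

    def merge_or_register(raw_id, bbox, canonical_tracks, iou_thr=0.05, window=7.0):
        """Reuse a canonical ID whose last box overlaps bbox recently enough; otherwise register raw_id."""
        now = time.time()
        for canonical_id, info in canonical_tracks.items():
            if now - info["last_update"] <= window and iou(bbox, info["last_bbox"]) >= iou_thr:
                info.update(last_bbox=bbox, last_update=now)
                info["raw_ids"].add(raw_id)
                return canonical_id
        canonical_tracks[raw_id] = {"last_bbox": bbox, "last_update": now, "raw_ids": {raw_id}}
        return raw_id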