matrice-analytics 0.1.70__py3-none-any.whl → 0.1.96__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (26)
  1. matrice_analytics/post_processing/__init__.py +8 -2
  2. matrice_analytics/post_processing/config.py +4 -2
  3. matrice_analytics/post_processing/core/base.py +1 -1
  4. matrice_analytics/post_processing/core/config.py +40 -3
  5. matrice_analytics/post_processing/face_reg/face_recognition.py +1014 -201
  6. matrice_analytics/post_processing/face_reg/face_recognition_client.py +171 -29
  7. matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
  8. matrice_analytics/post_processing/post_processor.py +4 -0
  9. matrice_analytics/post_processing/usecases/__init__.py +4 -1
  10. matrice_analytics/post_processing/usecases/advanced_customer_service.py +913 -500
  11. matrice_analytics/post_processing/usecases/color_detection.py +19 -18
  12. matrice_analytics/post_processing/usecases/customer_service.py +356 -9
  13. matrice_analytics/post_processing/usecases/fire_detection.py +241 -23
  14. matrice_analytics/post_processing/usecases/footfall.py +750 -0
  15. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +638 -40
  16. matrice_analytics/post_processing/usecases/people_counting.py +66 -33
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +35 -34
  18. matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
  19. matrice_analytics/post_processing/utils/alert_instance_utils.py +1018 -0
  20. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1338 -0
  21. matrice_analytics/post_processing/utils/incident_manager_utils.py +1754 -0
  22. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
  23. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +26 -22
  24. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
  25. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
  26. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
@@ -26,6 +26,11 @@ from ..utils import (
     BBoxSmoothingConfig,
     BBoxSmoothingTracker,
 )
+# Import business metrics manager for publishing aggregated metrics every 5 minutes
+from ..utils.business_metrics_manager_utils import (
+    BUSINESS_METRICS_MANAGER,
+    BusinessMetricsManagerFactory
+)
 
 def assign_person_by_area(detections, customer_areas, staff_areas):
     """
@@ -65,94 +70,269 @@ def assign_person_by_area(detections, customer_areas, staff_areas):
             det['category'] = 'staff'
 
 class AdvancedCustomerServiceUseCase(BaseProcessor):
-    def _format_timestamp_for_video(self, timestamp: float) -> str:
-        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
-        hours = int(timestamp // 3600)
-        minutes = int((timestamp % 3600) // 60)
-        seconds = round(float(timestamp % 60), 2)
-        return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"
+    def __init__(self):
+        """Initialize advanced customer service use case."""
+        super().__init__("advanced_customer_service")
+        self.category = "sales"
+        self.CASE_TYPE: Optional[str] = 'advanced_customer_service'
+        self.CASE_VERSION: Optional[str] = '1.3'
 
-    def _format_timestamp_for_stream(self, timestamp: float) -> str:
-        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
-        return dt.strftime('%Y:%m:%d %H:%M:%S')
+        # Advanced tracking structures
+        self.customer_occupancy = {}
+        self.staff_occupancy = {}
+        self.service_occupancy = {}
+        self.customer_queue_times = {}
+        self.customer_service_times = {}
+        self.customer_journey = {}
+        self.staff_availability = {}
+        self.staff_service_count = defaultdict(int)
+        self.staff_active_services = {}
 
-    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
-        """Get formatted current timestamp based on stream type."""
-        if not stream_info:
-            return "00:00:00.00"
-        input_settings = stream_info.get("input_settings", {})
-        if precision:
-            if input_settings.get("start_frame", "na") != "na":
-                if frame_id is not None:
-                    start_time = int(frame_id)/input_settings.get("original_fps", 30)
-                else:
-                    start_time = input_settings.get("start_frame", 30)/input_settings.get("original_fps", 30)
-                return self._format_timestamp_for_video(start_time)
-            else:
-                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+        # Persistent unique staff tracking
+        self.global_staff_ids = set()
+        self.global_staff_ids_by_area = defaultdict(set)
 
-        if input_settings.get("start_frame", "na") != "na":
-            if frame_id is not None:
-                start_time = int(frame_id)/input_settings.get("original_fps", 30)
-            else:
-                start_time = input_settings.get("start_frame", 30)/input_settings.get("original_fps", 30)
-            return self._format_timestamp_for_video(start_time)
-        else:
-            # For streams, use stream_time from stream_info
-            stream_time_str = input_settings.get("stream_info", {}).get("stream_time", "")
-            if stream_time_str:
-                try:
-                    timestamp_str = stream_time_str.replace(" UTC", "")
-                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
-                    return self._format_timestamp_for_stream(timestamp)
-                except:
-                    return self._format_timestamp_for_stream(time.time())
+        # Persistent unique customer tracking
+        self.global_customer_ids = set()
+
+        # Persistent staff ID memory (for cross-frame staff identity)
+        self.persistent_staff_ids = set()
+
+        # Analytics
+        self.queue_wait_times = defaultdict(list)
+        self.service_times = defaultdict(list)
+        self.staff_efficiency = defaultdict(list)
+        self.peak_occupancy = defaultdict(int)
+
+        # Journey states
+        self.JOURNEY_STATES = {
+            'ENTERING': 'entering',
+            'QUEUING': 'queuing',
+            'BEING_SERVED': 'being_served',
+            'COMPLETED': 'completed',
+            'LEFT': 'left'
+        }
+
+        # Tracker initialization (for YOLOv8 frame-wise predictions)
+        self.tracker = None
+        self.smoothing_tracker = None
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+
+        # Track merging and aliasing (like people_counting)
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
+
+        # Per-category track ID tracking
+        self._per_category_total_track_ids: Dict[str, set] = {}
+        self._current_frame_track_ids: Dict[str, set] = {}
+
+        # Alert tracking
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+        self.start_timer = None
+
+        # Business metrics manager for publishing aggregated metrics every 5 minutes
+        self._business_metrics_manager_factory: Optional[BusinessMetricsManagerFactory] = None
+        self._business_metrics_manager: Optional[BUSINESS_METRICS_MANAGER] = None
+        self._business_metrics_manager_initialized: bool = False
+
+    def process(self, data: Any, config: ConfigProtocol,
+                context: Optional[ProcessingContext] = None, stream_info: Optional[dict] = None) -> ProcessingResult:
+        """
+        Process advanced customer service analytics.
+        """
+        start_time = time.time()
+        print("-------------------CUS-STREAM_INFO------------------------------")
+        self.logger.info("-------------------CUS-STREAM_INFO------------------------------")
+        self.logger.info(stream_info)
+        self.logger.info("-------------------CUS-STREAM_INFO------------------------------")
+        print("-------------------CUS-STREAM_INFO------------------------------")
+        print(stream_info)
+        print("-------------------CUS-STREAM_INFO------------------------------")
+
+        try:
+            if not isinstance(config, CustomerServiceConfig):
+                return self.create_error_result(
+                    "Invalid configuration type for advanced customer service",
+                    usecase=self.name,
+                    category=self.category,
+                    context=context
+                )
+
+            if stream_info is not None:
+                if context is None:
+                    context = ProcessingContext()
+                context.stream_info = stream_info
+
+            if context is None:
+                context = ProcessingContext()
+
+            self._service_proximity_threshold = config.service_proximity_threshold
+
+            # Initialize business metrics manager once (for publishing aggregated metrics)
+            if not self._business_metrics_manager_initialized:
+                self._initialize_business_metrics_manager_once(config)
+
+            input_format = match_results_structure(data)
+            context.input_format = input_format
+            context.confidence_threshold = config.confidence_threshold
+            context.enable_tracking = config.enable_tracking
+
+            self.logger.info(f"Processing advanced customer service with format: {input_format.value}")
+
+            self._initialize_areas(config.customer_areas, config.staff_areas, config.service_areas)
+
+            processed_data = data
+            if config.confidence_threshold is not None:
+                processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
+                self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+
+            if hasattr(config, 'index_to_category') and config.index_to_category:
+                processed_data = apply_category_mapping(processed_data, config.index_to_category)
+                self.logger.debug("Applied category mapping")
+
+            # --- Smoothing logic ---
+            if getattr(config, "enable_smoothing", False):
+                if not hasattr(self, "smoothing_tracker") or self.smoothing_tracker is None:
+                    smoothing_config = BBoxSmoothingConfig(
+                        smoothing_algorithm=getattr(config, "smoothing_algorithm", "observability"),
+                        window_size=getattr(config, "smoothing_window_size", 20),
+                        cooldown_frames=getattr(config, "smoothing_cooldown_frames", 5),
+                        confidence_threshold=getattr(config, "confidence_threshold", 0.5),
+                        confidence_range_factor=getattr(config, "smoothing_confidence_range_factor", 0.5),
+                        enable_smoothing=True
+                    )
+                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+                processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+            # Extract detections from processed data
+            detections = self._extract_detections(processed_data)
+
+            # --- Apply AdvancedTracker for YOLOv8 frame-wise predictions (like people_counting) ---
+            try:
+                from ..advanced_tracker import AdvancedTracker
+                from ..advanced_tracker.config import TrackerConfig
+                if self.tracker is None:
+                    tracker_config = TrackerConfig(
+                        track_high_thresh=float(config.confidence_threshold) if config.confidence_threshold else 0.4,
+                        track_low_thresh=max(0.05, (float(config.confidence_threshold) / 2) if config.confidence_threshold else 0.05),
+                        new_track_thresh=float(config.confidence_threshold) if config.confidence_threshold else 0.3,
+                        match_thresh=0.8
+                    )
+                    self.tracker = AdvancedTracker(tracker_config)
+                    self.logger.info(f"Initialized AdvancedTracker with thresholds: high={tracker_config.track_high_thresh}, "
+                                     f"low={tracker_config.track_low_thresh}, new={tracker_config.new_track_thresh}")
+                # Apply tracker to get track_ids
+                detections = self.tracker.update(detections)
+                self.logger.debug(f"Applied AdvancedTracker, {len(detections)} detections with track_ids")
+            except Exception as e:
+                self.logger.warning(f"AdvancedTracker failed: {e}, continuing without tracking")
+
+            # Update tracking state (track merging, canonical IDs)
+            self._update_tracking_state(detections)
+            self._total_frame_counter += 1
+
+            # Assign person detections to staff/customer based on area polygons
+            assign_person_by_area(
+                detections,
+                getattr(config, 'customer_areas', {}),
+                getattr(config, 'staff_areas', {})
+            )
+
+            # Categorize detections into staff and customers
+            staff_detections, customer_detections = self._categorize_detections(
+                detections, config.staff_categories, config.customer_categories
+            )
+            self.logger.debug(f"Extracted {len(staff_detections)} staff and {len(customer_detections)} customer detections")
+
+            self._maybe_reset_chunk()
+            self._update_chunk_tracking(customer_detections)
+
+            # Extract frame number from stream_info (like people_counting)
+            frame_number = None
+            if stream_info:
+                input_settings = stream_info.get("input_settings", {})
+                start_frame = input_settings.get("start_frame")
+                end_frame = input_settings.get("end_frame")
+                if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                    frame_number = start_frame
+
+            current_time = time.time()
+            analytics_results = self._process_comprehensive_analytics(
+                staff_detections, customer_detections, config, current_time
+            )
+
+            # Send business metrics to manager for aggregation and publishing
+            # The manager aggregates for 5 minutes and publishes mean/min/max/sum
+            business_metrics = analytics_results.get("business_metrics", {})
+            if business_metrics:
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Sending metrics: {list(business_metrics.keys())}")
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] stream_info available: {stream_info is not None}")
+                if stream_info:
+                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] stream_info.topic: {stream_info.get('topic', 'N/A')}")
+                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] stream_info.camera_info: {stream_info.get('camera_info', {})}")
+                self._send_metrics_to_manager(business_metrics, stream_info)
             else:
-                return self._format_timestamp_for_stream(time.time())
+                self.logger.debug("[BUSINESS_METRICS_MANAGER] No business_metrics in analytics_results")
 
-    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
-        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
-        if not stream_info:
-            return "00:00:00"
-        input_settings = stream_info.get("input_settings", {})
-        if precision:
-            if input_settings.get("start_frame", "na") != "na":
-                return "00:00:00"
+            # --- FIX: Ensure agg_summary is top-level and events/tracking_stats are dicts ---
+            # Reconstruct processed_data dict with frame_number as key for per-frame analytics
+            if frame_number is not None:
+                processed_data_for_summary = {str(frame_number): detections}
+            elif isinstance(processed_data, dict):
+                processed_data_for_summary = processed_data
             else:
-                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+                processed_data_for_summary = {"0": detections}
+
+            agg_summary = self._generate_per_frame_agg_summary(processed_data_for_summary, analytics_results, config, context, stream_info)
 
-        if input_settings.get("start_frame", "na") != "na":
-            return "00:00:00"
-        else:
-            if not hasattr(self, '_tracking_start_time') or self._tracking_start_time is None:
-                stream_time_str = input_settings.get("stream_info", {}).get("stream_time", "")
-                if stream_time_str:
-                    try:
-                        timestamp_str = stream_time_str.replace(" UTC", "")
-                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
-                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
-                    except:
-                        self._tracking_start_time = time.time()
-                else:
-                    self._tracking_start_time = time.time()
-            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
-            dt = dt.replace(minute=0, second=0, microsecond=0)
-            return dt.strftime('%Y:%m:%d %H:%M:%S')
-    def get_camera_info_from_stream(self, stream_info):
-        """Extract camera_info from stream_info, matching people_counting pattern."""
-        if not stream_info:
-            return {}
-        # Try to get camera_info directly
-        camera_info = stream_info.get("camera_info")
-        if camera_info:
-            return camera_info
-        # Fallback: try to extract from nested input_settings
-        input_settings = stream_info.get("input_settings", {})
-        for key in ["camera_info", "camera_id", "location", "site_id"]:
-            if key in input_settings:
-                return {key: input_settings[key]}
-        return {}
+            insights = self._generate_insights(analytics_results, config)
+            alerts = self._check_alerts(analytics_results, config)
+            summary = self._generate_summary(analytics_results, alerts)
+            predictions = self._extract_predictions(processed_data)
+
+            context.mark_completed()
+
+            # Compose result data with harmonized agg_summary structure
+            result = self.create_result(
+                data={"agg_summary": agg_summary},
+                usecase=self.name,
+                category=self.category,
+                context=context
+            )
+
+            result.summary = summary
+            result.insights = insights
+            result.predictions = predictions
+            result.metrics = analytics_results.get("business_metrics", {})
+
+            if not config.customer_areas and not config.staff_areas:
+                result.add_warning("No customer or staff areas defined - using global analysis only")
+
+            if config.service_proximity_threshold > 250:
+                result.add_warning(f"High service proximity threshold ({config.service_proximity_threshold}) may miss interactions")
+
+            self.logger.info(f"Advanced customer service analysis completed successfully in {result.processing_time:.2f}s")
+            return result
+
+        except Exception as e:
+            self.logger.error(f"Advanced customer service analysis failed: {str(e)}", exc_info=True)
+
+            if context:
+                context.mark_completed()
+
+            return self.create_error_result(
+                str(e),
+                type(e).__name__,
+                usecase=self.name,
+                category=self.category,
+                context=context
+            )
+
+
 
     def _generate_per_frame_agg_summary(self, processed_data, analytics_results, config, context, stream_info=None):
         """
         Generate agg_summary dict with per-frame incidents, tracking_stats, business_analytics, alerts, human_text.
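
Note (annotation, not part of the package diff): all three tracker thresholds in the new process() are derived from the single confidence_threshold value. A quick worked example of that arithmetic for the default of 0.5, plain Python with no package imports:

    confidence_threshold = 0.5
    track_high_thresh = float(confidence_threshold) if confidence_threshold else 0.4                   # 0.5
    track_low_thresh = max(0.05, (float(confidence_threshold) / 2) if confidence_threshold else 0.05)  # 0.25
    new_track_thresh = float(confidence_threshold) if confidence_threshold else 0.3                    # 0.5
    print(track_high_thresh, track_low_thresh, new_track_thresh)  # 0.5 0.25 0.5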
@@ -256,10 +436,10 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
         human_text_lines.append(f"\t- Active Customers: {queue_analytics.get('active_customers', 0)}")
         human_text_lines.append(f"\t\t- Queuing: {queue_analytics.get('customers_queuing', 0)}")
-        human_text_lines.append(f"\t\t- Being Served: {queue_analytics.get('customers_being_served', 0)}")
+        #human_text_lines.append(f"\t\t- Being Served: {queue_analytics.get('customers_being_served', 0)}")
         human_text_lines.append(f"\t- Active Staff: {staff_analytics.get('active_staff', 0)}")
-        human_text_lines.append(f"\t- Customer/Staff Ratio: {business_metrics.get('customer_to_staff_ratio', 0):.2f}")
-        human_text_lines.append(f"\t- Queue Performance: {business_metrics.get('queue_performance', 0)*100:.1f}%")
+        # human_text_lines.append(f"\t- Customer/Staff Ratio: {business_metrics.get('customer_to_staff_ratio', 0):.2f}")
+        # human_text_lines.append(f"\t- Queue Performance: {business_metrics.get('queue_performance', 0)*100:.1f}%")
         human_text_lines.append(f"\t- Service Areas: {len(service_analytics.get('service_areas_status', {}))}")
         for area_name, area_info in service_analytics.get('service_areas_status', {}).items():
             customers = area_info.get("customers", 0)
@@ -267,22 +447,22 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
             status = area_info.get("status", "inactive")
             human_text_lines.append(f"\t\t- {area_name}: {status} with {customers} customers and {staff} staff")
         human_text_lines.append("")
-        human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
-        human_text_lines.append(f"\t- Total Customers: {journey_analytics.get('total_journeys', 0)}")
-        completed_count = journey_analytics.get("journey_states", {}).get("completed", 0)
-        human_text_lines.append(f"\t\t- Completed: {completed_count}")
-        human_text_lines.append(f"\t- Total Staff: {staff_analytics.get('total_staff', 0)}")
-        human_text_lines.append(f"\t- Average Staff Count: {staff_analytics.get('avg_staff_count', 0.0):.2f}")
-        human_text_lines.append(f"\t- Average Wait Time: {queue_analytics.get('average_wait_time', 0):.1f}s")
-        avg_service_time = 0.0
-        if analytics_results.get("service_times"):
-            times = [t.get("service_time", 0.0) for t in analytics_results["service_times"]]
-            if times:
-                avg_service_time = sum(times) / len(times)
-        human_text_lines.append(f"\t- Average Service Time: {avg_service_time:.1f}s")
-        human_text_lines.append(f"\t- Business Metrics:")
-        human_text_lines.append(f"\t\t- Service Efficiency: {business_metrics.get('service_efficiency', 0)*100:.1f}%")
-        human_text_lines.append(f"\t\t- Staff Productivity: {business_metrics.get('staff_productivity', 0):.2f} services/staff")
+        # human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
+        # human_text_lines.append(f"\t- Total Customers: {journey_analytics.get('total_journeys', 0)}")
+        # completed_count = journey_analytics.get("journey_states", {}).get("completed", 0)
+        # human_text_lines.append(f"\t\t- Completed: {completed_count}")
+        # human_text_lines.append(f"\t- Total Staff: {staff_analytics.get('total_staff', 0)}")
+        # human_text_lines.append(f"\t- Average Staff Count: {staff_analytics.get('avg_staff_count', 0.0):.2f}")
+        # human_text_lines.append(f"\t- Average Wait Time: {queue_analytics.get('average_wait_time', 0):.1f}s")
+        # avg_service_time = 0.0
+        # if analytics_results.get("service_times"):
+        #     times = [t.get("service_time", 0.0) for t in analytics_results["service_times"]]
+        #     if times:
+        #         avg_service_time = sum(times) / len(times)
+        # human_text_lines.append(f"\t- Average Service Time: {avg_service_time:.1f}s")
+        # human_text_lines.append(f"\t- Business Metrics:")
+        # human_text_lines.append(f"\t\t- Service Efficiency: {business_metrics.get('service_efficiency', 0)*100:.1f}%")
+        # human_text_lines.append(f"\t\t- Staff Productivity: {business_metrics.get('staff_productivity', 0):.2f} services/staff")
         human_text = "\n".join(human_text_lines)
 
         # Build event in incident format
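
Note (annotation, not part of the package diff): with the ratio, queue-performance, and "TOTAL SINCE" lines commented out, the generated human_text now describes only the current frame. An illustrative rendering of the remaining format strings, with all values invented for the example:

    CURRENT FRAME @ 2025:10:27 19:31:20:
    	- Active Customers: 3
    		- Queuing: 1
    	- Active Staff: 2
    	- Service Areas: 1
    		- counter_1: active with 2 customers and 1 staff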
@@ -318,16 +498,17 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         # Calculate current_counts (frame-wise counts)
         current_counts = [
             {"category": "staff", "count": staff_analytics.get("active_staff", 0)},
-            {"category": "customer", "count": queue_analytics.get("active_customers", 0)}
+            {"category": "Active Customers", "count": queue_analytics.get("active_customers", 0)}
         ]
         # Detections: include all detections for this frame
         detection_objs = []
         for d in detections:
             bbox = d.get("bounding_box", d.get("bbox", {}))
-            detection_objs.append({
-                "category": d.get("category", "person"),
-                "bounding_box": bbox
-            })
+            if d.get("category") == "customer" or d.get("category") == "staff" or d.get("category") == "person":
+                detection_objs.append({
+                    "category": d.get("category", "person"),
+                    "bounding_box": bbox
+                })
 
         # Harmonize reset_settings format
         reset_settings = [
@@ -350,7 +531,8 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
             "alerts": alerts,
             "alert_settings": alert_settings,
             "reset_settings": reset_settings,
-            "human_text": human_text
+            "human_text": human_text,
+            "target_categories": ['Staff', 'Active Customers']
         }
         # Patch: Build real_time_occupancy with correct service_areas info (not just empty lists)
         real_time_occupancy = analytics_results.get("real_time_occupancy", {}).copy()
379
561
  "alert_settings": alert_settings
380
562
  }
381
563
 
382
- agg_summary[str(frame_id)] = {
564
+ # agg_summary[str(frame_id)] = {
565
+ # "incidents": event,
566
+ # "tracking_stats": tracking_stat,
567
+ # "business_analytics": business_analytics,
568
+ # "alerts": alerts,
569
+ # "human_text": human_text
570
+ # }
571
+ frame_id = None
572
+ agg_summary = {str(frame_id) : {
383
573
  "incidents": event,
384
574
  "tracking_stats": tracking_stat,
385
575
  "business_analytics": business_analytics,
386
576
  "alerts": alerts,
387
- "human_text": human_text
577
+ "human_text": human_text}
388
578
  }
389
579
  return agg_summary
390
580
  # --- Chunk tracking for per-chunk analytics ---
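
Note (annotation, not part of the package diff): because frame_id is hardcoded to None immediately before the dict is built, every call now returns agg_summary under the single string key "None" rather than one key per frame. A two-line illustration:

    frame_id = None
    agg_summary = {str(frame_id): {"incidents": {}, "tracking_stats": {}}}
    print(list(agg_summary.keys()))  # ['None'] - one fixed key, not a real frame id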
@@ -409,380 +599,125 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
                 if point_in_polygon(customer_center, polygon):
                     self._chunk_area_customer_ids[area_name].add(track_id)
 
-    def _maybe_reset_chunk(self):
-        if not hasattr(self, '_chunk_frame_count'):
-            self._init_chunk_tracking()
-        self._chunk_frame_count += 1
-        if self._chunk_frame_count > 1:
-            self._init_chunk_tracking()
-    def __init__(self):
-        """Initialize advanced customer service use case."""
-        super().__init__("advanced_customer_service")
-        self.category = "sales"
-
-        # Advanced tracking structures
-        self.customer_occupancy = {}
-        self.staff_occupancy = {}
-        self.service_occupancy = {}
-        self.customer_queue_times = {}
-        self.customer_service_times = {}
-        self.customer_journey = {}
-        self.staff_availability = {}
-        self.staff_service_count = defaultdict(int)
-        self.staff_active_services = {}
-
-        # Persistent unique staff tracking
-        self.global_staff_ids = set()
-        self.global_staff_ids_by_area = defaultdict(set)
-
-        # Persistent unique customer tracking
-        self.global_customer_ids = set()
-
-        # Persistent staff ID memory (for cross-frame staff identity)
-        self.persistent_staff_ids = set()
-
-        # Analytics
-        self.queue_wait_times = defaultdict(list)
-        self.service_times = defaultdict(list)
-        self.staff_efficiency = defaultdict(list)
-        self.peak_occupancy = defaultdict(int)
-
-        # Journey states
-        self.JOURNEY_STATES = {
-            'ENTERING': 'entering',
-            'QUEUING': 'queuing',
-            'BEING_SERVED': 'being_served',
-            'COMPLETED': 'completed',
-            'LEFT': 'left'
-        }
-
-    def get_config_schema(self) -> Dict[str, Any]:
-        """Get configuration schema for advanced customer service."""
-        return {
-            "type": "object",
-            "properties": {
-                "confidence_threshold": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "maximum": 1.0,
-                    "default": 0.5,
-                    "description": "Minimum confidence threshold for detections"
-                },
-                "customer_areas": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {"type": "number"},
-                            "minItems": 2,
-                            "maxItems": 2
-                        },
-                        "minItems": 3
-                    },
-                    "description": "Customer area definitions as polygons"
-                },
-                "staff_areas": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {"type": "number"},
-                            "minItems": 2,
-                            "maxItems": 2
-                        },
-                        "minItems": 3
-                    },
-                    "description": "Staff area definitions as polygons"
-                },
-                "service_areas": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {"type": "number"},
-                            "minItems": 2,
-                            "maxItems": 2
-                        },
-                        "minItems": 3
-                    },
-                    "description": "Service area definitions as polygons"
-                },
-                "staff_categories": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "default": ["staff", "employee"],
-                    "description": "Category names that represent staff"
-                },
-                "customer_categories": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "default": ["customer", "person"],
-                    "description": "Category names that represent customers"
-                },
-                "service_proximity_threshold": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 100.0,
-                    "description": "Distance threshold for service interactions"
-                },
-                "max_service_time": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 1800.0,
-                    "description": "Maximum expected service time in seconds"
-                },
-                "buffer_time": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 2.0,
-                    "description": "Buffer time for service calculations"
-                },
-                "enable_tracking": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable advanced tracking for analytics"
-                },
-                "enable_journey_analysis": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable customer journey analysis"
-                },
-                "enable_queue_analytics": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable queue management analytics"
-                },
-                "tracking_config": {
-                    "type": "object",
-                    "properties": {
-                        "tracking_method": {
-                            "type": "string",
-                            "enum": ["kalman", "sort", "deepsort", "bytetrack"],
-                            "default": "kalman"
-                        },
-                        "max_age": {"type": "integer", "minimum": 1, "default": 30},
-                        "min_hits": {"type": "integer", "minimum": 1, "default": 3},
-                        "iou_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.3}
-                    }
-                },
-                "enable_smoothing": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable bounding box smoothing for detections"
-                },
-                "smoothing_algorithm": {
-                    "type": "string",
-                    "enum": ["observability", "kalman"],
-                    "default": "observability"
-                },
-                "smoothing_window_size": {
-                    "type": "integer",
-                    "minimum": 1,
-                    "default": 20
-                },
-                "smoothing_cooldown_frames": {
-                    "type": "integer",
-                    "minimum": 0,
-                    "default": 5
-                },
-                "smoothing_confidence_threshold": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "maximum": 1.0,
-                    "default": 0.5
-                },
-                "smoothing_confidence_range_factor": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 0.5
-                },
-                "reset_interval_type": {
-                    "type": "string",
-                    "default": "daily",
-                    "description": "Interval type for resetting analytics (e.g., daily, weekly)"
-                },
-                "reset_time_value": {
-                    "type": "integer",
-                    "default": 9,
-                    "description": "Time value for reset (e.g., hour of day)"
-                },
-                "reset_time_unit": {
-                    "type": "string",
-                    "default": "hour",
-                    "description": "Time unit for reset (e.g., hour, minute)"
-                },
-                "alert_config": {
-                    "type": "object",
-                    "description": "Custom alert configuration settings"
-                },
-                "queue_length_threshold": {
-                    "type": "integer",
-                    "default": 10,
-                    "description": "Threshold for queue length alerts"
-                },
-                "service_efficiency_threshold": {
-                    "type": "number",
-                    "default": 0.0,
-                    "description": "Threshold for service efficiency alerts"
-                },
-                "email_address": {
-                    "type": "string",
-                    "default": "john.doe@gmail.com",
-                    "description": "Email address for alert notifications"
-                },
-            },
-            "required": ["confidence_threshold"],
-            "additionalProperties": False
-        }
-
-    def create_default_config(self, **overrides) -> CustomerServiceConfig:
-        """Create default configuration with optional overrides."""
-        defaults = {
-            "category": self.category,
-            "usecase": self.name,
-            "confidence_threshold": 0.5,
-            "enable_tracking": True,
-            "enable_analytics": True,
-            "enable_journey_analysis": True,
-            "enable_queue_analytics": True,
-            "staff_categories": ["staff", "employee"],
-            "customer_categories": ["customer", "person"],
-            "service_proximity_threshold": 100.0,
-            "max_service_time": 1800.0,
-            "buffer_time": 2.0,
-            "stream_info": {},
-        }
-        defaults.update(overrides)
-        return CustomerServiceConfig(**defaults)
-
-    def process(self, data: Any, config: ConfigProtocol,
-                context: Optional[ProcessingContext] = None, stream_info: Optional[dict] = None) -> ProcessingResult:
+
+    def _initialize_business_metrics_manager_once(self, config: CustomerServiceConfig) -> None:
         """
-        Process advanced customer service analytics.
+        Initialize business metrics manager ONCE with Redis OR Kafka clients (Environment based).
+        Called from process() on first invocation.
+        Uses config.session (existing session from pipeline) or creates from environment.
         """
-        start_time = time.time()
-
+        if self._business_metrics_manager_initialized:
+            self.logger.debug("[BUSINESS_METRICS_MANAGER] Already initialized, skipping")
+            return
+
         try:
-            if not isinstance(config, CustomerServiceConfig):
-                return self.create_error_result(
-                    "Invalid configuration type for advanced customer service",
-                    usecase=self.name,
-                    category=self.category,
-                    context=context
-                )
-
-            if stream_info is not None:
-                if context is None:
-                    context = ProcessingContext()
-                context.stream_info = stream_info
-
-            if context is None:
-                context = ProcessingContext()
-
-            self._service_proximity_threshold = config.service_proximity_threshold
-
-            input_format = match_results_structure(data)
-            context.input_format = input_format
-            context.confidence_threshold = config.confidence_threshold
-            context.enable_tracking = config.enable_tracking
-
-            self.logger.info(f"Processing advanced customer service with format: {input_format.value}")
-
-            self._initialize_areas(config.customer_areas, config.staff_areas, config.service_areas)
-
-            processed_data = data
-            if config.confidence_threshold is not None:
-                processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
-                self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
-
-            if hasattr(config, 'index_to_category') and config.index_to_category:
-                processed_data = apply_category_mapping(processed_data, config.index_to_category)
-                self.logger.debug("Applied category mapping")
-
-            # --- Smoothing logic ---
-            if getattr(config, "enable_smoothing", False):
-                if not hasattr(self, "smoothing_tracker") or self.smoothing_tracker is None:
-                    smoothing_config = BBoxSmoothingConfig(
-                        smoothing_algorithm=getattr(config, "smoothing_algorithm", "observability"),
-                        window_size=getattr(config, "smoothing_window_size", 20),
-                        cooldown_frames=getattr(config, "smoothing_cooldown_frames", 5),
-                        confidence_threshold=getattr(config, "confidence_threshold", 0.5),
-                        confidence_range_factor=getattr(config, "smoothing_confidence_range_factor", 0.5),
-                        enable_smoothing=True
-                    )
-                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
-                processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
-
-            detections = self._extract_detections(processed_data)
-            assign_person_by_area(
-                detections,
-                getattr(config, 'customer_areas', {}),
-                getattr(config, 'staff_areas', {})
-            )
-            staff_detections, customer_detections = self._categorize_detections(
-                detections, config.staff_categories, config.customer_categories
-            )
-            self.logger.debug(f"Extracted {len(staff_detections)} staff and {len(customer_detections)} customer detections")
-
-            self._maybe_reset_chunk()
-            self._update_chunk_tracking(customer_detections)
-
-            current_time = time.time()
-            analytics_results = self._process_comprehensive_analytics(
-                staff_detections, customer_detections, config, current_time
+            self.logger.info("[BUSINESS_METRICS_MANAGER] ===== Starting business metrics manager initialization =====")
+            self.logger.info("[BUSINESS_METRICS_MANAGER] Aggregation interval: 300 seconds (5 minutes)")
+
+            # Create factory if not exists
+            if self._business_metrics_manager_factory is None:
+                self._business_metrics_manager_factory = BusinessMetricsManagerFactory(logger=self.logger)
+                self.logger.debug("[BUSINESS_METRICS_MANAGER] Created BusinessMetricsManagerFactory")
+
+            # Initialize using factory (handles session creation, Redis/Kafka setup)
+            # Aggregation interval: 300 seconds (5 minutes)
+            self._business_metrics_manager = self._business_metrics_manager_factory.initialize(
+                config,
+                aggregation_interval=300  # 5 minutes
             )
-
-            # --- FIX: Ensure agg_summary is top-level and events/tracking_stats are dicts ---
-            if isinstance(processed_data, dict):
-                agg_summary = self._generate_per_frame_agg_summary(processed_data, analytics_results, config, context, stream_info)
+
+            if self._business_metrics_manager:
+                self.logger.info("[BUSINESS_METRICS_MANAGER] ✓ Business metrics manager initialized successfully")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Output topic: {self._business_metrics_manager.output_topic}")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Aggregation interval: {self._business_metrics_manager.aggregation_interval}s")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Redis client: {'Available' if self._business_metrics_manager.redis_client else 'Not available'}")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Kafka client: {'Available' if self._business_metrics_manager.kafka_client else 'Not available'}")
+
+                # Log factory info
+                if self._business_metrics_manager_factory:
+                    self.logger.info(f"[BUSINESS_METRICS_MANAGER] Factory app_deployment_id: {self._business_metrics_manager_factory._app_deployment_id}")
+                    self.logger.info(f"[BUSINESS_METRICS_MANAGER] Factory action_id: {self._business_metrics_manager_factory._action_id}")
             else:
-                agg_summary = {"events": {}, "tracking_stats": {}}
-
-            insights = self._generate_insights(analytics_results, config)
-            alerts = self._check_alerts(analytics_results, config)
-            summary = self._generate_summary(analytics_results, alerts)
-            predictions = self._extract_predictions(processed_data)
-
-            context.mark_completed()
-
-            # Compose result data with harmonized agg_summary structure
-            result = self.create_result(
-                data={"agg_summary": agg_summary},
-                usecase=self.name,
-                category=self.category,
-                context=context
+                self.logger.warning("[BUSINESS_METRICS_MANAGER] Business metrics manager not available, metrics won't be published")
+                self.logger.warning("[BUSINESS_METRICS_MANAGER] Check if Redis/Kafka connection is properly configured")
+
+        except Exception as e:
+            self.logger.error(f"[BUSINESS_METRICS_MANAGER] Business metrics manager initialization failed: {e}", exc_info=True)
+        finally:
+            self._business_metrics_manager_initialized = True  # Mark as initialized (don't retry every frame)
+            self.logger.info("[BUSINESS_METRICS_MANAGER] ===== Initialization complete =====")
+
+    def _send_metrics_to_manager(
+        self,
+        business_metrics: Dict[str, Any],
+        stream_info: Optional[Dict[str, Any]] = None
+    ) -> None:
+        """
+        Send business metrics to the business metrics manager for aggregation and publishing.
+
+        The business metrics manager will:
+        1. Aggregate metrics for 5 minutes (300 seconds)
+        2. Publish aggregated metrics (mean/min/max/sum) to output topic
+        3. Reset all values after publishing
+
+        Args:
+            business_metrics: Business metrics dictionary from _calculate_analytics
+            stream_info: Stream metadata containing camera info
+        """
+        if not self._business_metrics_manager:
+            self.logger.debug("[BUSINESS_METRICS_MANAGER] No business metrics manager available, skipping")
+            return
+
+        self.logger.debug(f"[BUSINESS_METRICS_MANAGER] _send_metrics_to_manager called with stream_info keys: {list(stream_info.keys()) if stream_info else 'None'}")
+
+        # Extract camera_id from stream_info
+        # Stream info structure: {'topic': '692d7bde42582ffde3611908_input_topic', 'camera_info': {'camera_name': '...'}, ...}
+        camera_id = ""
+        camera_name = ""
+
+        if stream_info and isinstance(stream_info, dict):
+            # Method 1: Extract from topic field (e.g., "692d7bde42582ffde3611908_input_topic")
+            topic = stream_info.get("topic", "")
+            if topic and "_input_topic" in topic:
+                camera_id = topic.replace("_input_topic", "").strip()
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Extracted camera_id from topic: {camera_id}")
+
+            # Method 2: Try camera_info dict
+            camera_info = stream_info.get("camera_info", {})
+            if isinstance(camera_info, dict):
+                if not camera_id:
+                    camera_id = camera_info.get("camera_id", "") or camera_info.get("cameraId", "")
+                camera_name = camera_info.get("camera_name", "")
+
+            # Method 3: Try direct fields
+            if not camera_id:
+                camera_id = stream_info.get("camera_id", "") or stream_info.get("cameraId", "")
+
+        if not camera_id:
+            # Fallback to a default identifier
+            camera_id = "default_camera"
+            self.logger.warning(f"[BUSINESS_METRICS_MANAGER] No camera_id found in stream_info, using default: {camera_id}")
+        else:
+            self.logger.info(f"[BUSINESS_METRICS_MANAGER] Using camera_id={camera_id}, camera_name={camera_name}")
+
+        try:
+            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Calling process_metrics with camera_id={camera_id}")
+
+            # Process the metrics through the manager
+            published = self._business_metrics_manager.process_metrics(
+                camera_id=camera_id,
+                metrics_data=business_metrics,
+                stream_info=stream_info
             )
-
-            result.summary = summary
-            result.insights = insights
-            result.predictions = predictions
-            result.metrics = analytics_results.get("business_metrics", {})
-
-            if not config.customer_areas and not config.staff_areas:
-                result.add_warning("No customer or staff areas defined - using global analysis only")
-
-            if config.service_proximity_threshold > 250:
-                result.add_warning(f"High service proximity threshold ({config.service_proximity_threshold}) may miss interactions")
-
-            self.logger.info(f"Advanced customer service analysis completed successfully in {result.processing_time:.2f}s")
-            return result
-
+
+            if published:
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Metrics published for camera: {camera_id}")
+            else:
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Metrics queued for aggregation (not yet published)")
         except Exception as e:
-            self.logger.error(f"Advanced customer service analysis failed: {str(e)}", exc_info=True)
+            self.logger.error(f"[BUSINESS_METRICS_MANAGER] Error sending metrics to manager: {e}", exc_info=True)
 
-            if context:
-                context.mark_completed()
-
-            return self.create_error_result(
-                str(e),
-                type(e).__name__,
-                usecase=self.name,
-                category=self.category,
-                context=context
-            )
+
 
     def _initialize_areas(self, customer_areas: Dict, staff_areas: Dict, service_areas: Dict):
         """Initialize area tracking structures."""
@@ -858,16 +793,6 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         # Compile comprehensive results
         return self._compile_analytics_results(current_time)
 
-    def _reset_current_state(self):
-        """Reset current state for new processing cycle."""
-        # Clear current occupancy (will be repopulated)
-        for area_name in self.customer_occupancy:
-            self.customer_occupancy[area_name] = []
-        for area_name in self.staff_occupancy:
-            self.staff_occupancy[area_name] = []
-        for area_name in self.service_occupancy:
-            self.service_occupancy[area_name] = []
-
     def _process_staff_detections(self, staff_detections: List[Dict], current_time: float):
         """Process staff detections and update tracking."""
         for staff in staff_detections:
@@ -1475,38 +1400,322 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
1475
1400
  insights.append("⚠️ Long customer journey times detected")
1476
1401
 
1477
1402
  return insights
1403
+
1404
+ def _format_timestamp(self, timestamp: Any) -> str:
1405
+ """Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.
1478
1406
 
1407
+ The input can be either:
1408
+ 1. A numeric Unix timestamp (``float`` / ``int``) – it will be converted to datetime.
1409
+ 2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
1479
1410
 
1480
- def _get_start_timestamp_str(self, stream_info: Optional[dict]) -> str:
1481
- """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
1482
- # For video, use start_video_timestamp if available, else 00:00:00
1411
+ The returned value will be in the format: YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).
1412
+
1413
+ Example
1414
+ -------
1415
+ >>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
1416
+ '2025:10:27 19:31:20'
1417
+ """
1418
+
1419
+ # Convert numeric timestamps to datetime first
1420
+ if isinstance(timestamp, (int, float)):
1421
+ dt = datetime.fromtimestamp(timestamp, timezone.utc)
1422
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1423
+
1424
+ # Ensure we are working with a string from here on
1425
+ if not isinstance(timestamp, str):
1426
+ return str(timestamp)
1427
+
1428
+ # Remove ' UTC' suffix if present
1429
+ timestamp_clean = timestamp.replace(' UTC', '').strip()
1430
+
1431
+ # Remove milliseconds if present (everything after the last dot)
1432
+ if '.' in timestamp_clean:
1433
+ timestamp_clean = timestamp_clean.split('.')[0]
1434
+
1435
+ # Parse the timestamp string and convert to desired format
1436
+ try:
1437
+ # Handle format: YYYY-MM-DD-HH:MM:SS
1438
+ if timestamp_clean.count('-') >= 2:
1439
+ # Replace first two dashes with colons for date part, third with space
1440
+ parts = timestamp_clean.split('-')
1441
+ if len(parts) >= 4:
1442
+ # parts = ['2025', '10', '27', '19:31:20']
1443
+ formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
1444
+ return formatted
1445
+ except Exception:
1446
+ pass
1447
+
1448
+ # If parsing fails, return the cleaned string as-is
1449
+ return timestamp_clean
1450
+
1451
+ def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
1452
+ """Get formatted current timestamp based on stream type."""
1453
+
1483
1454
  if not stream_info:
1484
1455
  return "00:00:00.00"
1485
- input_settings = stream_info.get("input_settings", {})
1486
- stream_type = input_settings.get("stream_type", "video_file")
1487
- if stream_type == "video_file":
1488
- start_video_timestamp = stream_info.get("start_video_timestamp", None)
1489
- if start_video_timestamp:
1490
- return start_video_timestamp[:8]
1456
+ if precision:
1457
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1458
+ if frame_id:
1459
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1460
+ else:
1461
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1462
+ stream_time_str = self._format_timestamp_for_video(start_time)
1463
+
1464
+ return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
1465
+ else:
1466
+ return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1467
+
1468
+ if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1469
+ if frame_id:
1470
+ start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1491
1471
  else:
1492
- return "00:00:00.00"
1472
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1473
+
1474
+ stream_time_str = self._format_timestamp_for_video(start_time)
1475
+
1476
+
1477
+ return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
1478
+ else:
1479
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1480
+ if stream_time_str:
1481
+ try:
1482
+ timestamp_str = stream_time_str.replace(" UTC", "")
1483
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1484
+ timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
1485
+ return self._format_timestamp_for_stream(timestamp)
1486
+ except:
1487
+ return self._format_timestamp_for_stream(time.time())
1488
+ else:
1489
+ return self._format_timestamp_for_stream(time.time())
1490
+
1491
+ def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
1492
+ """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
1493
+ if not stream_info:
1494
+ return "00:00:00"
1495
+
1496
+ if precision:
1497
+ if self.start_timer is None:
1498
+ candidate = stream_info.get("input_settings", {}).get("stream_time")
1499
+ if not candidate or candidate == "NA":
1500
+ candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1501
+ self.start_timer = candidate
1502
+ return self._format_timestamp(self.start_timer)
1503
+ elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
1504
+ candidate = stream_info.get("input_settings", {}).get("stream_time")
1505
+ if not candidate or candidate == "NA":
1506
+ candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1507
+ self.start_timer = candidate
1508
+ return self._format_timestamp(self.start_timer)
1509
+ else:
1510
+ return self._format_timestamp(self.start_timer)
1511
+
1512
+ if self.start_timer is None:
1513
+ # Prefer direct input_settings.stream_time if available and not NA
1514
+ candidate = stream_info.get("input_settings", {}).get("stream_time")
1515
+ if not candidate or candidate == "NA":
1516
+ # Fallback to nested stream_info.stream_time used by current timestamp path
1517
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1518
+ if stream_time_str:
1519
+ try:
1520
+ timestamp_str = stream_time_str.replace(" UTC", "")
1521
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1522
+ self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
1523
+ candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1524
+ except:
1525
+ candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1526
+ else:
1527
+ candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1528
+ self.start_timer = candidate
1529
+ return self._format_timestamp(self.start_timer)
1530
+ elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
1531
+ candidate = stream_info.get("input_settings", {}).get("stream_time")
1532
+ if not candidate or candidate == "NA":
1533
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1534
+ if stream_time_str:
1535
+ try:
1536
+ timestamp_str = stream_time_str.replace(" UTC", "")
1537
+ dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1538
+ ts = dt.replace(tzinfo=timezone.utc).timestamp()
1539
+ candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1540
+ except:
1541
+ candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1542
+ else:
1543
+ candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1544
+ self.start_timer = candidate
1545
+ return self._format_timestamp(self.start_timer)
1546
+
1493
1547
  else:
1494
- # For streams, persist the first stream_time as tracking start time
1495
- if not hasattr(self, "_tracking_start_time") or self._tracking_start_time is None:
1496
- stream_time_str = stream_info.get("stream_time", "")
1548
+ if self.start_timer is not None and self.start_timer != "NA":
1549
+ return self._format_timestamp(self.start_timer)
1550
+
1551
+ if self._tracking_start_time is None:
1552
+ stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1497
1553
  if stream_time_str:
1498
1554
  try:
1499
- from datetime import datetime, timezone
1500
1555
  timestamp_str = stream_time_str.replace(" UTC", "")
1501
1556
  dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1502
1557
  self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
1503
- except Exception:
1558
+ except:
1504
1559
  self._tracking_start_time = time.time()
1505
1560
  else:
1506
1561
  self._tracking_start_time = time.time()
1507
- from datetime import datetime, timezone
1562
+
1508
1563
  dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
1564
+ dt = dt.replace(minute=0, second=0, microsecond=0)
1509
1565
  return dt.strftime('%Y:%m:%d %H:%M:%S')
1566
+
1567
+
1568
+ def _format_timestamp_for_video(self, timestamp: float) -> str:
1569
+ """Format timestamp for video chunks (HH:MM:SS.ms format)."""
1570
+ hours = int(timestamp // 3600)
1571
+ minutes = int((timestamp % 3600) // 60)
1572
+ seconds = round(float(timestamp % 60), 2)
1573
+ return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"
1574
+
1575
+ def _format_timestamp_for_stream(self, timestamp: float) -> str:
1576
+ dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
1577
+ return dt.strftime('%Y:%m:%d %H:%M:%S')
1578
+
1579
+ def get_camera_info_from_stream(self, stream_info):
1580
+ """Extract camera_info from stream_info, matching people_counting pattern."""
1581
+ if not stream_info:
1582
+ return {}
1583
+ # Try to get camera_info directly
1584
+ camera_info = stream_info.get("camera_info")
1585
+ if camera_info:
1586
+ return camera_info
1587
+ # Fallback: try to extract from nested input_settings
1588
+ input_settings = stream_info.get("input_settings", {})
1589
+ for key in ["camera_info", "camera_id", "location", "site_id"]:
1590
+ if key in input_settings:
1591
+ return {key: input_settings[key]}
1592
+ return {}
1593
+ def _maybe_reset_chunk(self):
1594
+ if not hasattr(self, '_chunk_frame_count'):
1595
+ self._init_chunk_tracking()
1596
+ self._chunk_frame_count += 1
1597
+ if self._chunk_frame_count > 1:
1598
+ self._init_chunk_tracking()
1599
+ def _reset_current_state(self):
1600
+ """Reset current state for new processing cycle."""
1601
+ # Clear current occupancy (will be repopulated)
1602
+ for area_name in self.customer_occupancy:
1603
+ self.customer_occupancy[area_name] = []
1604
+ for area_name in self.staff_occupancy:
1605
+ self.staff_occupancy[area_name] = []
1606
+ for area_name in self.service_occupancy:
1607
+ self.service_occupancy[area_name] = []
1608
+
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        """Compute IoU between two bounding boxes."""
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+        return (inter_area / union_area) if union_area > 0 else 0.0
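The helper normalizes list and several dict bbox layouts before the overlap arithmetic, so mixed inputs compare cleanly. A worked example (usecase stands for any AdvancedCustomerServiceUseCase instance):

box_a = [0, 0, 10, 10]                                  # list layout
box_b = {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15}  # dict layout

# intersection: the 5x5 square from (5, 5) to (10, 10) -> area 25
# union: 100 + 100 - 25 = 175
iou = usecase._compute_iou(box_a, box_b)
print(iou)  # 0.14285714285714285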
+
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        """Return a stable canonical ID for a raw tracker ID."""
+        if raw_id is None or bbox is None:
+            return raw_id
+        now = time.time()
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+        for canonical_id, info in self._canonical_tracks.items():
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
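The effect of the merge is that a new raw tracker ID landing on (nearly) the same box inherits the existing canonical ID instead of inflating the unique count. A sketch, assuming the alias/canonical dicts start empty and the IoU threshold and time window set elsewhere in the class admit this match:

bbox = [100, 100, 200, 200]

first = usecase._merge_or_register_track(7, bbox)    # no match -> raw ID 7 becomes canonical
second = usecase._merge_or_register_track(42, bbox)  # IoU 1.0 with track 7 -> merged
print(first, second)                                 # 7 7 — raw ID 42 now aliases canonical ID 7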
+
+    def _update_tracking_state(self, detections: List[Dict]):
+        """Track unique track_ids per category (staff/customer)."""
+        target_categories = ['staff', 'customer', 'person']
+        if not hasattr(self, "_per_category_total_track_ids") or self._per_category_total_track_ids is None:
+            self._per_category_total_track_ids = {cat: set() for cat in target_categories}
+        self._current_frame_track_ids = {cat: set() for cat in target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self) -> Dict[str, int]:
+        """Return total unique track counts per category."""
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
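Only 'staff', 'customer', and 'person' detections feed the cumulative per-category sets; everything else is skipped, and each detection's track_id is rewritten in place to its canonical ID. For example, on a fresh instance (with the tracker-merge state its __init__ is assumed to set up):

detections = [
    {"category": "customer", "track_id": 1, "bounding_box": [0, 0, 50, 100]},
    {"category": "staff",    "track_id": 2, "bounding_box": [200, 0, 260, 110]},
    {"category": "chair",    "track_id": 3, "bounding_box": [400, 0, 440, 40]},  # not a target category
]
usecase._update_tracking_state(detections)
print(usecase.get_total_counts())  # {'staff': 1, 'customer': 1, 'person': 0}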
+    def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
+        """Get detailed information about track IDs."""
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
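Note the asymmetry: the per-frame numbers count every detection carrying a track_id (the 'chair' above included), while the totals come from the per-category sets, which only ever see staff/customer/person. Continuing the previous sketch:

info = usecase._get_track_ids_info(detections)
print(info["current_frame_count"])              # 3 — includes the chair's track ID
print(info["total_unique_track_ids"])           # 2 — cumulative sets exclude it
print(sorted(info["current_frame_track_ids"]))  # [1, 2, 3]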
 
     def _generate_summary(self, analytics_results: Dict, alerts: List) -> str:
         """Generate human-readable summary."""
@@ -1546,6 +1755,8 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
             return "\n".join(lines)
 
         summary = []
+        summary.append("Application Name: "+self.CASE_TYPE)
+        summary.append("Application Version: "+self.CASE_VERSION)
         summary.append(tabbed_section("customer_queue_analytics", queue_analytics, omit_keys={"wait_times_completed", "wait_times_ongoing"}))
         summary.append(tabbed_section("staff_management_analytics", staff_analytics, omit_keys={"staff_efficiency"}))
         summary.append(tabbed_section("service_area_analytics", service_analytics))
@@ -1562,6 +1773,208 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         summary.append(f"ALERTS: {len(alerts)} alert(s)")
 
         return "\n".join(summary)
+
+    def get_config_schema(self) -> Dict[str, Any]:
+        """Get configuration schema for advanced customer service."""
+        return {
+            "type": "object",
+            "properties": {
+                "confidence_threshold": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "maximum": 1.0,
+                    "default": 0.5,
+                    "description": "Minimum confidence threshold for detections"
+                },
+                "customer_areas": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "array",
+                            "items": {"type": "number"},
+                            "minItems": 2,
+                            "maxItems": 2
+                        },
+                        "minItems": 3
+                    },
+                    "description": "Customer area definitions as polygons"
+                },
+                "staff_areas": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "array",
+                            "items": {"type": "number"},
+                            "minItems": 2,
+                            "maxItems": 2
+                        },
+                        "minItems": 3
+                    },
+                    "description": "Staff area definitions as polygons"
+                },
+                "service_areas": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "array",
+                            "items": {"type": "number"},
+                            "minItems": 2,
+                            "maxItems": 2
+                        },
+                        "minItems": 3
+                    },
+                    "description": "Service area definitions as polygons"
+                },
+                "staff_categories": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "default": ["staff", "employee"],
+                    "description": "Category names that represent staff"
+                },
+                "customer_categories": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "default": ["customer", "person"],
+                    "description": "Category names that represent customers"
+                },
+                "service_proximity_threshold": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 100.0,
+                    "description": "Distance threshold for service interactions"
+                },
+                "max_service_time": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 1800.0,
+                    "description": "Maximum expected service time in seconds"
+                },
+                "buffer_time": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 2.0,
+                    "description": "Buffer time for service calculations"
+                },
+                "enable_tracking": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable advanced tracking for analytics"
+                },
+                "enable_journey_analysis": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable customer journey analysis"
+                },
+                "enable_queue_analytics": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable queue management analytics"
+                },
+                "tracking_config": {
+                    "type": "object",
+                    "properties": {
+                        "tracking_method": {
+                            "type": "string",
+                            "enum": ["kalman", "sort", "deepsort", "bytetrack"],
+                            "default": "kalman"
+                        },
+                        "max_age": {"type": "integer", "minimum": 1, "default": 30},
+                        "min_hits": {"type": "integer", "minimum": 1, "default": 3},
+                        "iou_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.3}
+                    }
+                },
+                "enable_smoothing": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable bounding box smoothing for detections"
+                },
+                "smoothing_algorithm": {
+                    "type": "string",
+                    "enum": ["observability", "kalman"],
+                    "default": "observability"
+                },
+                "smoothing_window_size": {
+                    "type": "integer",
+                    "minimum": 1,
+                    "default": 20
+                },
+                "smoothing_cooldown_frames": {
+                    "type": "integer",
+                    "minimum": 0,
+                    "default": 5
+                },
+                "smoothing_confidence_threshold": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "maximum": 1.0,
+                    "default": 0.5
+                },
+                "smoothing_confidence_range_factor": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 0.5
+                },
+                "reset_interval_type": {
+                    "type": "string",
+                    "default": "daily",
+                    "description": "Interval type for resetting analytics (e.g., daily, weekly)"
+                },
+                "reset_time_value": {
+                    "type": "integer",
+                    "default": 9,
+                    "description": "Time value for reset (e.g., hour of day)"
+                },
+                "reset_time_unit": {
+                    "type": "string",
+                    "default": "hour",
+                    "description": "Time unit for reset (e.g., hour, minute)"
+                },
+                "alert_config": {
+                    "type": "object",
+                    "description": "Custom alert configuration settings"
+                },
+                "queue_length_threshold": {
+                    "type": "integer",
+                    "default": 10,
+                    "description": "Threshold for queue length alerts"
+                },
+                "service_efficiency_threshold": {
+                    "type": "number",
+                    "default": 0.0,
+                    "description": "Threshold for service efficiency alerts"
+                },
+                "email_address": {
+                    "type": "string",
+                    "default": "john.doe@gmail.com",
+                    "description": "Email address for alert notifications"
+                },
+            },
+            "required": ["confidence_threshold"],
+            "additionalProperties": False
+        }
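The schema is standard JSON Schema apart from the Python literals, so a candidate config can be checked with the third-party jsonschema package (an editor-chosen tool; the package itself is not shown using it):

from jsonschema import validate, ValidationError  # pip install jsonschema

schema = usecase.get_config_schema()
candidate = {
    "confidence_threshold": 0.6,
    "customer_areas": {"lobby": [[0, 0], [640, 0], [640, 480]]},  # 3-point polygon
    "queue_length_threshold": 8,
}
try:
    validate(instance=candidate, schema=schema)
    print("config OK")
except ValidationError as exc:
    print("invalid config:", exc.message)

Because the schema sets "additionalProperties": False at the top level, a misspelled key such as "confidence_treshold" is rejected rather than silently ignored.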
+
+    def create_default_config(self, **overrides) -> CustomerServiceConfig:
+        """Create default configuration with optional overrides."""
+        defaults = {
+            "category": self.category,
+            "usecase": self.name,
+            "confidence_threshold": 0.5,
+            "enable_tracking": True,
+            "enable_analytics": True,
+            "enable_journey_analysis": True,
+            "enable_queue_analytics": True,
+            "staff_categories": ["staff", "employee"],
+            "customer_categories": ["customer", "person"],
+            "service_proximity_threshold": 100.0,
+            "max_service_time": 1800.0,
+            "buffer_time": 2.0,
+            "stream_info": {},
+        }
+        defaults.update(overrides)
+        return CustomerServiceConfig(**defaults)
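Overrides shallow-merge over the defaults before CustomerServiceConfig is constructed, so anything not supplied keeps its default value. Assuming the config class accepts these keyword fields (the defaults dict above suggests it does):

config = usecase.create_default_config(
    confidence_threshold=0.6,
    staff_categories=["staff"],
    service_proximity_threshold=80.0,
)
# Unspecified fields keep their defaults, e.g. config.max_service_time == 1800.0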
 
     def _extract_predictions(self, data: Any) -> Dict[str, List[Dict[str, Any]]]:
         """Extract predictions from processed data for API compatibility, grouped by frame number if available."""