matrice-analytics 0.1.70__py3-none-any.whl → 0.1.89__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/post_processing/config.py +2 -2
- matrice_analytics/post_processing/core/base.py +1 -1
- matrice_analytics/post_processing/face_reg/face_recognition.py +871 -190
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +55 -25
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +908 -498
- matrice_analytics/post_processing/usecases/color_detection.py +18 -18
- matrice_analytics/post_processing/usecases/customer_service.py +356 -9
- matrice_analytics/post_processing/usecases/fire_detection.py +147 -9
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +549 -41
- matrice_analytics/post_processing/usecases/people_counting.py +11 -11
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +34 -34
- matrice_analytics/post_processing/utils/alert_instance_utils.py +950 -0
- matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1245 -0
- matrice_analytics/post_processing/utils/incident_manager_utils.py +1657 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.89.dist-info}/METADATA +1 -1
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.89.dist-info}/RECORD +19 -16
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.89.dist-info}/WHEEL +0 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.89.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.89.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/usecases/advanced_customer_service.py (+908 -498)

@@ -26,6 +26,11 @@ from ..utils import (
     BBoxSmoothingConfig,
     BBoxSmoothingTracker,
 )
+# Import business metrics manager for publishing aggregated metrics every 5 minutes
+from ..utils.business_metrics_manager_utils import (
+    BUSINESS_METRICS_MANAGER,
+    BusinessMetricsManagerFactory
+)

 def assign_person_by_area(detections, customer_areas, staff_areas):
     """
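Note on the new import: business_metrics_manager_utils.py is introduced in this release (+1245 lines) and is visible here only through its interface. A minimal sketch of the aggregate-then-publish pattern it appears to implement — only process_metrics(), aggregation_interval, and output_topic are names taken from this diff; the windowing logic below is an assumption, not the package's code:

    import time
    from collections import defaultdict

    class MetricsManagerSketch:
        def __init__(self, output_topic: str, aggregation_interval: int = 300):
            self.output_topic = output_topic                  # attribute logged in the diff
            self.aggregation_interval = aggregation_interval  # 300 s = 5 min in the diff
            self._window_start = time.time()
            self._samples = defaultdict(list)                 # (camera_id, metric) -> values

        def process_metrics(self, camera_id, metrics_data, stream_info=None) -> bool:
            # Buffer one frame's numeric metrics; publish when the window closes.
            for name, value in metrics_data.items():
                if isinstance(value, (int, float)):
                    self._samples[(camera_id, name)].append(float(value))
            if time.time() - self._window_start < self.aggregation_interval:
                return False  # still aggregating, matching the "queued" branch logged in the diff
            payload = {
                key: {"mean": sum(v) / len(v), "min": min(v), "max": max(v), "sum": sum(v)}
                for key, v in self._samples.items() if v
            }
            # A real implementation would publish `payload` to Redis or Kafka here.
            self._samples.clear()
            self._window_start = time.time()
            return True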
@@ -65,94 +70,267 @@ def assign_person_by_area(detections, customer_areas, staff_areas):
             det['category'] = 'staff'

 class AdvancedCustomerServiceUseCase(BaseProcessor):
-    def
-        """
-
-
-        seconds = round(float(timestamp % 60), 2)
-        return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"
+    def __init__(self):
+        """Initialize advanced customer service use case."""
+        super().__init__("advanced_customer_service")
+        self.category = "sales"

-
-
-
+        # Advanced tracking structures
+        self.customer_occupancy = {}
+        self.staff_occupancy = {}
+        self.service_occupancy = {}
+        self.customer_queue_times = {}
+        self.customer_service_times = {}
+        self.customer_journey = {}
+        self.staff_availability = {}
+        self.staff_service_count = defaultdict(int)
+        self.staff_active_services = {}

-
-
-
-        return "00:00:00.00"
-        input_settings = stream_info.get("input_settings", {})
-        if precision:
-            if input_settings.get("start_frame", "na") != "na":
-                if frame_id is not None:
-                    start_time = int(frame_id)/input_settings.get("original_fps", 30)
-                else:
-                    start_time = input_settings.get("start_frame", 30)/input_settings.get("original_fps", 30)
-                return self._format_timestamp_for_video(start_time)
-            else:
-                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+        # Persistent unique staff tracking
+        self.global_staff_ids = set()
+        self.global_staff_ids_by_area = defaultdict(set)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Persistent unique customer tracking
+        self.global_customer_ids = set()
+
+        # Persistent staff ID memory (for cross-frame staff identity)
+        self.persistent_staff_ids = set()
+
+        # Analytics
+        self.queue_wait_times = defaultdict(list)
+        self.service_times = defaultdict(list)
+        self.staff_efficiency = defaultdict(list)
+        self.peak_occupancy = defaultdict(int)
+
+        # Journey states
+        self.JOURNEY_STATES = {
+            'ENTERING': 'entering',
+            'QUEUING': 'queuing',
+            'BEING_SERVED': 'being_served',
+            'COMPLETED': 'completed',
+            'LEFT': 'left'
+        }
+
+        # Tracker initialization (for YOLOv8 frame-wise predictions)
+        self.tracker = None
+        self.smoothing_tracker = None
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+
+        # Track merging and aliasing (like people_counting)
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
+
+        # Per-category track ID tracking
+        self._per_category_total_track_ids: Dict[str, set] = {}
+        self._current_frame_track_ids: Dict[str, set] = {}
+
+        # Alert tracking
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+        self.start_timer = None
+
+        # Business metrics manager for publishing aggregated metrics every 5 minutes
+        self._business_metrics_manager_factory: Optional[BusinessMetricsManagerFactory] = None
+        self._business_metrics_manager: Optional[BUSINESS_METRICS_MANAGER] = None
+        self._business_metrics_manager_initialized: bool = False
+
+    def process(self, data: Any, config: ConfigProtocol,
+                context: Optional[ProcessingContext] = None, stream_info: Optional[dict] = None) -> ProcessingResult:
+        """
+        Process advanced customer service analytics.
+        """
+        start_time = time.time()
+        print("-------------------CUS-STREAM_INFO------------------------------")
+        self.logger.info("-------------------CUS-STREAM_INFO------------------------------")
+        self.logger.info(stream_info)
+        self.logger.info("-------------------CUS-STREAM_INFO------------------------------")
+        print("-------------------CUS-STREAM_INFO------------------------------")
+        print(stream_info)
+        print("-------------------CUS-STREAM_INFO------------------------------")
+
+        try:
+            if not isinstance(config, CustomerServiceConfig):
+                return self.create_error_result(
+                    "Invalid configuration type for advanced customer service",
+                    usecase=self.name,
+                    category=self.category,
+                    context=context
+                )
+
+            if stream_info is not None:
+                if context is None:
+                    context = ProcessingContext()
+                context.stream_info = stream_info
+
+            if context is None:
+                context = ProcessingContext()
+
+            self._service_proximity_threshold = config.service_proximity_threshold
+
+            # Initialize business metrics manager once (for publishing aggregated metrics)
+            if not self._business_metrics_manager_initialized:
+                self._initialize_business_metrics_manager_once(config)
+
+            input_format = match_results_structure(data)
+            context.input_format = input_format
+            context.confidence_threshold = config.confidence_threshold
+            context.enable_tracking = config.enable_tracking
+
+            self.logger.info(f"Processing advanced customer service with format: {input_format.value}")
+
+            self._initialize_areas(config.customer_areas, config.staff_areas, config.service_areas)
+
+            processed_data = data
+            if config.confidence_threshold is not None:
+                processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
+                self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+
+            if hasattr(config, 'index_to_category') and config.index_to_category:
+                processed_data = apply_category_mapping(processed_data, config.index_to_category)
+                self.logger.debug("Applied category mapping")
+
+            # --- Smoothing logic ---
+            if getattr(config, "enable_smoothing", False):
+                if not hasattr(self, "smoothing_tracker") or self.smoothing_tracker is None:
+                    smoothing_config = BBoxSmoothingConfig(
+                        smoothing_algorithm=getattr(config, "smoothing_algorithm", "observability"),
+                        window_size=getattr(config, "smoothing_window_size", 20),
+                        cooldown_frames=getattr(config, "smoothing_cooldown_frames", 5),
+                        confidence_threshold=getattr(config, "confidence_threshold", 0.5),
+                        confidence_range_factor=getattr(config, "smoothing_confidence_range_factor", 0.5),
+                        enable_smoothing=True
+                    )
+                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+                processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+            # Extract detections from processed data
+            detections = self._extract_detections(processed_data)
+
+            # --- Apply AdvancedTracker for YOLOv8 frame-wise predictions (like people_counting) ---
+            try:
+                from ..advanced_tracker import AdvancedTracker
+                from ..advanced_tracker.config import TrackerConfig
+                if self.tracker is None:
+                    tracker_config = TrackerConfig(
+                        track_high_thresh=float(config.confidence_threshold) if config.confidence_threshold else 0.4,
+                        track_low_thresh=max(0.05, (float(config.confidence_threshold) / 2) if config.confidence_threshold else 0.05),
+                        new_track_thresh=float(config.confidence_threshold) if config.confidence_threshold else 0.3,
+                        match_thresh=0.8
+                    )
+                    self.tracker = AdvancedTracker(tracker_config)
+                    self.logger.info(f"Initialized AdvancedTracker with thresholds: high={tracker_config.track_high_thresh}, "
+                                     f"low={tracker_config.track_low_thresh}, new={tracker_config.new_track_thresh}")
+                # Apply tracker to get track_ids
+                detections = self.tracker.update(detections)
+                self.logger.debug(f"Applied AdvancedTracker, {len(detections)} detections with track_ids")
+            except Exception as e:
+                self.logger.warning(f"AdvancedTracker failed: {e}, continuing without tracking")
+
+            # Update tracking state (track merging, canonical IDs)
+            self._update_tracking_state(detections)
+            self._total_frame_counter += 1
+
+            # Assign person detections to staff/customer based on area polygons
+            assign_person_by_area(
+                detections,
+                getattr(config, 'customer_areas', {}),
+                getattr(config, 'staff_areas', {})
+            )
+
+            # Categorize detections into staff and customers
+            staff_detections, customer_detections = self._categorize_detections(
+                detections, config.staff_categories, config.customer_categories
+            )
+            self.logger.debug(f"Extracted {len(staff_detections)} staff and {len(customer_detections)} customer detections")
+
+            self._maybe_reset_chunk()
+            self._update_chunk_tracking(customer_detections)
+
+            # Extract frame number from stream_info (like people_counting)
+            frame_number = None
+            if stream_info:
+                input_settings = stream_info.get("input_settings", {})
+                start_frame = input_settings.get("start_frame")
+                end_frame = input_settings.get("end_frame")
+                if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                    frame_number = start_frame
+
+            current_time = time.time()
+            analytics_results = self._process_comprehensive_analytics(
+                staff_detections, customer_detections, config, current_time
+            )
+
+            # Send business metrics to manager for aggregation and publishing
+            # The manager aggregates for 5 minutes and publishes mean/min/max/sum
+            business_metrics = analytics_results.get("business_metrics", {})
+            if business_metrics:
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Sending metrics: {list(business_metrics.keys())}")
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] stream_info available: {stream_info is not None}")
+                if stream_info:
+                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] stream_info.topic: {stream_info.get('topic', 'N/A')}")
+                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] stream_info.camera_info: {stream_info.get('camera_info', {})}")
+                self._send_metrics_to_manager(business_metrics, stream_info)
             else:
-
+                self.logger.debug("[BUSINESS_METRICS_MANAGER] No business_metrics in analytics_results")

-
-
-
-
-
-
-            if input_settings.get("start_frame", "na") != "na":
-                return "00:00:00"
+            # --- FIX: Ensure agg_summary is top-level and events/tracking_stats are dicts ---
+            # Reconstruct processed_data dict with frame_number as key for per-frame analytics
+            if frame_number is not None:
+                processed_data_for_summary = {str(frame_number): detections}
+            elif isinstance(processed_data, dict):
+                processed_data_for_summary = processed_data
             else:
-
+                processed_data_for_summary = {"0": detections}
+
+            agg_summary = self._generate_per_frame_agg_summary(processed_data_for_summary, analytics_results, config, context, stream_info)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            insights = self._generate_insights(analytics_results, config)
+            alerts = self._check_alerts(analytics_results, config)
+            summary = self._generate_summary(analytics_results, alerts)
+            predictions = self._extract_predictions(processed_data)
+
+            context.mark_completed()
+
+            # Compose result data with harmonized agg_summary structure
+            result = self.create_result(
+                data={"agg_summary": agg_summary},
+                usecase=self.name,
+                category=self.category,
+                context=context
+            )
+
+            result.summary = summary
+            result.insights = insights
+            result.predictions = predictions
+            result.metrics = analytics_results.get("business_metrics", {})
+
+            if not config.customer_areas and not config.staff_areas:
+                result.add_warning("No customer or staff areas defined - using global analysis only")
+
+            if config.service_proximity_threshold > 250:
+                result.add_warning(f"High service proximity threshold ({config.service_proximity_threshold}) may miss interactions")
+
+            self.logger.info(f"Advanced customer service analysis completed successfully in {result.processing_time:.2f}s")
+            return result
+
+        except Exception as e:
+            self.logger.error(f"Advanced customer service analysis failed: {str(e)}", exc_info=True)
+
+            if context:
+                context.mark_completed()
+
+            return self.create_error_result(
+                str(e),
+                type(e).__name__,
+                usecase=self.name,
+                category=self.category,
+                context=context
+            )
+
+
     def _generate_per_frame_agg_summary(self, processed_data, analytics_results, config, context, stream_info=None):
         """
         Generate agg_summary dict with per-frame incidents, tracking_stats, business_analytics, alerts, human_text.
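Note: the AdvancedTracker thresholds introduced in the process() hunk above are plain arithmetic on the configured confidence threshold. A standalone restatement of that derivation (the helper name here is ours, the formulas are copied from the diff):

    def tracker_thresholds(confidence_threshold):
        # Mirrors the TrackerConfig arguments in the hunk above.
        conf = float(confidence_threshold) if confidence_threshold else None
        high = conf if conf else 0.4                      # track_high_thresh
        low = max(0.05, conf / 2 if conf else 0.05)       # track_low_thresh
        new = conf if conf else 0.3                       # new_track_thresh
        return high, low, new

    print(tracker_thresholds(0.5))   # (0.5, 0.25, 0.5)
    print(tracker_thresholds(None))  # (0.4, 0.05, 0.3)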
@@ -256,10 +434,10 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
         human_text_lines.append(f"\t- Active Customers: {queue_analytics.get('active_customers', 0)}")
         human_text_lines.append(f"\t\t- Queuing: {queue_analytics.get('customers_queuing', 0)}")
-        human_text_lines.append(f"\t\t- Being Served: {queue_analytics.get('customers_being_served', 0)}")
+        #human_text_lines.append(f"\t\t- Being Served: {queue_analytics.get('customers_being_served', 0)}")
         human_text_lines.append(f"\t- Active Staff: {staff_analytics.get('active_staff', 0)}")
-        human_text_lines.append(f"\t- Customer/Staff Ratio: {business_metrics.get('customer_to_staff_ratio', 0):.2f}")
-        human_text_lines.append(f"\t- Queue Performance: {business_metrics.get('queue_performance', 0)*100:.1f}%")
+        # human_text_lines.append(f"\t- Customer/Staff Ratio: {business_metrics.get('customer_to_staff_ratio', 0):.2f}")
+        # human_text_lines.append(f"\t- Queue Performance: {business_metrics.get('queue_performance', 0)*100:.1f}%")
         human_text_lines.append(f"\t- Service Areas: {len(service_analytics.get('service_areas_status', {}))}")
         for area_name, area_info in service_analytics.get('service_areas_status', {}).items():
             customers = area_info.get("customers", 0)
@@ -267,22 +445,22 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
             status = area_info.get("status", "inactive")
             human_text_lines.append(f"\t\t- {area_name}: {status} with {customers} customers and {staff} staff")
         human_text_lines.append("")
-        human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
-        human_text_lines.append(f"\t- Total Customers: {journey_analytics.get('total_journeys', 0)}")
-        completed_count = journey_analytics.get("journey_states", {}).get("completed", 0)
-        human_text_lines.append(f"\t\t- Completed: {completed_count}")
-        human_text_lines.append(f"\t- Total Staff: {staff_analytics.get('total_staff', 0)}")
-        human_text_lines.append(f"\t- Average Staff Count: {staff_analytics.get('avg_staff_count', 0.0):.2f}")
-        human_text_lines.append(f"\t- Average Wait Time: {queue_analytics.get('average_wait_time', 0):.1f}s")
-        avg_service_time = 0.0
-        if analytics_results.get("service_times"):
-
-
-
-        human_text_lines.append(f"\t- Average Service Time: {avg_service_time:.1f}s")
-        human_text_lines.append(f"\t- Business Metrics:")
-        human_text_lines.append(f"\t\t- Service Efficiency: {business_metrics.get('service_efficiency', 0)*100:.1f}%")
-        human_text_lines.append(f"\t\t- Staff Productivity: {business_metrics.get('staff_productivity', 0):.2f} services/staff")
+        # human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
+        # human_text_lines.append(f"\t- Total Customers: {journey_analytics.get('total_journeys', 0)}")
+        # completed_count = journey_analytics.get("journey_states", {}).get("completed", 0)
+        # human_text_lines.append(f"\t\t- Completed: {completed_count}")
+        # human_text_lines.append(f"\t- Total Staff: {staff_analytics.get('total_staff', 0)}")
+        # human_text_lines.append(f"\t- Average Staff Count: {staff_analytics.get('avg_staff_count', 0.0):.2f}")
+        # human_text_lines.append(f"\t- Average Wait Time: {queue_analytics.get('average_wait_time', 0):.1f}s")
+        # avg_service_time = 0.0
+        # if analytics_results.get("service_times"):
+        #     times = [t.get("service_time", 0.0) for t in analytics_results["service_times"]]
+        #     if times:
+        #         avg_service_time = sum(times) / len(times)
+        # human_text_lines.append(f"\t- Average Service Time: {avg_service_time:.1f}s")
+        # human_text_lines.append(f"\t- Business Metrics:")
+        # human_text_lines.append(f"\t\t- Service Efficiency: {business_metrics.get('service_efficiency', 0)*100:.1f}%")
+        # human_text_lines.append(f"\t\t- Staff Productivity: {business_metrics.get('staff_productivity', 0):.2f} services/staff")
         human_text = "\n".join(human_text_lines)

         # Build event in incident format
@@ -324,10 +502,11 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         detection_objs = []
         for d in detections:
             bbox = d.get("bounding_box", d.get("bbox", {}))
-
-
-
-
+            if d.get("category") == "customer" or d.get("category") == "staff" or d.get("category") == "person":
+                detection_objs.append({
+                    "category": d.get("category", "person"),
+                    "bounding_box": bbox
+                })

         # Harmonize reset_settings format
         reset_settings = [
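Note: the filter added above keeps only person-like detections when building detection_objs (the removed lines were not rendered by the diff viewer and are left blank above). An equivalent standalone comprehension, with invented sample data for illustration:

    detections = [
        {"category": "customer", "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 50, "ymax": 90}},
        {"category": "car", "bounding_box": {"xmin": 0, "ymin": 0, "xmax": 5, "ymax": 5}},
    ]
    detection_objs = [
        {"category": d.get("category", "person"), "bounding_box": d.get("bounding_box", d.get("bbox", {}))}
        for d in detections
        if d.get("category") in ("customer", "staff", "person")
    ]
    print(len(detection_objs))  # 1 -- the "car" detection is dropped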
@@ -379,12 +558,20 @@
             "alert_settings": alert_settings
         }

-        agg_summary[str(frame_id)] = {
+        # agg_summary[str(frame_id)] = {
+        #     "incidents": event,
+        #     "tracking_stats": tracking_stat,
+        #     "business_analytics": business_analytics,
+        #     "alerts": alerts,
+        #     "human_text": human_text
+        # }
+        frame_id = None
+        agg_summary = {str(frame_id) : {
             "incidents": event,
             "tracking_stats": tracking_stat,
             "business_analytics": business_analytics,
             "alerts": alerts,
-            "human_text": human_text
+            "human_text": human_text}
         }
         return agg_summary
     # --- Chunk tracking for per-chunk analytics ---
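Behavioral note on the block above: frame_id is reassigned to None immediately before the dict is built, so agg_summary always carries a single entry keyed by the literal string "None" rather than by the real frame id:

    frame_id = None
    agg_summary = {str(frame_id): {
        "incidents": {}, "tracking_stats": {}, "business_analytics": {},
        "alerts": [], "human_text": "",
    }}
    print(list(agg_summary.keys()))  # ['None']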
@@ -409,380 +596,125 @@
         if point_in_polygon(customer_center, polygon):
             self._chunk_area_customer_ids[area_name].add(track_id)

-
-
-            self._init_chunk_tracking()
-        self._chunk_frame_count += 1
-        if self._chunk_frame_count > 1:
-            self._init_chunk_tracking()
-    def __init__(self):
-        """Initialize advanced customer service use case."""
-        super().__init__("advanced_customer_service")
-        self.category = "sales"
-
-        # Advanced tracking structures
-        self.customer_occupancy = {}
-        self.staff_occupancy = {}
-        self.service_occupancy = {}
-        self.customer_queue_times = {}
-        self.customer_service_times = {}
-        self.customer_journey = {}
-        self.staff_availability = {}
-        self.staff_service_count = defaultdict(int)
-        self.staff_active_services = {}
-
-        # Persistent unique staff tracking
-        self.global_staff_ids = set()
-        self.global_staff_ids_by_area = defaultdict(set)
-
-        # Persistent unique customer tracking
-        self.global_customer_ids = set()
-
-        # Persistent staff ID memory (for cross-frame staff identity)
-        self.persistent_staff_ids = set()
-
-        # Analytics
-        self.queue_wait_times = defaultdict(list)
-        self.service_times = defaultdict(list)
-        self.staff_efficiency = defaultdict(list)
-        self.peak_occupancy = defaultdict(int)
-
-        # Journey states
-        self.JOURNEY_STATES = {
-            'ENTERING': 'entering',
-            'QUEUING': 'queuing',
-            'BEING_SERVED': 'being_served',
-            'COMPLETED': 'completed',
-            'LEFT': 'left'
-        }
-
-    def get_config_schema(self) -> Dict[str, Any]:
-        """Get configuration schema for advanced customer service."""
-        return {
-            "type": "object",
-            "properties": {
-                "confidence_threshold": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "maximum": 1.0,
-                    "default": 0.5,
-                    "description": "Minimum confidence threshold for detections"
-                },
-                "customer_areas": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {"type": "number"},
-                            "minItems": 2,
-                            "maxItems": 2
-                        },
-                        "minItems": 3
-                    },
-                    "description": "Customer area definitions as polygons"
-                },
-                "staff_areas": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {"type": "number"},
-                            "minItems": 2,
-                            "maxItems": 2
-                        },
-                        "minItems": 3
-                    },
-                    "description": "Staff area definitions as polygons"
-                },
-                "service_areas": {
-                    "type": "object",
-                    "additionalProperties": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {"type": "number"},
-                            "minItems": 2,
-                            "maxItems": 2
-                        },
-                        "minItems": 3
-                    },
-                    "description": "Service area definitions as polygons"
-                },
-                "staff_categories": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "default": ["staff", "employee"],
-                    "description": "Category names that represent staff"
-                },
-                "customer_categories": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "default": ["customer", "person"],
-                    "description": "Category names that represent customers"
-                },
-                "service_proximity_threshold": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 100.0,
-                    "description": "Distance threshold for service interactions"
-                },
-                "max_service_time": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 1800.0,
-                    "description": "Maximum expected service time in seconds"
-                },
-                "buffer_time": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 2.0,
-                    "description": "Buffer time for service calculations"
-                },
-                "enable_tracking": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable advanced tracking for analytics"
-                },
-                "enable_journey_analysis": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable customer journey analysis"
-                },
-                "enable_queue_analytics": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable queue management analytics"
-                },
-                "tracking_config": {
-                    "type": "object",
-                    "properties": {
-                        "tracking_method": {
-                            "type": "string",
-                            "enum": ["kalman", "sort", "deepsort", "bytetrack"],
-                            "default": "kalman"
-                        },
-                        "max_age": {"type": "integer", "minimum": 1, "default": 30},
-                        "min_hits": {"type": "integer", "minimum": 1, "default": 3},
-                        "iou_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.3}
-                    }
-                },
-                "enable_smoothing": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Enable bounding box smoothing for detections"
-                },
-                "smoothing_algorithm": {
-                    "type": "string",
-                    "enum": ["observability", "kalman"],
-                    "default": "observability"
-                },
-                "smoothing_window_size": {
-                    "type": "integer",
-                    "minimum": 1,
-                    "default": 20
-                },
-                "smoothing_cooldown_frames": {
-                    "type": "integer",
-                    "minimum": 0,
-                    "default": 5
-                },
-                "smoothing_confidence_threshold": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "maximum": 1.0,
-                    "default": 0.5
-                },
-                "smoothing_confidence_range_factor": {
-                    "type": "number",
-                    "minimum": 0.0,
-                    "default": 0.5
-                },
-                "reset_interval_type": {
-                    "type": "string",
-                    "default": "daily",
-                    "description": "Interval type for resetting analytics (e.g., daily, weekly)"
-                },
-                "reset_time_value": {
-                    "type": "integer",
-                    "default": 9,
-                    "description": "Time value for reset (e.g., hour of day)"
-                },
-                "reset_time_unit": {
-                    "type": "string",
-                    "default": "hour",
-                    "description": "Time unit for reset (e.g., hour, minute)"
-                },
-                "alert_config": {
-                    "type": "object",
-                    "description": "Custom alert configuration settings"
-                },
-                "queue_length_threshold": {
-                    "type": "integer",
-                    "default": 10,
-                    "description": "Threshold for queue length alerts"
-                },
-                "service_efficiency_threshold": {
-                    "type": "number",
-                    "default": 0.0,
-                    "description": "Threshold for service efficiency alerts"
-                },
-                "email_address": {
-                    "type": "string",
-                    "default": "john.doe@gmail.com",
-                    "description": "Email address for alert notifications"
-                },
-            },
-            "required": ["confidence_threshold"],
-            "additionalProperties": False
-        }
-
-    def create_default_config(self, **overrides) -> CustomerServiceConfig:
-        """Create default configuration with optional overrides."""
-        defaults = {
-            "category": self.category,
-            "usecase": self.name,
-            "confidence_threshold": 0.5,
-            "enable_tracking": True,
-            "enable_analytics": True,
-            "enable_journey_analysis": True,
-            "enable_queue_analytics": True,
-            "staff_categories": ["staff", "employee"],
-            "customer_categories": ["customer", "person"],
-            "service_proximity_threshold": 100.0,
-            "max_service_time": 1800.0,
-            "buffer_time": 2.0,
-            "stream_info": {},
-        }
-        defaults.update(overrides)
-        return CustomerServiceConfig(**defaults)
-
-    def process(self, data: Any, config: ConfigProtocol,
-                context: Optional[ProcessingContext] = None, stream_info: Optional[dict] = None) -> ProcessingResult:
+
+    def _initialize_business_metrics_manager_once(self, config: CustomerServiceConfig) -> None:
         """
-
+        Initialize business metrics manager ONCE with Redis OR Kafka clients (Environment based).
+        Called from process() on first invocation.
+        Uses config.session (existing session from pipeline) or creates from environment.
         """
-
-
+        if self._business_metrics_manager_initialized:
+            self.logger.debug("[BUSINESS_METRICS_MANAGER] Already initialized, skipping")
+            return
+
         try:
-
-
-
-
-
-
-                )
-
-
-
-
-
-
-            if context is None:
-                context = ProcessingContext()
-
-            self._service_proximity_threshold = config.service_proximity_threshold
-
-            input_format = match_results_structure(data)
-            context.input_format = input_format
-            context.confidence_threshold = config.confidence_threshold
-            context.enable_tracking = config.enable_tracking
-
-            self.logger.info(f"Processing advanced customer service with format: {input_format.value}")
-
-            self._initialize_areas(config.customer_areas, config.staff_areas, config.service_areas)
-
-            processed_data = data
-            if config.confidence_threshold is not None:
-                processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
-                self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
-
-            if hasattr(config, 'index_to_category') and config.index_to_category:
-                processed_data = apply_category_mapping(processed_data, config.index_to_category)
-                self.logger.debug("Applied category mapping")
-
-            # --- Smoothing logic ---
-            if getattr(config, "enable_smoothing", False):
-                if not hasattr(self, "smoothing_tracker") or self.smoothing_tracker is None:
-                    smoothing_config = BBoxSmoothingConfig(
-                        smoothing_algorithm=getattr(config, "smoothing_algorithm", "observability"),
-                        window_size=getattr(config, "smoothing_window_size", 20),
-                        cooldown_frames=getattr(config, "smoothing_cooldown_frames", 5),
-                        confidence_threshold=getattr(config, "confidence_threshold", 0.5),
-                        confidence_range_factor=getattr(config, "smoothing_confidence_range_factor", 0.5),
-                        enable_smoothing=True
-                    )
-                    self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
-                processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
-
-            detections = self._extract_detections(processed_data)
-            assign_person_by_area(
-                detections,
-                getattr(config, 'customer_areas', {}),
-                getattr(config, 'staff_areas', {})
-            )
-            staff_detections, customer_detections = self._categorize_detections(
-                detections, config.staff_categories, config.customer_categories
-            )
-            self.logger.debug(f"Extracted {len(staff_detections)} staff and {len(customer_detections)} customer detections")
-
-            self._maybe_reset_chunk()
-            self._update_chunk_tracking(customer_detections)
-
-            current_time = time.time()
-            analytics_results = self._process_comprehensive_analytics(
-                staff_detections, customer_detections, config, current_time
+            self.logger.info("[BUSINESS_METRICS_MANAGER] ===== Starting business metrics manager initialization =====")
+            self.logger.info("[BUSINESS_METRICS_MANAGER] Aggregation interval: 300 seconds (5 minutes)")
+
+            # Create factory if not exists
+            if self._business_metrics_manager_factory is None:
+                self._business_metrics_manager_factory = BusinessMetricsManagerFactory(logger=self.logger)
+                self.logger.debug("[BUSINESS_METRICS_MANAGER] Created BusinessMetricsManagerFactory")
+
+            # Initialize using factory (handles session creation, Redis/Kafka setup)
+            # Aggregation interval: 300 seconds (5 minutes)
+            self._business_metrics_manager = self._business_metrics_manager_factory.initialize(
+                config,
+                aggregation_interval=300  # 5 minutes
            )
-
-
-
-
+
+            if self._business_metrics_manager:
+                self.logger.info("[BUSINESS_METRICS_MANAGER] ✓ Business metrics manager initialized successfully")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Output topic: {self._business_metrics_manager.output_topic}")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Aggregation interval: {self._business_metrics_manager.aggregation_interval}s")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Redis client: {'Available' if self._business_metrics_manager.redis_client else 'Not available'}")
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Kafka client: {'Available' if self._business_metrics_manager.kafka_client else 'Not available'}")
+
+                # Log factory info
+                if self._business_metrics_manager_factory:
+                    self.logger.info(f"[BUSINESS_METRICS_MANAGER] Factory app_deployment_id: {self._business_metrics_manager_factory._app_deployment_id}")
+                    self.logger.info(f"[BUSINESS_METRICS_MANAGER] Factory action_id: {self._business_metrics_manager_factory._action_id}")
             else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                self.logger.warning("[BUSINESS_METRICS_MANAGER] ❌ Business metrics manager not available, metrics won't be published")
+                self.logger.warning("[BUSINESS_METRICS_MANAGER] Check if Redis/Kafka connection is properly configured")
+
+        except Exception as e:
+            self.logger.error(f"[BUSINESS_METRICS_MANAGER] Business metrics manager initialization failed: {e}", exc_info=True)
+        finally:
+            self._business_metrics_manager_initialized = True  # Mark as initialized (don't retry every frame)
+            self.logger.info("[BUSINESS_METRICS_MANAGER] ===== Initialization complete =====")
+
+    def _send_metrics_to_manager(
+        self,
+        business_metrics: Dict[str, Any],
+        stream_info: Optional[Dict[str, Any]] = None
+    ) -> None:
+        """
+        Send business metrics to the business metrics manager for aggregation and publishing.
+
+        The business metrics manager will:
+        1. Aggregate metrics for 5 minutes (300 seconds)
+        2. Publish aggregated metrics (mean/min/max/sum) to output topic
+        3. Reset all values after publishing
+
+        Args:
+            business_metrics: Business metrics dictionary from _calculate_analytics
+            stream_info: Stream metadata containing camera info
+        """
+        if not self._business_metrics_manager:
+            self.logger.debug("[BUSINESS_METRICS_MANAGER] No business metrics manager available, skipping")
+            return
+
+        self.logger.debug(f"[BUSINESS_METRICS_MANAGER] _send_metrics_to_manager called with stream_info keys: {list(stream_info.keys()) if stream_info else 'None'}")
+
+        # Extract camera_id from stream_info
+        # Stream info structure: {'topic': '692d7bde42582ffde3611908_input_topic', 'camera_info': {'camera_name': '...'}, ...}
+        camera_id = ""
+        camera_name = ""
+
+        if stream_info and isinstance(stream_info, dict):
+            # Method 1: Extract from topic field (e.g., "692d7bde42582ffde3611908_input_topic")
+            topic = stream_info.get("topic", "")
+            if topic and "_input_topic" in topic:
+                camera_id = topic.replace("_input_topic", "").strip()
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Extracted camera_id from topic: {camera_id}")
+
+            # Method 2: Try camera_info dict
+            camera_info = stream_info.get("camera_info", {})
+            if isinstance(camera_info, dict):
+                if not camera_id:
+                    camera_id = camera_info.get("camera_id", "") or camera_info.get("cameraId", "")
+                camera_name = camera_info.get("camera_name", "")
+
+            # Method 3: Try direct fields
+            if not camera_id:
+                camera_id = stream_info.get("camera_id", "") or stream_info.get("cameraId", "")
+
+        if not camera_id:
+            # Fallback to a default identifier
+            camera_id = "default_camera"
+            self.logger.warning(f"[BUSINESS_METRICS_MANAGER] No camera_id found in stream_info, using default: {camera_id}")
+        else:
+            self.logger.info(f"[BUSINESS_METRICS_MANAGER] Using camera_id={camera_id}, camera_name={camera_name}")
+
+        try:
+            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Calling process_metrics with camera_id={camera_id}")
+
+            # Process the metrics through the manager
+            published = self._business_metrics_manager.process_metrics(
+                camera_id=camera_id,
+                metrics_data=business_metrics,
+                stream_info=stream_info
             )
-
-
-
-
-
-
-            if not config.customer_areas and not config.staff_areas:
-                result.add_warning("No customer or staff areas defined - using global analysis only")
-
-            if config.service_proximity_threshold > 250:
-                result.add_warning(f"High service proximity threshold ({config.service_proximity_threshold}) may miss interactions")
-
-            self.logger.info(f"Advanced customer service analysis completed successfully in {result.processing_time:.2f}s")
-            return result
-
+
+            if published:
+                self.logger.info(f"[BUSINESS_METRICS_MANAGER] ✓ Metrics published for camera: {camera_id}")
+            else:
+                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Metrics queued for aggregation (not yet published)")
         except Exception as e:
-            self.logger.error(f"
+            self.logger.error(f"[BUSINESS_METRICS_MANAGER] Error sending metrics to manager: {e}", exc_info=True)

-
-            context.mark_completed()
-
-            return self.create_error_result(
-                str(e),
-                type(e).__name__,
-                usecase=self.name,
-                category=self.category,
-                context=context
-            )
+

     def _initialize_areas(self, customer_areas: Dict, staff_areas: Dict, service_areas: Dict):
         """Initialize area tracking structures."""
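The camera-id resolution chain in the new _send_metrics_to_manager() can be read as a standalone helper; the stream_info shape follows the comment in the diff, and the function name here is hypothetical:

    from typing import Optional

    def resolve_camera_id(stream_info: Optional[dict]) -> str:
        if not isinstance(stream_info, dict):
            return "default_camera"
        # Method 1: strip the "_input_topic" suffix from the topic field
        topic = stream_info.get("topic", "")
        if topic and "_input_topic" in topic:
            return topic.replace("_input_topic", "").strip()
        # Method 2: nested camera_info dict (both key spellings)
        camera_info = stream_info.get("camera_info", {})
        if isinstance(camera_info, dict):
            cid = camera_info.get("camera_id", "") or camera_info.get("cameraId", "")
            if cid:
                return cid
        # Method 3: direct fields, then the logged fallback
        return stream_info.get("camera_id", "") or stream_info.get("cameraId", "") or "default_camera"

    print(resolve_camera_id({"topic": "692d7bde42582ffde3611908_input_topic"}))
    # -> 692d7bde42582ffde3611908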
@@ -858,16 +790,6 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         # Compile comprehensive results
         return self._compile_analytics_results(current_time)

-    def _reset_current_state(self):
-        """Reset current state for new processing cycle."""
-        # Clear current occupancy (will be repopulated)
-        for area_name in self.customer_occupancy:
-            self.customer_occupancy[area_name] = []
-        for area_name in self.staff_occupancy:
-            self.staff_occupancy[area_name] = []
-        for area_name in self.service_occupancy:
-            self.service_occupancy[area_name] = []
-
     def _process_staff_detections(self, staff_detections: List[Dict], current_time: float):
         """Process staff detections and update tracking."""
         for staff in staff_detections:
@@ -1475,38 +1397,322 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
|
|
|
1475
1397
|
insights.append("⚠️ Long customer journey times detected")
|
|
1476
1398
|
|
|
1477
1399
|
return insights
|
|
1400
|
+
|
|
1401
|
+
def _format_timestamp(self, timestamp: Any) -> str:
|
|
1402
|
+
"""Format a timestamp to match the current timestamp format: YYYY:MM:DD HH:MM:SS.
|
|
1478
1403
|
|
|
1404
|
+
The input can be either:
|
|
1405
|
+
1. A numeric Unix timestamp (``float`` / ``int``) – it will be converted to datetime.
|
|
1406
|
+
2. A string in the format ``YYYY-MM-DD-HH:MM:SS.ffffff UTC``.
|
|
1479
1407
|
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1408
|
+
The returned value will be in the format: YYYY:MM:DD HH:MM:SS (no milliseconds, no UTC suffix).
|
|
1409
|
+
|
|
1410
|
+
Example
|
|
1411
|
+
-------
|
|
1412
|
+
>>> self._format_timestamp("2025-10-27-19:31:20.187574 UTC")
|
|
1413
|
+
'2025:10:27 19:31:20'
|
|
1414
|
+
"""
|
|
1415
|
+
|
|
1416
|
+
# Convert numeric timestamps to datetime first
|
|
1417
|
+
if isinstance(timestamp, (int, float)):
|
|
1418
|
+
dt = datetime.fromtimestamp(timestamp, timezone.utc)
|
|
1419
|
+
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
|
1420
|
+
|
|
1421
|
+
# Ensure we are working with a string from here on
|
|
1422
|
+
if not isinstance(timestamp, str):
|
|
1423
|
+
return str(timestamp)
|
|
1424
|
+
|
|
1425
|
+
# Remove ' UTC' suffix if present
|
|
1426
|
+
timestamp_clean = timestamp.replace(' UTC', '').strip()
|
|
1427
|
+
|
|
1428
|
+
# Remove milliseconds if present (everything after the last dot)
|
|
1429
|
+
if '.' in timestamp_clean:
|
|
1430
|
+
timestamp_clean = timestamp_clean.split('.')[0]
|
|
1431
|
+
|
|
1432
|
+
# Parse the timestamp string and convert to desired format
|
|
1433
|
+
try:
|
|
1434
|
+
# Handle format: YYYY-MM-DD-HH:MM:SS
|
|
1435
|
+
if timestamp_clean.count('-') >= 2:
|
|
1436
|
+
# Replace first two dashes with colons for date part, third with space
|
|
1437
|
+
parts = timestamp_clean.split('-')
|
|
1438
|
+
if len(parts) >= 4:
|
|
1439
|
+
# parts = ['2025', '10', '27', '19:31:20']
|
|
1440
|
+
formatted = f"{parts[0]}:{parts[1]}:{parts[2]} {'-'.join(parts[3:])}"
|
|
1441
|
+
return formatted
|
|
1442
|
+
except Exception:
|
|
1443
|
+
pass
|
|
1444
|
+
|
|
1445
|
+
# If parsing fails, return the cleaned string as-is
|
|
1446
|
+
return timestamp_clean
|
|
1447
|
+
|
|
1448
|
+
def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
|
|
1449
|
+
"""Get formatted current timestamp based on stream type."""
|
|
1450
|
+
|
|
1483
1451
|
if not stream_info:
|
|
1484
1452
|
return "00:00:00.00"
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1453
|
+
if precision:
|
|
1454
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
|
1455
|
+
if frame_id:
|
|
1456
|
+
start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
|
1457
|
+
else:
|
|
1458
|
+
start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
|
1459
|
+
stream_time_str = self._format_timestamp_for_video(start_time)
|
|
1460
|
+
|
|
1461
|
+
return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
|
|
1462
|
+
else:
|
|
1463
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1464
|
+
|
|
1465
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
|
1466
|
+
if frame_id:
|
|
1467
|
+
start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
|
1491
1468
|
else:
|
|
1492
|
-
|
|
1469
|
+
start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
|
1470
|
+
|
|
1471
|
+
stream_time_str = self._format_timestamp_for_video(start_time)
|
|
1472
|
+
|
|
1473
|
+
|
|
1474
|
+
return self._format_timestamp(stream_info.get("input_settings", {}).get("stream_time", "NA"))
|
|
1475
|
+
else:
|
|
1476
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
|
1477
|
+
if stream_time_str:
|
|
1478
|
+
try:
|
|
1479
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
|
1480
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
|
1481
|
+
timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
|
|
1482
|
+
return self._format_timestamp_for_stream(timestamp)
|
|
1483
|
+
except:
|
|
1484
|
+
return self._format_timestamp_for_stream(time.time())
|
|
1485
|
+
else:
|
|
1486
|
+
return self._format_timestamp_for_stream(time.time())
|
|
1487
|
+
|
|
1488
|
+
def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
|
|
1489
|
+
"""Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
|
|
1490
|
+
if not stream_info:
|
|
1491
|
+
return "00:00:00"
|
|
1492
|
+
|
|
1493
|
+
if precision:
|
|
1494
|
+
if self.start_timer is None:
|
|
1495
|
+
candidate = stream_info.get("input_settings", {}).get("stream_time")
|
|
1496
|
+
if not candidate or candidate == "NA":
|
|
1497
|
+
candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1498
|
+
self.start_timer = candidate
|
|
1499
|
+
return self._format_timestamp(self.start_timer)
|
|
1500
|
+
elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
|
|
1501
|
+
candidate = stream_info.get("input_settings", {}).get("stream_time")
|
|
1502
|
+
if not candidate or candidate == "NA":
|
|
1503
|
+
candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1504
|
+
self.start_timer = candidate
|
|
1505
|
+
return self._format_timestamp(self.start_timer)
|
|
1506
|
+
else:
|
|
1507
|
+
return self._format_timestamp(self.start_timer)
|
|
1508
|
+
|
|
1509
|
+
if self.start_timer is None:
|
|
1510
|
+
# Prefer direct input_settings.stream_time if available and not NA
|
|
1511
|
+
candidate = stream_info.get("input_settings", {}).get("stream_time")
|
|
1512
|
+
if not candidate or candidate == "NA":
|
|
1513
|
+
# Fallback to nested stream_info.stream_time used by current timestamp path
|
|
1514
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
|
1515
|
+
if stream_time_str:
|
|
1516
|
+
try:
|
|
1517
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
|
1518
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
|
1519
|
+
self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
|
|
1520
|
+
candidate = datetime.fromtimestamp(self._tracking_start_time, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1521
|
+
except:
|
|
1522
|
+
candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1523
|
+
else:
|
|
1524
|
+
candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1525
|
+
self.start_timer = candidate
|
|
1526
|
+
return self._format_timestamp(self.start_timer)
|
|
1527
|
+
elif stream_info.get("input_settings", {}).get("start_frame", "na") == 1:
|
|
1528
|
+
candidate = stream_info.get("input_settings", {}).get("stream_time")
|
|
1529
|
+
if not candidate or candidate == "NA":
|
|
1530
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
|
1531
|
+
if stream_time_str:
|
|
1532
|
+
try:
|
|
1533
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
|
1534
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
|
1535
|
+
ts = dt.replace(tzinfo=timezone.utc).timestamp()
|
|
1536
|
+
candidate = datetime.fromtimestamp(ts, timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1537
|
+
except:
|
|
1538
|
+
candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1539
|
+
else:
|
|
1540
|
+
candidate = datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
|
1541
|
+
self.start_timer = candidate
|
|
1542
|
+
return self._format_timestamp(self.start_timer)
|
|
1543
|
+
|
|
1493
1544
|
else:
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1545
|
+
if self.start_timer is not None and self.start_timer != "NA":
|
|
1546
|
+
return self._format_timestamp(self.start_timer)
|
|
1547
|
+
|
|
1548
|
+
if self._tracking_start_time is None:
|
|
1549
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
|
1497
1550
|
if stream_time_str:
|
|
1498
1551
|
try:
|
|
1499
|
-
from datetime import datetime, timezone
|
|
1500
1552
|
timestamp_str = stream_time_str.replace(" UTC", "")
|
|
1501
1553
|
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
|
1502
1554
|
self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
|
|
1503
|
-
except
|
|
1555
|
+
except:
|
|
1504
1556
|
self._tracking_start_time = time.time()
|
|
1505
1557
|
else:
|
|
1506
1558
|
self._tracking_start_time = time.time()
|
|
1507
|
-
|
|
1559
|
+
|
|
1508
1560
|
dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
|
|
1561
|
+
dt = dt.replace(minute=0, second=0, microsecond=0)
|
|
1509
1562
|
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
|
1563
|
+
|
|
1564
|
+
|
|
1565
|
+
def _format_timestamp_for_video(self, timestamp: float) -> str:
|
|
1566
|
+
"""Format timestamp for video chunks (HH:MM:SS.ms format)."""
|
|
1567
|
+
hours = int(timestamp // 3600)
|
|
1568
|
+
minutes = int((timestamp % 3600) // 60)
|
|
1569
|
+
seconds = round(float(timestamp % 60), 2)
|
|
1570
|
+
return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"
|
|
1571
|
+
|
|
1572
|
+
def _format_timestamp_for_stream(self, timestamp: float) -> str:
|
|
1573
|
+
dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
|
|
1574
|
+
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
|
1575
|
+
|
|
1576
|
+
def get_camera_info_from_stream(self, stream_info):
|
|
1577
|
+
"""Extract camera_info from stream_info, matching people_counting pattern."""
|
|
1578
|
+
if not stream_info:
|
|
1579
|
+
return {}
|
|
1580
|
+
# Try to get camera_info directly
|
|
1581
|
+
camera_info = stream_info.get("camera_info")
|
|
1582
|
+
if camera_info:
|
|
1583
|
+
return camera_info
|
|
1584
|
+
# Fallback: try to extract from nested input_settings
|
|
1585
|
+
input_settings = stream_info.get("input_settings", {})
|
|
1586
|
+
for key in ["camera_info", "camera_id", "location", "site_id"]:
|
|
1587
|
+
if key in input_settings:
|
|
1588
|
+
return {key: input_settings[key]}
|
|
1589
|
+
return {}
|
|
1590
|
+
def _maybe_reset_chunk(self):
|
|
1591
|
+
if not hasattr(self, '_chunk_frame_count'):
|
|
1592
|
+
self._init_chunk_tracking()
|
|
1593
|
+
self._chunk_frame_count += 1
|
|
1594
|
+
if self._chunk_frame_count > 1:
|
|
1595
|
+
self._init_chunk_tracking()
|
|
1596
|
+
def _reset_current_state(self):
|
|
1597
|
+
"""Reset current state for new processing cycle."""
|
|
1598
|
+
# Clear current occupancy (will be repopulated)
|
|
1599
|
+
for area_name in self.customer_occupancy:
|
|
1600
|
+
self.customer_occupancy[area_name] = []
|
|
1601
|
+
for area_name in self.staff_occupancy:
|
|
1602
|
+
self.staff_occupancy[area_name] = []
|
|
1603
|
+
for area_name in self.service_occupancy:
|
|
1604
|
+
self.service_occupancy[area_name] = []
|
|
1605
|
+
|
|
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        """Compute IoU between two bounding boxes."""
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
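Worked example for _compute_iou: the min/max normalization above tolerates inverted corner order, and list and dict boxes can be mixed (values are illustrative):

    box_a = [0, 0, 10, 10]                                  # area 100
    box_b = {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15}  # area 100
    # intersection: 5 x 5 = 25; union: 100 + 100 - 25 = 175
    # IoU = 25 / 175 ≈ 0.143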
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        """Return a stable canonical ID for a raw tracker ID."""
+        if raw_id is None or bbox is None:
+            return raw_id
+        now = time.time()
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+        for canonical_id, info in self._canonical_tracks.items():
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
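Note: a new raw tracker ID adopts an existing canonical ID when its box overlaps a track updated within _track_merge_time_window seconds at IoU >= _track_merge_iou_threshold; otherwise it registers itself as canonical. A sketch of the resulting aliasing (threshold values are hypothetical):

    # assume _track_merge_iou_threshold = 0.5, _track_merge_time_window = 2.0
    # frame 1: raw_id=7, bbox=[0, 0, 10, 10]  -> no match, canonical track 7 registered
    # frame 2: raw_id=9, bbox=[1, 1, 11, 11]  -> IoU with track 7 = 81/119 ≈ 0.68 >= 0.5,
    #          so alias 9 -> 7; both raw IDs now resolve to canonical ID 7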
+    def _update_tracking_state(self, detections: List[Dict]):
+        """Track unique track_ids per category (staff/customer)."""
+        target_categories = ['staff', 'customer', 'person']
+        if not hasattr(self, "_per_category_total_track_ids") or self._per_category_total_track_ids is None:
+            self._per_category_total_track_ids = {cat: set() for cat in target_categories}
+        self._current_frame_track_ids = {cat: set() for cat in target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            det["track_id"] = canonical_id
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
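Because every detection passes through _merge_or_register_track before counting, duplicate tracker IDs for the same person do not inflate the cumulative totals. Roughly, continuing the example above:

    detections = [
        {"category": "customer", "track_id": 7, "bounding_box": [0, 0, 10, 10]},
        {"category": "customer", "track_id": 9, "bounding_box": [1, 1, 11, 11]},
    ]
    # after _update_tracking_state(detections), raw ID 9 may be rewritten to 7,
    # and get_total_counts() would report {"staff": 0, "customer": 1, "person": 0}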
+    def get_total_counts(self) -> Dict[str, int]:
+        """Return total unique track counts per category."""
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+    def _get_track_ids_info(self, detections: List[Dict]) -> Dict[str, Any]:
+        """Get detailed information about track IDs."""
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
 
     def _generate_summary(self, analytics_results: Dict, alerts: List) -> str:
         """Generate human-readable summary."""
@@ -1546,6 +1752,8 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
             return "\n".join(lines)
 
         summary = []
+        summary.append("Application Name: "+self.CASE_TYPE)
+        summary.append("Application Version: "+self.CASE_VERSION)
         summary.append(tabbed_section("customer_queue_analytics", queue_analytics, omit_keys={"wait_times_completed", "wait_times_ongoing"}))
         summary.append(tabbed_section("staff_management_analytics", staff_analytics, omit_keys={"staff_efficiency"}))
         summary.append(tabbed_section("service_area_analytics", service_analytics))
@@ -1562,6 +1770,208 @@ class AdvancedCustomerServiceUseCase(BaseProcessor):
         summary.append(f"ALERTS: {len(alerts)} alert(s)")
 
         return "\n".join(summary)
+
+    def get_config_schema(self) -> Dict[str, Any]:
+        """Get configuration schema for advanced customer service."""
+        return {
+            "type": "object",
+            "properties": {
+                "confidence_threshold": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "maximum": 1.0,
+                    "default": 0.5,
+                    "description": "Minimum confidence threshold for detections"
+                },
+                "customer_areas": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "array",
+                            "items": {"type": "number"},
+                            "minItems": 2,
+                            "maxItems": 2
+                        },
+                        "minItems": 3
+                    },
+                    "description": "Customer area definitions as polygons"
+                },
+                "staff_areas": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "array",
+                            "items": {"type": "number"},
+                            "minItems": 2,
+                            "maxItems": 2
+                        },
+                        "minItems": 3
+                    },
+                    "description": "Staff area definitions as polygons"
+                },
+                "service_areas": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "array",
+                            "items": {"type": "number"},
+                            "minItems": 2,
+                            "maxItems": 2
+                        },
+                        "minItems": 3
+                    },
+                    "description": "Service area definitions as polygons"
+                },
+                "staff_categories": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "default": ["staff", "employee"],
+                    "description": "Category names that represent staff"
+                },
+                "customer_categories": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "default": ["customer", "person"],
+                    "description": "Category names that represent customers"
+                },
+                "service_proximity_threshold": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 100.0,
+                    "description": "Distance threshold for service interactions"
+                },
+                "max_service_time": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 1800.0,
+                    "description": "Maximum expected service time in seconds"
+                },
+                "buffer_time": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 2.0,
+                    "description": "Buffer time for service calculations"
+                },
+                "enable_tracking": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable advanced tracking for analytics"
+                },
+                "enable_journey_analysis": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable customer journey analysis"
+                },
+                "enable_queue_analytics": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable queue management analytics"
+                },
+                "tracking_config": {
+                    "type": "object",
+                    "properties": {
+                        "tracking_method": {
+                            "type": "string",
+                            "enum": ["kalman", "sort", "deepsort", "bytetrack"],
+                            "default": "kalman"
+                        },
+                        "max_age": {"type": "integer", "minimum": 1, "default": 30},
+                        "min_hits": {"type": "integer", "minimum": 1, "default": 3},
+                        "iou_threshold": {"type": "number", "minimum": 0.0, "maximum": 1.0, "default": 0.3}
+                    }
+                },
+                "enable_smoothing": {
+                    "type": "boolean",
+                    "default": True,
+                    "description": "Enable bounding box smoothing for detections"
+                },
+                "smoothing_algorithm": {
+                    "type": "string",
+                    "enum": ["observability", "kalman"],
+                    "default": "observability"
+                },
+                "smoothing_window_size": {
+                    "type": "integer",
+                    "minimum": 1,
+                    "default": 20
+                },
+                "smoothing_cooldown_frames": {
+                    "type": "integer",
+                    "minimum": 0,
+                    "default": 5
+                },
+                "smoothing_confidence_threshold": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "maximum": 1.0,
+                    "default": 0.5
+                },
+                "smoothing_confidence_range_factor": {
+                    "type": "number",
+                    "minimum": 0.0,
+                    "default": 0.5
+                },
+                "reset_interval_type": {
+                    "type": "string",
+                    "default": "daily",
+                    "description": "Interval type for resetting analytics (e.g., daily, weekly)"
+                },
+                "reset_time_value": {
+                    "type": "integer",
+                    "default": 9,
+                    "description": "Time value for reset (e.g., hour of day)"
+                },
+                "reset_time_unit": {
+                    "type": "string",
+                    "default": "hour",
+                    "description": "Time unit for reset (e.g., hour, minute)"
+                },
+                "alert_config": {
+                    "type": "object",
+                    "description": "Custom alert configuration settings"
+                },
+                "queue_length_threshold": {
+                    "type": "integer",
+                    "default": 10,
+                    "description": "Threshold for queue length alerts"
+                },
+                "service_efficiency_threshold": {
+                    "type": "number",
+                    "default": 0.0,
+                    "description": "Threshold for service efficiency alerts"
+                },
+                "email_address": {
+                    "type": "string",
+                    "default": "john.doe@gmail.com",
+                    "description": "Email address for alert notifications"
+                },
+            },
+            "required": ["confidence_threshold"],
+            "additionalProperties": False
+        }
+
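The returned schema is plain JSON Schema, so a candidate config dict can be checked with a standard validator such as the third-party jsonschema package (this pairing is an assumption; the package itself does not wire one up):

    import jsonschema  # pip install jsonschema

    schema = AdvancedCustomerServiceUseCase().get_config_schema()
    candidate = {"confidence_threshold": 0.6, "queue_length_threshold": 8}
    # "additionalProperties": False means a misspelled key raises ValidationError
    jsonschema.validate(instance=candidate, schema=schema)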
+    def create_default_config(self, **overrides) -> CustomerServiceConfig:
+        """Create default configuration with optional overrides."""
+        defaults = {
+            "category": self.category,
+            "usecase": self.name,
+            "confidence_threshold": 0.5,
+            "enable_tracking": True,
+            "enable_analytics": True,
+            "enable_journey_analysis": True,
+            "enable_queue_analytics": True,
+            "staff_categories": ["staff", "employee"],
+            "customer_categories": ["customer", "person"],
+            "service_proximity_threshold": 100.0,
+            "max_service_time": 1800.0,
+            "buffer_time": 2.0,
+            "stream_info": {},
+        }
+        defaults.update(overrides)
+        return CustomerServiceConfig(**defaults)
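create_default_config merges keyword overrides into the defaults before constructing CustomerServiceConfig, so callers only name the fields that differ (override values here are illustrative):

    usecase = AdvancedCustomerServiceUseCase()
    config = usecase.create_default_config(
        confidence_threshold=0.7,
        staff_categories=["staff"],
    )
    # defaults.update(overrides) keeps every untouched default,
    # e.g. service_proximity_threshold remains 100.0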
 
     def _extract_predictions(self, data: Any) -> Dict[str, List[Dict[str, Any]]]:
         """Extract predictions from processed data for API compatibility, grouped by frame number if available."""
|