matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1008 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Customer service use case implementation.
|
|
3
|
+
|
|
4
|
+
This module provides comprehensive customer service analytics including staff utilization,
|
|
5
|
+
service interactions, area occupancy analysis, and business intelligence metrics.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
9
|
+
from dataclasses import field
|
|
10
|
+
import time
|
|
11
|
+
import math
|
|
12
|
+
from collections import defaultdict
|
|
13
|
+
|
|
14
|
+
from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol
|
|
15
|
+
from ..core.config import CustomerServiceConfig, TrackingConfig, AlertConfig
|
|
16
|
+
from ..utils import (
|
|
17
|
+
filter_by_confidence,
|
|
18
|
+
apply_category_mapping,
|
|
19
|
+
point_in_polygon,
|
|
20
|
+
get_bbox_center,
|
|
21
|
+
calculate_distance,
|
|
22
|
+
match_results_structure
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def assign_person_by_area(
    detections: List[Dict[str, Any]],
    customer_areas: Dict[str, List[List[float]]],
    staff_areas: Dict[str, List[List[float]]],
) -> None:
    """
    Reassign generic 'person' detections to 'staff' or 'customer' based on
    which configured area polygon contains the detection's bbox center.

    The detection list is modified in place. Detections whose center falls
    in no configured area keep their original 'person' category, as do
    detections without a usable 4-element bbox.

    Args:
        detections: List of detection dicts; each may carry its box under
            'bbox' or 'bounding_box'.
        customer_areas: Mapping of area name -> polygon ([[x, y], ...]).
        staff_areas: Mapping of area name -> polygon ([[x, y], ...]).
    """
    # NOTE: uses the module-level imports of get_bbox_center /
    # point_in_polygon; the previous function-local re-import was redundant.
    for det in detections:
        if det.get('category') != 'person':
            continue
        bbox = det.get('bbox', det.get('bounding_box', None))
        if not bbox or len(bbox) != 4:
            continue
        center = get_bbox_center(bbox)
        # Staff areas take precedence: once matched, customer areas are
        # not consulted for this detection (for/else on the loop below).
        for polygon in staff_areas.values():
            if point_in_polygon(center, polygon):
                det['category'] = 'staff'
                break
        else:
            for polygon in customer_areas.values():
                if point_in_polygon(center, polygon):
                    det['category'] = 'customer'
                    break
|
|
53
|
+
|
|
54
|
+
class CustomerServiceUseCase(BaseProcessor):
|
|
55
|
+
"""Customer service analytics with comprehensive business intelligence."""
|
|
56
|
+
|
|
57
|
+
def __init__(self):
    """Construct the customer service use case with empty analytics state."""
    super().__init__("customer_service")
    self.category = "sales"

    # Track-id sets that persist across processed chunks, so staff/customer
    # counts reported by process() stay globally unique.
    self._global_customer_ids = set()
    self._global_staff_ids = set()

    # Per-run analytics accumulators.
    self._customer_journeys: Dict[int, Dict] = {}
    self._staff_activities: Dict[int, Dict] = {}
    self._service_interactions: List[Dict] = []
    self._area_occupancy_history: Dict[str, List] = defaultdict(list)
|
|
72
|
+
def get_config_schema(self) -> Dict[str, Any]:
    """Return the JSON-schema dict describing customer service configuration.

    The three area properties (customer_areas, staff_areas, service_areas)
    share an identical shape — a mapping of area name to a polygon of at
    least three [x, y] points — so that sub-schema is built by one local
    helper instead of being written out three times.

    Returns:
        A JSON-schema object with 'confidence_threshold' required and
        additional properties disallowed.
    """
    def polygon_map(description: str) -> Dict[str, Any]:
        # Schema for {area_name: [[x, y], ...]} with >= 3 vertices,
        # each vertex exactly two numbers.
        return {
            "type": "object",
            "additionalProperties": {
                "type": "array",
                "items": {
                    "type": "array",
                    "items": {"type": "number"},
                    "minItems": 2,
                    "maxItems": 2
                },
                "minItems": 3
            },
            "description": description
        }

    return {
        "type": "object",
        "properties": {
            "confidence_threshold": {
                "type": "number",
                "minimum": 0.0,
                "maximum": 1.0,
                "default": 0.5,
                "description": "Minimum confidence threshold for detections"
            },
            "customer_areas": polygon_map("Customer area definitions as polygons"),
            "staff_areas": polygon_map("Staff area definitions as polygons"),
            "service_areas": polygon_map("Service area definitions as polygons"),
            "staff_categories": {
                "type": "array",
                "items": {"type": "string"},
                "default": ["staff", "employee"],
                "description": "Category names that represent staff"
            },
            "customer_categories": {
                "type": "array",
                "items": {"type": "string"},
                "default": ["customer", "person"],
                "description": "Category names that represent customers"
            },
            "service_proximity_threshold": {
                "type": "number",
                "minimum": 0.0,
                "default": 100.0,
                "description": "Distance threshold for service interactions"
            },
            "max_service_time": {
                "type": "number",
                "minimum": 0.0,
                "default": 1800.0,
                "description": "Maximum expected service time in seconds"
            },
            "buffer_time": {
                "type": "number",
                "minimum": 0.0,
                "default": 2.0,
                "description": "Buffer time for service calculations"
            },
            "enable_tracking": {
                "type": "boolean",
                "default": True,
                "description": "Enable advanced tracking for analytics"
            }
        },
        "required": ["confidence_threshold"],
        "additionalProperties": False
    }
|
166
|
+
def create_default_config(self, **overrides) -> CustomerServiceConfig:
    """Build a CustomerServiceConfig from baked-in defaults.

    Keyword overrides win over any default value; unspecified keys fall
    back to the defaults below.
    """
    base = {
        "category": self.category,
        "usecase": self.name,
        "confidence_threshold": 0.5,
        "enable_tracking": True,
        "enable_analytics": True,
        "staff_categories": ["staff", "employee"],
        "customer_categories": ["customer", "person"],
        "service_proximity_threshold": 100.0,
        "max_service_time": 1800.0,
        "buffer_time": 2.0,
    }
    # Later keys win: overrides replace matching defaults.
    return CustomerServiceConfig(**{**base, **overrides})
|
|
183
|
+
def process(self, data: Any, config: ConfigProtocol,
            context: Optional[ProcessingContext] = None,
            stream_info: Optional[Any] = None) -> ProcessingResult:
    """
    Process customer service analytics.

    Args:
        data: Raw model output (detection or tracking format)
        config: Customer service configuration
        context: Processing context
        stream_info: Stream information containing frame details (optional, for compatibility)

    Returns:
        ProcessingResult: Processing result with customer service analytics
    """
    # NOTE(review): start_time is never read below — elapsed time is taken
    # from result.processing_time instead; confirm before removing.
    start_time = time.time()

    try:
        # Ensure we have the right config type
        if not isinstance(config, CustomerServiceConfig):
            return self.create_error_result(
                "Invalid configuration type for customer service",
                usecase=self.name,
                category=self.category,
                context=context
            )

        # Initialize processing context if not provided
        if context is None:
            context = ProcessingContext()

        # Detect input format
        input_format = match_results_structure(data)
        context.input_format = input_format
        context.confidence_threshold = config.confidence_threshold
        context.enable_tracking = config.enable_tracking

        self.logger.info(f"Processing customer service with format: {input_format.value}")

        # Step 1: Apply confidence filtering
        processed_data = data
        if config.confidence_threshold is not None:
            processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")

        # Step 2: Apply category mapping if provided
        if hasattr(config, 'index_to_category') and config.index_to_category:
            processed_data = apply_category_mapping(processed_data, config.index_to_category)
            self.logger.debug("Applied category mapping")

        # Step 3: Extract detections and assign 'person' by area if needed
        # (mutates the detection dicts in place before categorization).
        detections = self._extract_detections(processed_data)
        assign_person_by_area(
            detections,
            getattr(config, 'customer_areas', {}),
            getattr(config, 'staff_areas', {})
        )
        staff_detections, customer_detections = self._categorize_detections(
            detections, config.staff_categories, config.customer_categories
        )
        self.logger.debug(f"Extracted {len(staff_detections)} staff and {len(customer_detections)} customer detections")


        # Step 4: Analyze area occupancy
        area_analysis = self._analyze_area_occupancy(
            staff_detections, customer_detections, config
        )

        # Step 5: Analyze service interactions
        service_interactions = self._analyze_service_interactions(
            staff_detections, customer_detections, config
        )

        # Step 6: Analyze customer journeys
        customer_analytics = self._analyze_customer_journeys(
            customer_detections, config
        )

        # Step 7: Analyze staff utilization
        staff_analytics = self._analyze_staff_utilization(
            staff_detections, service_interactions, config
        )

        # Step 8: Calculate business metrics
        business_metrics = self._calculate_business_metrics(
            area_analysis, service_interactions, customer_analytics,
            staff_analytics, config
        )

        # Step 9: Generate insights and alerts
        insights = self._generate_insights(
            area_analysis, service_interactions, customer_analytics,
            staff_analytics, business_metrics
        )
        alerts = self._check_alerts(
            area_analysis, service_interactions, business_metrics, config
        )

        # --- Unique counting logic for summary and analytics ---
        # def unique_track_ids(detections):
        #     return set([d.get('track_id') for d in detections if d.get('track_id') is not None])

        # unique_customer_ids = unique_track_ids(customer_detections)
        # unique_staff_ids = unique_track_ids(staff_detections)

        # --- New: Persistent global unique counting across chunks ---
        # Detections without a track_id are excluded from unique counting.
        current_customer_ids = set([d.get('track_id') for d in customer_detections if d.get('track_id') is not None])
        current_staff_ids = set([d.get('track_id') for d in staff_detections if d.get('track_id') is not None])

        # These instance-level sets only ever grow, so counts are cumulative
        # across all chunks processed by this use case instance.
        self._global_customer_ids.update(current_customer_ids)
        self._global_staff_ids.update(current_staff_ids)

        # Step 10: Generate human-readable summary
        # Summary uses the cumulative (global) unique counts.
        summary = self._generate_summary(
            len(self._global_staff_ids), len(self._global_customer_ids),
            len(service_interactions), alerts
        )

        # Step 11: Extract predictions for API compatibility
        predictions = self._extract_predictions(processed_data)

        # Mark processing as completed
        context.mark_completed()

        # Create successful result
        result = self.create_result(
            data={
                "area_analysis": area_analysis,
                "service_interactions": service_interactions,
                "customer_analytics": customer_analytics,
                "staff_analytics": staff_analytics,
                "business_metrics": business_metrics,
                "alerts": alerts,
                "staff_count": len(self._global_staff_ids),
                "customer_count": len(self._global_customer_ids),
                "interaction_count": len(service_interactions)
            },
            usecase=self.name,
            category=self.category,
            context=context
        )

        # Add human-readable information
        result.summary = summary
        result.insights = insights
        result.predictions = predictions
        result.metrics = business_metrics

        # --- Patch: Compose human_text using current chunk counts, not cumulative ---
        human_text_parts = [
            summary,
            f"Detected {len(current_customer_ids)} customers and {len(current_staff_ids)} staff members"
        ]
        if insights:
            # NOTE(review): assumes insights[0] duplicates the count line
            # built above — verify _generate_insights keeps that ordering.
            human_text_parts.extend(insights[1:])  # skip duplicate count line
        result.human_text = "\n".join(human_text_parts)

        # Add warnings for configuration issues
        if not config.customer_areas and not config.staff_areas:
            result.add_warning("No customer or staff areas defined - using global analysis only")

        if config.service_proximity_threshold > 200:
            result.add_warning(f"High service proximity threshold ({config.service_proximity_threshold}) may miss interactions")

        self.logger.info(f"Customer service analysis completed successfully in {result.processing_time:.2f}s")
        return result

    except Exception as e:
        # Broad catch is the use-case boundary: log with traceback and
        # convert every failure into an error result for the caller.
        self.logger.error(f"Customer service analysis failed: {str(e)}", exc_info=True)

        if context:
            context.mark_completed()

        return self.create_error_result(
            str(e),
            type(e).__name__,
            usecase=self.name,
            category=self.category,
            context=context
        )
|
364
|
+
def _extract_detections(self, data: Any) -> List[Dict[str, Any]]:
|
|
365
|
+
"""Extract detections from processed data."""
|
|
366
|
+
detections = []
|
|
367
|
+
|
|
368
|
+
try:
|
|
369
|
+
if isinstance(data, list):
|
|
370
|
+
# Direct detection list
|
|
371
|
+
detections = [d for d in data if isinstance(d, dict)]
|
|
372
|
+
elif isinstance(data, dict):
|
|
373
|
+
# Frame-based or structured data
|
|
374
|
+
for key, value in data.items():
|
|
375
|
+
if isinstance(value, list):
|
|
376
|
+
detections.extend([d for d in value if isinstance(d, dict)])
|
|
377
|
+
elif isinstance(value, dict) and any(k in value for k in ['bbox', 'bounding_box', 'category']):
|
|
378
|
+
detections.append(value)
|
|
379
|
+
except Exception as e:
|
|
380
|
+
self.logger.warning(f"Failed to extract detections: {str(e)}")
|
|
381
|
+
|
|
382
|
+
return detections
|
|
383
|
+
|
|
384
|
+
def _categorize_detections(self, detections: List[Dict], staff_categories: List[str],
|
|
385
|
+
customer_categories: List[str]) -> Tuple[List[Dict], List[Dict]]:
|
|
386
|
+
"""Categorize detections into staff and customers."""
|
|
387
|
+
staff_detections = []
|
|
388
|
+
customer_detections = []
|
|
389
|
+
|
|
390
|
+
for detection in detections:
|
|
391
|
+
category = detection.get('category', detection.get('class', ''))
|
|
392
|
+
|
|
393
|
+
if category in staff_categories:
|
|
394
|
+
staff_detections.append(detection)
|
|
395
|
+
elif category in customer_categories:
|
|
396
|
+
customer_detections.append(detection)
|
|
397
|
+
else:
|
|
398
|
+
# Default to customer if category is unknown
|
|
399
|
+
customer_detections.append(detection)
|
|
400
|
+
|
|
401
|
+
return staff_detections, customer_detections
|
|
402
|
+
|
|
403
|
+
def _analyze_area_occupancy(self, staff_detections: List[Dict], customer_detections: List[Dict],
                            config: "CustomerServiceConfig") -> Dict[str, Any]:
    """Summarize per-area head counts for customer, staff, and service zones."""
    summary: Dict[str, Any] = {
        "customer_areas": {},
        "staff_areas": {},
        "service_areas": {},
        "total_people": len(staff_detections) + len(customer_detections),
    }

    # Customer zones: occupancy graded against an assumed capacity of 10.
    for name, polygon in config.customer_areas.items():
        occupants = self._count_people_in_area(customer_detections, polygon)
        if occupants > 7:
            level = "high"
        elif occupants > 3:
            level = "medium"
        else:
            level = "low"
        summary["customer_areas"][name] = {
            "count": occupants,
            "utilization": min(occupants / 10, 1.0),  # assumes max capacity of 10
            "occupancy_level": level,
        }

    # Staff zones: two or more on duty counts as adequately staffed.
    for name, polygon in config.staff_areas.items():
        on_duty = self._count_people_in_area(staff_detections, polygon)
        if on_duty >= 2:
            staffing = "adequate"
        elif on_duty == 1:
            staffing = "minimal"
        else:
            staffing = "unstaffed"
        summary["staff_areas"][name] = {
            "count": on_duty,
            "availability": on_duty > 0,
            "staffing_level": staffing,
        }

    # Service zones: quality judged by customer-to-staff ratio (<= 3:1 is good).
    for name, polygon in config.service_areas.items():
        staff_here = self._count_people_in_area(staff_detections, polygon)
        customers_here = self._count_people_in_area(customer_detections, polygon)
        ratio = customers_here / max(staff_here, 1)
        summary["service_areas"][name] = {
            "staff_count": staff_here,
            "customer_count": customers_here,
            "total_count": staff_here + customers_here,
            "service_ratio": ratio,
            "service_quality": "good" if staff_here > 0 and ratio <= 3 else "needs_attention",
        }

    return summary
def _count_people_in_area(self, detections: List[Dict], polygon: List[List[float]]) -> int:
    """Count unique tracked people inside a polygonal area.

    Uniqueness is keyed on ``track_id``, so a person detected across many
    frames is counted once. Detections without a ``track_id``, or whose
    bbox center cannot be computed, are ignored.

    Args:
        detections: Detection dicts carrying 'bbox' or 'bounding_box' and
            optionally 'track_id'.
        polygon: Area boundary as a list of [x, y] vertices.

    Returns:
        Number of distinct track ids whose bbox center lies in the polygon.
    """
    track_ids = set()
    for detection in detections:
        center = get_bbox_center(detection.get('bbox', detection.get('bounding_box', {})))
        if center and point_in_polygon(center, polygon):
            # Explicit None check so a valid track_id of 0 still counts.
            track_id = detection.get('track_id')
            if track_id is not None:
                track_ids.add(track_id)
    return len(track_ids)
def _analyze_service_interactions(self, staff_detections: List[Dict], customer_detections: List[Dict],
                                  config: "CustomerServiceConfig") -> List[Dict]:
    """Pair each staff member with customers within the proximity threshold.

    For every staff detection with a resolvable bbox center, finds all
    customers within config.service_proximity_threshold and emits one
    interaction record summarizing distances and service quality.
    """
    interactions: List[Dict] = []

    for staff in staff_detections:
        staff_center = get_bbox_center(staff.get('bbox', staff.get('bounding_box', {})))
        if not staff_center:
            continue

        # Gather customers close enough to this staff member.
        nearby = []
        for customer in customer_detections:
            point = get_bbox_center(customer.get('bbox', customer.get('bounding_box', {})))
            if not point:
                continue
            gap = calculate_distance(staff_center, point)
            if gap <= config.service_proximity_threshold:
                nearby.append({
                    "detection": customer,
                    "distance": gap,
                    "center": point,
                })

        if not nearby:
            continue

        # Closest customer first; distances drive min/avg below.
        nearby.sort(key=lambda entry: entry["distance"])
        distances = [entry["distance"] for entry in nearby]
        interactions.append({
            # Fall back to a positional hash when the detection is untracked.
            "staff_id": staff.get('track_id', f"staff_{hash(str(staff_center))}"),
            "staff_center": staff_center,
            "customers": nearby,
            "customer_count": len(nearby),
            "min_distance": distances[0],
            "avg_distance": sum(distances) / len(distances),
            "service_quality": self._calculate_service_quality(nearby),
            "timestamp": time.time(),
        })

    return interactions
def _calculate_service_quality(self, nearby_customers: List[Dict]) -> Dict[str, Any]:
|
|
514
|
+
"""Calculate service quality metrics based on customer proximity and count."""
|
|
515
|
+
if not nearby_customers:
|
|
516
|
+
return {"score": 0.0, "level": "none", "notes": "No customers nearby"}
|
|
517
|
+
|
|
518
|
+
customer_count = len(nearby_customers)
|
|
519
|
+
min_distance = nearby_customers[0]["distance"]
|
|
520
|
+
avg_distance = sum(c["distance"] for c in nearby_customers) / customer_count
|
|
521
|
+
|
|
522
|
+
# Base score calculation
|
|
523
|
+
distance_score = max(0, (100 - min_distance) / 100) # Closer is better
|
|
524
|
+
load_score = max(0, (5 - customer_count) / 5) # Fewer customers is better
|
|
525
|
+
|
|
526
|
+
overall_score = (distance_score * 0.6 + load_score * 0.4)
|
|
527
|
+
|
|
528
|
+
# Determine service level
|
|
529
|
+
if overall_score >= 0.8:
|
|
530
|
+
level = "excellent"
|
|
531
|
+
notes = "Good proximity, manageable customer load"
|
|
532
|
+
elif overall_score >= 0.6:
|
|
533
|
+
level = "good"
|
|
534
|
+
notes = "Reasonable service conditions"
|
|
535
|
+
elif overall_score >= 0.4:
|
|
536
|
+
level = "fair"
|
|
537
|
+
notes = "Service conditions could be improved"
|
|
538
|
+
else:
|
|
539
|
+
level = "poor"
|
|
540
|
+
notes = "High customer load or poor positioning"
|
|
541
|
+
|
|
542
|
+
return {
|
|
543
|
+
"score": overall_score,
|
|
544
|
+
"level": level,
|
|
545
|
+
"notes": notes,
|
|
546
|
+
"distance_score": distance_score,
|
|
547
|
+
"load_score": load_score,
|
|
548
|
+
"customer_count": customer_count,
|
|
549
|
+
"min_distance": min_distance,
|
|
550
|
+
"avg_distance": avg_distance
|
|
551
|
+
}
|
|
552
|
+
|
|
553
|
+
def _analyze_customer_journeys(self, customer_detections: List[Dict],
                               config: "CustomerServiceConfig") -> Dict[str, Any]:
    """Analyze customer journey patterns and behavior.

    'total_customers' reflects the globally tracked unique customer ids;
    'tracked_customers' counts detections in this batch that carry a
    track_id. Per-track journey summaries go under 'journey_patterns'.
    """
    journey_analytics = {
        "total_customers": len(self._global_customer_ids),
        "tracked_customers": 0,
        "area_transitions": [],
        "dwell_times": {},
        "journey_patterns": {}
    }

    # Fix: test for presence explicitly — a track_id of 0 is valid and was
    # previously dropped by the truthiness check.
    tracked_customers = [c for c in customer_detections if c.get('track_id') is not None]
    journey_analytics["tracked_customers"] = len(tracked_customers)

    if not tracked_customers:
        return journey_analytics

    # Group detections by track_id so each person is analyzed once.
    customer_tracks = defaultdict(list)
    for detection in tracked_customers:
        customer_tracks[detection['track_id']].append(detection)

    # Summarize each customer's path through the configured areas.
    for track_id, detections in customer_tracks.items():
        journey_analytics["journey_patterns"][track_id] = \
            self._analyze_single_customer_journey(detections, config)

    return journey_analytics
def _analyze_single_customer_journey(self, detections: List[Dict],
                                     config: "CustomerServiceConfig") -> Dict[str, Any]:
    """Summarize one tracked customer's path across configured areas."""
    visited = set()
    time_in_areas = defaultdict(float)

    # Chronological order when timestamps exist; missing ones sort first.
    ordered = sorted(detections, key=lambda d: d.get('timestamp', 0))

    # Both customer and service areas are relevant for journey tracking.
    watched_areas = {**config.customer_areas, **config.service_areas}

    for detection in ordered:
        center = get_bbox_center(detection.get('bbox', detection.get('bounding_box', {})))
        if not center:
            continue
        for area_name, polygon in watched_areas.items():
            if point_in_polygon(center, polygon):
                visited.add(area_name)
                # One detection ~ one time unit; accurate dwell time would
                # require real frame timing data.
                time_in_areas[area_name] += 1.0

    # Classify mobility by how many distinct areas were entered.
    if len(visited) > 2:
        movement = "mobile"
    elif len(visited) == 2:
        movement = "limited_movement"
    else:
        movement = "stationary"

    return {
        "total_detections": len(detections),
        "areas_visited": list(visited),       # list for serialization
        "time_in_areas": dict(time_in_areas),  # plain dict for serialization
        "movement_pattern": movement,
    }
def _analyze_staff_utilization(self, staff_detections: List[Dict], service_interactions: List[Dict],
                               config: "CustomerServiceConfig") -> Dict[str, Any]:
    """Derive staff activity, efficiency, and service-area coverage metrics."""
    analytics: Dict[str, Any] = {
        # Unique staff ids observed globally, not just in this batch.
        "total_staff": len(self._global_staff_ids),
        "active_staff": 0,
        "staff_efficiency": {},
        "coverage_areas": [],
        "utilization_rate": 0.0,
    }

    if not staff_detections:
        return analytics

    # Tally customers served per staff id from the interaction records.
    engaged_ids = set()
    served_counts = defaultdict(int)
    for interaction in service_interactions:
        sid = interaction.get("staff_id")
        if sid:
            engaged_ids.add(sid)
            served_counts[sid] += interaction.get("customer_count", 0)

    analytics["active_staff"] = len(engaged_ids)
    # NOTE(review): denominator is the detection count, not unique staff —
    # with several detections per person this understates utilization; confirm intent.
    analytics["utilization_rate"] = len(engaged_ids) / len(staff_detections) if staff_detections else 0

    for sid, served in served_counts.items():
        analytics["staff_efficiency"][sid] = {
            "customers_served": served,
            "efficiency_score": min(served / 5.0, 1.0),  # normalized to max 5 customers
        }

    # Positions of every staff detection with a resolvable bbox center.
    positions = []
    for staff in staff_detections:
        center = get_bbox_center(staff.get('bbox', staff.get('bounding_box', {})))
        if center:
            positions.append(center)

    # A service area counts as covered when any staff member stands inside it.
    for area_name, polygon in config.service_areas.items():
        if any(point_in_polygon(pos, polygon) for pos in positions):
            analytics["coverage_areas"].append(area_name)

    return analytics
def _calculate_business_metrics(self, area_analysis: Dict, service_interactions: List[Dict],
                                customer_analytics: Dict, staff_analytics: Dict,
                                config: "CustomerServiceConfig") -> Dict[str, Any]:
    """Assemble the headline business-intelligence metrics for this analysis."""
    customers = customer_analytics.get("total_customers", 0)
    staff = staff_analytics.get("total_staff", 0)

    metrics: Dict[str, Any] = {
        "customer_to_staff_ratio": customers / max(staff, 1),
        "service_coverage": len(staff_analytics.get("coverage_areas", [])) / max(len(config.service_areas), 1),
        "interaction_rate": len(service_interactions) / max(customers, 1),
        "staff_utilization": staff_analytics.get("utilization_rate", 0.0),
        "area_utilization": self._calculate_area_utilization(area_analysis, config),
        "service_quality_score": 0.0,
        "attention_score": self._calculate_attention_score(service_interactions, customers),
        "peak_areas": self._identify_peak_areas(area_analysis),
        "optimization_opportunities": self._identify_optimization_opportunities(area_analysis, staff_analytics),
        "overall_performance": 0.0,
    }

    # Mean per-interaction quality score, when any interactions were observed.
    if service_interactions:
        scores = [i.get("service_quality", {}).get("score", 0.0) for i in service_interactions]
        metrics["service_quality_score"] = sum(scores) / len(scores)

    # Weighted aggregate of the component metrics above.
    metrics["overall_performance"] = self._calculate_overall_performance_score(metrics)

    return metrics
def _calculate_area_utilization(self, area_analysis: Dict, config: CustomerServiceConfig) -> Dict[str, float]:
|
|
708
|
+
"""Calculate utilization rates for different area types."""
|
|
709
|
+
utilization = {}
|
|
710
|
+
|
|
711
|
+
# Customer area utilization
|
|
712
|
+
customer_areas = area_analysis.get("customer_areas", {})
|
|
713
|
+
if customer_areas:
|
|
714
|
+
avg_utilization = sum(area.get("utilization", 0) for area in customer_areas.values()) / len(customer_areas)
|
|
715
|
+
utilization["customer_areas"] = avg_utilization
|
|
716
|
+
|
|
717
|
+
# Service area utilization
|
|
718
|
+
service_areas = area_analysis.get("service_areas", {})
|
|
719
|
+
if service_areas:
|
|
720
|
+
total_people = sum(area.get("total_count", 0) for area in service_areas.values())
|
|
721
|
+
max_capacity = len(service_areas) * 10 # Assume 10 people per service area
|
|
722
|
+
utilization["service_areas"] = min(total_people / max_capacity, 1.0) if max_capacity > 0 else 0.0
|
|
723
|
+
|
|
724
|
+
return utilization
|
|
725
|
+
|
|
726
|
+
def _calculate_attention_score(self, service_interactions: List[Dict], total_customers: int) -> float:
|
|
727
|
+
"""Calculate how well customers are being attended to."""
|
|
728
|
+
if not total_customers:
|
|
729
|
+
return 1.0
|
|
730
|
+
|
|
731
|
+
customers_with_attention = sum(interaction.get("customer_count", 0) for interaction in service_interactions)
|
|
732
|
+
return min(customers_with_attention / total_customers, 1.0)
|
|
733
|
+
|
|
734
|
+
def _identify_peak_areas(self, area_analysis: Dict) -> List[str]:
|
|
735
|
+
"""Identify areas with highest occupancy."""
|
|
736
|
+
peak_areas = []
|
|
737
|
+
|
|
738
|
+
# Check customer areas
|
|
739
|
+
customer_areas = area_analysis.get("customer_areas", {})
|
|
740
|
+
for area_name, area_data in customer_areas.items():
|
|
741
|
+
if area_data.get("occupancy_level") == "high":
|
|
742
|
+
peak_areas.append(f"customer_area:{area_name}")
|
|
743
|
+
|
|
744
|
+
# Check service areas
|
|
745
|
+
service_areas = area_analysis.get("service_areas", {})
|
|
746
|
+
for area_name, area_data in service_areas.items():
|
|
747
|
+
if area_data.get("total_count", 0) > 5:
|
|
748
|
+
peak_areas.append(f"service_area:{area_name}")
|
|
749
|
+
|
|
750
|
+
return peak_areas
|
|
751
|
+
|
|
752
|
+
def _identify_optimization_opportunities(self, area_analysis: Dict, staff_analytics: Dict) -> List[str]:
|
|
753
|
+
"""Identify opportunities for service optimization."""
|
|
754
|
+
opportunities = []
|
|
755
|
+
|
|
756
|
+
# Check for understaffed areas
|
|
757
|
+
service_areas = area_analysis.get("service_areas", {})
|
|
758
|
+
for area_name, area_data in service_areas.items():
|
|
759
|
+
if area_data.get("service_quality") == "needs_attention":
|
|
760
|
+
opportunities.append(f"Increase staffing in service area: {area_name}")
|
|
761
|
+
|
|
762
|
+
# Check for low staff utilization
|
|
763
|
+
if staff_analytics.get("utilization_rate", 0) < 0.5:
|
|
764
|
+
opportunities.append("Staff utilization is low - consider reassignment")
|
|
765
|
+
|
|
766
|
+
# Check for high customer-to-staff ratios
|
|
767
|
+
customer_areas = area_analysis.get("customer_areas", {})
|
|
768
|
+
high_occupancy_areas = [
|
|
769
|
+
area_name for area_name, area_data in customer_areas.items()
|
|
770
|
+
if area_data.get("occupancy_level") == "high"
|
|
771
|
+
]
|
|
772
|
+
|
|
773
|
+
if high_occupancy_areas:
|
|
774
|
+
opportunities.append(f"High customer density in: {', '.join(high_occupancy_areas)}")
|
|
775
|
+
|
|
776
|
+
# Check coverage gaps
|
|
777
|
+
uncovered_areas = []
|
|
778
|
+
coverage_areas = staff_analytics.get("coverage_areas", [])
|
|
779
|
+
for area_name in service_areas.keys():
|
|
780
|
+
if area_name not in coverage_areas:
|
|
781
|
+
uncovered_areas.append(area_name)
|
|
782
|
+
|
|
783
|
+
if uncovered_areas:
|
|
784
|
+
opportunities.append(f"Service areas need coverage: {', '.join(uncovered_areas)}")
|
|
785
|
+
|
|
786
|
+
return opportunities
|
|
787
|
+
|
|
788
|
+
def _calculate_overall_performance_score(self, metrics: Dict) -> float:
|
|
789
|
+
"""Calculate an overall performance score."""
|
|
790
|
+
# Weight different metrics
|
|
791
|
+
weights = {
|
|
792
|
+
"service_quality_score": 0.3,
|
|
793
|
+
"attention_score": 0.25,
|
|
794
|
+
"staff_utilization": 0.2,
|
|
795
|
+
"service_coverage": 0.15,
|
|
796
|
+
"interaction_rate": 0.1
|
|
797
|
+
}
|
|
798
|
+
|
|
799
|
+
score = 0.0
|
|
800
|
+
total_weight = 0.0
|
|
801
|
+
|
|
802
|
+
for metric, weight in weights.items():
|
|
803
|
+
if metric in metrics:
|
|
804
|
+
score += metrics[metric] * weight
|
|
805
|
+
total_weight += weight
|
|
806
|
+
|
|
807
|
+
return score / total_weight if total_weight > 0 else 0.0
|
|
808
|
+
|
|
809
|
+
def _check_alerts(self, area_analysis: Dict, service_interactions: List[Dict],
|
|
810
|
+
business_metrics: Dict, config: CustomerServiceConfig) -> List[Dict]:
|
|
811
|
+
"""Check for alert conditions in customer service operations."""
|
|
812
|
+
alerts = []
|
|
813
|
+
|
|
814
|
+
if not config.alert_config:
|
|
815
|
+
return alerts
|
|
816
|
+
|
|
817
|
+
# Check customer-to-staff ratio
|
|
818
|
+
ratio = business_metrics.get("customer_to_staff_ratio", 0)
|
|
819
|
+
if ratio > 5:
|
|
820
|
+
alerts.append({
|
|
821
|
+
"type": "high_customer_ratio",
|
|
822
|
+
"severity": "warning",
|
|
823
|
+
"message": f"High customer-to-staff ratio ({ratio:.1f}:1)",
|
|
824
|
+
"ratio": ratio,
|
|
825
|
+
"recommendation": "Consider additional staff deployment"
|
|
826
|
+
})
|
|
827
|
+
|
|
828
|
+
# Check service coverage
|
|
829
|
+
coverage = business_metrics.get("service_coverage", 0)
|
|
830
|
+
if coverage < 0.7:
|
|
831
|
+
alerts.append({
|
|
832
|
+
"type": "low_service_coverage",
|
|
833
|
+
"severity": "warning",
|
|
834
|
+
"message": f"Low service area coverage ({coverage:.1%})",
|
|
835
|
+
"coverage": coverage,
|
|
836
|
+
"recommendation": "Deploy staff to uncovered service areas"
|
|
837
|
+
})
|
|
838
|
+
|
|
839
|
+
# Check for areas needing attention
|
|
840
|
+
service_areas = area_analysis.get("service_areas", {})
|
|
841
|
+
for area_name, area_data in service_areas.items():
|
|
842
|
+
if area_data.get("service_quality") == "needs_attention":
|
|
843
|
+
alerts.append({
|
|
844
|
+
"type": "service_quality",
|
|
845
|
+
"severity": "warning",
|
|
846
|
+
"message": f"Service area '{area_name}' needs attention",
|
|
847
|
+
"area": area_name,
|
|
848
|
+
"customer_count": area_data.get("customer_count", 0),
|
|
849
|
+
"staff_count": area_data.get("staff_count", 0),
|
|
850
|
+
"recommendation": "Increase staff presence in this area"
|
|
851
|
+
})
|
|
852
|
+
|
|
853
|
+
# Check overall performance
|
|
854
|
+
performance = business_metrics.get("overall_performance", 0)
|
|
855
|
+
if performance < 0.6:
|
|
856
|
+
alerts.append({
|
|
857
|
+
"type": "low_performance",
|
|
858
|
+
"severity": "critical" if performance < 0.4 else "warning",
|
|
859
|
+
"message": f"Overall service performance is low ({performance:.1%})",
|
|
860
|
+
"performance": performance,
|
|
861
|
+
"recommendation": "Review staffing levels and service processes"
|
|
862
|
+
})
|
|
863
|
+
|
|
864
|
+
return alerts
|
|
865
|
+
|
|
866
|
+
def _generate_insights(self, area_analysis: Dict, service_interactions: List[Dict],
|
|
867
|
+
customer_analytics: Dict, staff_analytics: Dict,
|
|
868
|
+
business_metrics: Dict) -> List[str]:
|
|
869
|
+
"""Generate actionable insights from customer service analysis."""
|
|
870
|
+
insights = []
|
|
871
|
+
|
|
872
|
+
# Basic counts
|
|
873
|
+
total_staff = staff_analytics.get("total_staff", 0)
|
|
874
|
+
total_customers = customer_analytics.get("total_customers", 0)
|
|
875
|
+
active_staff = staff_analytics.get("active_staff", 0)
|
|
876
|
+
|
|
877
|
+
if total_customers == 0:
|
|
878
|
+
insights.append("No customers detected in service areas")
|
|
879
|
+
return insights
|
|
880
|
+
|
|
881
|
+
insights.append(f"Detected {total_customers} customers and {total_staff} staff members")
|
|
882
|
+
|
|
883
|
+
# Staff utilization insights
|
|
884
|
+
if total_staff > 0:
|
|
885
|
+
utilization = staff_analytics.get("utilization_rate", 0)
|
|
886
|
+
if utilization >= 0.8:
|
|
887
|
+
insights.append(f"✅ High staff utilization ({utilization:.1%}) - staff are actively engaged")
|
|
888
|
+
elif utilization >= 0.5:
|
|
889
|
+
insights.append(f"📊 Moderate staff utilization ({utilization:.1%})")
|
|
890
|
+
else:
|
|
891
|
+
insights.append(f"⚠️ Low staff utilization ({utilization:.1%}) - consider staff redeployment")
|
|
892
|
+
|
|
893
|
+
# Service interaction insights
|
|
894
|
+
interaction_count = len(service_interactions)
|
|
895
|
+
if interaction_count > 0:
|
|
896
|
+
interaction_rate = business_metrics.get("interaction_rate", 0)
|
|
897
|
+
insights.append(f"Service interactions: {interaction_count} active engagements")
|
|
898
|
+
|
|
899
|
+
if interaction_rate >= 0.8:
|
|
900
|
+
insights.append("✅ High customer engagement rate")
|
|
901
|
+
elif interaction_rate >= 0.5:
|
|
902
|
+
insights.append("📊 Moderate customer engagement")
|
|
903
|
+
else:
|
|
904
|
+
insights.append("⚠️ Low customer engagement - some customers may not be receiving attention")
|
|
905
|
+
|
|
906
|
+
# Area occupancy insights
|
|
907
|
+
service_areas = area_analysis.get("service_areas", {})
|
|
908
|
+
busy_areas = [name for name, data in service_areas.items() if data.get("total_count", 0) > 3]
|
|
909
|
+
if busy_areas:
|
|
910
|
+
insights.append(f"High activity in service areas: {', '.join(busy_areas)}")
|
|
911
|
+
|
|
912
|
+
# Service quality insights
|
|
913
|
+
quality_score = business_metrics.get("service_quality_score", 0)
|
|
914
|
+
if quality_score >= 0.8:
|
|
915
|
+
insights.append("✅ Excellent service quality detected")
|
|
916
|
+
elif quality_score >= 0.6:
|
|
917
|
+
insights.append("📊 Good service quality overall")
|
|
918
|
+
else:
|
|
919
|
+
insights.append("⚠️ Service quality could be improved")
|
|
920
|
+
|
|
921
|
+
# Coverage insights
|
|
922
|
+
coverage = business_metrics.get("service_coverage", 0)
|
|
923
|
+
if coverage < 0.7:
|
|
924
|
+
uncovered_count = max(0, len(service_areas) - len(staff_analytics.get("coverage_areas", [])))
|
|
925
|
+
if uncovered_count > 0:
|
|
926
|
+
insights.append(f"🚨 {uncovered_count} service area(s) lack staff presence")
|
|
927
|
+
|
|
928
|
+
# Performance insights
|
|
929
|
+
performance = business_metrics.get("overall_performance", 0)
|
|
930
|
+
if performance >= 0.8:
|
|
931
|
+
insights.append("🌟 Outstanding overall service performance")
|
|
932
|
+
elif performance >= 0.6:
|
|
933
|
+
insights.append("✅ Good overall service performance")
|
|
934
|
+
else:
|
|
935
|
+
insights.append("📈 Service performance has room for improvement")
|
|
936
|
+
|
|
937
|
+
# Optimization opportunities
|
|
938
|
+
opportunities = business_metrics.get("optimization_opportunities", [])
|
|
939
|
+
if opportunities:
|
|
940
|
+
insights.append(f"💡 Optimization opportunities identified: {len(opportunities)} areas for improvement")
|
|
941
|
+
|
|
942
|
+
return insights
|
|
943
|
+
|
|
944
|
+
def _generate_summary(self, staff_count: int, customer_count: int,
|
|
945
|
+
interaction_count: int, alerts: List) -> str:
|
|
946
|
+
"""Generate human-readable summary."""
|
|
947
|
+
if customer_count == 0 and staff_count == 0:
|
|
948
|
+
return "No people detected in service areas"
|
|
949
|
+
|
|
950
|
+
summary_parts = []
|
|
951
|
+
|
|
952
|
+
if customer_count > 0:
|
|
953
|
+
summary_parts.append(f"{customer_count} customers")
|
|
954
|
+
|
|
955
|
+
if staff_count > 0:
|
|
956
|
+
summary_parts.append(f"{staff_count} staff")
|
|
957
|
+
|
|
958
|
+
if interaction_count > 0:
|
|
959
|
+
summary_parts.append(f"{interaction_count} active service interactions")
|
|
960
|
+
|
|
961
|
+
summary = "Customer service analysis: " + ", ".join(summary_parts)
|
|
962
|
+
|
|
963
|
+
if alerts:
|
|
964
|
+
alert_count = len(alerts)
|
|
965
|
+
critical_alerts = sum(1 for alert in alerts if alert.get("severity") == "critical")
|
|
966
|
+
if critical_alerts > 0:
|
|
967
|
+
summary += f" with {critical_alerts} critical alert(s)"
|
|
968
|
+
else:
|
|
969
|
+
summary += f" with {alert_count} alert(s)"
|
|
970
|
+
|
|
971
|
+
return summary
|
|
972
|
+
|
|
973
|
+
def _extract_predictions(self, data: Any) -> List[Dict[str, Any]]:
|
|
974
|
+
"""Extract predictions from processed data for API compatibility."""
|
|
975
|
+
predictions = []
|
|
976
|
+
|
|
977
|
+
try:
|
|
978
|
+
if isinstance(data, list):
|
|
979
|
+
# Detection format
|
|
980
|
+
for item in data:
|
|
981
|
+
if isinstance(item, dict):
|
|
982
|
+
prediction = {
|
|
983
|
+
"category": item.get("category", item.get("class", "unknown")),
|
|
984
|
+
"confidence": item.get("confidence", item.get("score", 0.0)),
|
|
985
|
+
"bounding_box": item.get("bounding_box", item.get("bbox", {})),
|
|
986
|
+
"track_id": item.get("track_id")
|
|
987
|
+
}
|
|
988
|
+
predictions.append(prediction)
|
|
989
|
+
|
|
990
|
+
elif isinstance(data, dict):
|
|
991
|
+
# Frame-based or tracking format
|
|
992
|
+
for frame_id, items in data.items():
|
|
993
|
+
if isinstance(items, list):
|
|
994
|
+
for item in items:
|
|
995
|
+
if isinstance(item, dict):
|
|
996
|
+
prediction = {
|
|
997
|
+
"frame_id": frame_id,
|
|
998
|
+
"category": item.get("category", item.get("class", "unknown")),
|
|
999
|
+
"confidence": item.get("confidence", item.get("score", 0.0)),
|
|
1000
|
+
"bounding_box": item.get("bounding_box", item.get("bbox", {})),
|
|
1001
|
+
"track_id": item.get("track_id")
|
|
1002
|
+
}
|
|
1003
|
+
predictions.append(prediction)
|
|
1004
|
+
|
|
1005
|
+
except Exception as e:
|
|
1006
|
+
self.logger.warning(f"Failed to extract predictions: {str(e)}")
|
|
1007
|
+
|
|
1008
|
+
return predictions
|