matrice_analytics-0.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-analytics might be problematic.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +142 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3188 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +681 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +1870 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +339 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +283 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +248 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +271 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1153 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1043 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +232 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1835 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +930 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1112 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +891 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +914 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1194 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +1728 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +950 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.2.dist-info/METADATA +481 -0
- matrice_analytics-0.1.2.dist-info/RECORD +160 -0
- matrice_analytics-0.1.2.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.2.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.2.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/face_reg/face_recognition_client.py
@@ -0,0 +1,339 @@
#!/usr/bin/env python3
"""
Facial Recognition API - Python Client for Post-Processing

This client handles vector search and enrollment operations for face recognition
in the post-processing pipeline using Matrice Session.
"""

import os
import base64
import logging
import httpx
from typing import List, Dict, Any, Optional
from datetime import datetime, timezone

# Import matrice session
try:
    from matrice_common.session import Session
    HAS_MATRICE_SESSION = True
except ImportError:
    HAS_MATRICE_SESSION = False
    logging.warning("Matrice session not available")


class FacialRecognitionClient:
    """
    Simplified Face Recognition Client using Matrice Session.
    All API calls are made through the Matrice session RPC interface.
    """

    def __init__(self, account_number: str = "", access_key: str = "", secret_key: str = "",
                 project_id: str = "", server_id: str = "", session=None):

        # Set up logging
        self.logger = logging.getLogger(__name__)

        self.server_id = server_id
        if not self.server_id:
            raise ValueError("Server ID is required for Face Recognition Client")

        # Use existing session if provided, otherwise create new one
        if session is not None:
            self.session = session
            # Get project_id from session or parameter
            self.project_id = getattr(session, 'project_id', '') or project_id or os.getenv("MATRICE_PROJECT_ID", "")
            self.logger.info("Using existing Matrice session for face recognition client")
        else:
            # Initialize credentials from environment if not provided
            self.account_number = account_number or os.getenv("MATRICE_ACCOUNT_NUMBER", "")
            self.access_key = access_key or os.getenv("MATRICE_ACCESS_KEY_ID", "")
            self.secret_key = secret_key or os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
            self.project_id = project_id or os.getenv("MATRICE_PROJECT_ID", "")

            # Initialize Matrice session
            if not HAS_MATRICE_SESSION:
                raise ImportError("Matrice session is required for Face Recognition Client")

            # if not all([self.account_number, self.access_key, self.secret_key]):
            #     raise ValueError("Missing required credentials: account_number, access_key, secret_key")

            try:
                self.session = Session(
                    account_number=self.account_number,
                    access_key=self.access_key,
                    secret_key=self.secret_key,
                    project_id=self.project_id,
                )
                self.logger.info("Initialized new Matrice session for face recognition client")
            except Exception as e:
                self.logger.error(f"Failed to initialize Matrice session: {e}", exc_info=True)
                raise

    async def enroll_staff(self, staff_data: Dict[str, Any], image_paths: List[str]) -> Dict[str, Any]:
        """
        Enroll a new staff member with face images

        Args:
            staff_data: Dictionary containing staff information (staffId, firstName, lastName, etc.)
            image_paths: List of file paths to face images

        Returns:
            Dict containing enrollment response
        """
        # Convert images to base64
        base64_images = []
        for image_path in image_paths:
            try:
                with open(image_path, "rb") as image_file:
                    base64_image = base64.b64encode(image_file.read()).decode('utf-8')
                    base64_images.append(base64_image)
            except Exception as e:
                self.logger.error(f"Error reading image {image_path}: {e}", exc_info=True)
                return {"success": False, "error": f"Failed to read image: {e}"}

        return await self.enroll_staff_base64(staff_data, base64_images)

    async def enroll_staff_base64(self, staff_data: Dict[str, Any], base64_images: List[str]) -> Dict[str, Any]:
        """Enroll staff with base64 encoded images"""

        # Prepare enrollment request
        enrollment_request = {
            "staff_info": staff_data,
            "images": base64_images
        }

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="POST",
            path=f"/v1/actions/facial_recognition/staff/enroll?serverID={self.server_id}",
            payload=enrollment_request
        )
        return self._handle_response(response)

    async def search_similar_faces(self, face_embedding: List[float],
                                   threshold: float = 0.3, limit: int = 10,
                                   collection: str = "staff_enrollment",
                                   location: str = "",
                                   timestamp: str = "") -> Dict[str, Any]:
        """
        Search for staff members by face embedding vector

        Args:
            face_embedding: Face embedding vector
            collection: Vector collection name
            threshold: Similarity threshold (0.0 to 1.0)
            limit: Maximum number of results to return
            location: Location identifier for logging
            timestamp: Current timestamp in ISO format

        Returns:
            Dict containing search results with detectionType (known/unknown)
        """
        search_request = {
            "embedding": face_embedding,
            "collection": collection,
            "threshold": threshold,
            "limit": limit,
            "location": location,
            "timestamp": timestamp
        }

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="POST",
            path=f"/v1/actions/facial_recognition/search/similar?serverID={self.server_id}",
            payload=search_request
        )
        return self._handle_response(response)

    async def get_staff_details(self, staff_id: str) -> Dict[str, Any]:
        """Get full staff details by staff ID"""

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="GET",
            path=f"/v1/actions/facial_recognition/staff/{staff_id}?serverID={self.server_id}",
            payload={}
        )
        return self._handle_response(response)

    async def store_people_activity(self,
                                    staff_id: str,
                                    detection_type: str,
                                    bbox: List[float],
                                    location: str,
                                    employee_id: Optional[str] = None,
                                    timestamp: str = datetime.now(timezone.utc).isoformat(),
                                    ) -> str:
        """
        Store people activity data and return response with potential upload URLs

        Args:
            staff_id: Staff identifier (empty for unknown faces)
            detection_type: Type of detection (known, unknown, empty)
            bbox: Bounding box coordinates [x1, y1, x2, y2]
            location: Location identifier
            employee_id: Employee ID (for unknown faces, this will be generated)
            timestamp: Timestamp in ISO format

        Returns:
            Dict containing response data including uploadUrl and employeeId for unknown faces,
            or None if the request failed
        """
        activity_request = {
            "staff_id": staff_id,
            "type": detection_type,
            "timestamp": timestamp,
            "bbox": bbox,
            "location": location,
        }

        # Add optional fields if provided
        if detection_type == "unknown":
            if employee_id:
                activity_request["anonymous_id"] = employee_id
        elif detection_type == "known" and employee_id:
            activity_request["employee_id"] = employee_id
        response = await self.session.rpc.async_send_request(
            method="POST",
            path=f"/v1/actions/facial_recognition/store_people_activity?serverID={self.server_id}",
            payload=activity_request
        )
        handled_response = self._handle_response(response)
        if handled_response.get("success", False):
            data = handled_response.get("data", {})
            self.logger.debug(f"Successfully stored {detection_type} activity")
            if not data:
                self.logger.warning("No data returned form store people activity")
                return None
            return data
        else:
            self.logger.error(f"Failed to store {detection_type} activity: {handled_response.get('error', 'Unknown error')}")
            return None

    async def update_staff_images(self, image_url: str, employee_id: str) -> Dict[str, Any]:
        """Update staff images with uploaded image URL"""

        update_request = {
            "imageUrl": image_url,
            "employeeId": employee_id
        }

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="PUT",
            path=f"/v1/actions/facial_recognition/update_staff_images?serverID={self.server_id}",
            payload=update_request
        )
        return self._handle_response(response)

    async def upload_image_to_url(self, image_bytes: bytes, upload_url: str) -> bool:
        """Upload image bytes to the provided URL"""
        try:
            # Upload the image to the signed URL using async httpx
            headers = {'Content-Type': 'image/jpeg'}
            async with httpx.AsyncClient() as client:
                response = await client.put(upload_url, content=image_bytes, headers=headers)

            if response.status_code in [200, 201]:
                self.logger.debug(f"Successfully uploaded image to URL")
                return True
            else:
                self.logger.error(f"Failed to upload image: {response.status_code} - {response.text}")
                return False

        except Exception as e:
            self.logger.error(f"Error uploading image to URL: {e}", exc_info=True)
            return False

    async def shutdown_service(self, action_record_id: Optional[str] = None) -> Dict[str, Any]:
        """Gracefully shutdown the service"""

        payload = {} if not action_record_id else {"actionRecordId": action_record_id}

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="DELETE",
            path=f"/v1/actions/facial_recognition/shutdown?serverID={self.server_id}",
            payload=payload
        )
        return self._handle_response(response)

    async def get_all_staff_embeddings(self) -> Dict[str, Any]:
        """Get all staff embeddings"""

        payload = {}

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="GET",
            path=f"/v1/actions/facial_recognition/get_all_staff_embeddings?serverID={self.server_id}",
            payload=payload,
        )
        return self._handle_response(response)

    async def enroll_unknown_person(self, embedding: List[float], image_source: str = None, timestamp: str = None, location: str = None, employee_id: str = None) -> Dict[str, Any]:
        """Enroll an unknown person"""

        payload = {
            "embedding": embedding
        }

        if image_source:
            payload["imageSource"] = image_source
        if timestamp:
            payload["timestamp"] = timestamp
        else:
            payload["timestamp"] = datetime.now(timezone.utc).isoformat()
        if location:
            payload["location"] = location
        if employee_id:
            payload["employeeId"] = employee_id

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="POST",
            path=f"/v1/actions/facial_recognition/enroll_unknown_person?serverID={self.server_id}",
            payload=payload,
        )
        return self._handle_response(response)

    async def health_check(self) -> Dict[str, Any]:
        """Check if the facial recognition service is healthy"""

        # Use Matrice session for async RPC call
        response = await self.session.rpc.async_send_request(
            method="GET",
            path=f"/v1/actions/facial_recognition/health?serverID={self.server_id}",
            payload={}
        )
        return self._handle_response(response)

    def _handle_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """Handle RPC response and errors"""
        try:
            if response.get("success", True):
                return response
            else:
                error_msg = response.get("error", "Unknown RPC error")
                self.logger.error(f"RPC Error: {error_msg}", exc_info=True)
                return {"success": False, "error": error_msg}
        except Exception as e:
            self.logger.error(f"Error handling RPC response: {e}", exc_info=True)
            return {"success": False, "error": f"Response handling error: {e}"}


# Factory function for easy initialization
def create_face_client(account_number: str = None, access_key: str = None,
                       secret_key: str = None, project_id: str = None,
                       server_id: str = "", session=None) -> FacialRecognitionClient:
    """Create a facial recognition client with automatic credential detection"""
    return FacialRecognitionClient(
        account_number=account_number,
        access_key=access_key,
        secret_key=secret_key,
        project_id=project_id,
        server_id=server_id,
        session=session
    )
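
A minimal usage sketch for FacialRecognitionClient, assuming a configured Matrice environment (matrice_common installed and MATRICE_* credentials available in the environment); the server ID and the embedding vector below are placeholder values, not values taken from the package:

import asyncio

from matrice_analytics.post_processing.face_reg.face_recognition_client import create_face_client


async def main():
    # "srv-example-123" is a placeholder; a real serverID comes from the Matrice deployment.
    client = create_face_client(server_id="srv-example-123")

    # Check that the facial recognition service is reachable.
    print(await client.health_check())

    # Search the staff_enrollment collection with a precomputed face embedding.
    # A 512-dimensional zero vector is used purely as a stand-in; the real
    # dimension depends on the face embedding model in use.
    embedding = [0.0] * 512
    matches = await client.search_similar_faces(embedding, threshold=0.3, limit=5)
    print(matches)


asyncio.run(main())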
matrice_analytics/post_processing/face_reg/people_activity_logging.py
@@ -0,0 +1,283 @@
import asyncio
import logging
import time
import threading
from typing import Dict, Optional, Set
import numpy as np
import cv2
from datetime import datetime, timezone
from .face_recognition_client import FacialRecognitionClient


class PeopleActivityLogging:
    """Background logging system for face recognition activity"""

    def __init__(self, face_client: FacialRecognitionClient = None):
        self.face_client = face_client
        self.logger = logging.getLogger(__name__)

        # Queue for processing detections in background
        self.activity_queue = asyncio.Queue()

        # Thread for background processing
        self.processing_thread = None
        self.is_running = False

        # Empty detection tracking
        self.last_detection_time = time.time()
        self.empty_detection_logged = False
        self.empty_detection_threshold = 10.0  # 10 seconds

        # Storage for unknown faces (for debugging/backup)
        self.unknown_faces_storage = {}

        # Employee ID tracking with timestamps to prevent duplicate logging
        # TODO: Make this use track_id or similarity check instead of employee_id for better deduplication
        self.recent_employee_detections: Dict[str, float] = {}
        self.employee_detection_threshold = 10.0  # 10 seconds

        # Start background processing
        self.start_background_processing()

    def start_background_processing(self):
        """Start the background processing thread"""
        if not self.is_running:
            self.is_running = True
            self.processing_thread = threading.Thread(
                target=self._run_async_loop, daemon=True
            )
            self.processing_thread.start()
            self.logger.info("Started PeopleActivityLogging background processing")

    def stop_background_processing(self):
        """Stop the background processing thread"""
        self.is_running = False
        if self.processing_thread:
            self.processing_thread.join(timeout=5.0)
        self.logger.info("Stopped PeopleActivityLogging background processing")

    def _run_async_loop(self):
        """Run the async event loop in the background thread"""
        try:
            # Create new event loop for this thread
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self._process_activity_queue())
        except Exception as e:
            self.logger.error(f"Error in background processing loop: {e}", exc_info=True)
        finally:
            try:
                loop.close()
            except:
                pass

    async def _process_activity_queue(self):
        """Process activity queue continuously"""
        while self.is_running:
            try:
                # Process queued detections with timeout
                try:
                    activity_data = await asyncio.wait_for(
                        self.activity_queue.get(), timeout=20
                    )
                    await self._process_activity(activity_data)
                    self.activity_queue.task_done()
                except asyncio.TimeoutError:
                    # Continue loop to check for empty detections
                    continue

            except Exception as e:
                self.logger.error(f"Error processing activity queue: {e}", exc_info=True)
                await asyncio.sleep(1.0)

    async def enqueue_detection(
        self,
        detection: Dict,
        current_frame: Optional[np.ndarray] = None,
        location: str = "",
    ):
        """Enqueue a detection for background processing"""
        try:
            activity_data = {
                "detection_type": detection["recognition_status"],  # known, unknown
                "detection": detection,
                "current_frame": current_frame,
                "location": location,
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "employee_id": detection.get("employee_id", None),
                "staff_id": detection.get("person_id")
            }
            if detection["recognition_status"] not in ["known", "unknown"]:
                self.logger.warning(
                    f"Invalid detection status: {detection['recognition_status']}"
                )
                return
            if not detection.get("employee_id", None):
                self.logger.warning(
                    f"No employee_id found for detection: {detection}"
                )
                return
            if not detection.get("person_id", None):
                self.logger.warning(
                    f"No person_id found for detection: {detection}"
                )
                return

            bbox = detection.get("bounding_box", {})
            bbox_list = [
                bbox.get("xmin", 0),
                bbox.get("ymin", 0),
                bbox.get("xmax", 0),
                bbox.get("ymax", 0),
            ]
            activity_data["bbox"] = bbox_list
            # Update last detection time
            self.last_detection_time = time.time()
            self.empty_detection_logged = False

            await self.activity_queue.put(activity_data)
        except Exception as e:
            self.logger.error(f"Error enqueueing detection: {e}", exc_info=True)

    def _should_log_detection(self, employee_id: str) -> bool:
        """
        Check if detection should be logged based on employee ID and time threshold.
        Only log if employee_id was not detected in the past 10 seconds.

        TODO: Make this use track_id or similarity check instead of just employee_id in 10 secs window
        for better deduplication across different detection sessions.
        """
        current_time = time.time()

        # Clean up old entries (older than threshold)
        expired_keys = [
            emp_id for emp_id, timestamp in self.recent_employee_detections.items()
            if current_time - timestamp > self.employee_detection_threshold
        ]
        for emp_id in expired_keys:
            del self.recent_employee_detections[emp_id]

        # Check if employee was recently detected
        if employee_id in self.recent_employee_detections:
            last_detection = self.recent_employee_detections[employee_id]
            if current_time - last_detection < self.employee_detection_threshold:
                self.logger.debug(f"Skipping logging for employee {employee_id} - detected {current_time - last_detection:.1f}s ago")
                return False

        # Update detection time for this employee
        self.recent_employee_detections[employee_id] = current_time
        return True

    async def _process_activity(self, activity_data: Dict):
        """Process activity data - handle all face detections and uploads"""
        detection_type = activity_data["detection_type"]
        current_frame = activity_data["current_frame"]
        bbox = activity_data["bbox"]
        employee_id = activity_data["employee_id"]
        location = activity_data["location"]
        staff_id = activity_data["staff_id"]
        timestamp = activity_data["timestamp"]

        try:
            if not self.face_client:
                return

            # Check if we should log this detection (avoid duplicates within time window)
            if not self._should_log_detection(employee_id):
                return None

            # Store activity data
            self.logger.info(f"Storing activity data for employee {employee_id}")
            upload_url = await self.face_client.store_people_activity(
                staff_id=staff_id,
                detection_type=detection_type,
                bbox=bbox,
                location=location,
                employee_id=employee_id,
                timestamp=timestamp,
            )

            if upload_url:
                self.logger.debug("Successfully stored activity log and fetched upload URL")
                await self._upload_frame(current_frame, upload_url, employee_id)
            else:
                self.logger.warning("Failed to store activity log")

            return upload_url
        except Exception as e:
            self.logger.error(f"Error storing activity log: {e}", exc_info=True)

    async def _upload_frame(self, current_frame: np.ndarray, upload_url: str, employee_id: str):
        _, buffer = cv2.imencode(".jpg", current_frame)
        frame_bytes = buffer.tobytes()

        upload_success = await self.face_client.upload_image_to_url(
            frame_bytes, upload_url
        )

        if upload_success:
            self.logger.info(f"Successfully uploaded whole frame for employee {employee_id}")
        else:
            self.logger.warning("Failed to upload whole frame")

    async def _should_log_activity(self, activity_data: Dict) -> bool:
        """Check if activity should be logged"""
        detection_type = activity_data["detection_type"]
        if detection_type == "known":
            return True
        return False

    def _crop_face_from_frame(self, frame: np.ndarray, bounding_box: Dict) -> bytes:
        """
        Crop face from frame using bounding box and return as bytes

        Args:
            frame: Original frame as numpy array
            bounding_box: Dict with x1, y1, x2, y2 coordinates

        Returns:
            bytes: Cropped face image as JPEG bytes
        """
        try:
            # Extract coordinates - handle different bounding box formats
            x1 = int(bounding_box.get("xmin", bounding_box.get("x1", 0)))
            y1 = int(bounding_box.get("ymin", bounding_box.get("y1", 0)))
            x2 = int(bounding_box.get("xmax", bounding_box.get("x2", 0)))
            y2 = int(bounding_box.get("ymax", bounding_box.get("y2", 0)))

            # Ensure coordinates are within frame bounds
            h, w = frame.shape[:2]
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(w, x2), min(h, y2)

            # Validate coordinates
            if x2 <= x1 or y2 <= y1:
                self.logger.warning("Invalid bounding box coordinates")
                return b""

            # Crop the face
            cropped_face = frame[y1:y2, x1:x2]

            # Convert to JPEG bytes
            _, buffer = cv2.imencode(".jpg", cropped_face)
            return buffer.tobytes()

        except Exception as e:
            self.logger.error(f"Error cropping face from frame: {e}", exc_info=True)
            return b""

    def get_unknown_faces_storage(self) -> Dict[str, bytes]:
        """Get stored unknown face images as bytes"""
        return self.unknown_faces_storage.copy()

    def clear_unknown_faces_storage(self) -> None:
        """Clear stored unknown face images"""
        self.unknown_faces_storage.clear()

    def __del__(self):
        """Cleanup when object is destroyed"""
        try:
            self.stop_background_processing()
        except:
            pass
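
A minimal sketch of driving PeopleActivityLogging, under the same Matrice-environment assumption as above; the detection fields mirror what enqueue_detection reads, while the server ID, employee/person IDs, location, and frame are placeholder values:

import asyncio

import numpy as np

from matrice_analytics.post_processing.face_reg.face_recognition_client import create_face_client
from matrice_analytics.post_processing.face_reg.people_activity_logging import PeopleActivityLogging


async def main():
    client = create_face_client(server_id="srv-example-123")  # placeholder server ID
    activity_logger = PeopleActivityLogging(face_client=client)  # starts the background thread

    detection = {
        "recognition_status": "known",  # must be "known" or "unknown"
        "employee_id": "emp-001",       # placeholder employee ID
        "person_id": "staff-001",       # placeholder staff ID
        "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 140},
    }
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a camera frame

    # Queues the detection; the background loop stores the activity and
    # uploads the frame if the API returns an upload URL.
    await activity_logger.enqueue_detection(detection, current_frame=frame, location="lobby")

    await asyncio.sleep(1.0)  # give the background loop a moment before shutdown
    activity_logger.stop_background_processing()


asyncio.run(main())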