matrice-analytics 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice_analytics/__init__.py +28 -0
- matrice_analytics/boundary_drawing_internal/README.md +305 -0
- matrice_analytics/boundary_drawing_internal/__init__.py +45 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_internal.py +1207 -0
- matrice_analytics/boundary_drawing_internal/boundary_drawing_tool.py +429 -0
- matrice_analytics/boundary_drawing_internal/boundary_tool_template.html +1036 -0
- matrice_analytics/boundary_drawing_internal/data/.gitignore +12 -0
- matrice_analytics/boundary_drawing_internal/example_usage.py +206 -0
- matrice_analytics/boundary_drawing_internal/usage/README.md +110 -0
- matrice_analytics/boundary_drawing_internal/usage/boundary_drawer_launcher.py +102 -0
- matrice_analytics/boundary_drawing_internal/usage/simple_boundary_launcher.py +107 -0
- matrice_analytics/post_processing/README.md +455 -0
- matrice_analytics/post_processing/__init__.py +732 -0
- matrice_analytics/post_processing/advanced_tracker/README.md +650 -0
- matrice_analytics/post_processing/advanced_tracker/__init__.py +17 -0
- matrice_analytics/post_processing/advanced_tracker/base.py +99 -0
- matrice_analytics/post_processing/advanced_tracker/config.py +77 -0
- matrice_analytics/post_processing/advanced_tracker/kalman_filter.py +370 -0
- matrice_analytics/post_processing/advanced_tracker/matching.py +195 -0
- matrice_analytics/post_processing/advanced_tracker/strack.py +230 -0
- matrice_analytics/post_processing/advanced_tracker/tracker.py +367 -0
- matrice_analytics/post_processing/config.py +146 -0
- matrice_analytics/post_processing/core/__init__.py +63 -0
- matrice_analytics/post_processing/core/base.py +704 -0
- matrice_analytics/post_processing/core/config.py +3291 -0
- matrice_analytics/post_processing/core/config_utils.py +925 -0
- matrice_analytics/post_processing/face_reg/__init__.py +43 -0
- matrice_analytics/post_processing/face_reg/compare_similarity.py +556 -0
- matrice_analytics/post_processing/face_reg/embedding_manager.py +950 -0
- matrice_analytics/post_processing/face_reg/face_recognition.py +2234 -0
- matrice_analytics/post_processing/face_reg/face_recognition_client.py +606 -0
- matrice_analytics/post_processing/face_reg/people_activity_logging.py +321 -0
- matrice_analytics/post_processing/ocr/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/easyocr_extractor.py +250 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/__init__.py +9 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/__init__.py +4 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py +33 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/dataset_stats.py +139 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/export.py +398 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/train.py +447 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/utils.py +129 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/valid.py +93 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/validate_dataset.py +240 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_augmentation.py +176 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/visualize_predictions.py +96 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/process.py +246 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/types.py +60 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/core/utils.py +87 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/__init__.py +3 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/config.py +82 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/hub.py +141 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/inference/plate_recognizer.py +323 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/py.typed +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/augmentation.py +101 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/data/dataset.py +97 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/config.py +114 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/layers.py +553 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/loss.py +55 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/metric.py +86 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_builders.py +95 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/model/model_schema.py +395 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/__init__.py +0 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/backend_utils.py +38 -0
- matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/train/utilities/utils.py +214 -0
- matrice_analytics/post_processing/ocr/postprocessing.py +270 -0
- matrice_analytics/post_processing/ocr/preprocessing.py +52 -0
- matrice_analytics/post_processing/post_processor.py +1175 -0
- matrice_analytics/post_processing/test_cases/__init__.py +1 -0
- matrice_analytics/post_processing/test_cases/run_tests.py +143 -0
- matrice_analytics/post_processing/test_cases/test_advanced_customer_service.py +841 -0
- matrice_analytics/post_processing/test_cases/test_basic_counting_tracking.py +523 -0
- matrice_analytics/post_processing/test_cases/test_comprehensive.py +531 -0
- matrice_analytics/post_processing/test_cases/test_config.py +852 -0
- matrice_analytics/post_processing/test_cases/test_customer_service.py +585 -0
- matrice_analytics/post_processing/test_cases/test_data_generators.py +583 -0
- matrice_analytics/post_processing/test_cases/test_people_counting.py +510 -0
- matrice_analytics/post_processing/test_cases/test_processor.py +524 -0
- matrice_analytics/post_processing/test_cases/test_usecases.py +165 -0
- matrice_analytics/post_processing/test_cases/test_utilities.py +356 -0
- matrice_analytics/post_processing/test_cases/test_utils.py +743 -0
- matrice_analytics/post_processing/usecases/Histopathological_Cancer_Detection_img.py +604 -0
- matrice_analytics/post_processing/usecases/__init__.py +267 -0
- matrice_analytics/post_processing/usecases/abandoned_object_detection.py +797 -0
- matrice_analytics/post_processing/usecases/advanced_customer_service.py +1601 -0
- matrice_analytics/post_processing/usecases/age_detection.py +842 -0
- matrice_analytics/post_processing/usecases/age_gender_detection.py +1085 -0
- matrice_analytics/post_processing/usecases/anti_spoofing_detection.py +656 -0
- matrice_analytics/post_processing/usecases/assembly_line_detection.py +841 -0
- matrice_analytics/post_processing/usecases/banana_defect_detection.py +624 -0
- matrice_analytics/post_processing/usecases/basic_counting_tracking.py +667 -0
- matrice_analytics/post_processing/usecases/blood_cancer_detection_img.py +881 -0
- matrice_analytics/post_processing/usecases/car_damage_detection.py +834 -0
- matrice_analytics/post_processing/usecases/car_part_segmentation.py +946 -0
- matrice_analytics/post_processing/usecases/car_service.py +1601 -0
- matrice_analytics/post_processing/usecases/cardiomegaly_classification.py +864 -0
- matrice_analytics/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice_analytics/post_processing/usecases/chicken_pose_detection.py +648 -0
- matrice_analytics/post_processing/usecases/child_monitoring.py +814 -0
- matrice_analytics/post_processing/usecases/color/clip.py +660 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/merges.txt +48895 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/preprocessor_config.json +28 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/special_tokens_map.json +30 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer.json +245079 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/tokenizer_config.json +32 -0
- matrice_analytics/post_processing/usecases/color/clip_processor/vocab.json +1 -0
- matrice_analytics/post_processing/usecases/color/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/color/color_mapper.py +468 -0
- matrice_analytics/post_processing/usecases/color_detection.py +1936 -0
- matrice_analytics/post_processing/usecases/color_map_utils.py +70 -0
- matrice_analytics/post_processing/usecases/concrete_crack_detection.py +827 -0
- matrice_analytics/post_processing/usecases/crop_weed_detection.py +781 -0
- matrice_analytics/post_processing/usecases/customer_service.py +1008 -0
- matrice_analytics/post_processing/usecases/defect_detection_products.py +936 -0
- matrice_analytics/post_processing/usecases/distracted_driver_detection.py +822 -0
- matrice_analytics/post_processing/usecases/drone_traffic_monitoring.py +585 -0
- matrice_analytics/post_processing/usecases/drowsy_driver_detection.py +829 -0
- matrice_analytics/post_processing/usecases/dwell_detection.py +829 -0
- matrice_analytics/post_processing/usecases/emergency_vehicle_detection.py +827 -0
- matrice_analytics/post_processing/usecases/face_emotion.py +813 -0
- matrice_analytics/post_processing/usecases/face_recognition.py +827 -0
- matrice_analytics/post_processing/usecases/fashion_detection.py +835 -0
- matrice_analytics/post_processing/usecases/field_mapping.py +902 -0
- matrice_analytics/post_processing/usecases/fire_detection.py +1146 -0
- matrice_analytics/post_processing/usecases/flare_analysis.py +836 -0
- matrice_analytics/post_processing/usecases/flower_segmentation.py +1006 -0
- matrice_analytics/post_processing/usecases/gas_leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/gender_detection.py +832 -0
- matrice_analytics/post_processing/usecases/human_activity_recognition.py +871 -0
- matrice_analytics/post_processing/usecases/intrusion_detection.py +1672 -0
- matrice_analytics/post_processing/usecases/leaf.py +821 -0
- matrice_analytics/post_processing/usecases/leaf_disease.py +840 -0
- matrice_analytics/post_processing/usecases/leak_detection.py +837 -0
- matrice_analytics/post_processing/usecases/license_plate_detection.py +1188 -0
- matrice_analytics/post_processing/usecases/license_plate_monitoring.py +1781 -0
- matrice_analytics/post_processing/usecases/litter_monitoring.py +717 -0
- matrice_analytics/post_processing/usecases/mask_detection.py +869 -0
- matrice_analytics/post_processing/usecases/natural_disaster.py +907 -0
- matrice_analytics/post_processing/usecases/parking.py +787 -0
- matrice_analytics/post_processing/usecases/parking_space_detection.py +822 -0
- matrice_analytics/post_processing/usecases/pcb_defect_detection.py +888 -0
- matrice_analytics/post_processing/usecases/pedestrian_detection.py +808 -0
- matrice_analytics/post_processing/usecases/people_counting.py +706 -0
- matrice_analytics/post_processing/usecases/people_counting_bckp.py +1683 -0
- matrice_analytics/post_processing/usecases/people_tracking.py +1842 -0
- matrice_analytics/post_processing/usecases/pipeline_detection.py +605 -0
- matrice_analytics/post_processing/usecases/plaque_segmentation_img.py +874 -0
- matrice_analytics/post_processing/usecases/pothole_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/ppe_compliance.py +645 -0
- matrice_analytics/post_processing/usecases/price_tag_detection.py +822 -0
- matrice_analytics/post_processing/usecases/proximity_detection.py +1901 -0
- matrice_analytics/post_processing/usecases/road_lane_detection.py +623 -0
- matrice_analytics/post_processing/usecases/road_traffic_density.py +832 -0
- matrice_analytics/post_processing/usecases/road_view_segmentation.py +915 -0
- matrice_analytics/post_processing/usecases/shelf_inventory_detection.py +583 -0
- matrice_analytics/post_processing/usecases/shoplifting_detection.py +822 -0
- matrice_analytics/post_processing/usecases/shopping_cart_analysis.py +899 -0
- matrice_analytics/post_processing/usecases/skin_cancer_classification_img.py +864 -0
- matrice_analytics/post_processing/usecases/smoker_detection.py +833 -0
- matrice_analytics/post_processing/usecases/solar_panel.py +810 -0
- matrice_analytics/post_processing/usecases/suspicious_activity_detection.py +1030 -0
- matrice_analytics/post_processing/usecases/template_usecase.py +380 -0
- matrice_analytics/post_processing/usecases/theft_detection.py +648 -0
- matrice_analytics/post_processing/usecases/traffic_sign_monitoring.py +724 -0
- matrice_analytics/post_processing/usecases/underground_pipeline_defect_detection.py +775 -0
- matrice_analytics/post_processing/usecases/underwater_pollution_detection.py +842 -0
- matrice_analytics/post_processing/usecases/vehicle_monitoring.py +1029 -0
- matrice_analytics/post_processing/usecases/warehouse_object_segmentation.py +899 -0
- matrice_analytics/post_processing/usecases/waterbody_segmentation.py +923 -0
- matrice_analytics/post_processing/usecases/weapon_detection.py +771 -0
- matrice_analytics/post_processing/usecases/weld_defect_detection.py +615 -0
- matrice_analytics/post_processing/usecases/wildlife_monitoring.py +898 -0
- matrice_analytics/post_processing/usecases/windmill_maintenance.py +834 -0
- matrice_analytics/post_processing/usecases/wound_segmentation.py +856 -0
- matrice_analytics/post_processing/utils/__init__.py +150 -0
- matrice_analytics/post_processing/utils/advanced_counting_utils.py +400 -0
- matrice_analytics/post_processing/utils/advanced_helper_utils.py +317 -0
- matrice_analytics/post_processing/utils/advanced_tracking_utils.py +461 -0
- matrice_analytics/post_processing/utils/alerting_utils.py +213 -0
- matrice_analytics/post_processing/utils/category_mapping_utils.py +94 -0
- matrice_analytics/post_processing/utils/color_utils.py +592 -0
- matrice_analytics/post_processing/utils/counting_utils.py +182 -0
- matrice_analytics/post_processing/utils/filter_utils.py +261 -0
- matrice_analytics/post_processing/utils/format_utils.py +293 -0
- matrice_analytics/post_processing/utils/geometry_utils.py +300 -0
- matrice_analytics/post_processing/utils/smoothing_utils.py +358 -0
- matrice_analytics/post_processing/utils/tracking_utils.py +234 -0
- matrice_analytics/py.typed +0 -0
- matrice_analytics-0.1.60.dist-info/METADATA +481 -0
- matrice_analytics-0.1.60.dist-info/RECORD +196 -0
- matrice_analytics-0.1.60.dist-info/WHEEL +5 -0
- matrice_analytics-0.1.60.dist-info/licenses/LICENSE.txt +21 -0
- matrice_analytics-0.1.60.dist-info/top_level.txt +1 -0
matrice_analytics/post_processing/face_reg/people_activity_logging.py
@@ -0,0 +1,321 @@
+import asyncio
+import logging
+import time
+import threading
+import queue
+import base64
+from typing import Dict, Optional, Set
+import numpy as np
+import cv2
+from datetime import datetime, timezone
+from .face_recognition_client import FacialRecognitionClient
+
+
+class PeopleActivityLogging:
+    """Background logging system for face recognition activity"""
+
+    def __init__(self, face_client: FacialRecognitionClient = None):
+        self.face_client = face_client
+        self.logger = logging.getLogger(__name__)
+
+        # Use thread-safe queue for cross-thread communication (Python 3.8 compatibility)
+        self.activity_queue = queue.Queue()
+
+        # Thread for background processing
+        self.processing_thread = None
+        self.is_running = False
+
+        # Empty detection tracking
+        self.last_detection_time = time.time()
+        self.empty_detection_logged = False
+        self.empty_detection_threshold = 10.0  # 10 seconds
+
+        # Storage for unknown faces (for debugging/backup)
+        self.unknown_faces_storage = {}
+
+        # Employee ID tracking with timestamps to prevent duplicate logging
+        # TODO: Make this use track_id or similarity check instead of employee_id for better deduplication
+        self.recent_employee_detections: Dict[str, float] = {}
+        self.employee_detection_threshold = 10.0  # 10 seconds
+
+        # Start background processing
+        self.start_background_processing()
+
+    def start_background_processing(self):
+        """Start the background processing thread"""
+        if not self.is_running:
+            self.is_running = True
+            self.processing_thread = threading.Thread(
+                target=self._run_async_loop, daemon=True
+            )
+            self.processing_thread.start()
+            self.logger.info("Started PeopleActivityLogging background processing")
+
+    def stop_background_processing(self):
+        """Stop the background processing thread"""
+        self.is_running = False
+        if self.processing_thread:
+            self.processing_thread.join(timeout=5.0)
+        self.logger.info("Stopped PeopleActivityLogging background processing")
+
+    def _run_async_loop(self):
+        """Run the async event loop in the background thread"""
+        try:
+            # Create new event loop for this thread
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+            loop.run_until_complete(self._process_activity_queue())
+        except Exception as e:
+            self.logger.error(f"Error in background processing loop: {e}", exc_info=True)
+        finally:
+            try:
+                loop.close()
+            except:
+                pass
+
+    async def _process_activity_queue(self):
+        """Process activity queue continuously"""
+        while self.is_running:
+            try:
+                # Process queued detections with timeout using thread-safe queue
+                try:
+                    activity_data = self.activity_queue.get(timeout=20)
+                    await self._process_activity(activity_data)
+                    self.activity_queue.task_done()
+                except queue.Empty:
+                    # Continue loop to check for empty detections
+                    continue
+
+            except Exception as e:
+                self.logger.error(f"Error processing activity queue: {e}", exc_info=True)
+                await asyncio.sleep(1.0)
+
+    async def enqueue_detection(
+        self,
+        detection: Dict,
+        current_frame: Optional[np.ndarray] = None,
+        location: str = "",
+    ):
+        """Enqueue a detection for background processing"""
+        try:
+            activity_data = {
+                "detection_type": detection["recognition_status"],  # known, unknown
+                "detection": detection,
+                "current_frame": current_frame,
+                "location": location,
+                "timestamp": datetime.now(timezone.utc).isoformat(),
+                "employee_id": detection.get("employee_id", None),
+                "staff_id": detection.get("person_id")
+            }
+            if detection["recognition_status"] not in ["known", "unknown"]:
+                self.logger.warning(
+                    f"Invalid detection status: {detection['recognition_status']}"
+                )
+                return
+            if not detection.get("employee_id", None):
+                self.logger.warning(
+                    f"No employee_id found for detection: {detection}"
+                )
+                return
+            if not detection.get("person_id", None):
+                self.logger.warning(
+                    f"No person_id found for detection: {detection}"
+                )
+                return
+
+            bbox = detection.get("bounding_box", {})
+            bbox_list = [
+                bbox.get("xmin", 0),
+                bbox.get("ymin", 0),
+                bbox.get("xmax", 0),
+                bbox.get("ymax", 0),
+            ]
+            activity_data["bbox"] = bbox_list
+            # Update last detection time
+            self.last_detection_time = time.time()
+            self.empty_detection_logged = False
+
+            # Use thread-safe put (no await needed for queue.Queue)
+            self.activity_queue.put(activity_data)
+        except Exception as e:
+            self.logger.error(f"Error enqueueing detection: {e}", exc_info=True)
+
+    def _should_log_detection(self, employee_id: str) -> bool:
+        """
+        Check if detection should be logged based on employee ID and time threshold.
+        Only log if employee_id was not detected in the past 10 seconds.
+
+        TODO: Make this use track_id or similarity check instead of just employee_id in 10 secs window
+        for better deduplication across different detection sessions.
+        """
+        current_time = time.time()
+
+        # Clean up old entries (older than threshold)
+        expired_keys = [
+            emp_id for emp_id, timestamp in self.recent_employee_detections.items()
+            if current_time - timestamp > self.employee_detection_threshold
+        ]
+        for emp_id in expired_keys:
+            del self.recent_employee_detections[emp_id]
+
+        # Check if employee was recently detected
+        if employee_id in self.recent_employee_detections:
+            last_detection = self.recent_employee_detections[employee_id]
+            if current_time - last_detection < self.employee_detection_threshold:
+                self.logger.debug(f"Skipping logging for employee {employee_id} - detected {current_time - last_detection:.1f}s ago")
+                return False
+
+        # Update detection time for this employee
+        self.recent_employee_detections[employee_id] = current_time
+        return True
+
+    async def _process_activity(self, activity_data: Dict):
+        """Process activity data - handle all face detections with embedded image data"""
+        detection_type = activity_data["detection_type"]
+        current_frame = activity_data["current_frame"]
+        bbox = activity_data["bbox"]
+        employee_id = activity_data["employee_id"]
+        location = activity_data["location"]
+        staff_id = activity_data["staff_id"]
+        timestamp = activity_data["timestamp"]
+
+        try:
+            if not self.face_client:
+                self.logger.warning("Face client not available for activity logging")
+                return
+
+            # Check if we should log this detection (avoid duplicates within time window)
+            if not self._should_log_detection(employee_id):
+                self.logger.debug(f"Skipping activity log for employee_id={employee_id} (within cooldown period)")
+                return None
+
+            # Encode frame as base64 JPEG
+            image_data = None
+            if current_frame is not None:
+                try:
+                    self.logger.debug(f"Encoding frame as base64 JPEG - employee_id={employee_id}")
+                    _, buffer = cv2.imencode(".jpg", current_frame)
+                    frame_bytes = buffer.tobytes()
+                    image_data = base64.b64encode(frame_bytes).decode('utf-8')
+                    self.logger.debug(f"Encoded image data - employee_id={employee_id}, size={len(frame_bytes)} bytes")
+                except Exception as e:
+                    self.logger.error(f"Error encoding frame for employee_id={employee_id}: {e}", exc_info=True)
+
+            # Store activity data with embedded image
+            self.logger.info(f"Processing activity log - type={detection_type}, employee_id={employee_id}, staff_id={staff_id}, location={location}")
+            response = await self.face_client.store_people_activity(
+                staff_id=staff_id,
+                detection_type=detection_type,
+                bbox=bbox,
+                location=location,
+                employee_id=employee_id,
+                timestamp=timestamp,
+                image_data=image_data,
+            )
+
+            if response and response.get("success", False):
+                self.logger.info(f"Activity log stored successfully for employee_id={employee_id}")
+            else:
+                error_msg = response.get("error", "Unknown error") if response else "No response"
+                self.logger.warning(f"Failed to store activity log for employee_id={employee_id} - {error_msg}")
+
+            return response
+        except Exception as e:
+            self.logger.error(f"Error processing activity log for employee_id={employee_id}: {e}", exc_info=True)
+
+    # async def _upload_frame_to_url(self, current_frame: np.ndarray, upload_url: str, employee_id: str):
+    #     try:
+    #         self.logger.debug(f"Encoding frame for upload - employee_id={employee_id}")
+    #         _, buffer = cv2.imencode(".jpg", current_frame)
+    #         frame_bytes = buffer.tobytes()
+
+    #         self.logger.info(f"Uploading frame to storage - employee_id={employee_id}, size={len(frame_bytes)} bytes")
+    #         upload_success = await self.face_client.upload_image_to_url(
+    #             frame_bytes, upload_url
+    #         )
+
+    #         if upload_success:
+    #             self.logger.info(f"Frame uploaded successfully for employee_id={employee_id}")
+    #         else:
+    #             self.logger.warning(f"Failed to upload frame for employee_id={employee_id}")
+    #     except Exception as e:
+    #         self.logger.error(f"Error uploading frame for employee_id={employee_id}: {e}", exc_info=True)
+
+    async def _upload_frame(self, current_frame: np.ndarray, upload_url: str, employee_id: str):
+        try:
+            self.logger.debug(f"Encoding frame for upload - employee_id={employee_id}")
+            _, buffer = cv2.imencode(".jpg", current_frame)
+            frame_bytes = buffer.tobytes()
+
+            self.logger.info(f"Uploading frame to storage - employee_id={employee_id}, size={len(frame_bytes)} bytes")
+            upload_success = await self.face_client.upload_image_to_url(
+                frame_bytes, upload_url
+            )
+
+            if upload_success:
+                self.logger.info(f"Frame uploaded successfully for employee_id={employee_id}")
+            else:
+                self.logger.warning(f"Failed to upload frame for employee_id={employee_id}")
+        except Exception as e:
+            self.logger.error(f"Error uploading frame for employee_id={employee_id}: {e}", exc_info=True)
+
+    async def _should_log_activity(self, activity_data: Dict) -> bool:
+        """Check if activity should be logged"""
+        detection_type = activity_data["detection_type"]
+        if detection_type == "known":
+            return True
+        return False
+
+    def _crop_face_from_frame(self, frame: np.ndarray, bounding_box: Dict) -> bytes:
+        """
+        Crop face from frame using bounding box and return as bytes
+
+        Args:
+            frame: Original frame as numpy array
+            bounding_box: Dict with x1, y1, x2, y2 coordinates
+
+        Returns:
+            bytes: Cropped face image as JPEG bytes
+        """
+        try:
+            # Extract coordinates - handle different bounding box formats
+            x1 = int(bounding_box.get("xmin", bounding_box.get("x1", 0)))
+            y1 = int(bounding_box.get("ymin", bounding_box.get("y1", 0)))
+            x2 = int(bounding_box.get("xmax", bounding_box.get("x2", 0)))
+            y2 = int(bounding_box.get("ymax", bounding_box.get("y2", 0)))
+
+            # Ensure coordinates are within frame bounds
+            h, w = frame.shape[:2]
+            x1, y1 = max(0, x1), max(0, y1)
+            x2, y2 = min(w, x2), min(h, y2)
+
+            # Validate coordinates
+            if x2 <= x1 or y2 <= y1:
+                self.logger.warning("Invalid bounding box coordinates")
+                return b""
+
+            # Crop the face
+            cropped_face = frame[y1:y2, x1:x2]
+
+            # Convert to JPEG bytes
+            _, buffer = cv2.imencode(".jpg", cropped_face)
+            return buffer.tobytes()
+
+        except Exception as e:
+            self.logger.error(f"Error cropping face from frame: {e}", exc_info=True)
+            return b""
+
+    def get_unknown_faces_storage(self) -> Dict[str, bytes]:
+        """Get stored unknown face images as bytes"""
+        return self.unknown_faces_storage.copy()
+
+    def clear_unknown_faces_storage(self) -> None:
+        """Clear stored unknown face images"""
+        self.unknown_faces_storage.clear()
+
+    def __del__(self):
+        """Cleanup when object is destroyed"""
+        try:
+            self.stop_background_processing()
+        except:
+            pass
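For reference, a minimal usage sketch of the `PeopleActivityLogging` class above. This is illustrative only: the `FacialRecognitionClient` constructor arguments are not shown in this diff, so the client construction below is a placeholder, and the short sleep merely gives the background worker a moment to drain the queue.

```python
# Illustrative sketch only; FacialRecognitionClient construction is a placeholder.
import asyncio

import cv2

from matrice_analytics.post_processing.face_reg.face_recognition_client import (
    FacialRecognitionClient,
)
from matrice_analytics.post_processing.face_reg.people_activity_logging import (
    PeopleActivityLogging,
)


async def log_one_detection() -> None:
    client = FacialRecognitionClient()  # placeholder args; see face_recognition_client.py
    activity_logger = PeopleActivityLogging(face_client=client)  # spawns the worker thread

    frame = cv2.imread("frame.jpg")  # any BGR frame; placeholder path
    detection = {
        "recognition_status": "known",  # must be "known" or "unknown"
        "employee_id": "emp-001",       # required, or the detection is dropped
        "person_id": "staff-001",       # required; forwarded as staff_id
        "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 140},
    }
    # enqueue_detection only validates and queues; the daemon thread encodes the
    # frame as base64 JPEG and calls store_people_activity on its own event loop.
    await activity_logger.enqueue_detection(detection, current_frame=frame, location="lobby")

    await asyncio.sleep(2)  # sketch only: let the worker drain before stopping
    activity_logger.stop_background_processing()


asyncio.run(log_one_detection())
```

Note that the handoff uses a plain `queue.Queue` rather than `asyncio.Queue`, which is what makes it safe to enqueue from any thread on Python 3.8 while a dedicated thread owns the consuming event loop.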
matrice_analytics/post_processing/ocr/__init__.py (File without changes)
matrice_analytics/post_processing/ocr/easyocr_extractor.py
@@ -0,0 +1,250 @@
+import easyocr
+import numpy as np
+import torch
+from matrice_common.utils import log_errors
+
+class EasyOCRExtractor:
+    def __init__(self, lang=['en', 'hi', 'ar'], gpu=False, model_storage_directory=None,
+                 download_enabled=True, detector=True, recognizer=True, verbose=False):
+        """
+        Initializes the EasyOCR text extractor with optimized parameters.
+
+        Args:
+            lang (str or list): Language(s) to be used by EasyOCR. Default is ['en', 'hi', 'ar'].
+            gpu (bool): Enable GPU acceleration if available. Default is True.
+            model_storage_directory (str): Custom path to store models. Default is None.
+            download_enabled (bool): Allow downloading models if not found. Default is True.
+            detector (bool): Load text detection model. Default is True.
+            recognizer (bool): Load text recognition model. Default is True.
+            verbose (bool): Enable verbose output (e.g., progress bars). Default is False.
+        """
+        self.lang = lang
+        self.gpu = gpu
+        # Check if GPU is available
+        if torch.cuda.is_available():
+            self.gpu = True
+        else:
+            self.gpu = False
+        self.model_storage_directory = model_storage_directory
+        self.download_enabled = download_enabled
+        self.detector = detector
+        self.recognizer = recognizer
+        self.verbose = verbose
+        self.reader = None
+
+    @log_errors(default_return=None, raise_exception=True, service_name="py_analytics", log_error=True)
+    def setup(self):
+        """
+        Initializes the EasyOCR reader if not already initialized.
+        """
+        if self.reader is None:
+            lang_list = [self.lang] if isinstance(self.lang, str) else self.lang
+            self.reader = easyocr.Reader(
+                lang_list=lang_list,
+                gpu=self.gpu,
+                model_storage_directory=self.model_storage_directory,
+                download_enabled=self.download_enabled,
+                detector=self.detector,
+                recognizer=self.recognizer,
+                verbose=self.verbose
+            )
+
+    def extract(self, image_np, bboxes=None, detail=1, paragraph=False,
+                decoder='greedy', beam_width=5, batch_size=1, workers=0,
+                allowlist=None, blocklist=None, min_size=10, rotation_info=None,
+                contrast_ths=0.1, adjust_contrast=0.5, text_threshold=0.7,
+                low_text=0.4, link_threshold=0.4, canvas_size=2560, mag_ratio=1.0,
+                slope_ths=0.1, ycenter_ths=0.5, height_ths=0.5, width_ths=0.5,
+                add_margin=0.1):
+        """
+        Extracts text from the given image or specific regions within the bounding boxes
+        with configurable parameters for optimal performance.
+
+        Args:
+            image_np (np.ndarray): Input image as a numpy array.
+            bboxes (list): List of bounding boxes. Each box is a list of [xmin, ymin, xmax, ymax].
+                If None, OCR is performed on the entire image.
+            detail (int): Set to 0 for simple output, 1 for detailed output.
+            paragraph (bool): Combine results into paragraphs.
+            decoder (str): Decoding method ('greedy', 'beamsearch', 'wordbeamsearch').
+            beam_width (int): How many beams to keep when using beam search decoders.
+            batch_size (int): Number of images to process in a batch.
+            workers (int): Number of worker threads for data loading.
+            allowlist (str): Force recognition of only specific characters.
+            blocklist (str): Block specific characters from recognition.
+            min_size (int): Filter text boxes smaller than this pixel size.
+            rotation_info (list): List of rotation angles to try (e.g., [90, 180, 270]).
+            contrast_ths (float): Threshold for contrast adjustment.
+            adjust_contrast (float): Target contrast level for low-contrast text.
+            text_threshold (float): Text confidence threshold.
+            low_text (float): Text low-bound score.
+            link_threshold (float): Link confidence threshold.
+            canvas_size (int): Maximum image size before resizing.
+            mag_ratio (float): Image magnification ratio.
+            slope_ths (float): Maximum slope for merging boxes.
+            ycenter_ths (float): Maximum y-center shift for merging boxes.
+            height_ths (float): Maximum height difference for merging boxes.
+            width_ths (float): Maximum width for horizontal merging.
+            add_margin (float): Margin to add around text boxes.
+
+        Returns:
+            list: OCR results containing text, confidence, and bounding boxes.
+        """
+        # Make sure the reader is initialized
+        self.setup()
+
+        ocr_results = []
+
+        # Dictionary of readtext parameters
+        readtext_params = {
+            'decoder': decoder,
+            'beamWidth': beam_width,
+            'batch_size': batch_size,
+            'workers': workers,
+            'allowlist': allowlist,
+            'blocklist': blocklist,
+            'detail': detail,
+            'paragraph': paragraph,
+            'min_size': min_size,
+            'rotation_info': rotation_info,
+            'contrast_ths': contrast_ths,
+            'adjust_contrast': adjust_contrast,
+            'text_threshold': text_threshold,
+            'low_text': low_text,
+            'link_threshold': link_threshold,
+            'canvas_size': canvas_size,
+            'mag_ratio': mag_ratio,
+            'slope_ths': slope_ths,
+            'ycenter_ths': ycenter_ths,
+            'height_ths': height_ths,
+            'width_ths': width_ths,
+            'add_margin': add_margin
+        }
+
+        # If no bounding boxes, perform OCR on the entire image
+        if bboxes is None:
+            text_data = self.reader.readtext(image_np, **readtext_params)
+            if detail == 0:
+                return text_data  # Simple output format for detail=0
+
+            for bbox, text, conf in text_data:
+                ocr_results.append({
+                    "text": text,
+                    "confidence": conf,
+                    "bounding_box": bbox
+                })
+        else:
+            # Perform OCR on each bounding box
+            for box in bboxes:
+                xmin, ymin, xmax, ymax = map(int, box)
+                cropped_img = image_np[ymin:ymax, xmin:xmax]
+
+                # Skip empty crops
+                if cropped_img.size == 0 or cropped_img.shape[0] == 0 or cropped_img.shape[1] == 0:
+                    continue
+
+                text_data = self.reader.readtext(cropped_img, **readtext_params)
+
+                if detail == 0:
+                    # Adjust coordinates for the cropped region
+                    adjusted_data = []
+                    for result in text_data:
+                        if isinstance(result, tuple) and len(result) >= 1:
+                            # Adjust coordinates based on crop position
+                            adjusted_bbox = [[pt[0] + xmin, pt[1] + ymin] for pt in result[0]]
+                            if len(result) == 3:  # (bbox, text, confidence)
+                                adjusted_data.append((adjusted_bbox, result[1], result[2]))
+                            elif len(result) == 2:  # (bbox, text)
+                                adjusted_data.append((adjusted_bbox, result[1]))
+                    ocr_results.extend(adjusted_data)
+                else:
+                    for bbox, text, conf in text_data:
+                        # Adjust bounding box coordinates relative to the original image
+                        adjusted_bbox = [
+                            [pt[0] + xmin, pt[1] + ymin] for pt in bbox
+                        ]
+
+                        ocr_results.append({
+                            "text": text,
+                            "confidence": conf,
+                            "bounding_box": adjusted_bbox
+                        })
+
+        return ocr_results
+
+    def detect_text_regions(self, image_np, min_size=10, text_threshold=0.7,
+                            low_text=0.4, link_threshold=0.4, canvas_size=2560,
+                            mag_ratio=1.0, slope_ths=0.1, ycenter_ths=0.5,
+                            height_ths=0.5, width_ths=0.5, add_margin=0.1,
+                            optimal_num_chars=None):
+        """
+        Detects text regions in the image without performing recognition.
+
+        Args:
+            image_np (np.ndarray): Input image as a numpy array.
+            min_size (int): Filter text boxes smaller than this pixel size.
+            text_threshold (float): Text confidence threshold.
+            low_text (float): Text low-bound score.
+            link_threshold (float): Link confidence threshold.
+            canvas_size (int): Maximum image size before resizing.
+            mag_ratio (float): Image magnification ratio.
+            slope_ths (float): Maximum slope for merging boxes.
+            ycenter_ths (float): Maximum y-center shift for merging boxes.
+            height_ths (float): Maximum height difference for merging boxes.
+            width_ths (float): Maximum width for horizontal merging.
+            add_margin (float): Margin to add around text boxes.
+            optimal_num_chars (int): Prioritize boxes with this estimated character count.
+
+        Returns:
+            tuple: (horizontal_list, free_list) containing text regions
+        """
+        self.setup()
+        return self.reader.detect(
+            image_np,
+            min_size=min_size,
+            text_threshold=text_threshold,
+            low_text=low_text,
+            link_threshold=link_threshold,
+            canvas_size=canvas_size,
+            mag_ratio=mag_ratio,
+            slope_ths=slope_ths,
+            ycenter_ths=ycenter_ths,
+            height_ths=height_ths,
+            width_ths=width_ths,
+            add_margin=add_margin,
+            optimal_num_chars=optimal_num_chars
+        )
+
+    def recognize_from_regions(self, image_np, horizontal_list=None, free_list=None,
+                               decoder='greedy', beam_width=5, batch_size=1,
+                               workers=0, allowlist=None, blocklist=None,
+                               detail=1, paragraph=False, contrast_ths=0.1,
+                               adjust_contrast=0.5):
+        """
+        Recognizes text from previously detected regions.
+
+        Args:
+            image_np (np.ndarray): Input image as a numpy array.
+            horizontal_list (list): List of rectangular regions [x_min, x_max, y_min, y_max].
+            free_list (list): List of free-form regions [[x1,y1],[x2,y2],[x3,y3],[x4,y4]].
+            Other parameters: Same as extract method.
+
+        Returns:
+            list: OCR results for the specified regions
+        """
+        self.setup()
+        return self.reader.recognize(
+            image_np,
+            horizontal_list=horizontal_list,
+            free_list=free_list,
+            decoder=decoder,
+            beamWidth=beam_width,
+            batch_size=batch_size,
+            workers=workers,
+            allowlist=allowlist,
+            blocklist=blocklist,
+            detail=detail,
+            paragraph=paragraph,
+            contrast_ths=contrast_ths,
+            adjust_contrast=adjust_contrast
+        )
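A short usage sketch for the `EasyOCRExtractor` above (assumes `easyocr` and `torch` are installed; the image path is a placeholder):

```python
# Illustrative sketch only; "plate.jpg" is a placeholder path.
import cv2

from matrice_analytics.post_processing.ocr.easyocr_extractor import EasyOCRExtractor

image = cv2.imread("plate.jpg")
extractor = EasyOCRExtractor(lang=["en"])  # the Reader is created lazily by setup()

# Full-image OCR: with detail=1 each result is a dict.
for result in extractor.extract(image, detail=1):
    print(result["text"], result["confidence"], result["bounding_box"])

# OCR restricted to regions of interest ([xmin, ymin, xmax, ymax]); returned
# bounding boxes are shifted back into the original image's coordinates.
roi_results = extractor.extract(image, bboxes=[[50, 30, 400, 120]])

# Two-stage alternative: detect regions once, then recognize from them.
horizontal_list, free_list = extractor.detect_text_regions(image)
```

With `detail=0` the raw EasyOCR output is returned instead (plain strings for the full image, coordinate-adjusted tuples for crops), so downstream code should branch on the `detail` argument.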
matrice_analytics/post_processing/ocr/fast_plate_ocr_py38/cli/cli.py
@@ -0,0 +1,33 @@
+"""
+Main CLI used when training a FastPlateOCR model.
+"""
+
+from __future__ import annotations
+
+try:
+    import click
+
+    from fast_plate_ocr.cli.dataset_stats import dataset_stats
+    from fast_plate_ocr.cli.export import export
+    from fast_plate_ocr.cli.train import train
+    from fast_plate_ocr.cli.valid import valid
+    from fast_plate_ocr.cli.validate_dataset import validate_dataset
+    from fast_plate_ocr.cli.visualize_augmentation import visualize_augmentation
+    from fast_plate_ocr.cli.visualize_predictions import visualize_predictions
+
+except ImportError as e:
+    raise ImportError("Make sure to 'pip install fast-plate-ocr[train]' to run this!") from e
+
+
+@click.group(context_settings={"max_content_width": 120})
+def main_cli():
+    """FastPlateOCR CLI."""
+
+
+main_cli.add_command(dataset_stats)
+main_cli.add_command(export)
+main_cli.add_command(train)
+main_cli.add_command(valid)
+main_cli.add_command(validate_dataset)
+main_cli.add_command(visualize_augmentation)
+main_cli.add_command(visualize_predictions)