nedo_vision_worker_core-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of nedo-vision-worker-core might be problematic.

Files changed (95)
  1. nedo_vision_worker_core/__init__.py +23 -0
  2. nedo_vision_worker_core/ai/FrameDrawer.py +144 -0
  3. nedo_vision_worker_core/ai/ImageDebugger.py +126 -0
  4. nedo_vision_worker_core/ai/VideoDebugger.py +69 -0
  5. nedo_vision_worker_core/ai/__init__.py +1 -0
  6. nedo_vision_worker_core/cli.py +197 -0
  7. nedo_vision_worker_core/config/ConfigurationManager.py +173 -0
  8. nedo_vision_worker_core/config/__init__.py +1 -0
  9. nedo_vision_worker_core/core_service.py +237 -0
  10. nedo_vision_worker_core/database/DatabaseManager.py +236 -0
  11. nedo_vision_worker_core/database/__init__.py +1 -0
  12. nedo_vision_worker_core/detection/BaseDetector.py +22 -0
  13. nedo_vision_worker_core/detection/DetectionManager.py +83 -0
  14. nedo_vision_worker_core/detection/RFDETRDetector.py +62 -0
  15. nedo_vision_worker_core/detection/YOLODetector.py +57 -0
  16. nedo_vision_worker_core/detection/__init__.py +1 -0
  17. nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +29 -0
  18. nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +47 -0
  19. nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +44 -0
  20. nedo_vision_worker_core/detection/detection_processing/__init__.py +1 -0
  21. nedo_vision_worker_core/doctor.py +342 -0
  22. nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
  23. nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
  24. nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
  25. nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
  26. nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
  27. nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
  28. nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
  29. nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
  30. nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
  31. nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
  32. nedo_vision_worker_core/icons/boots-green.png +0 -0
  33. nedo_vision_worker_core/icons/boots-red.png +0 -0
  34. nedo_vision_worker_core/icons/gloves-green.png +0 -0
  35. nedo_vision_worker_core/icons/gloves-red.png +0 -0
  36. nedo_vision_worker_core/icons/goggles-green.png +0 -0
  37. nedo_vision_worker_core/icons/goggles-red.png +0 -0
  38. nedo_vision_worker_core/icons/helmet-green.png +0 -0
  39. nedo_vision_worker_core/icons/helmet-red.png +0 -0
  40. nedo_vision_worker_core/icons/mask-red.png +0 -0
  41. nedo_vision_worker_core/icons/vest-green.png +0 -0
  42. nedo_vision_worker_core/icons/vest-red.png +0 -0
  43. nedo_vision_worker_core/models/__init__.py +20 -0
  44. nedo_vision_worker_core/models/ai_model.py +41 -0
  45. nedo_vision_worker_core/models/auth.py +14 -0
  46. nedo_vision_worker_core/models/config.py +9 -0
  47. nedo_vision_worker_core/models/dataset_source.py +30 -0
  48. nedo_vision_worker_core/models/logs.py +9 -0
  49. nedo_vision_worker_core/models/ppe_detection.py +39 -0
  50. nedo_vision_worker_core/models/ppe_detection_label.py +20 -0
  51. nedo_vision_worker_core/models/restricted_area_violation.py +20 -0
  52. nedo_vision_worker_core/models/user.py +10 -0
  53. nedo_vision_worker_core/models/worker_source.py +19 -0
  54. nedo_vision_worker_core/models/worker_source_pipeline.py +21 -0
  55. nedo_vision_worker_core/models/worker_source_pipeline_config.py +24 -0
  56. nedo_vision_worker_core/models/worker_source_pipeline_debug.py +15 -0
  57. nedo_vision_worker_core/models/worker_source_pipeline_detection.py +14 -0
  58. nedo_vision_worker_core/pipeline/PipelineConfigManager.py +32 -0
  59. nedo_vision_worker_core/pipeline/PipelineManager.py +133 -0
  60. nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +40 -0
  61. nedo_vision_worker_core/pipeline/PipelineProcessor.py +338 -0
  62. nedo_vision_worker_core/pipeline/PipelineSyncThread.py +202 -0
  63. nedo_vision_worker_core/pipeline/__init__.py +1 -0
  64. nedo_vision_worker_core/preprocessing/ImageResizer.py +42 -0
  65. nedo_vision_worker_core/preprocessing/ImageRoi.py +61 -0
  66. nedo_vision_worker_core/preprocessing/Preprocessor.py +16 -0
  67. nedo_vision_worker_core/preprocessing/__init__.py +1 -0
  68. nedo_vision_worker_core/repositories/AIModelRepository.py +31 -0
  69. nedo_vision_worker_core/repositories/PPEDetectionRepository.py +146 -0
  70. nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +90 -0
  71. nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +81 -0
  72. nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +71 -0
  73. nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +79 -0
  74. nedo_vision_worker_core/repositories/WorkerSourceRepository.py +19 -0
  75. nedo_vision_worker_core/repositories/__init__.py +1 -0
  76. nedo_vision_worker_core/streams/RTMPStreamer.py +146 -0
  77. nedo_vision_worker_core/streams/StreamSyncThread.py +66 -0
  78. nedo_vision_worker_core/streams/VideoStream.py +324 -0
  79. nedo_vision_worker_core/streams/VideoStreamManager.py +121 -0
  80. nedo_vision_worker_core/streams/__init__.py +1 -0
  81. nedo_vision_worker_core/tracker/SFSORT.py +325 -0
  82. nedo_vision_worker_core/tracker/TrackerManager.py +163 -0
  83. nedo_vision_worker_core/tracker/__init__.py +1 -0
  84. nedo_vision_worker_core/util/BoundingBoxMetrics.py +53 -0
  85. nedo_vision_worker_core/util/DrawingUtils.py +354 -0
  86. nedo_vision_worker_core/util/ModelReadinessChecker.py +188 -0
  87. nedo_vision_worker_core/util/PersonAttributeMatcher.py +70 -0
  88. nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +45 -0
  89. nedo_vision_worker_core/util/TablePrinter.py +28 -0
  90. nedo_vision_worker_core/util/__init__.py +1 -0
  91. nedo_vision_worker_core-0.2.0.dist-info/METADATA +347 -0
  92. nedo_vision_worker_core-0.2.0.dist-info/RECORD +95 -0
  93. nedo_vision_worker_core-0.2.0.dist-info/WHEEL +5 -0
  94. nedo_vision_worker_core-0.2.0.dist-info/entry_points.txt +2 -0
  95. nedo_vision_worker_core-0.2.0.dist-info/top_level.txt +1 -0
nedo_vision_worker_core/util/DrawingUtils.py
@@ -0,0 +1,354 @@
+ import math
+ import cv2
+ import numpy as np
+ import os
+
+ class DrawingUtils:
+     _color_map = {
+         True: "blue",
+         False: "red",
+         None: "blue"
+     }
+
+
+     _corner_images_by_color = {}
+     _line_images_by_color = {}
+     _inner_frame_image_by_color = {}
+     _corner_half_by_color = {}
+     _line_half_by_color = {}
+     _line_size_by_color = {}
+     _corner_quarter_by_color = {}
+
+     _asset_cache = {}
+     _base_height = 720
+     _scale_weight = 1.3
+     _max_scale = 3.0
+
+     @staticmethod
+     def initialize(assets_path: str):
+         for color_flag, color_name in DrawingUtils._color_map.items():
+             top_left = cv2.imread(os.path.join(assets_path, color_name, "top_left.png"), cv2.IMREAD_UNCHANGED)
+             top_right = cv2.imread(os.path.join(assets_path, color_name, "top_right.png"), cv2.IMREAD_UNCHANGED)
+
+             DrawingUtils._corner_images_by_color[color_flag] = {
+                 'top_left': top_left,
+                 'top_right': top_right,
+                 'bottom_left': cv2.flip(top_right, -1),
+                 'bottom_right': cv2.flip(top_left, -1)
+             }
+
+             line = cv2.imread(os.path.join(assets_path, color_name, "line.png"), cv2.IMREAD_UNCHANGED)
+             DrawingUtils._line_images_by_color[color_flag] = {
+                 'vertical': line,
+                 'horizontal': cv2.rotate(line, cv2.ROTATE_90_CLOCKWISE)
+             }
+
+             inner_frame = cv2.imread(os.path.join(assets_path, color_name, "inner_frame.png"), cv2.IMREAD_UNCHANGED)
+             DrawingUtils._inner_frame_image_by_color[color_flag] = inner_frame
+
+             # Initialize cache for the base resolution
+             DrawingUtils._prepare_scaled_assets(DrawingUtils._base_height, color_flag)
+
+     @staticmethod
+     def _get_scale_factor(height):
+         if height <= DrawingUtils._base_height:
+             return height / DrawingUtils._base_height * DrawingUtils._scale_weight
+
+         base_scale = DrawingUtils._scale_weight
+         height_ratio = height / DrawingUtils._base_height
+         additional_scale = (math.sqrt(height_ratio) - 1) * 0.5
+
+         return min(base_scale + additional_scale, DrawingUtils._max_scale)
+
+     @staticmethod
+     def _prepare_scaled_assets(frame_height, color_flag):
+         """Prepare and cache scaled assets for a specific frame height"""
+         scale_factor = DrawingUtils._get_scale_factor(frame_height)
+         key = DrawingUtils._color_map.get(color_flag)
+
+         if (frame_height, key) in DrawingUtils._asset_cache:
+             return DrawingUtils._asset_cache[(frame_height, key)]
+
+         corner_size = int(80 * scale_factor)
+
+         scaled_corners = {}
+         for position, img in DrawingUtils._corner_images_by_color[color_flag].items():
+             scaled_corners[position] = cv2.resize(img, (corner_size, corner_size))
+
+         line_size = int(15 * scale_factor)
+
+         scaled_lines = {}
+         for direction, img in DrawingUtils._line_images_by_color[color_flag].items():
+             scaled_lines[direction] = cv2.resize(img, (line_size, line_size))
+
+         corner_half = corner_size // 2
+         corner_quarter = corner_size // 4
+         line_half = line_size // 2
+
+         cache_entry = {
+             'corners': scaled_corners,
+             'lines': scaled_lines,
+             'line_size': line_size,
+             'line_half': line_half,
+             'corner_half': corner_half,
+             'corner_quarter': corner_quarter,
+             'scale_factor': scale_factor
+         }
+
+         DrawingUtils._asset_cache[(frame_height, key)] = cache_entry
+         return cache_entry
+
+     @staticmethod
+     def draw_alpha_overlay(dest, src, x, y):
+         if src is None or dest is None:
+             return
+
+         # Get source dimensions
+         src_h, src_w = src.shape[:2]
+
+         # Calculate safe region bounds
+         dest_h, dest_w = dest.shape[:2]
+         y_start = max(y, 0)
+         x_start = max(x, 0)
+         y_end = min(y + src_h, dest_h)
+         x_end = min(x + src_w, dest_w)
+
+         # Calculate source crop coordinates
+         crop_y1 = max(-y, 0)
+         crop_x1 = max(-x, 0)
+         crop_y2 = src_h - max((y + src_h) - dest_h, 0)
+         crop_x2 = src_w - max((x + src_w) - dest_w, 0)
+
+         # Check if there's any valid area to draw
+         if crop_y2 <= crop_y1 or crop_x2 <= crop_x1:
+             return
+
+         # Crop source image to valid region
+         src_cropped = src[crop_y1:crop_y2, crop_x1:crop_x2]
+         roi = dest[y_start:y_end, x_start:x_end]
+
+         if src_cropped.shape[2] == 4:
+             # Split source into color and alpha channels
+             src_bgr = src_cropped[:, :, :3]
+             alpha = src_cropped[:, :, 3:] / 255.0
+
+             # Blend with ROI using alpha
+             if roi.shape[:2] == src_bgr.shape[:2]:
+                 blended = (src_bgr * alpha) + (roi[:, :, :3] * (1 - alpha))
+                 roi[:, :, :3] = blended.astype(np.uint8)
+         else:
+             if roi.shape == src_cropped.shape:
+                 roi[:] = src_cropped
+
+     @staticmethod
+     def draw_bbox_info(frame, bbox, color_data, title, subtitle, suffix):
+         color, flag = color_data
+         x1, y1, x2, y2 = map(int, bbox)
+
+         frame_height = frame.shape[0]
+         scale_factor = DrawingUtils._get_scale_factor(frame_height)
+
+         font = cv2.FONT_HERSHEY_SIMPLEX
+         font_scale = 0.25 * scale_factor
+         title_scale = 0.4 * scale_factor
+
+         # Colors
+         text_color = (255, 255, 255)
+
+         # Measure text sizes
+         (_, title_h), _ = cv2.getTextSize(title, font, title_scale, 1)
+         (_, subtitle_h), _ = cv2.getTextSize(subtitle, font, font_scale, 1)
+         (suffix_w, _), _ = cv2.getTextSize(suffix, font, font_scale, 1)
+
+         padding = int(15 * scale_factor)
+         line_spacing = int(7 * scale_factor)
+
+         subtitle_y = y2 - padding
+         title_y = subtitle_y - subtitle_h - line_spacing
+         block_height = title_h + subtitle_h + line_spacing + 2 * padding
+
+         alpha_start = 0.6
+         alpha_end = 0
+
+         for i in range(block_height):
+             relative_pos = i / block_height
+             alpha = alpha_start + (alpha_end - alpha_start) * (1 - relative_pos)
+
+             y_pos = y2 - block_height + i
+             if y_pos < 0 or y_pos >= frame.shape[0]:
+                 continue
+
+             original_row = frame[y_pos, x1:x2].astype(np.float32)
+             blended_row = (1 - alpha) * original_row + alpha * np.array(color, dtype=np.float32)
+             frame[y_pos, x1:x2] = blended_row.astype(np.uint8)
+
+         # Draw texts
+         cv2.putText(frame, title, (x1 + padding, title_y), font, title_scale, text_color, 2, cv2.LINE_AA)
+         cv2.putText(frame, subtitle, (x1 + padding, subtitle_y), font, font_scale, text_color, 1, cv2.LINE_AA)
+         cv2.putText(frame, suffix, (x2 - suffix_w - padding, subtitle_y), font, font_scale, text_color, 1, cv2.LINE_AA)
+
+     @staticmethod
+     def draw_corner_line(frame, bbox, color, thickness=1):
+         x1, y1, x2, y2 = map(int, bbox)
+
+         # Scale the corner length based on frame height
+         frame_height = frame.shape[0]
+         scale_factor = DrawingUtils._get_scale_factor(frame_height)
+
+         # Scale thickness
+         scaled_thickness = max(1, int(thickness * scale_factor))
+
+         corner_length = min(x2 - x1, y2 - y1) // 6
+
+         cv2.line(frame, (x1, y1), (x1 + corner_length, y1), color, scaled_thickness)
+         cv2.line(frame, (x1, y1), (x1, y1 + corner_length), color, scaled_thickness)
+
+         cv2.line(frame, (x2, y1), (x2 - corner_length, y1), color, scaled_thickness)
+         cv2.line(frame, (x2, y1), (x2, y1 + corner_length), color, scaled_thickness)
+
+         cv2.line(frame, (x1, y2), (x1 + corner_length, y2), color, scaled_thickness)
+         cv2.line(frame, (x1, y2), (x1, y2 - corner_length), color, scaled_thickness)
+
+         cv2.line(frame, (x2, y2), (x2 - corner_length, y2), color, scaled_thickness)
+         cv2.line(frame, (x2, y2), (x2, y2 - corner_length), color, scaled_thickness)
+
+     @staticmethod
+     def draw_main_bbox(frame, bbox, color_data):
+         color, flag = color_data
+
+         x1, y1, x2, y2 = map(int, bbox)
+         h, w = y2 - y1, x2 - x1
+
+         frame_height = frame.shape[0]
+
+         assets_data = DrawingUtils._prepare_scaled_assets(frame_height, flag)
+         if not assets_data:
+             return frame
+
+         assets = assets_data['corners']
+         lines = assets_data['lines']
+         line_size = assets_data['line_size']
+         half_line = assets_data['line_half']
+         half_corner = assets_data['corner_half']
+         quarter_corner = assets_data['corner_quarter']
+         scale_factor = assets_data['scale_factor']
+
+         # Define minimum size threshold for corners (scaled)
+         min_size_threshold = int(half_corner * scale_factor)
+
+         if h < min_size_threshold or w < min_size_threshold:
+             half_corner = 0
+             quarter_corner = 0
+
+         # Draw vertical lines (optimized)
+         vertical_line = lines['vertical']
+         if vertical_line is not None:
+             line_h = h - half_corner
+             if line_h > 0:
+                 line = cv2.resize(vertical_line, (line_size, line_h))
+                 DrawingUtils.draw_alpha_overlay(frame, line, x1 - half_line, y1 + quarter_corner)
+                 DrawingUtils.draw_alpha_overlay(frame, line, x2 - half_line, y1 + quarter_corner)
+
+         # Draw horizontal lines (optimized)
+         horizontal_line = lines['horizontal']
+         if horizontal_line is not None:
+             line_w = w - half_corner
+             if line_w > 0:
+                 line = cv2.resize(horizontal_line, (line_w, line_size))
+                 DrawingUtils.draw_alpha_overlay(frame, line, x1 + quarter_corner, y1 - half_line)
+                 DrawingUtils.draw_alpha_overlay(frame, line, x1 + quarter_corner, y2 - half_line)
+
+         if h < min_size_threshold or w < min_size_threshold:
+             # For small bounding boxes, just use lines
+             scaled_thickness = max(1, int(2 * scale_factor))
+             DrawingUtils.draw_corner_line(frame, bbox, color, scaled_thickness)
+         else:
+             # Draw corners
+             DrawingUtils.draw_alpha_overlay(frame, assets['top_left'], x1 - half_corner, y1 - half_corner)
+             DrawingUtils.draw_alpha_overlay(frame, assets['top_right'], x2 - half_corner, y1 - half_corner)
+             DrawingUtils.draw_alpha_overlay(frame, assets['bottom_left'], x1 - half_corner, y2 - half_corner)
+             DrawingUtils.draw_alpha_overlay(frame, assets['bottom_right'], x2 - half_corner, y2 - half_corner)
+
+         return frame
+
+     @staticmethod
+     def draw_inner_box(frame, bbox, color_data, thickness=1):
+         color, flag = color_data
+
+         if flag is None:
+             return frame
+
+         x1, y1, x2, y2 = map(int, bbox)
+         h, w = y2 - y1, x2 - x1
+
+         frame_height = frame.shape[0]
+         scale_factor = DrawingUtils._get_scale_factor(frame_height)
+
+         # Scale thickness
+         scaled_thickness = max(1, int(thickness * scale_factor))
+
+         # Get original texture
+         texture = DrawingUtils._inner_frame_image_by_color.get(flag)
+
+         if texture is None:
+             return frame
+
+         # Draw texture with alpha blending
+         if texture.shape[2] == 4:
+             resized_texture = cv2.resize(texture, (w, h))
+             DrawingUtils.draw_alpha_overlay(frame, resized_texture, x1, y1)
+         else:
+             frame[y1:y2, x1:x2] = cv2.resize(texture, (w, h))
+
+         DrawingUtils.draw_corner_line(frame, bbox, color, scaled_thickness)
+
+         return frame
+
+     @staticmethod
+     def crop_with_bounding_box(frame, obj, target_height=512, buffer=30):
+         img_h, img_w = frame.shape[:2]
+         x1, y1, x2, y2 = map(int, obj["bbox"])
+
+         current_height = y2 - y1
+         target_height = target_height if target_height else current_height + buffer * 2
+         scale = (target_height - buffer * 2) / current_height
+
+         # Calculate buffer based on edge proximity
+         scaled_buffer = int(buffer / scale)
+
+         # Check if bounding box is at the edges
+         at_left_edge = x1 <= scaled_buffer
+         at_right_edge = x2 >= img_w - scaled_buffer
+         at_top_edge = y1 <= scaled_buffer
+         at_bottom_edge = y2 >= img_h - scaled_buffer
+
+         # Apply buffer only if not at edges
+         crop_x1 = max(x1 - (0 if at_left_edge else scaled_buffer), 0)
+         crop_y1 = max(y1 - (0 if at_top_edge else scaled_buffer), 0)
+         crop_x2 = min(x2 + (0 if at_right_edge else scaled_buffer), img_w)
+         crop_y2 = min(y2 + (0 if at_bottom_edge else scaled_buffer), img_h)
+
+         cropped = frame[crop_y1:crop_y2, crop_x1:crop_x2]
+         _, cropped_w = cropped.shape[:2]
+
+         target_width = int(cropped_w * scale)
+         final_img = cv2.resize(cropped, (target_width, target_height), interpolation=cv2.INTER_AREA)
+
+         def transform_bbox(bbox):
+             x1, y1, x2, y2 = bbox
+
+             nx1 = int((x1 - crop_x1) * scale)
+             ny1 = int((y1 - crop_y1) * scale)
+             nx2 = int((x2 - crop_x1) * scale)
+             ny2 = int((y2 - crop_y1) * scale)
+             return (nx1, ny1, nx2, ny2)
+
+         obj = obj.copy()
+         obj["bbox"] = transform_bbox(obj["bbox"])
+
+         for attr in obj.get("attributes", []):
+             if "bbox" in attr:
+                 attr["bbox"] = transform_bbox(attr["bbox"])
+
+         return final_img, obj
+
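For orientation, here is a minimal usage sketch for DrawingUtils (not taken from the package). It assumes the blue/ and red/ texture sets shipped under drawing_assets/ are reachable at the path shown; the frame, bbox, and (color, flag) values are illustrative.

    import cv2
    import numpy as np
    from nedo_vision_worker_core.util.DrawingUtils import DrawingUtils

    # Assumed asset location: the wheel ships blue/ and red/ texture sets
    # under nedo_vision_worker_core/drawing_assets/.
    DrawingUtils.initialize("nedo_vision_worker_core/drawing_assets")

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in BGR frame

    # color_data is a (BGR color, flag) pair; the flag selects the blue
    # (True/None) or red (False) asset set via _color_map.
    bbox = (100, 80, 420, 600)
    frame = DrawingUtils.draw_main_bbox(frame, bbox, ((255, 128, 0), True))
    DrawingUtils.draw_bbox_info(frame, bbox, ((255, 128, 0), True),
                                "person", "helmet, vest", "0.92")
    cv2.imwrite("annotated.jpg", frame)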
nedo_vision_worker_core/util/ModelReadinessChecker.py
@@ -0,0 +1,188 @@
+ import logging
+ from typing import Dict, Any
+ from ..models.ai_model import AIModelEntity
+ from ..database.DatabaseManager import DatabaseManager
+
+ logger = logging.getLogger(__name__)
+
+
+ class ModelReadinessChecker:
+     """Utility class to check model readiness and provide detailed status information."""
+
+     @staticmethod
+     def check_model_readiness(model: AIModelEntity) -> Dict[str, Any]:
+         """
+         Comprehensive check of model readiness.
+
+         Args:
+             model: The AI model entity to check
+
+         Returns:
+             Dictionary with readiness status and details
+         """
+         if not model:
+             return {
+                 "ready": False,
+                 "reason": "No model provided",
+                 "status": "none"
+             }
+
+         # Check download status
+         if not model.is_ready_for_use():
+             if model.is_downloading():
+                 return {
+                     "ready": False,
+                     "reason": f"Model {model.name} is still downloading",
+                     "status": model.download_status,
+                     "model_name": model.name
+                 }
+             elif model.has_download_failed():
+                 return {
+                     "ready": False,
+                     "reason": f"Model {model.name} download failed: {model.download_error}",
+                     "status": model.download_status,
+                     "model_name": model.name,
+                     "error": model.download_error
+                 }
+             else:
+                 return {
+                     "ready": False,
+                     "reason": f"Model {model.name} is not ready (status: {model.download_status})",
+                     "status": model.download_status,
+                     "model_name": model.name
+                 }
+
+         # Check if file exists and has content
+         file_path = DatabaseManager.STORAGE_PATHS["models"] / model.file
+         if not file_path.exists():
+             return {
+                 "ready": False,
+                 "reason": f"Model file not found: {file_path}",
+                 "status": model.download_status,
+                 "model_name": model.name,
+                 "file_path": str(file_path)
+             }
+
+         if file_path.stat().st_size == 0:
+             return {
+                 "ready": False,
+                 "reason": f"Model file is empty: {file_path}",
+                 "status": model.download_status,
+                 "model_name": model.name,
+                 "file_path": str(file_path)
+             }
+
+         return {
+             "ready": True,
+             "reason": "Model is ready for use",
+             "status": model.download_status,
+             "model_name": model.name,
+             "file_path": str(file_path),
+             "file_size": file_path.stat().st_size
+         }
+
+     @staticmethod
+     def log_model_status(model: AIModelEntity, context: str = ""):
+         """
+         Log detailed model status information.
+
+         Args:
+             model: The AI model entity to check
+             context: Additional context for the log message
+         """
+         if not model:
+             logger.warning(f"⚠️ {context}: No model provided")
+             return
+
+         readiness = ModelReadinessChecker.check_model_readiness(model)
+
+         if readiness["ready"]:
+             logger.info(f"✅ {context}: Model {model.name} is ready (size: {readiness.get('file_size', 0)} bytes)")
+         else:
+             if readiness["status"] == "downloading":
+                 logger.warning(f"⏳ {context}: {readiness['reason']}")
+             elif readiness["status"] == "failed":
+                 logger.error(f"❌ {context}: {readiness['reason']}")
+             else:
+                 logger.warning(f"⚠️ {context}: {readiness['reason']}")
+
+     @staticmethod
+     def get_models_status(models: list) -> Dict[str, Any]:
+         """
+         Get status summary for multiple models.
+
+         Args:
+             models: List of AI model entities
+
+         Returns:
+             Dictionary with status summary
+         """
+         if not models:
+             return {
+                 "total": 0,
+                 "ready": 0,
+                 "downloading": 0,
+                 "failed": 0,
+                 "other": 0
+             }
+
+         summary = {
+             "total": len(models),
+             "ready": 0,
+             "downloading": 0,
+             "failed": 0,
+             "other": 0,
+             "details": []
+         }
+
+         for model in models:
+             readiness = ModelReadinessChecker.check_model_readiness(model)
+             summary["details"].append({
+                 "model_name": model.name,
+                 "model_id": model.id,
+                 "status": readiness["status"],
+                 "ready": readiness["ready"],
+                 "reason": readiness["reason"]
+             })
+
+             if readiness["ready"]:
+                 summary["ready"] += 1
+             elif readiness["status"] == "downloading":
+                 summary["downloading"] += 1
+             elif readiness["status"] == "failed":
+                 summary["failed"] += 1
+             else:
+                 summary["other"] += 1
+
+         return summary
+
+     @staticmethod
+     def log_models_summary(models: list, context: str = ""):
+         """
+         Log summary of multiple models status.
+
+         Args:
+             models: List of AI model entities
+             context: Additional context for the log message
+         """
+         summary = ModelReadinessChecker.get_models_status(models)
+
+         if summary["total"] == 0:
+             logger.info(f"📊 {context}: No models available")
+             return
+
+         logger.info(f"📊 {context}: Models status - "
+                     f"Ready: {summary['ready']}/{summary['total']}, "
+                     f"Downloading: {summary['downloading']}, "
+                     f"Failed: {summary['failed']}, "
+                     f"Other: {summary['other']}")
+
+         # Log details for non-ready models
+         for detail in summary["details"]:
+             if not detail["ready"]:
+                 if detail["status"] == "downloading":
+                     logger.warning(f"⏳ {context}: {detail['reason']}")
+                 elif detail["status"] == "failed":
+                     logger.error(f"❌ {context}: {detail['reason']}")
+                 else:
+                     logger.warning(f"⚠️ {context}: {detail['reason']}")
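A sketch of how ModelReadinessChecker might be driven. The None branch is exercised directly; the summary call assumes `models` is a list of AIModelEntity rows obtained elsewhere (e.g. from the package's AIModelRepository), which this snippet does not construct.

    import logging
    from nedo_vision_worker_core.util.ModelReadinessChecker import ModelReadinessChecker

    logging.basicConfig(level=logging.INFO)

    # With no model, the checker returns a structured "not ready" verdict.
    verdict = ModelReadinessChecker.check_model_readiness(None)
    print(verdict["ready"], "-", verdict["reason"])  # False - No model provided

    # Assumed: `models` is a list of AIModelEntity instances loaded from the
    # database layer; an empty list just logs "No models available".
    models = []
    ModelReadinessChecker.log_models_summary(models, context="startup")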
nedo_vision_worker_core/util/PersonAttributeMatcher.py
@@ -0,0 +1,70 @@
+ from .BoundingBoxMetrics import BoundingBoxMetrics
+
+ EXCLUSIVE_LABEL_GROUPS = [
+     ("helmet", "no_helmet"),
+     ("vest", "no_vest"),
+     ("gloves", "no_gloves"),
+     ("goggles", "no_goggles"),
+     ("boots", "no_boots"),
+ ]
+
+ MULTI_INSTANCE_CLASSES = ["boots", "gloves", "goggles", "no_gloves"]
+
+ NEGATIVE_CLASSES = ["no_helmet", "no_vest", "no_goggles", "no_boots"]
+
+ class PersonAttributeMatcher:
+     @staticmethod
+     def match_persons_with_attributes(persons, attributes, coverage_threshold=0.2):
+         matched_results = []
+
+         exclusive_groups = []
+         for group in EXCLUSIVE_LABEL_GROUPS:
+             exclusive_groups.append(set(group))
+         exclusive_labels = set(l for group in EXCLUSIVE_LABEL_GROUPS for l in group)
+
+         for person in persons:
+             person_bbox = person["bbox"]
+             matched_attributes = []
+             for attr in attributes:
+                 attr_bbox = attr["bbox"]
+                 coverage = BoundingBoxMetrics.compute_coverage(person_bbox, attr_bbox)
+                 if coverage >= coverage_threshold:
+                     matched_attributes.append({
+                         "label": attr["label"],
+                         "confidence": attr["confidence"],
+                         "coverage": round(coverage, 2),
+                         "bbox": attr_bbox
+                     })
+
+             filtered_attributes = []
+
+             for group in exclusive_groups:
+                 group_attrs = [a for a in matched_attributes if a["label"] in group]
+                 if group_attrs:
+                     best = max(group_attrs, key=lambda a: a["confidence"])
+                     filtered_attributes.append(best)
+
+             for attr in matched_attributes:
+                 label = attr["label"]
+                 is_in_exclusive_group = any(label in group for group in exclusive_groups)
+                 if is_in_exclusive_group:
+                     continue
+                 if label in MULTI_INSTANCE_CLASSES:
+                     filtered_attributes.append(attr)
+
+             for attr in matched_attributes:
+                 label = attr["label"]
+                 if label in MULTI_INSTANCE_CLASSES and label not in NEGATIVE_CLASSES:
+                     already_added = any(
+                         a["label"] == label and list(a["bbox"]) == list(attr["bbox"])
+                         for a in filtered_attributes
+                     )
+                     if not already_added:
+                         filtered_attributes.append(attr)
+             matched_results.append({
+                 "label": "person",
+                 "confidence": person["confidence"],
+                 "bbox": person_bbox,
+                 "attributes": filtered_attributes
+             })
+         return matched_results
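A small, self-contained example of the matching rules above (the detection values are illustrative). Both helmet detections fall inside the person box, so they compete within the ("helmet", "no_helmet") exclusive group and the higher-confidence one wins.

    from nedo_vision_worker_core.util.PersonAttributeMatcher import PersonAttributeMatcher

    # Detections follow the {label, confidence, bbox} shape used above.
    persons = [{"label": "person", "confidence": 0.95, "bbox": (50, 40, 300, 700)}]
    attributes = [
        {"label": "helmet",    "confidence": 0.90, "bbox": (120, 45, 220, 140)},
        {"label": "no_helmet", "confidence": 0.30, "bbox": (118, 50, 215, 150)},
    ]

    matched = PersonAttributeMatcher.match_persons_with_attributes(
        persons, attributes, coverage_threshold=0.2
    )
    # Only "helmet" survives its exclusive group.
    print([a["label"] for a in matched[0]["attributes"]])  # ['helmet']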
nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py
@@ -0,0 +1,45 @@
+ from typing import List
+ from shapely.geometry import Polygon, Point
+
+ class PersonRestrictedAreaMatcher:
+     """Matches detected persons with restricted areas defined as polygons."""
+
+     @staticmethod
+     def match_persons_with_restricted_areas(persons, restricted_areas: List[Polygon]):
+         """
+         Correlates detected persons with restricted areas defined as polygons,
+         using the center point of each bounding box for the test.
+
+         Args:
+             persons (list): List of person detections (each detection has 'bbox').
+             restricted_areas (list): List of restricted areas, each defined as a polygon with coordinates.
+
+         Returns:
+             list: List of persons with matched restricted areas they've entered.
+         """
+         matched_results = []
+
+         for person in persons:
+             person_bbox = person["bbox"]
+
+             # Calculate center point of person's bounding box
+             # bbox format is typically [x_min, y_min, x_max, y_max]
+             center_x = (person_bbox[0] + person_bbox[2]) / 2
+             center_y = (person_bbox[1] + person_bbox[3]) / 2
+             center_point = Point(center_x, center_y)
+
+             in_restricted = any(area.contains(center_point) for area in restricted_areas)
+
+             attributes = [{
+                 "label": "in_restricted_area",
+                 "confidence": 1.0,
+             }] if in_restricted else []
+
+             matched_results.append({
+                 "label": "person",
+                 "confidence": person["confidence"],
+                 "bbox": person_bbox,
+                 "attributes": attributes
+             })
+
+         return matched_results
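A minimal sketch of the center-point test (the polygon and detection are illustrative): the bbox center (150, 200) lies inside the zone, so the person is flagged.

    from shapely.geometry import Polygon
    from nedo_vision_worker_core.util.PersonRestrictedAreaMatcher import PersonRestrictedAreaMatcher

    # Restricted zone in pixel coordinates (illustrative).
    zone = Polygon([(0, 0), (400, 0), (400, 400), (0, 400)])
    persons = [{"label": "person", "confidence": 0.9, "bbox": (100, 100, 200, 300)}]

    results = PersonRestrictedAreaMatcher.match_persons_with_restricted_areas(persons, [zone])
    print(results[0]["attributes"])  # [{'label': 'in_restricted_area', 'confidence': 1.0}]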
nedo_vision_worker_core/util/TablePrinter.py
@@ -0,0 +1,28 @@
+ from tabulate import tabulate
+
+ class TablePrinter:
+     """Utility class for printing structured data in a tabular format."""
+
+     @staticmethod
+     def print_table(data, headers=None, title="Data Table", table_format="fancy_grid"):
+         """
+         Prints a list of dictionaries or lists in a formatted table.
+
+         Args:
+             data (list): A list of dictionaries or lists containing the data.
+             headers (list, optional): Column headers (if None, inferred from dict keys).
+             title (str, optional): Title for the table (default: "Data Table").
+             table_format (str, optional): Tabulate format (default: "fancy_grid").
+         """
+         if not data:
+             print(f"\n{title}: No data available.\n")
+             return
+
+         # If data is a list of dictionaries, extract headers automatically
+         if isinstance(data[0], dict):
+             if headers is None:
+                 headers = list(data[0].keys())
+             data = [[row.get(header, "") for header in headers] for row in data]
+
+         print(f"\n{title}")
+         print(tabulate(data, headers=headers, tablefmt=table_format))
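Usage sketch (the rows are illustrative). With dict rows, headers are inferred from the first row's keys; tabulate must be installed, as the import above requires.

    from nedo_vision_worker_core.util.TablePrinter import TablePrinter

    rows = [
        {"pipeline": "ppe", "status": "running", "fps": 24.8},
        {"pipeline": "restricted_area", "status": "stopped", "fps": 0.0},
    ]
    TablePrinter.print_table(rows, title="Pipelines")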
nedo_vision_worker_core/util/__init__.py
@@ -0,0 +1 @@
+