nedo-vision-worker-core 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nedo-vision-worker-core might be problematic. Click here for more details.
- nedo_vision_worker_core/__init__.py +23 -0
- nedo_vision_worker_core/ai/FrameDrawer.py +144 -0
- nedo_vision_worker_core/ai/ImageDebugger.py +126 -0
- nedo_vision_worker_core/ai/VideoDebugger.py +69 -0
- nedo_vision_worker_core/ai/__init__.py +1 -0
- nedo_vision_worker_core/cli.py +197 -0
- nedo_vision_worker_core/config/ConfigurationManager.py +173 -0
- nedo_vision_worker_core/config/__init__.py +1 -0
- nedo_vision_worker_core/core_service.py +237 -0
- nedo_vision_worker_core/database/DatabaseManager.py +236 -0
- nedo_vision_worker_core/database/__init__.py +1 -0
- nedo_vision_worker_core/detection/BaseDetector.py +22 -0
- nedo_vision_worker_core/detection/DetectionManager.py +83 -0
- nedo_vision_worker_core/detection/RFDETRDetector.py +62 -0
- nedo_vision_worker_core/detection/YOLODetector.py +57 -0
- nedo_vision_worker_core/detection/__init__.py +1 -0
- nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +29 -0
- nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +47 -0
- nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +44 -0
- nedo_vision_worker_core/detection/detection_processing/__init__.py +1 -0
- nedo_vision_worker_core/doctor.py +342 -0
- nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
- nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
- nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
- nedo_vision_worker_core/icons/boots-green.png +0 -0
- nedo_vision_worker_core/icons/boots-red.png +0 -0
- nedo_vision_worker_core/icons/gloves-green.png +0 -0
- nedo_vision_worker_core/icons/gloves-red.png +0 -0
- nedo_vision_worker_core/icons/goggles-green.png +0 -0
- nedo_vision_worker_core/icons/goggles-red.png +0 -0
- nedo_vision_worker_core/icons/helmet-green.png +0 -0
- nedo_vision_worker_core/icons/helmet-red.png +0 -0
- nedo_vision_worker_core/icons/mask-red.png +0 -0
- nedo_vision_worker_core/icons/vest-green.png +0 -0
- nedo_vision_worker_core/icons/vest-red.png +0 -0
- nedo_vision_worker_core/models/__init__.py +20 -0
- nedo_vision_worker_core/models/ai_model.py +41 -0
- nedo_vision_worker_core/models/auth.py +14 -0
- nedo_vision_worker_core/models/config.py +9 -0
- nedo_vision_worker_core/models/dataset_source.py +30 -0
- nedo_vision_worker_core/models/logs.py +9 -0
- nedo_vision_worker_core/models/ppe_detection.py +39 -0
- nedo_vision_worker_core/models/ppe_detection_label.py +20 -0
- nedo_vision_worker_core/models/restricted_area_violation.py +20 -0
- nedo_vision_worker_core/models/user.py +10 -0
- nedo_vision_worker_core/models/worker_source.py +19 -0
- nedo_vision_worker_core/models/worker_source_pipeline.py +21 -0
- nedo_vision_worker_core/models/worker_source_pipeline_config.py +24 -0
- nedo_vision_worker_core/models/worker_source_pipeline_debug.py +15 -0
- nedo_vision_worker_core/models/worker_source_pipeline_detection.py +14 -0
- nedo_vision_worker_core/pipeline/PipelineConfigManager.py +32 -0
- nedo_vision_worker_core/pipeline/PipelineManager.py +133 -0
- nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +40 -0
- nedo_vision_worker_core/pipeline/PipelineProcessor.py +338 -0
- nedo_vision_worker_core/pipeline/PipelineSyncThread.py +202 -0
- nedo_vision_worker_core/pipeline/__init__.py +1 -0
- nedo_vision_worker_core/preprocessing/ImageResizer.py +42 -0
- nedo_vision_worker_core/preprocessing/ImageRoi.py +61 -0
- nedo_vision_worker_core/preprocessing/Preprocessor.py +16 -0
- nedo_vision_worker_core/preprocessing/__init__.py +1 -0
- nedo_vision_worker_core/repositories/AIModelRepository.py +31 -0
- nedo_vision_worker_core/repositories/PPEDetectionRepository.py +146 -0
- nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +90 -0
- nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +81 -0
- nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +71 -0
- nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +79 -0
- nedo_vision_worker_core/repositories/WorkerSourceRepository.py +19 -0
- nedo_vision_worker_core/repositories/__init__.py +1 -0
- nedo_vision_worker_core/streams/RTMPStreamer.py +146 -0
- nedo_vision_worker_core/streams/StreamSyncThread.py +66 -0
- nedo_vision_worker_core/streams/VideoStream.py +324 -0
- nedo_vision_worker_core/streams/VideoStreamManager.py +121 -0
- nedo_vision_worker_core/streams/__init__.py +1 -0
- nedo_vision_worker_core/tracker/SFSORT.py +325 -0
- nedo_vision_worker_core/tracker/TrackerManager.py +163 -0
- nedo_vision_worker_core/tracker/__init__.py +1 -0
- nedo_vision_worker_core/util/BoundingBoxMetrics.py +53 -0
- nedo_vision_worker_core/util/DrawingUtils.py +354 -0
- nedo_vision_worker_core/util/ModelReadinessChecker.py +188 -0
- nedo_vision_worker_core/util/PersonAttributeMatcher.py +70 -0
- nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +45 -0
- nedo_vision_worker_core/util/TablePrinter.py +28 -0
- nedo_vision_worker_core/util/__init__.py +1 -0
- nedo_vision_worker_core-0.2.0.dist-info/METADATA +347 -0
- nedo_vision_worker_core-0.2.0.dist-info/RECORD +95 -0
- nedo_vision_worker_core-0.2.0.dist-info/WHEEL +5 -0
- nedo_vision_worker_core-0.2.0.dist-info/entry_points.txt +2 -0
- nedo_vision_worker_core-0.2.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import time
|
|
3
|
+
import threading
|
|
4
|
+
import queue
|
|
5
|
+
from ..detection.detection_processing.HumanDetectionProcessor import HumanDetectionProcessor
|
|
6
|
+
from ..detection.detection_processing.PPEDetectionProcessor import PPEDetectionProcessor
|
|
7
|
+
from .PipelineConfigManager import PipelineConfigManager
|
|
8
|
+
from .PipelinePrepocessor import PipelinePrepocessor
|
|
9
|
+
from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
|
|
10
|
+
from ..repositories.WorkerSourcePipelineDetectionRepository import WorkerSourcePipelineDetectionRepository
|
|
11
|
+
from ..streams.VideoStreamManager import VideoStreamManager
|
|
12
|
+
from ..ai.VideoDebugger import VideoDebugger
|
|
13
|
+
from ..ai.FrameDrawer import FrameDrawer
|
|
14
|
+
from ..tracker.TrackerManager import TrackerManager
|
|
15
|
+
from ..detection.DetectionManager import DetectionManager
|
|
16
|
+
from ..streams.RTMPStreamer import RTMPStreamer
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class PipelineProcessor:
    """Handles pipeline processing including preprocessing, AI model inference, tracking, and video stream processing."""

    def __init__(self, pipeline_id, worker_source_id, model, enable_visualization=True):
        """
        Initializes the PipelineProcessor.

        :param pipeline_id: Identifier of the worker-source pipeline this processor serves.
        :param worker_source_id: Identifier of the video source frames are pulled from.
        :param model: The model to use for inference.
        :param enable_visualization: Flag to enable visualization (local debug windows).
        """
        self.running = True
        self.video_debugger = VideoDebugger(enable_visualization)
        self.tracker_manager = TrackerManager()
        self.detection_manager = DetectionManager(model)
        self.config_manager = PipelineConfigManager()
        self.preprocessor = PipelinePrepocessor()
        # Active detection processor (PPE or human); None until config enables one.
        self.detection_processor = None
        # Minimum detection confidence; may be overridden per-feature in _update_config().
        self.threshold = 0.7

        # maxsize=1: the render loop only ever hands the newest frame to the
        # detection thread; older frames are dropped rather than queued up.
        self.frame_queue = queue.Queue(maxsize=1)
        # Last tracking result, written by the detection thread and read by the
        # render loop for drawing (shared across threads without a lock).
        self.tracked_objects_render = []
        self.detection_thread = None
        self.frame_counter = 0
        self.frame_drawer = FrameDrawer()
        self.pipeline_id = pipeline_id
        self.worker_source_id = worker_source_id

        self.rtmp_streamer = None

        # Order matters: the first enabled feature in this list wins in
        # _get_detection_processor_code().
        self.detection_processor_codes = [
            PPEDetectionProcessor.code,
            HumanDetectionProcessor.code,
        ]

        # One-shot flag: when set (via enable_debug), the next rendered frame is
        # also run through detection and persisted as a debug entry.
        self.debug_flag = False
        self.debug_repo = WorkerSourcePipelineDebugRepository()
        self.detection_repo = WorkerSourcePipelineDetectionRepository()

    def _get_detection_processor_code(self):
        """Return the code of the first enabled detection feature, or None."""
        for code in self.detection_processor_codes:
            if self.config_manager.is_feature_enabled(code):
                return code

        return None

    def _get_detection_processor(self, code):
        """Instantiate the detection processor matching `code` (None if unknown)."""
        if code == PPEDetectionProcessor.code:
            return PPEDetectionProcessor()
        elif code == HumanDetectionProcessor.code:
            return HumanDetectionProcessor()
        else:
            return None

    def _update_detection_processor(self):
        """Swap in a new detection processor when the enabled feature changes."""
        code = self._get_detection_processor_code()

        # Already running the right processor -> nothing to do.
        if self.detection_processor and self.detection_processor.code == code:
            return

        self.detection_processor = self._get_detection_processor(code)
        if self.detection_processor:
            # Keep the drawer and tracker in sync with the processor's label set.
            self.frame_drawer.update_config(
                icons=self.detection_processor.icons,
                violation_labels=self.detection_processor.violation_labels,
                compliance_labels=self.detection_processor.compliance_labels,
            )
            self.tracker_manager.attribute_labels = self.detection_processor.labels
            self.tracker_manager.exclusive_attribute_groups = self.detection_processor.exclusive_labels

    def _update_config(self):
        """Re-read pipeline configuration and propagate it to all components."""
        self.config_manager.update(self.pipeline_id)
        self.preprocessor.update(self.config_manager)
        self.detection_interval = self._get_detection_interval()
        self._update_detection_processor()

        if self.detection_processor:
            config = self.config_manager.get_feature_config(self.detection_processor.code)
            self.detection_processor.update(self.config_manager)
            self.threshold = config.get("minimumDetectionConfidence", 0.7)

            if self.detection_processor.code == HumanDetectionProcessor.code:
                # Restricted areas are drawn as red polygons (BGR (0, 0, 255)).
                self.frame_drawer.polygons = [((0, 0, 255), p) for p in self.detection_processor.restricted_areas]
        else:
            # No feature enabled: reset drawer/tracker state to defaults.
            self.threshold = 0.7
            self.frame_drawer.update_config()
            self.tracker_manager.attribute_labels = []
            self.tracker_manager.exclusive_attribute_groups = []

    def process_pipeline(self, video_manager: VideoStreamManager):
        """
        Runs the full pipeline processing including preprocessing, detection and tracking.
        """
        pipeline_id = self.pipeline_id
        worker_source_id = self.worker_source_id

        logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")

        self._update_config()

        # Abort early if the source never produces a frame (see _wait_for_frame).
        initial_frame = self._wait_for_frame(video_manager)
        if initial_frame is None:
            logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
            return

        self.rtmp_streamer = RTMPStreamer(pipeline_id)

        # Start detection in a separate thread
        self.detection_thread = threading.Thread(
            target=self._detection_worker,
            name=f"detection-{pipeline_id}"
        )
        self.detection_thread.daemon = True
        self.detection_thread.start()

        try:
            while self.running:
                frame = video_manager.get_frame(worker_source_id)

                if frame is None:
                    logging.warning(f"⚠️ No frame available for {worker_source_id}. Retrying...")
                    # Check if stream was removed
                    if not video_manager.has_stream(worker_source_id):
                        logging.info(f"🛑 Stream {worker_source_id} was removed, stopping pipeline")
                        break
                    time.sleep(0.01)
                    continue

                self.frame_counter += 1

                # Draw overlays using the latest tracking result from the
                # detection thread; trails cover roughly the last 2 seconds.
                self.frame_drawer.draw_polygons(frame)
                drawn_frame = self.frame_drawer.draw_frame(
                    frame.copy(),
                    self.tracked_objects_render,
                    with_trails=True,
                    trail_length=int(2 / self.detection_interval)
                )

                if self.debug_flag:
                    # NOTE(review): this runs detection synchronously on the render
                    # thread while _detection_worker may be running it too —
                    # confirm the tracker/model tolerate concurrent calls.
                    tracked_objects_render = self._process_frame(frame)

                    self.debug_repo.update_debug_entries_by_pipeline_id(
                        self.pipeline_id,
                        self.frame_drawer.draw_frame(
                            frame.copy(),
                            tracked_objects_render
                        ),
                        tracked_objects_render
                    )
                    self.debug_flag = False

                # Check RTMP streamer status before sending frame
                if self.rtmp_streamer:
                    try:
                        self.rtmp_streamer.send_frame(drawn_frame)
                    except Exception as e:
                        logging.error(f"❌ RTMP streaming error: {e}")
                        # Stop RTMP streamer on error
                        self.rtmp_streamer.stop_stream()
                        self.rtmp_streamer = None

                # Only put frame in queue if detection thread is still running
                if self.detection_thread and self.detection_thread.is_alive():
                    if not self.frame_queue.full():
                        self.frame_queue.put(frame, block=False)

                try:
                    self.video_debugger.show_frame(pipeline_id, worker_source_id, drawn_frame)
                except Exception as e:
                    logging.error(f"⚠️ Failed to render frame for pipeline {pipeline_id}: {e}")

                time.sleep(0.01)
        except Exception as e:
            logging.error(f"❌ Error in pipeline {pipeline_id}: {e}", exc_info=True)

    def _process_frame(self, frame):
        """Run preprocess -> detect -> un-preprocess bboxes -> match -> track on one frame."""
        dimension = frame.shape[:2]

        processed_frame = self.preprocessor.apply(frame)
        detections = self.detection_manager.detect_objects(processed_frame, self.threshold)
        # Map bboxes back into original-frame coordinates before matching/tracking.
        detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
        matched_results = self.detection_processor.process(detections, dimension)
        return self.tracker_manager.track_objects(matched_results)


    def _detection_worker(self):
        """
        Runs detection in a separate thread and updates configuration periodically.
        Applies preprocessing based on pipeline configuration.
        """
        pipeline_id = self.pipeline_id
        worker_source_id = self.worker_source_id
        last_detection_time = time.time()
        last_config_update_time = time.time()
        config_update_interval = 5  # Update configuration every 5 seconds

        while self.running:
            try:
                # Blocks up to 1s; queue.Empty is caught below so the loop can
                # re-check self.running and shut down promptly.
                frame = self.frame_queue.get(block=True, timeout=1)
                current_time = time.time()

                # Update config periodically
                if (current_time - last_config_update_time) >= config_update_interval:
                    self._update_config()
                    last_config_update_time = current_time
                    logging.info(f"🔄 Updated pipeline config for {pipeline_id}")

                # Process detection only if enough time has passed since last detection
                # detection_interval is the time in seconds between consecutive detections
                if (current_time - last_detection_time) < self.detection_interval:
                    continue

                last_detection_time = current_time

                if self.detection_processor is None or frame is None or frame.size == 0:
                    self.tracked_objects_render = []
                    continue

                self.tracked_objects_render = self._process_frame(frame)

                # Save to database if enabled
                if self.config_manager.is_feature_enabled("db"):
                    self.detection_processor.save_to_db(
                        pipeline_id,
                        worker_source_id,
                        self.frame_counter,
                        self.tracked_objects_render,
                        frame,
                        self.frame_drawer
                    )

                # Detections are also persisted when downstream delivery
                # (webhook/MQTT) is enabled for this pipeline.
                if self.config_manager.is_feature_enabled("webhook") or self.config_manager.is_feature_enabled("mqtt"):
                    self.detection_repo.save_detection(
                        pipeline_id,
                        frame,
                        self.tracked_objects_render,
                        self.frame_drawer
                    )

            except queue.Empty:
                pass
            except Exception as e:
                logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)

    def _wait_for_frame(self, video_manager, max_retries=10, sleep_time=3):
        """Waits until a frame is available from the video source.

        :param video_manager: Source of frames for this pipeline's worker source.
        :param max_retries: Number of polls before giving up.
        :param sleep_time: Seconds to sleep between polls.
        :return: The first available frame, or None after max_retries attempts.
        """
        for retry_count in range(max_retries):
            frame = video_manager.get_frame(self.worker_source_id)
            if frame is not None:
                return frame
            logging.warning(f"⚠️ Waiting for video stream {self.worker_source_id} (Attempt {retry_count + 1}/{max_retries})...")
            time.sleep(sleep_time)

        return None

    def stop(self):
        """Stops the Pipeline processor and cleans up resources."""
        if not self.running:  # Prevent multiple stops
            return

        logging.info("🛑 Stopping PipelineProcessor...")
        self.running = False

        # Stop RTMP streamer first
        if hasattr(self, 'rtmp_streamer') and self.rtmp_streamer:
            try:
                self.rtmp_streamer.stop_stream()
                self.rtmp_streamer = None
            except Exception as e:
                logging.error(f"Error stopping RTMP streamer: {e}")

        # Clear frame queue before joining thread
        try:
            while True:
                try:
                    self.frame_queue.get_nowait()
                except queue.Empty:
                    break
        except Exception as e:
            logging.error(f"Error clearing frame queue: {e}")

        # Wait for detection thread with increased timeout
        if self.detection_thread and self.detection_thread.is_alive():
            try:
                self.detection_thread.join(timeout=5.0)  # Increased timeout
                if self.detection_thread.is_alive():
                    logging.warning("Detection thread did not terminate cleanly")
            except Exception as e:
                logging.error(f"Error joining detection thread: {e}")
            finally:
                self.detection_thread = None

        # Clear tracked objects
        self.tracked_objects_render.clear()  # Use clear() instead of reassignment

        # Close video debugger windows last
        try:
            if hasattr(self, 'video_debugger'):
                self.video_debugger.close_all()
        except Exception as e:
            logging.error(f"Error closing video debugger: {e}")

        logging.info("✅ PipelineProcessor stopped successfully")

    def _get_detection_interval(self):
        """
        Get detection interval from configuration.
        Converts frames per second to seconds per frame.
        """
        config = self.config_manager.get_feature_config("processing_speed")
        fps = config.get("decimal", 1.0)

        if fps <= 0:
            return 1 / 10  # Default to 10 frame per second if fps is zero or negative

        return 1.0 / fps  # Convert fps to seconds per frame

    def enable_debug(self):
        """Enable debug mode for this pipeline."""
        self.debug_flag = True
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import time
|
|
4
|
+
import threading
|
|
5
|
+
from typing import Dict, Set, Optional, Callable
|
|
6
|
+
from ..repositories.AIModelRepository import AIModelRepository
|
|
7
|
+
from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
|
|
8
|
+
from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
|
|
9
|
+
from .PipelineManager import PipelineManager
|
|
10
|
+
from ..streams.VideoStreamManager import VideoStreamManager
|
|
11
|
+
from ..util.ModelReadinessChecker import ModelReadinessChecker
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class PipelineSyncThread(threading.Thread):
    """Thread responsible for synchronizing worker source pipelines from the database in real-time.

    Periodically (every `polling_interval` seconds) diffs the pipelines known
    locally against the database and starts, stops, restarts, or hot-swaps
    models on pipelines accordingly.
    """

    def __init__(self, video_manager: VideoStreamManager, polling_interval=5, max_workers=4):
        """
        :param video_manager: Shared manager providing video streams to pipelines.
        :param polling_interval: Seconds between database sync passes.
        :param max_workers: Worker pool size handed to the PipelineManager.
        """
        super().__init__(daemon=True)  # Runs as a daemon
        self.video_manager = video_manager
        self.polling_interval = polling_interval
        self.pipeline_repo = WorkerSourcePipelineRepository()
        self.debug_repo = WorkerSourcePipelineDebugRepository()
        self.ai_model_repo = AIModelRepository()
        self.running = True
        self.pipeline_manager = PipelineManager(video_manager, self.on_pipeline_stopped, max_workers)

    def _parse_json(self, value: str) -> Optional[dict]:
        """Attempts to parse the value as JSON if applicable.

        :return: The parsed object/list, or None when the value is empty,
                 not JSON-shaped, or fails to parse.
        """
        if not value:
            return None

        value = value.strip()  # Remove leading/trailing spaces
        if (value.startswith("{") and value.endswith("}")) or (value.startswith("[") and value.endswith("]")):
            try:
                return json.loads(value)  # Parse JSON object or list
            except json.JSONDecodeError:
                pass  # Keep as string if parsing fails
        return None

    def on_pipeline_stopped(self, pipeline_id: str) -> None:
        """Set the pipeline as stopped (or back to 'run' after a restart) in the database."""
        pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
        if pipeline is None:
            # The pipeline row may have been deleted while it was running.
            return
        pipeline.pipeline_status_code = "run" if pipeline.pipeline_status_code == "restart" else "stop"
        self.pipeline_repo.session.commit()

    def run(self) -> None:
        """Continuously updates pipelines based on database changes."""
        while self.running:
            try:
                # Cache model and pipeline data
                models = {m.id: m for m in self.ai_model_repo.get_models()}
                db_pipelines = {p.id: p for p in self.pipeline_repo.get_all_pipelines()}

                # Get pipeline IDs for comparison
                local_pipeline_ids = set(self.pipeline_manager.get_active_pipelines())
                db_pipeline_ids = set(db_pipelines.keys())

                # Helper function for model lookup
                def get_model(pipeline):
                    return models.get(pipeline.ai_model_id)

                # Process pipeline changes.
                # BUG FIX: previously a bool flag was passed into
                # _add_new_pipelines by value and reassigned there, so the
                # commit below could never fire. The helper now RETURNS
                # whether any 'restart' status was flipped to 'run'.
                restarted_pipeline = self._add_new_pipelines(
                    db_pipeline_ids - local_pipeline_ids, db_pipelines, get_model
                )
                self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
                self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines, get_model)

                if restarted_pipeline:
                    # Persist the restart -> run status transitions made above.
                    self.pipeline_repo.session.commit()

            except Exception as e:
                logging.error(f"⚠️ Error syncing pipelines from database: {e}", exc_info=True)

            time.sleep(self.polling_interval)

    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object],
                           get_model: Callable) -> bool:
        """Add new pipelines that exist in DB but not locally.

        :return: True if any pipeline's status was flipped from 'restart' to
                 'run' (the caller must commit the session).
        """
        restarted_pipeline = False
        for pid in pipeline_ids:
            pipeline = db_pipelines[pid]

            if pipeline.pipeline_status_code == 'restart':
                pipeline.pipeline_status_code = 'run'
                restarted_pipeline = True

            if pipeline.pipeline_status_code == 'run':
                model = get_model(pipeline)

                # Check if model is ready before starting pipeline
                if model:
                    readiness = ModelReadinessChecker.check_model_readiness(model)
                    if not readiness["ready"]:
                        logging.warning(f"⚠️ Pipeline {pid} ({pipeline.name}): {readiness['reason']}. Skipping pipeline start.")
                        continue

                logging.info(f"🟢 Adding new pipeline: {pid} ({pipeline.name})")
                self.pipeline_manager.start_pipeline(pipeline, model)
        return restarted_pipeline

    def _remove_deleted_pipelines(self, pipeline_ids: Set[str]) -> None:
        """Remove pipelines that exist locally but not in DB."""
        for pid in pipeline_ids:
            logging.info(f"🔴 Removing deleted pipeline: {pid}")
            self.pipeline_manager.stop_pipeline(pid)

    def _update_existing_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object],
                                   get_model: Callable) -> None:
        """Update existing pipelines that need changes."""
        debug_pipeline_ids = self.debug_repo.get_pipeline_ids_to_debug()

        for pid in pipeline_ids:
            db_pipeline = db_pipelines[pid]
            local_pipeline = self.pipeline_manager.get_pipeline(pid)
            processor = self.pipeline_manager.processors[pid]
            local_proc = processor.detection_manager
            db_model = get_model(db_pipeline)

            self.update_pipeline(pid, db_pipeline, local_pipeline, local_proc, db_model, local_proc.model_metadata)
            if pid in debug_pipeline_ids:
                processor.enable_debug()

    def update_pipeline(self, pid, db_pipeline, local_pipeline, local_proc, db_model, local_model):
        """Handles pipeline updates, ensuring correct model and status.

        :param pid: Pipeline id.
        :param db_pipeline: Pipeline row freshly loaded from the database.
        :param local_pipeline: Pipeline object the manager is currently running.
        :param local_proc: The running processor's DetectionManager.
        :param db_model: Model referenced by the DB pipeline (may be None).
        :param local_model: Model metadata currently loaded locally (may be None).
        """
        processor = self.pipeline_manager.processors.get(pid)
        processor.frame_drawer.location_name = db_pipeline.location_name

        # Case 1: Pipeline should be running but isn't
        if db_pipeline.pipeline_status_code == "run" and not self.pipeline_manager.is_running(pid):
            logging.info(f"🟢 Starting pipeline {pid}: {db_pipeline.name} (status: RUNNING)")
            self.pipeline_manager.start_pipeline(db_pipeline, db_model)

        # Case 2: Pipeline should be stopped but is running
        elif db_pipeline.pipeline_status_code == "stop" and self.pipeline_manager.is_running(pid):
            logging.info(f"🔴 Stopping pipeline {pid}: {db_pipeline.name} (status: STOPPED)")
            self.pipeline_manager.stop_pipeline(pid)

        # Case 3: Pipeline configuration has changed, needs restart
        elif self._has_pipeline_changed(local_pipeline, db_pipeline):
            logging.info(f"🟡 Updating pipeline {pid}: {db_pipeline.name} (status: RESTARTING)")
            if self.pipeline_manager.is_running(pid):
                self.pipeline_manager.stop_pipeline(pid)
            self.pipeline_manager.start_pipeline(db_pipeline, db_model)

        # Case 4: AI Model has changed
        elif local_model and db_model and local_model.id != db_model.id:
            readiness = ModelReadinessChecker.check_model_readiness(db_model)
            if readiness["ready"]:
                local_proc.load_model(db_model)
                logging.info(f"🔄 Model updated for pipeline {pid}: {db_pipeline.name} "
                             f"(version: {db_model.version if db_model else 'removed'})")
            else:
                logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")

        # Case 5: Local model exists but doesn't match DB model
        elif local_model and (not db_model or local_model.version != db_model.version):
            if not db_model or ModelReadinessChecker.check_model_readiness(db_model)["ready"]:
                local_proc.load_model(db_model)
                logging.info(f"🔄 Model updated for pipeline {pid}: {db_pipeline.name} "
                             f"(version: {db_model.version if db_model else 'removed'})")
            else:
                readiness = ModelReadinessChecker.check_model_readiness(db_model)
                logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")

        # Case 6: DB model exists but local model doesn't
        elif db_model and not local_model:
            readiness = ModelReadinessChecker.check_model_readiness(db_model)
            if readiness["ready"]:
                logging.info(f"🔄 Added model for pipeline {pid}: {db_pipeline.name} (version: {db_model.version})")
                local_proc.load_model(db_model)
            else:
                logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")

    def _has_pipeline_changed(self, local_pipeline, db_pipeline):
        """Checks if the pipeline configuration has changed."""
        # A DB status of 'restart' always forces a restart regardless of config.
        if db_pipeline.pipeline_status_code == "restart":
            return True

        local_configs = local_pipeline.worker_source_pipeline_configs
        db_configs = db_pipeline.worker_source_pipeline_configs

        # Convert config objects to comparable structures
        local_config_values = [
            (config.pipeline_config_id, config.is_enabled, config.value,
             config.pipeline_config_name, config.pipeline_config_code)
            for config in local_configs
        ]

        db_config_values = [
            (config.pipeline_config_id, config.is_enabled, config.value,
             config.pipeline_config_name, config.pipeline_config_code)
            for config in db_configs
        ]

        # Order-insensitive comparison of config tuples.
        return sorted(local_config_values) != sorted(db_config_values)

    def stop(self):
        """Stops the synchronization thread and shuts down pipelines properly."""
        logging.info("🛑 Stopping PipelineSyncThread...")
        self.running = False
        self.video_manager.stop_all()
        self.pipeline_manager.shutdown()
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from typing import Tuple
|
|
2
|
+
import cv2
|
|
3
|
+
import numpy as np
|
|
4
|
+
from ..pipeline.PipelineConfigManager import PipelineConfigManager
|
|
5
|
+
from .Preprocessor import Preprocessor
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ImageResizer(Preprocessor):
    """Downscales frames by a configurable factor and maps detection bboxes back.

    The "resize" feature stores a divisor: a factor of 2 halves each frame
    dimension in apply(), and revert_bboxes() multiplies coordinates by the
    same factor to return them to original-frame space.
    """

    def __init__(self):
        self.code = "resize"
        self.is_enabled = False
        # 1.0 means pass-through (no resizing).
        self.factor = 1.0

    def _get_factor(self, config: PipelineConfigManager) -> float:
        """Read the resize factor from config; fall back to 1.0 on any bad value."""
        if not self.is_enabled:
            return 1.0

        resize_factor = config.get_feature_config(self.code, "1")

        try:
            factor = float(resize_factor)
        except (ValueError, TypeError):
            return 1.0

        # BUG FIX: a zero or negative factor previously caused a
        # ZeroDivisionError (or nonsensical dimensions) in apply().
        return factor if factor > 0 else 1.0

    def update_config(self, config: PipelineConfigManager):
        """Refresh enabled state and factor from the pipeline configuration."""
        self.is_enabled = config.is_feature_enabled(self.code)
        self.factor = self._get_factor(config)

    def apply(self, image: np.ndarray) -> np.ndarray:
        """Return the image downscaled by `factor` (or unchanged when disabled)."""
        if not self.is_enabled or self.factor == 1.0:
            return image

        height, width = image.shape[:2]
        # Clamp to at least 1px so cv2.resize never receives a zero dimension
        # when the factor exceeds the image size.
        new_height = max(1, int(height / self.factor))
        new_width = max(1, int(width / self.factor))

        return cv2.resize(image, (new_width, new_height))

    def revert_bboxes(self, bboxes: np.ndarray, dimension: Tuple[int, int]) -> np.ndarray:
        """Scale bboxes detected on the resized image back to original coordinates.

        :param bboxes: Bounding boxes in resized-image coordinates.
        :param dimension: Original (height, width) — unused here; kept for the
                          shared Preprocessor interface.
        """
        if not self.is_enabled or self.factor == 1.0:
            return bboxes

        return bboxes * self.factor
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
from typing import Tuple
|
|
2
|
+
import cv2
|
|
3
|
+
import numpy as np
|
|
4
|
+
from ..pipeline.PipelineConfigManager import PipelineConfigManager
|
|
5
|
+
from .Preprocessor import Preprocessor
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ImageRoi(Preprocessor):
    """Crops frames to a configured polygonal region of interest (ROI) and
    translates detection boxes back into full-frame coordinates.
    """

    def __init__(self):
        # Feature code used to look this preprocessor up in the pipeline config.
        self.code = "roi"
        self.is_enabled = False
        # Polygon vertices as dicts with normalized "x"/"y" values in [0, 1].
        self.normalized_points = []

    def _get_roi_points(self, config: PipelineConfigManager) -> list:
        """Return the configured ROI polygon, or [] when the feature is off.

        Note: the original annotation said `-> float`, but the method has
        always returned a list of point dicts.
        """
        if not self.is_enabled:
            return []

        return config.get_feature_config(self.code, [])

    def update_config(self, config: PipelineConfigManager):
        """Refresh enabled state and the ROI polygon from the pipeline config."""
        self.is_enabled = config.is_feature_enabled(self.code)
        self.normalized_points = self._get_roi_points(config)

    def _pixel_points(self, width: int, height: int) -> list:
        """Convert the normalized polygon to integer pixel coordinates for a
        frame of the given size. Shared by apply() and revert_bboxes()."""
        return [(int(p["x"] * width), int(p["y"] * height)) for p in self.normalized_points]

    def apply(self, image: np.ndarray) -> np.ndarray:
        """Crop the image to the polygon's bounding rectangle and black out
        pixels outside the polygon. Identity when disabled or when fewer
        than 4 polygon points are configured."""
        if not self.is_enabled or len(self.normalized_points) < 4:
            return image

        height, width = image.shape[:2]
        points = self._pixel_points(width, height)
        x_coords = [p[0] for p in points]
        y_coords = [p[1] for p in points]

        roi_x = min(x_coords)
        roi_y = min(y_coords)
        roi_width = max(x_coords) - roi_x + 1
        roi_height = max(y_coords) - roi_y + 1

        # numpy clips out-of-range slice bounds, so an oversized rect is safe.
        cropped = image[roi_y:roi_y + roi_height, roi_x:roi_x + roi_width].copy()
        mask = np.zeros(cropped.shape[:2], dtype=np.uint8)

        # Shift the polygon into the cropped image's coordinate frame.
        offset_points = [(x - roi_x, y - roi_y) for x, y in points]
        points_np = np.array(offset_points, dtype=np.int32).reshape((-1, 1, 2))

        cv2.fillPoly(mask, [points_np], 255)

        return cv2.bitwise_and(cropped, cropped, mask=mask)

    def revert_bboxes(self, bboxes: np.ndarray, dimension: Tuple[int, int]) -> np.ndarray:
        """Translate bboxes from ROI-crop coordinates back to full-frame
        coordinates. `dimension` is (height, width) of the original frame.
        Identity when disabled or fewer than 4 points are configured."""
        if not self.is_enabled or len(self.normalized_points) < 4:
            return bboxes

        height, width = dimension
        points = self._pixel_points(width, height)
        x_coords = [p[0] for p in points]
        y_coords = [p[1] for p in points]

        roi_x = min(x_coords)
        roi_y = min(y_coords)

        # Boxes are [x1, y1, x2, y2]; add the crop origin back to each corner.
        return bboxes + np.array([roi_x, roi_y, roi_x, roi_y])
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import Tuple
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
from ..pipeline.PipelineConfigManager import PipelineConfigManager
|
|
6
|
+
|
|
7
|
+
class Preprocessor(ABC):
    """Contract for frame preprocessors that transform an image before
    inference and can map detections back to the original frame.
    """

    @abstractmethod
    def update_config(self, config: PipelineConfigManager):
        """Refresh internal state from the pipeline configuration."""

    @abstractmethod
    def apply(self, image: np.ndarray) -> np.ndarray:
        """Transform the frame; returns an image (possibly unchanged)."""

    @abstractmethod
    def revert_bboxes(self, bboxes: np.ndarray, dimension: Tuple[int, int]) -> np.ndarray:
        """Map bboxes from transformed-image space back to the original
        frame, whose size is given by `dimension`."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|