nedo-vision-worker 1.0.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nedo_vision_worker/__init__.py +10 -0
- nedo_vision_worker/cli.py +195 -0
- nedo_vision_worker/config/ConfigurationManager.py +196 -0
- nedo_vision_worker/config/__init__.py +1 -0
- nedo_vision_worker/database/DatabaseManager.py +219 -0
- nedo_vision_worker/database/__init__.py +1 -0
- nedo_vision_worker/doctor.py +453 -0
- nedo_vision_worker/initializer/AppInitializer.py +78 -0
- nedo_vision_worker/initializer/__init__.py +1 -0
- nedo_vision_worker/models/__init__.py +15 -0
- nedo_vision_worker/models/ai_model.py +29 -0
- nedo_vision_worker/models/auth.py +14 -0
- nedo_vision_worker/models/config.py +9 -0
- nedo_vision_worker/models/dataset_source.py +30 -0
- nedo_vision_worker/models/logs.py +9 -0
- nedo_vision_worker/models/ppe_detection.py +39 -0
- nedo_vision_worker/models/ppe_detection_label.py +20 -0
- nedo_vision_worker/models/restricted_area_violation.py +20 -0
- nedo_vision_worker/models/user.py +10 -0
- nedo_vision_worker/models/worker_source.py +19 -0
- nedo_vision_worker/models/worker_source_pipeline.py +21 -0
- nedo_vision_worker/models/worker_source_pipeline_config.py +24 -0
- nedo_vision_worker/models/worker_source_pipeline_debug.py +15 -0
- nedo_vision_worker/models/worker_source_pipeline_detection.py +14 -0
- nedo_vision_worker/protos/AIModelService_pb2.py +46 -0
- nedo_vision_worker/protos/AIModelService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/DatasetSourceService_pb2.py +46 -0
- nedo_vision_worker/protos/DatasetSourceService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/HumanDetectionService_pb2.py +44 -0
- nedo_vision_worker/protos/HumanDetectionService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/PPEDetectionService_pb2.py +46 -0
- nedo_vision_worker/protos/PPEDetectionService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/VisionWorkerService_pb2.py +72 -0
- nedo_vision_worker/protos/VisionWorkerService_pb2_grpc.py +471 -0
- nedo_vision_worker/protos/WorkerSourcePipelineService_pb2.py +64 -0
- nedo_vision_worker/protos/WorkerSourcePipelineService_pb2_grpc.py +312 -0
- nedo_vision_worker/protos/WorkerSourceService_pb2.py +50 -0
- nedo_vision_worker/protos/WorkerSourceService_pb2_grpc.py +183 -0
- nedo_vision_worker/protos/__init__.py +1 -0
- nedo_vision_worker/repositories/AIModelRepository.py +44 -0
- nedo_vision_worker/repositories/DatasetSourceRepository.py +150 -0
- nedo_vision_worker/repositories/PPEDetectionRepository.py +112 -0
- nedo_vision_worker/repositories/RestrictedAreaRepository.py +88 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineDebugRepository.py +90 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineDetectionRepository.py +48 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineRepository.py +174 -0
- nedo_vision_worker/repositories/WorkerSourceRepository.py +46 -0
- nedo_vision_worker/repositories/__init__.py +1 -0
- nedo_vision_worker/services/AIModelClient.py +362 -0
- nedo_vision_worker/services/ConnectionInfoClient.py +57 -0
- nedo_vision_worker/services/DatasetSourceClient.py +88 -0
- nedo_vision_worker/services/FileToRTMPServer.py +78 -0
- nedo_vision_worker/services/GrpcClientBase.py +155 -0
- nedo_vision_worker/services/GrpcClientManager.py +141 -0
- nedo_vision_worker/services/ImageUploadClient.py +82 -0
- nedo_vision_worker/services/PPEDetectionClient.py +108 -0
- nedo_vision_worker/services/RTSPtoRTMPStreamer.py +98 -0
- nedo_vision_worker/services/RestrictedAreaClient.py +100 -0
- nedo_vision_worker/services/SystemUsageClient.py +77 -0
- nedo_vision_worker/services/VideoStreamClient.py +161 -0
- nedo_vision_worker/services/WorkerSourceClient.py +215 -0
- nedo_vision_worker/services/WorkerSourcePipelineClient.py +393 -0
- nedo_vision_worker/services/WorkerSourceUpdater.py +134 -0
- nedo_vision_worker/services/WorkerStatusClient.py +65 -0
- nedo_vision_worker/services/__init__.py +1 -0
- nedo_vision_worker/util/HardwareID.py +104 -0
- nedo_vision_worker/util/ImageUploader.py +92 -0
- nedo_vision_worker/util/Networking.py +94 -0
- nedo_vision_worker/util/PlatformDetector.py +50 -0
- nedo_vision_worker/util/SystemMonitor.py +299 -0
- nedo_vision_worker/util/VideoProbeUtil.py +120 -0
- nedo_vision_worker/util/__init__.py +1 -0
- nedo_vision_worker/worker/CoreActionWorker.py +125 -0
- nedo_vision_worker/worker/DataSenderWorker.py +168 -0
- nedo_vision_worker/worker/DataSyncWorker.py +143 -0
- nedo_vision_worker/worker/DatasetFrameSender.py +208 -0
- nedo_vision_worker/worker/DatasetFrameWorker.py +412 -0
- nedo_vision_worker/worker/PPEDetectionManager.py +86 -0
- nedo_vision_worker/worker/PipelineActionWorker.py +129 -0
- nedo_vision_worker/worker/PipelineImageWorker.py +116 -0
- nedo_vision_worker/worker/RabbitMQListener.py +170 -0
- nedo_vision_worker/worker/RestrictedAreaManager.py +85 -0
- nedo_vision_worker/worker/SystemUsageManager.py +111 -0
- nedo_vision_worker/worker/VideoStreamWorker.py +139 -0
- nedo_vision_worker/worker/WorkerManager.py +155 -0
- nedo_vision_worker/worker/__init__.py +1 -0
- nedo_vision_worker/worker_service.py +264 -0
- nedo_vision_worker-1.0.0.dist-info/METADATA +563 -0
- nedo_vision_worker-1.0.0.dist-info/RECORD +92 -0
- nedo_vision_worker-1.0.0.dist-info/WHEEL +5 -0
- nedo_vision_worker-1.0.0.dist-info/entry_points.txt +2 -0
- nedo_vision_worker-1.0.0.dist-info/top_level.txt +1 -0

nedo_vision_worker/services/WorkerSourcePipelineClient.py

@@ -0,0 +1,393 @@

import logging
import os
import time
import ffmpeg
from urllib.parse import urlparse
from pathlib import Path

from ..database.DatabaseManager import _get_storage_paths
from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
from ..repositories.WorkerSourcePipelineDetectionRepository import WorkerSourcePipelineDetectionRepository
from .GrpcClientBase import GrpcClientBase
from ..protos.WorkerSourcePipelineService_pb2_grpc import WorkerSourcePipelineServiceStub
from ..protos.WorkerSourcePipelineService_pb2 import (
    GetListByWorkerIdRequest,
    SendPipelineImageRequest,
    UpdatePipelineStatusRequest,
    SendPipelineDebugRequest,
    SendPipelineDetectionDataRequest,
)
from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository


class WorkerSourcePipelineClient(GrpcClientBase):
    def __init__(self, server_host: str, server_port: int = 50051):
        super().__init__(server_host, server_port)
        self.repo = WorkerSourcePipelineRepository()
        self.debug_repo = WorkerSourcePipelineDebugRepository()
        self.detection_repo = WorkerSourcePipelineDetectionRepository()
        storage_paths = _get_storage_paths()
        self.source_file_path = storage_paths["files"] / "source_files"

        # Track video playback positions and last fetch times
        self.video_positions = {}   # {video_path: current_position_in_seconds}
        self.last_fetch_times = {}  # {video_path: last_fetch_timestamp}

        try:
            self.connect(WorkerSourcePipelineServiceStub)
        except Exception as e:
            logging.error(f"Failed to connect to gRPC server: {e}")
            self.stub = None

    def _detect_stream_type(self, url):
        """Detect whether the source is an RTSP stream, an HLS stream, or a local video/image file."""
        parsed_url = urlparse(url)
        if parsed_url.scheme == "rtsp":
            return "rtsp"
        elif parsed_url.scheme in ["http", "https"] and url.endswith(".m3u8"):
            return "hls"
        elif url.startswith("worker-source/"):
            file_path = self.source_file_path / os.path.basename(url)
            if file_path.exists():
                video_extensions = ['.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm', '.m4v']
                if file_path.suffix.lower() in video_extensions:
                    return "video_file"
                return "image_file"
            return "unknown"
        else:
            return "unknown"

    def _get_video_duration(self, file_path):
        """Get the duration of a video file in seconds."""
        try:
            file_path_str = str(file_path)
            if not os.path.exists(file_path_str):
                logging.error(f"Video file does not exist: {file_path_str}")
                return None
            import subprocess
            import json
            cmd = [
                'ffprobe',
                '-v', 'quiet',
                '-print_format', 'json',
                '-show_format',
                file_path_str
            ]
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
            if result.returncode != 0:
                logging.error(f"FFprobe failed for {file_path_str}: {result.stderr}")
                return None
            try:
                probe_data = json.loads(result.stdout)
            except json.JSONDecodeError as e:
                logging.error(f"Failed to parse ffprobe output for {file_path_str}: {e}")
                return None
            if 'format' not in probe_data or 'duration' not in probe_data['format']:
                logging.error(f"No duration found in probe result for {file_path_str}")
                return None
            duration = probe_data['format']['duration']
            # Defensive: ensure the reported duration is convertible to float
            try:
                duration_val = float(duration)
            except Exception as e:
                logging.error(f"Duration value not convertible to float: {duration} ({type(duration)}) - {e}", exc_info=True)
                return None
            return duration_val
        except Exception as e:
            logging.error(f"Error getting video duration for {file_path}: {e}", exc_info=True)
            return None

    def _get_current_video_position(self, video_path):
        """Get or advance the current playback position for a video file based on real time elapsed."""
        current_time = time.time()

        if video_path not in self.video_positions:
            self.video_positions[video_path] = 0.0
            self.last_fetch_times[video_path] = current_time
            return 0.0

        current_pos = self.video_positions[video_path]
        last_fetch_time = self.last_fetch_times[video_path]

        # Calculate time elapsed since last fetch
        time_elapsed = current_time - last_fetch_time

        # Advance position by the actual time elapsed
        current_pos += time_elapsed

        # Get video duration to handle looping
        duration = self._get_video_duration(video_path)
        if duration is not None and isinstance(duration, (int, float)):
            # Loop back to beginning if we've reached the end
            if current_pos >= duration:
                current_pos = 0.0
        else:
            # Default to 120 seconds if we can't get duration
            if current_pos >= 120.0:
                current_pos = 0.0

        # Update the stored position and fetch time
        self.video_positions[video_path] = current_pos
        self.last_fetch_times[video_path] = current_time

        return current_pos

    def reset_video_position(self, video_path):
        """Reset the playback position for a specific video file."""
        if video_path in self.video_positions:
            self.video_positions[video_path] = 0.0
            self.last_fetch_times[video_path] = time.time()
            logging.info(f"Reset video position for {video_path}")

    def reset_all_video_positions(self):
        """Reset all video playback positions."""
        self.video_positions.clear()
        self.last_fetch_times.clear()
        logging.info("Reset all video positions")

    def get_video_positions_status(self):
        """Get the current status of all video positions for debugging."""
        status = {}
        for video_path, position in self.video_positions.items():
            duration = self._get_video_duration(video_path)
            last_fetch_time = self.last_fetch_times.get(video_path, None)
            time_since_last_fetch = time.time() - last_fetch_time if last_fetch_time else None

            if duration:
                progress = (position / duration) * 100
                status[video_path] = {
                    "current_position": position,
                    "duration": duration,
                    "progress_percent": progress,
                    "last_fetch_time": last_fetch_time,
                    "time_since_last_fetch": time_since_last_fetch
                }
            else:
                status[video_path] = {
                    "current_position": position,
                    "duration": None,
                    "progress_percent": None,
                    "last_fetch_time": last_fetch_time,
                    "time_since_last_fetch": time_since_last_fetch
                }
        return status

    def _get_single_frame_bytes(self, url):
        """Get a single frame from an RTSP stream, HLS stream, or local file as JPEG bytes."""
        stream_type = self._detect_stream_type(url)

        if stream_type == "rtsp":
            ffmpeg_input = (
                ffmpeg
                .input(url, rtsp_transport="tcp", fflags="nobuffer", timeout="5000000")
            )
        elif stream_type == "hls":
            ffmpeg_input = (
                ffmpeg
                .input(url, format="hls", analyzeduration="10000000", probesize="10000000")
            )
        elif stream_type == "video_file":
            file_path = self.source_file_path / os.path.basename(url)
            file_path_str = str(file_path)

            # Check if file exists
            if not os.path.exists(file_path_str):
                logging.error(f"Video file does not exist: {file_path_str}")
                return None

            current_position = self._get_current_video_position(file_path_str)

            logging.info(f"🎬 [APP] Capturing video frame at position {current_position:.2f}s from {file_path_str}")

            ffmpeg_input = (
                ffmpeg
                .input(file_path_str, ss=current_position)
            )
        elif stream_type == "image_file":
            file_path = self.source_file_path / os.path.basename(url)

            logging.info(f"🖼️ [APP] Capturing image frame from {file_path}")

            ffmpeg_input = (
                ffmpeg
                .input(str(file_path))
            )
        else:
            logging.error(f"Unsupported stream type: {url}")
            return None

        # The same single-frame MJPEG extraction applies to every input type.
        process = (
            ffmpeg_input
            .output('pipe:', format='mjpeg', vframes=1, q=2)
            .overwrite_output()
            .run_async(pipe_stdout=True, pipe_stderr=True)
        )

        try:
            stdout, stderr = process.communicate(timeout=15)

            if process.returncode != 0:
                error_msg = stderr.decode('utf-8', errors='ignore')
                logging.error(f"FFmpeg error: {error_msg}")
                return None

            if not stdout:
                logging.error("No data received from FFmpeg")
                return None

            return stdout

        except Exception as e:
            logging.error(f"Error capturing frame: {e}", exc_info=True)
            return None

        finally:
            process.terminate()
            process.wait()

    def update_pipeline_status(self, pipeline_id: str, status_code: str, token: str):
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            timestamp = int(time.time() * 1000)

            request = UpdatePipelineStatusRequest(
                pipeline_id=pipeline_id,
                status_code=status_code,
                timestamp=timestamp,
                token=token
            )
            response = self.handle_rpc(self.stub.UpdateStatus, request)

            if response and response.success:
                return {"success": True, "message": response.message}
            return {"success": False, "message": response.message if response else "Unknown error"}

        except Exception as e:
            logging.error(f"Error updating pipeline status: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}

    def get_worker_source_pipeline_list(self, worker_id: str, token: str) -> dict:
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            request = GetListByWorkerIdRequest(worker_id=worker_id, token=token)
            response = self.handle_rpc(self.stub.GetListByWorkerId, request)

            if response and response.success:
                # Create a wrapper function that captures the token
                def update_status_callback(pipeline_id: str, status_code: str):
                    return self.update_pipeline_status(pipeline_id, status_code, token)

                self.repo.sync_worker_source_pipelines(response, update_status_callback)  # Sync includes delete, update, insert
                return {"success": True, "message": response.message, "data": response.data}

            return {"success": False, "message": response.message if response else "Unknown error"}

        except Exception as e:
            logging.error(f"Error fetching worker source pipeline list: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}

    def send_pipeline_image(self, worker_source_pipeline_id: str, uuid: str, url: str, token: str):
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            frame_bytes = self._get_single_frame_bytes(url)

            if not frame_bytes:
                return {"success": False, "message": "Failed to retrieve frame from source"}

            request = SendPipelineImageRequest(
                worker_source_pipeline_id=worker_source_pipeline_id,
                uuid=uuid,
                image=frame_bytes,
                token=token
            )
            response = self.handle_rpc(self.stub.SendPipelineImage, request)

            if response and response.success:
                return {"success": True, "message": response.message}
            return {"success": False, "message": response.message if response else "Unknown error"}

        except Exception as e:
            logging.error(f"Error sending pipeline image: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}

    @staticmethod
    def read_image_as_binary(image_path: str) -> bytes:
        """
        Reads an image file and returns its binary content.

        Args:
            image_path (str): Path to the image file.

        Returns:
            bytes: Binary content of the image.
        """
        with open(image_path, 'rb') as image_file:
            return image_file.read()

    def sync_pipeline_debug(self, token: str):
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            debug_entries = self.debug_repo.get_debug_entries_with_data()

            for debug_entry in debug_entries:
                image_binary = self.read_image_as_binary(debug_entry.image_path)

                request = SendPipelineDebugRequest(
                    worker_source_pipeline_id=debug_entry.worker_source_pipeline_id,
                    uuid=debug_entry.uuid,
                    data=debug_entry.data,
                    image=image_binary,
                    token=token
                )
                response = self.handle_rpc(self.stub.SendPipelineDebug, request)

                if response and response.success:
                    self.debug_repo.delete_entry_by_id(debug_entry.id)
                else:
                    return {"success": False, "message": response.message if response else "Unknown error"}

            return {"success": True, "message": "Successfully synced debug entries"}

        except Exception as e:
            logging.error(f"Error syncing pipeline debug: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}

    def sync_pipeline_detection(self, token: str):
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            entries = self.detection_repo.get_entries()

            for entry in entries:
                image_binary = self.read_image_as_binary(entry.image_path)

                request = SendPipelineDetectionDataRequest(
                    worker_source_pipeline_id=entry.worker_source_pipeline_id,
                    data=entry.data,
                    image=image_binary,
                    timestamp=int(entry.created_at.timestamp() * 1000),
                    token=token
                )
                response = self.handle_rpc(self.stub.SendPipelineDetectionData, request)

                if response and response.success:
                    self.detection_repo.delete_entry_by_id(entry.id)
                else:
                    return {"success": False, "message": response.message if response else "Unknown error"}

            return {"success": True, "message": "Successfully synced detection entries"}

        except Exception as e:
            logging.error(f"Error syncing pipeline detection: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}
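
For orientation, a minimal usage sketch of the client above. The host, port, worker ID, pipeline ID, source URL, and token are placeholder values (not taken from the package), and the sketch assumes GrpcClientBase.connect() succeeded so that self.stub is set:

# Hypothetical usage; all identifiers and endpoints below are placeholders.
client = WorkerSourcePipelineClient("grpc.example.internal", 50051)

# Pull the pipeline list for this worker and sync it into the local database.
result = client.get_worker_source_pipeline_list(worker_id="worker-123", token="<token>")

if result["success"]:
    # Capture one frame from a source and push it to the server.
    client.send_pipeline_image(
        worker_source_pipeline_id="pipeline-456",
        uuid="frame-uuid-1",
        url="rtsp://camera.local/stream1",
        token="<token>",
    )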

nedo_vision_worker/services/WorkerSourceUpdater.py

@@ -0,0 +1,134 @@

import logging
import threading
from datetime import datetime, timezone
import os
from ..database.DatabaseManager import _get_storage_paths
from ..repositories.WorkerSourceRepository import WorkerSourceRepository
from .WorkerSourceClient import WorkerSourceClient
from .GrpcClientManager import GrpcClientManager
from ..util.VideoProbeUtil import VideoProbeUtil  # Helper to extract video metadata

logger = logging.getLogger(__name__)

class WorkerSourceUpdater:
    """Handles synchronization and updates of worker sources via gRPC and local database.

    This class is thread-safe and can be used concurrently from multiple threads.
    """

    def __init__(self, server_host: str, token: str):
        storage_paths = _get_storage_paths()
        self.source_file_path = storage_paths["files"] / "source_files"
        # Use shared client instead of creating new instance
        self.client = GrpcClientManager.get_shared_client(WorkerSourceClient)
        self.repo = WorkerSourceRepository()
        self.token = token
        # Thread safety lock for critical operations
        self._lock = threading.RLock()

    def _get_source_metadata(self, source):
        """Get metadata for a worker source."""
        url = source.url if source.type_code == "live" else source.file_path
        if not url:
            return None

        if source.type_code == "file":
            url = self.source_file_path / os.path.basename(url)

        return VideoProbeUtil.get_video_metadata(url)

    def update_worker_sources(self):
        """Fetch local worker sources, probe video URLs, and update if different from the local DB.

        This method is thread-safe and can be called concurrently from multiple threads.
        """
        with self._lock:
            try:
                worker_sources = self.repo.get_all_worker_sources()
                updated_records = []

                for source in worker_sources:
                    metadata = self._get_source_metadata(source)
                    if not metadata:
                        logger.warning(f"⚠️ [APP] Failed to probe video for Worker Source ID {source.id}")
                        continue

                    # Extract details
                    resolution = metadata.get("resolution")
                    frame_rate = round(metadata.get("frame_rate"), 0) if metadata.get("frame_rate") else None
                    status_code = "connected" if resolution else "disconnected"
                    # .NET compatible timestamp
                    worker_timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

                    # Compare with local database values
                    if (
                        source.resolution != resolution or
                        source.frame_rate != frame_rate or
                        source.status_code != status_code
                    ):
                        logger.info(f"🔄 [APP] Detected changes in Worker Source ID {source.id}, updating...")

                        # Update local database
                        source.resolution = resolution
                        source.frame_rate = frame_rate
                        source.status_code = status_code
                        updated_records.append(source)

                        # Send gRPC update request (client is thread-safe)
                        response = self.client.update_worker_source(
                            worker_source_id=source.id,
                            resolution=resolution,
                            status_code=status_code,
                            frame_rate=frame_rate,
                            worker_timestamp=worker_timestamp,
                            token=self.token,
                        )

                        if response.get("success"):
                            logger.info(f"✅ [APP] Updated Worker Source ID {source.id} - {response.get('message')}")
                        else:
                            logger.error(f"🚨 [APP] Failed to update Worker Source ID {source.id}: {response.get('message')}")

                # Batch update local database
                if updated_records:
                    self.repo.bulk_update_worker_sources(updated_records)

            except Exception as e:
                logger.error(f"🚨 [APP] Unexpected error while updating worker sources: {e}", exc_info=True)

    def stop_worker_sources(self):
        """Stop all worker sources.

        This method is thread-safe and can be called concurrently from multiple threads.
        """
        with self._lock:
            try:
                worker_sources = self.repo.get_all_worker_sources()
                updated_records = []

                for source in worker_sources:
                    source.status_code = "disconnected"
                    worker_timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

                    updated_records.append(source)

                    # Send gRPC update request (client is thread-safe)
                    response = self.client.update_worker_source(
                        worker_source_id=source.id,
                        resolution=source.resolution,
                        status_code=source.status_code,
                        frame_rate=source.frame_rate,
                        worker_timestamp=worker_timestamp,
                        token=self.token,
                    )

                    if response.get("success"):
                        logger.info(f"✅ [APP] Updated Worker Source ID {source.id} - {response.get('message')}")
                    else:
                        logger.error(f"🚨 [APP] Failed to update Worker Source ID {source.id}: {response.get('message')}")

                # Batch update local database
                if updated_records:
                    self.repo.bulk_update_worker_sources(updated_records)
            except Exception as e:
                logger.error(f"🚨 [APP] Unexpected error while stopping worker sources: {e}", exc_info=True)
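
A possible way to drive WorkerSourceUpdater from a periodic loop; the host, token, and the 30-second interval are illustrative placeholders rather than values taken from the package:

import time

# Hypothetical usage; endpoint and token are placeholders.
updater = WorkerSourceUpdater(server_host="grpc.example.internal", token="<token>")
try:
    while True:
        updater.update_worker_sources()   # probe each source and push changed metadata upstream
        time.sleep(30)
except KeyboardInterrupt:
    updater.stop_worker_sources()         # mark every source disconnected on shutdown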

nedo_vision_worker/services/WorkerStatusClient.py

@@ -0,0 +1,65 @@

import logging
import grpc
from ..protos.VisionWorkerService_pb2 import UpdateWorkerStatusRequest
from ..protos.VisionWorkerService_pb2_grpc import VisionWorkerServiceStub
from .GrpcClientBase import GrpcClientBase
import time

logger = logging.getLogger(__name__)

class WorkerStatusClient(GrpcClientBase):
    def __init__(self, server_host: str, server_port: int = 50051):
        """
        Initialize the WorkerStatusClient for updating worker status.

        Args:
            server_host (str): The gRPC server host.
            server_port (int): The gRPC server port.
        """
        super().__init__(server_host, server_port)

        try:
            self.connect(VisionWorkerServiceStub)
        except Exception as e:
            logging.error(f"Failed to connect to gRPC server: {e}")
            self.stub = None

    def update_worker_status(self, worker_id: str, status_code: str, token: str) -> dict:
        """
        Update the status of a worker on the server using token authentication.

        Args:
            worker_id (str): The ID of the worker.
            status_code (str): The status code to report (e.g., "RUNNING", "STOPPED").
            token (str): Authentication token for the worker.

        Returns:
            dict: Result of the status update operation.
        """
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            timestamp = int(time.time() * 1000)
            request = UpdateWorkerStatusRequest(
                worker_id=worker_id,
                status_code=status_code,
                timestamp=timestamp,
                token=token
            )

            response = self.handle_rpc(self.stub.UpdateStatus, request)

            if response and response.success:
                return {"success": True, "message": response.message}

            return {"success": False, "message": response.message if response else "Unknown error"}

        except grpc.RpcError as e:
            logger.error(f"gRPC error while updating status for {worker_id}: {str(e)}")
            return {"success": False, "message": f"RPC error: {str(e)}"}

        except Exception as e:
            logger.error(f"Unexpected error while updating status for {worker_id}: {str(e)}")
            return {"success": False, "message": f"An unexpected error occurred: {str(e)}"}
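
A minimal sketch of reporting worker status with WorkerStatusClient; the host, worker ID, and token are placeholders, and the status codes follow the examples given in the docstring above:

# Hypothetical usage; endpoint, worker_id, and token are placeholders.
status_client = WorkerStatusClient("grpc.example.internal", 50051)

# Report that the worker has started.
result = status_client.update_worker_status(
    worker_id="worker-123",
    status_code="RUNNING",
    token="<token>",
)
if not result["success"]:
    print(f"Status update failed: {result['message']}")

# Later, on shutdown:
status_client.update_worker_status(worker_id="worker-123", status_code="STOPPED", token="<token>")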

nedo_vision_worker/services/__init__.py

@@ -0,0 +1 @@