nedo-vision-worker 1.2.1__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,5 +6,5 @@ A library for running worker agents in the Nedo Vision platform.
 
  from .worker_service import WorkerService
 
- __version__ = "1.2.1"
+ __version__ = "1.2.2"
  __all__ = ["WorkerService"]
@@ -87,8 +87,8 @@ class WorkerServiceDoctor:
  def check_python_environment(self) -> None:
  """Comprehensive Python environment validation."""
  version = sys.version_info
- min_version = (3, 8)
- recommended_version = (3, 9)
+ min_version = (3, 10)
+ recommended_version = (3, 10)
 
  details = [
  f"Python {version.major}.{version.minor}.{version.micro}",
@@ -891,6 +891,36 @@ class WorkerServiceDoctor:
  performance_impact=performance_impact
  ))
 
+ def check_psutil_installation(self) -> None:
+ """Check if psutil is installed for system monitoring."""
+ details = []
+ recommendations = []
+ try:
+ import psutil
+ version = getattr(psutil, '__version__', 'N/A')
+ details.append(f"psutil version: {version}")
+ status = HealthStatus.GOOD
+ message = "psutil is installed"
+ is_blocking = False
+ performance_impact = "None"
+ except ImportError:
+ status = HealthStatus.WARNING
+ message = "psutil not installed"
+ details.append("System resource monitoring will be disabled.")
+ recommendations.append("Install psutil for system monitoring: pip install psutil")
+ is_blocking = False
+ performance_impact = "Low"
+
+ self._add_result(HealthCheck(
+ name="System Monitoring (psutil)",
+ status=status,
+ message=message,
+ details=details,
+ recommendations=recommendations if recommendations else None,
+ is_blocking=is_blocking,
+ performance_impact=performance_impact
+ ))
+
  def run_comprehensive_health_check(self) -> List[HealthCheck]:
  """Execute all health checks with progress indication."""
  print("🏥 Nedo Vision Worker Service - Comprehensive Health Check")
@@ -17,6 +17,15 @@ class WorkerSourceRepository:
  except Exception as e:
  logger.error(f"🚨 [APP] Database error while fetching worker sources: {e}", exc_info=True)
  return []
+
+
+ def get_worker_sources_by_worker_id(self, worker_id: str):
+ """Retrieve the worker sources for a specific worker from the database."""
+ try:
+ return self.session.query(WorkerSourceEntity).filter_by(worker_id=worker_id).all()
+ except Exception as e:
+ logger.error(f"🚨 [APP] Database error while fetching worker sources: {e}", exc_info=True)
+ return []
 
  def bulk_update_worker_sources(self, updated_records):
  """Batch update worker sources in the database."""
@@ -43,4 +52,4 @@ class WorkerSourceRepository:
  return None
  except Exception as e:
  logger.error(f"🚨 [APP] Database error while fetching worker source by ID {worker_source_id}: {e}", exc_info=True)
- return None
+ return None
@@ -68,7 +68,7 @@ class GrpcClientBase:
  self.connected = False
  error_msg = str(e)
 
- logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {error_msg}")
+ logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {error_msg}", exc_info=True)
 
  if attempts < self.max_retries:
  sleep_time = retry_interval * (2 ** (attempts - 1))
@@ -1,18 +1,28 @@
  import logging
  import os
  import time
- import ffmpeg
+ import subprocess
  from urllib.parse import urlparse
  from pathlib import Path
 
  from ..database.DatabaseManager import _get_storage_paths
  from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
  from ..repositories.WorkerSourcePipelineDetectionRepository import WorkerSourcePipelineDetectionRepository
- from ..util.FFmpegUtil import get_rtsp_ffmpeg_options, get_stream_timeout_duration
+ from ..util.FFmpegUtil import (
+ get_rtsp_ffmpeg_options,
+ get_stream_timeout_duration,
+ get_ffmpeg_version,
+ )
  from .GrpcClientBase import GrpcClientBase
  from .SharedDirectDeviceClient import SharedDirectDeviceClient
  from ..protos.WorkerSourcePipelineService_pb2_grpc import WorkerSourcePipelineServiceStub
- from ..protos.WorkerSourcePipelineService_pb2 import GetListByWorkerIdRequest, SendPipelineImageRequest, UpdatePipelineStatusRequest, SendPipelineDebugRequest, SendPipelineDetectionDataRequest
+ from ..protos.WorkerSourcePipelineService_pb2 import (
+ GetListByWorkerIdRequest,
+ SendPipelineImageRequest,
+ UpdatePipelineStatusRequest,
+ SendPipelineDebugRequest,
+ SendPipelineDetectionDataRequest,
+ )
  from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
 
 
@@ -23,12 +33,11 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  self.debug_repo = WorkerSourcePipelineDebugRepository()
  self.detection_repo = WorkerSourcePipelineDetectionRepository()
  storage_paths = _get_storage_paths()
- self.source_file_path = storage_paths["files"] / "source_files"
+ self.source_file_path: Path = storage_paths["files"] / "source_files"
  self.shared_device_client = SharedDirectDeviceClient()
-
- # Track video playback positions and last fetch times
- self.video_positions = {} # {video_path: current_position_in_seconds}
- self.last_fetch_times = {} # {video_path: last_fetch_timestamp}
+
+ self.video_positions = {}
+ self.last_fetch_times = {}
 
  try:
  self.connect(WorkerSourcePipelineServiceStub)
@@ -36,132 +45,127 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  logging.error(f"Failed to connect to gRPC server: {e}")
  self.stub = None
 
+
+ # ---------- small helpers ----------
+
+ @staticmethod
+ def _opts_dict_to_cli(opts: dict) -> list:
+ out = []
+ for k, v in opts.items():
+ out += [f"-{k}", str(v)]
+ return out
+
+ @staticmethod
+ def _strip_timeout_keys(d: dict) -> dict:
+ o = dict(d)
+ o.pop("rw_timeout", None)
+ o.pop("stimeout", None)
+ o.pop("timeout", None)
+ return o
+
+ @staticmethod
+ def _rtsp_timeout_flag_by_version() -> str:
+ major, minor, patch = get_ffmpeg_version()
+ return "-timeout" if major >= 5 else "-stimeout"
+
+
+ # ---------- stream detection & video position ----------
+
  def _detect_stream_type(self, url):
  if isinstance(url, str) and url.isdigit():
  return "direct"
-
+
  parsed_url = urlparse(url)
  if parsed_url.scheme == "rtsp":
  return "rtsp"
- elif parsed_url.scheme in ["http", "https"] and url.endswith(".m3u8"):
+ if parsed_url.scheme in ["http", "https"] and url.endswith(".m3u8"):
  return "hls"
- elif url.startswith("worker-source/"):
+ if url.startswith("worker-source/"):
  file_path = self.source_file_path / os.path.basename(url)
  if file_path.exists():
- video_extensions = ['.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm', '.m4v']
- if file_path.suffix.lower() in video_extensions:
+ if file_path.suffix.lower() in (".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v"):
  return "video_file"
  return "image_file"
- else:
- return "unknown"
-
+ return "unknown"
+
  def _get_video_duration(self, file_path):
- """Get the duration of a video file in seconds."""
  try:
  file_path_str = str(file_path)
  if not os.path.exists(file_path_str):
  logging.error(f"Video file does not exist: {file_path_str}")
  return None
- import subprocess
+
  import json
- cmd = [
- 'ffprobe',
- '-v', 'quiet',
- '-print_format', 'json',
- '-show_format',
- file_path_str
- ]
+ cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_format", file_path_str]
  result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
  if result.returncode != 0:
  logging.error(f"FFprobe failed for {file_path_str}: {result.stderr}")
  return None
+
  try:
  probe_data = json.loads(result.stdout)
  except json.JSONDecodeError as e:
  logging.error(f"Failed to parse ffprobe output for {file_path_str}: {e}")
  return None
- if 'format' not in probe_data or 'duration' not in probe_data['format']:
+
+ if "format" not in probe_data or "duration" not in probe_data["format"]:
  logging.error(f"No duration found in probe result for {file_path_str}")
  return None
- duration = probe_data['format']['duration']
- # Defensive: ensure duration is a float or convertible to float
+
  try:
- duration_val = float(duration)
+ duration_val = float(probe_data["format"]["duration"])
  except Exception as e:
- logging.error(f"Duration value not convertible to float: {duration} ({type(duration)}) - {e}", exc_info=True)
+ logging.error(f"Duration value not convertible to float: {e}", exc_info=True)
  return None
+
  if isinstance(duration_val, bool):
- logging.error(f"Duration value is boolean, which is invalid: {duration_val}")
+ logging.error("Duration value is boolean, which is invalid")
  return None
+
  return duration_val
+
  except Exception as e:
  logging.error(f"Error getting video duration for {file_path}: {e}", exc_info=True)
  return None
-
+
  def _get_current_video_position(self, video_path):
- """Get or advance the current playback position for a video file based on real time elapsed."""
- current_time = time.time()
-
+ now = time.time()
+
  if video_path not in self.video_positions:
  self.video_positions[video_path] = 0.0
- self.last_fetch_times[video_path] = current_time
+ self.last_fetch_times[video_path] = now
  return 0.0
-
+
  current_pos = self.video_positions[video_path]
  last_fetch_time = self.last_fetch_times[video_path]
-
- # Calculate time elapsed since last fetch
- time_elapsed = current_time - last_fetch_time
-
- # Advance position by the actual time elapsed
- current_pos += time_elapsed
-
- # Get video duration to handle looping
+ current_pos += (now - last_fetch_time)
+
  duration = self._get_video_duration(video_path)
  if duration is not None and isinstance(duration, (int, float)):
- # Loop back to beginning if we've reached the end
  if current_pos >= duration:
  current_pos = 0.0
  else:
- # Default to 120 seconds if we can't get duration
  if current_pos >= 120.0:
  current_pos = 0.0
-
- # Update the stored position and fetch time
+
  self.video_positions[video_path] = current_pos
- self.last_fetch_times[video_path] = current_time
-
+ self.last_fetch_times[video_path] = now
  return current_pos
-
- def reset_video_position(self, video_path):
- """Reset the playback position for a specific video file."""
- if video_path in self.video_positions:
- self.video_positions[video_path] = 0.0
- self.last_fetch_times[video_path] = time.time()
- logging.info(f"Reset video position for {video_path}")
-
- def reset_all_video_positions(self):
- """Reset all video playback positions."""
- self.video_positions.clear()
- self.last_fetch_times.clear()
- logging.info("Reset all video positions")
-
+
  def get_video_positions_status(self):
- """Get the current status of all video positions for debugging."""
  status = {}
  for video_path, position in self.video_positions.items():
  duration = self._get_video_duration(video_path)
  last_fetch_time = self.last_fetch_times.get(video_path, None)
  time_since_last_fetch = time.time() - last_fetch_time if last_fetch_time else None
-
+
  if duration:
- progress = (position / duration) * 100
  status[video_path] = {
  "current_position": position,
  "duration": duration,
- "progress_percent": progress,
+ "progress_percent": (position / duration) * 100,
  "last_fetch_time": last_fetch_time,
- "time_since_last_fetch": time_since_last_fetch
+ "time_since_last_fetch": time_since_last_fetch,
  }
  else:
  status[video_path] = {
@@ -169,108 +173,126 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  "duration": None,
  "progress_percent": None,
  "last_fetch_time": last_fetch_time,
- "time_since_last_fetch": time_since_last_fetch
+ "time_since_last_fetch": time_since_last_fetch,
  }
  return status
-
+
+
+ # ---------- ffmpeg cmd builders ----------
+
+ def _build_ffmpeg_cmd_rtsp(self, url: str) -> list:
+ base_opts = self._strip_timeout_keys(get_rtsp_ffmpeg_options())
+ timeout_flag = self._rtsp_timeout_flag_by_version()
+ in_args = self._opts_dict_to_cli(base_opts) + ["-rtsp_transport", "tcp", timeout_flag, "5000000", "-i", url]
+ return ["ffmpeg", "-hide_banner", "-loglevel", "error"] + in_args + [
+ "-vframes", "1", "-q:v", "2", "-f", "mjpeg", "pipe:1"
+ ]
+
+ def _build_ffmpeg_cmd_hls(self, url: str) -> list:
+ in_args = ["-f", "hls", "-analyzeduration", "10000000", "-probesize", "10000000", "-i", url]
+ return ["ffmpeg", "-hide_banner", "-loglevel", "error"] + in_args + [
+ "-vframes", "1", "-q:v", "2", "-f", "mjpeg", "pipe:1"
+ ]
+
+ def _build_ffmpeg_cmd_video_file(self, file_path: str, pos: float) -> list:
+ in_args = ["-ss", f"{pos:.3f}", "-i", file_path]
+ return ["ffmpeg", "-hide_banner", "-loglevel", "error"] + in_args + [
+ "-vframes", "1", "-q:v", "2", "-f", "mjpeg", "pipe:1"
+ ]
+
+ def _build_ffmpeg_cmd_image_file(self, file_path: str) -> list:
+ in_args = ["-i", file_path]
+ return ["ffmpeg", "-hide_banner", "-loglevel", "error"] + in_args + [
+ "-vframes", "1", "-q:v", "2", "-f", "mjpeg", "pipe:1"
+ ]
+
+
+ # ---------- frame capture ----------
+
  def _get_single_frame_bytes(self, url):
  stream_type = self._detect_stream_type(url)
-
- if stream_type == "direct":
- device_index = int(url)
- logging.info(f"📹 [APP] Capturing frame from direct video device: {device_index}")
-
- # Use the shared device client for direct devices
- try:
- # Get device properties first
+ proc = None
+
+ try:
+ if stream_type == "direct":
+ device_index = int(url)
+ logging.info(f"📹 [APP] Capturing frame from direct device: {device_index}")
+
  width, height, fps, pixel_format = self.shared_device_client.get_video_properties(url)
  if not width or not height:
  logging.error(f"Failed to get properties for device {device_index}")
  return None
-
- # Create ffmpeg input using shared device client
- ffmpeg_input = self.shared_device_client.create_ffmpeg_input(url, width, height, fps)
-
- except Exception as e:
- logging.error(f"Error setting up direct device {device_index}: {e}")
+
+ cmd = self.shared_device_client.create_ffmpeg_cli(url, width, height, fps)
+ cmd += ["-vframes", "1", "-q:v", "2", "-f", "mjpeg", "pipe:1"]
+
+ elif stream_type == "rtsp":
+ cmd = self._build_ffmpeg_cmd_rtsp(url)
+
+ elif stream_type == "hls":
+ cmd = self._build_ffmpeg_cmd_hls(url)
+
+ elif stream_type == "video_file":
+ file_path = self.source_file_path / os.path.basename(url)
+ if not file_path.exists():
+ logging.error(f"Video file does not exist: {file_path}")
+ return None
+ pos = self._get_current_video_position(str(file_path))
+ logging.info(f"🎬 [APP] Capturing video frame at {pos:.2f}s from {file_path}")
+ cmd = self._build_ffmpeg_cmd_video_file(str(file_path), pos)
+
+ elif stream_type == "image_file":
+ file_path = self.source_file_path / os.path.basename(url)
+ logging.info(f"🖼️ [APP] Capturing image frame from {file_path}")
+ cmd = self._build_ffmpeg_cmd_image_file(str(file_path))
+
+ else:
+ logging.error(f"Unsupported stream type: {url}")
  return None
- elif stream_type == "rtsp":
- rtsp_options = get_rtsp_ffmpeg_options()
- ffmpeg_input = ffmpeg.input(url, **rtsp_options)
- elif stream_type == "hls":
- ffmpeg_input = (
- ffmpeg
- .input(url, format="hls", analyzeduration="10000000", probesize="10000000")
- )
- elif stream_type == "video_file":
- file_path = self.source_file_path / os.path.basename(url)
- file_path_str = str(file_path)
-
- if not os.path.exists(file_path_str):
- logging.error(f"Video file does not exist: {file_path_str}")
+
+ timeout_s = get_stream_timeout_duration(stream_type)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ try:
+ stdout, stderr = proc.communicate(timeout=timeout_s)
+ except subprocess.TimeoutExpired:
+ proc.kill()
+ stdout, stderr = proc.communicate()
+ logging.error(f"FFmpeg timed out after {timeout_s}s for {stream_type} stream")
  return None
-
- current_position = self._get_current_video_position(file_path_str)
- logging.info(f"🎬 [APP] Capturing video frame at position {current_position:.2f}s from {file_path_str}")
-
- ffmpeg_input = (
- ffmpeg
- .input(file_path_str, ss=current_position)
- )
- elif stream_type == "image_file":
- file_path = self.source_file_path / os.path.basename(url)
- logging.info(f"🖼️ [APP] Capturing image frame from {file_path}")
-
- ffmpeg_input = (
- ffmpeg
- .input(str(file_path))
- )
- else:
- logging.error(f"Unsupported stream type: {url}")
- return None
 
- if stream_type == "video_file":
- process = (
- ffmpeg_input
- .output('pipe:', format='mjpeg', vframes=1, q=2)
- .overwrite_output()
- .run_async(pipe_stdout=True, pipe_stderr=True)
- )
- else:
- process = (
- ffmpeg_input
- .output('pipe:', format='mjpeg', vframes=1, q=2)
- .overwrite_output()
- .run_async(pipe_stdout=True, pipe_stderr=True)
- )
-
- try:
- # Use appropriate timeout for different stream types
- timeout_duration = get_stream_timeout_duration(stream_type)
- stdout, stderr = process.communicate(timeout=timeout_duration)
-
- if process.returncode != 0:
- error_msg = stderr.decode('utf-8', errors='ignore')
- logging.error(f"FFmpeg error for {stream_type} stream: {error_msg}")
+ if proc.returncode != 0:
+ logging.error(f"FFmpeg error for {stream_type} stream: {(stderr or b'').decode('utf-8', 'ignore')}")
  return None
-
+
  if not stdout:
  logging.error("No data received from FFmpeg")
  return None
-
+
  return stdout
-
+
  except Exception as e:
  logging.error(f"Error capturing frame: {e}", exc_info=True)
  return None
-
+
  finally:
- # Release device access for direct devices
  if stream_type == "direct":
- self.shared_device_client.release_device_access(url)
-
- process.terminate()
- process.wait()
+ try:
+ self.shared_device_client.release_device_access(url)
+ except Exception:
+ pass
+ if proc and proc.poll() is None:
+ try:
+ proc.terminate()
+ proc.wait(timeout=2)
+ except Exception:
+ try:
+ proc.kill()
+ except Exception:
+ pass
+
+
+ # ---------- RPCs ----------
 
  def update_pipeline_status(self, pipeline_id: str, status_code: str, token: str):
  if not self.stub:
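This refactor drops the ffmpeg-python bindings (ffmpeg.input(...).output(...).run_async(...)) in favor of building an explicit ffmpeg command line and running it with subprocess, which makes the per-stream timeout and cleanup paths easier to follow. The core pattern, reduced to a standalone sketch (grab_single_frame is illustrative, not the package's exact code):

    import subprocess

    def grab_single_frame(path: str, position: float = 0.0, timeout_s: int = 5):
        """Grab one JPEG-encoded frame from a local video file, or return None on failure."""
        cmd = [
            "ffmpeg", "-hide_banner", "-loglevel", "error",
            "-ss", f"{position:.3f}", "-i", path,
            "-vframes", "1", "-q:v", "2", "-f", "mjpeg", "pipe:1",
        ]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, _ = proc.communicate(timeout=timeout_s)
        except subprocess.TimeoutExpired:
            # Kill the hung process and collect whatever it produced before giving up.
            proc.kill()
            proc.communicate()
            return None
        return stdout if proc.returncode == 0 and stdout else None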
@@ -283,7 +305,7 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  pipeline_id=pipeline_id,
  status_code=status_code,
  timestamp=timestamp,
- token=token
+ token=token,
  )
  response = self.handle_rpc(self.stub.UpdateStatus, request)
 
@@ -304,11 +326,10 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  response = self.handle_rpc(self.stub.GetListByWorkerId, request)
 
  if response and response.success:
- # Create a wrapper function that captures the token
  def update_status_callback(pipeline_id: str, status_code: str):
  return self.update_pipeline_status(pipeline_id, status_code, token)
-
- self.repo.sync_worker_source_pipelines(response, update_status_callback) # Sync includes delete, update, insert
+
+ self.repo.sync_worker_source_pipelines(response, update_status_callback)
  return {"success": True, "message": response.message, "data": response.data}
 
  return {"success": False, "message": response.message if response else "Unknown error"}
@@ -316,22 +337,22 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  except Exception as e:
  logging.error(f"Error fetching worker source pipeline list: {e}")
  return {"success": False, "message": f"Error occurred: {e}"}
-
+
  def send_pipeline_image(self, worker_source_pipeline_id: str, uuid: str, url: str, token: str):
  if not self.stub:
  return {"success": False, "message": "gRPC connection is not established."}
 
  try:
  frame_bytes = self._get_single_frame_bytes(url)
-
+
  if not frame_bytes:
  return {"success": False, "message": "Failed to retrieve frame from source"}
-
+
  request = SendPipelineImageRequest(
  worker_source_pipeline_id=worker_source_pipeline_id,
  uuid=uuid,
  image=frame_bytes,
- token=token
+ token=token,
  )
  response = self.handle_rpc(self.stub.SendPipelineImage, request)
 
@@ -342,28 +363,19 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  except Exception as e:
  logging.error(f"Error sending pipeline image: {e}")
  return {"success": False, "message": f"Error occurred: {e}"}
-
+
  @staticmethod
  def read_image_as_binary(image_path: str) -> bytes:
- """
- Reads an image file and returns its binary content.
-
- Args:
- image_path (str): Path to the image file.
-
- Returns:
- bytes: Binary content of the image.
- """
- with open(image_path, 'rb') as image_file:
- return image_file.read()
-
+ with open(image_path, "rb") as f:
+ return f.read()
+
  def sync_pipeline_debug(self, token: str):
  if not self.stub:
  return {"success": False, "message": "gRPC connection is not established."}
 
  try:
  debug_entries = self.debug_repo.get_debug_entries_with_data()
-
+
  for debug_entry in debug_entries:
  image_binary = self.read_image_as_binary(debug_entry.image_path)
 
@@ -372,7 +384,7 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  uuid=debug_entry.uuid,
  data=debug_entry.data,
  image=image_binary,
- token=token
+ token=token,
  )
  response = self.handle_rpc(self.stub.SendPipelineDebug, request)
 
@@ -392,7 +404,7 @@ class WorkerSourcePipelineClient(GrpcClientBase):
 
  try:
  entries = self.detection_repo.get_entries()
-
+
  for entry in entries:
  image_binary = self.read_image_as_binary(entry.image_path)
 
@@ -401,7 +413,7 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  data=entry.data,
  image=image_binary,
  timestamp=int(entry.created_at.timestamp() * 1000),
- token=token
+ token=token,
  )
  response = self.handle_rpc(self.stub.SendPipelineDetectionData, request)
 
@@ -413,4 +425,4 @@ class WorkerSourcePipelineClient(GrpcClientBase):
  return {"success": True, "message": "Successfully synced debug entries"}
 
  except Exception as e:
- logging.error(f"Error syncing pipeline debug: {e}")
+ logging.error(f"Error syncing pipeline debug: {e}")
@@ -16,9 +16,10 @@ class WorkerSourceUpdater:
  This class is thread-safe and can be used concurrently from multiple threads.
  """
 
- def __init__(self, server_host: str, token: str):
+ def __init__(self, worker_id: str, token: str):
  storage_paths = _get_storage_paths()
  self.source_file_path = storage_paths["files"] / "source_files"
+ self.worker_id = worker_id
  # Use shared client instead of creating new instance
  self.client = GrpcClientManager.get_shared_client(WorkerSourceClient)
  self.repo = WorkerSourceRepository()
@@ -49,7 +50,7 @@ class WorkerSourceUpdater:
  """
  with self._lock:
  try:
- worker_sources = self.repo.get_all_worker_sources()
+ worker_sources = self.repo.get_worker_sources_by_worker_id(self.worker_id)
  updated_records = []
 
  for source in worker_sources:
@@ -130,7 +131,7 @@ class WorkerSourceUpdater:
  """
  with self._lock:
  try:
- worker_sources = self.repo.get_all_worker_sources()
+ worker_sources = self.repo.get_worker_sources_by_worker_id(self.worker_id)
  updated_records = []
 
  for source in worker_sources:
@@ -1,124 +1,73 @@
- """
- FFmpeg utilities for Jetson compatibility and RTSP stream handling.
-
- This module provides common FFmpeg configurations and utilities that work
- reliably on Jetson devices with FFmpeg 4.4.1 and newer versions.
- """
-
  import logging
  import subprocess
  import re
- from typing import Dict, Any, Tuple
+ from typing import Dict, Any, Tuple, List
 
 
  def get_ffmpeg_version() -> Tuple[int, int, int]:
- """
- Get the FFmpeg version as a tuple of (major, minor, patch).
-
- Returns:
- Tuple[int, int, int]: Version tuple (major, minor, patch)
- """
  try:
- result = subprocess.run(['ffmpeg', '-version'], capture_output=True, text=True, timeout=5)
+ result = subprocess.run(
+ ["ffmpeg", "-version"],
+ capture_output=True,
+ text=True,
+ timeout=5,
+ check=False,
+ )
  if result.returncode == 0:
- # Extract version from output like "ffmpeg version n7.1.1" or "ffmpeg version 4.4.1"
- match = re.search(r'ffmpeg version n?(\d+)\.(\d+)\.(\d+)', result.stdout)
- if match:
- return (int(match.group(1)), int(match.group(2)), int(match.group(3)))
+ m = re.search(r"ffmpeg version n?(\d+)\.(\d+)(?:\.(\d+))?", result.stdout)
+ if m:
+ major = int(m.group(1))
+ minor = int(m.group(2))
+ patch = int(m.group(3)) if m.group(3) else 0
+ return major, minor, patch
  except Exception as e:
  logging.warning(f"Could not determine FFmpeg version: {e}")
-
- # Default to a reasonable version if detection fails
- return (4, 4, 1)
+ return 4, 4, 1
+
+
+ def _supports_rw_timeout(v: Tuple[int, int, int]) -> bool:
+ major, minor, _ = v
+ return major >= 5 or (major == 4 and minor >= 3)
 
 
  def get_rtsp_ffmpeg_options() -> Dict[str, Any]:
- """
- Get FFmpeg options optimized for RTSP streams with version compatibility.
-
- These options work across different FFmpeg versions:
- - FFmpeg 4.4.x: Uses stimeout
- - FFmpeg 5.x+: Uses timeout
- - FFmpeg 7.x+: Uses timeout
-
- Returns:
- Dict[str, Any]: FFmpeg input options for RTSP streams
- """
- version = get_ffmpeg_version()
- major, minor, patch = version
-
- # Base options that work across all versions
- options = {
+ v = get_ffmpeg_version()
+
+ opts = {
  "rtsp_transport": "tcp",
- "fflags": "nobuffer+genpts",
- "max_delay": "5000000", # Max buffering delay
- "buffer_size": "1024000", # Input buffer size
- "avoid_negative_ts": "make_zero" # Handle timestamp issues
+ "probesize": "256k",
+ "analyzeduration": "1000000",
+ "buffer_size": "1024000",
+ "max_delay": "700000",
+ "fflags": "nobuffer+genpts",
  }
-
- # Add version-specific timeout option
- if major == 4 and minor == 4:
- # FFmpeg 4.4.x uses stimeout
- options["stimeout"] = "5000000"
- logging.debug("Using stimeout for FFmpeg 4.4.x")
+
+ if _supports_rw_timeout(v):
+ opts["rw_timeout"] = "5000000"
  else:
- # FFmpeg 5.x+ uses timeout (microseconds)
- options["timeout"] = "5000000"
- logging.debug(f"Using timeout for FFmpeg {major}.{minor}.{patch}")
-
- return options
+ opts["stimeout"] = "5000000"
 
+ return opts
 
- def get_rtsp_probe_options() -> list:
- """
- Get ffprobe command line options for RTSP streams with version compatibility.
-
- Returns:
- list: Command line options to insert into ffprobe command
- """
- version = get_ffmpeg_version()
- major, minor, patch = version
-
- base_options = ["-rtsp_transport", "tcp"]
-
- # Add version-specific timeout option
- if major == 4 and minor == 4:
- # FFmpeg 4.4.x uses stimeout
- return base_options + ["-stimeout", "5000000"]
- else:
- # FFmpeg 5.x+ uses timeout
- return base_options + ["-timeout", "5000000"]
 
+ def get_rtsp_probe_options() -> List[str]:
+ v = get_ffmpeg_version()
 
- def log_ffmpeg_version_info():
- """Log information about FFmpeg compatibility."""
- version = get_ffmpeg_version()
- major, minor, patch = version
-
- logging.info(f"Detected FFmpeg version: {major}.{minor}.{patch}")
-
- if major == 4 and minor == 4:
- logging.info("Using 'stimeout' parameter for FFmpeg 4.4.x compatibility")
- else:
- logging.info(f"Using 'timeout' parameter for FFmpeg {major}.{minor}.{patch}")
-
- logging.info("RTSP configuration optimized for embedded devices")
+ opts = [
+ "-v", "error",
+ "-rtsp_transport", "tcp",
+ "-probesize", "256k",
+ "-analyzeduration", "1000000",
+ ]
 
+ opts += ["-rw_timeout" if _supports_rw_timeout(v) else "-stimeout", "5000000"]
+ return opts
 
- def get_stream_timeout_duration(stream_type: str) -> int:
- """
- Get appropriate timeout duration for different stream types.
-
- Args:
- stream_type (str): Type of stream (rtsp, hls, direct, etc.)
-
- Returns:
- int: Timeout duration in seconds
- """
- timeouts = {
- "rtsp": 30, # RTSP streams may take longer to connect
- "hls": 20, # HLS streams need time for manifest download
- "direct": 10, # Direct device access should be faster
- "video_file": 5 # Local files should be very fast
- }
- return timeouts.get(stream_type, 15) # Default 15 seconds
+
+ def get_stream_timeout_duration(t: str) -> int:
+ return {
+ "rtsp": 30,
+ "hls": 20,
+ "direct": 10,
+ "video_file": 5,
+ }.get(t, 15)
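The rewritten module keeps the same public functions but simplifies the version gate: _supports_rw_timeout() selects rw_timeout on FFmpeg 4.3 and newer and falls back to stimeout on older builds, so callers no longer branch on the version themselves. A rough sketch of how the returned option dict can be expanded into CLI flags, mirroring what _opts_dict_to_cli does in WorkerSourcePipelineClient (rtsp_input_args is illustrative, not part of the package):

    from nedo_vision_worker.util.FFmpegUtil import get_rtsp_ffmpeg_options

    def rtsp_input_args(url: str) -> list:
        """Expand the RTSP option dict into ffmpeg CLI flags for a single input."""
        args = []
        for key, value in get_rtsp_ffmpeg_options().items():
            args += [f"-{key}", str(value)]
        return ["ffmpeg", "-hide_banner", "-loglevel", "error"] + args + ["-i", url]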
@@ -103,8 +103,7 @@ class VideoProbeUtil:
  frame_rate = round(frame_rate, 2)
 
  cap.release()
-
- logging.info(f"✅ [APP] Successfully probed device {device_idx} directly: {width}x{height} @ {frame_rate}fps")
+
 
  return {
  "resolution": f"{width}x{height}",
@@ -183,8 +182,6 @@ class VideoProbeUtil:
  if not width or not height:
  logging.warning(f"⚠️ [APP] Invalid resolution from FFmpeg for device {device_idx}")
  return None
-
- logging.info(f"✅ [APP] Successfully probed device {device_idx} with FFmpeg: {width}x{height} @ {frame_rate}fps")
 
  return {
  "resolution": f"{width}x{height}",
@@ -246,13 +243,11 @@ class VideoProbeUtil:
  logging.debug(traceback.format_exc())
 
  # Fallback 1: Try direct OpenCV access
- logging.info(f"🔄 [APP] Daemon not available for device {device_idx}, falling back to direct OpenCV access")
  metadata = VideoProbeUtil._get_metadata_opencv_direct_device(device_idx)
  if metadata:
  return metadata
 
  # Fallback 2: Try FFmpeg for device access (Linux v4l2, Windows dshow)
- logging.info(f"🔄 [APP] OpenCV failed for device {device_idx}, trying FFmpeg device access")
  return VideoProbeUtil._get_metadata_ffmpeg_direct_device(device_idx)
 
  except Exception as e:
@@ -61,7 +61,7 @@ class DataSenderWorker:
  self.restricted_area_manager = RestrictedAreaManager(self.server_host, self.worker_id, "worker_source_id", self.token)
  self.dataset_frame_sender = DatasetFrameSender(self.server_host, self.token)
 
- self.source_updater = WorkerSourceUpdater(self.server_host, self.token)
+ self.source_updater = WorkerSourceUpdater(self.worker_id, self.token)
 
  def start(self):
  """Start the Data Sender Worker threads."""
@@ -119,7 +119,7 @@ class PipelineImageWorker:
  if response.get("success"):
  logger.info("✅ [APP] Successfully sent Pipeline Image Preview.")
  else:
- logger.error(f"❌ [APP] Failed to send Pipeline Image Preview: {response.get('message')}")
+ logger.error(f"❌ [APP] Failed to send Pipeline Image Preview: {response.get('message')}", exc_info=True)
 
  except json.JSONDecodeError:
  logger.error("⚠️ [APP] Invalid JSON message format.")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nedo-vision-worker
- Version: 1.2.1
+ Version: 1.2.2
  Summary: Nedo Vision Worker Service Library for AI Vision Processing
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -165,6 +165,8 @@ pip install -e .
  pip install -e .[dev]
  ```
 
+ See [INSTALL.md](INSTALL.md) for detailed installation instructions.
+
  ## 🔍 System Diagnostics
 
  Before running the worker service, use the built-in diagnostic tool to verify your system:
@@ -1,6 +1,6 @@
- nedo_vision_worker/__init__.py,sha256=OuQTi_CjaQDpS1kVellEttgj1dkluSOCwxE0teArbyY,203
+ nedo_vision_worker/__init__.py,sha256=c7rG8vVkrVGLJAt4kkrLAadY2NQ_SG_t161AE3BwGRA,203
  nedo_vision_worker/cli.py,sha256=ddWspJmSgVkcUYvRdkvTtMNuMTDvNCqLLuMVU9KE3Ik,7457
- nedo_vision_worker/doctor.py,sha256=uZ-NM_PfaTG5CG5OWFnl7cEsOTBWMGXNuKWuTV07deg,47228
+ nedo_vision_worker/doctor.py,sha256=wNkpe8gLVd76Y_ViyK2h1ZFdqeSl37MnzZN5frWKu30,48410
  nedo_vision_worker/worker_service.py,sha256=rXUVmyxcJPGhQEZ4UQvjQS5UqlnLBYudHQZCj0dQDxo,10421
  nedo_vision_worker/config/ConfigurationManager.py,sha256=QrQaQ9Cdjpkcr2JE_miyrWJIZmMgZwJYBz-wE45Zzes,8011
  nedo_vision_worker/config/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
@@ -45,14 +45,14 @@ nedo_vision_worker/repositories/RestrictedAreaRepository.py,sha256=y3n2ZfQbth1I_
  nedo_vision_worker/repositories/WorkerSourcePipelineDebugRepository.py,sha256=kOlVEnPOoDRZdZIm8uWXlc89GMvBPI-36QyKecX7ucE,3350
  nedo_vision_worker/repositories/WorkerSourcePipelineDetectionRepository.py,sha256=cbgg_7p0eNUIgCHoPDZBaRZ1b2Y68p_dfSxpvuGMtRE,1773
  nedo_vision_worker/repositories/WorkerSourcePipelineRepository.py,sha256=xfmEvgnyt-DdfSApGyFfy0H0dXjFFkjeo4LMr0fVFXU,10053
- nedo_vision_worker/repositories/WorkerSourceRepository.py,sha256=Rw9wJ27TETECCNwDxQu19KaKipQ_XHU0JJP6-0rzgmU,1982
+ nedo_vision_worker/repositories/WorkerSourceRepository.py,sha256=AhAJLAacMFdsOgtQNiu7Pahl1DAGI0T1THHeUlKwQJc,2385
  nedo_vision_worker/repositories/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  nedo_vision_worker/services/AIModelClient.py,sha256=lxRNax6FR-pV0G1NpJnlaqjbQeu3kRolIUNSw1RkoZA,15406
  nedo_vision_worker/services/ConnectionInfoClient.py,sha256=toC9zuY2Hrx1Cwq8Gycy_iFlaG1DvFT4qewlLlitpEQ,2214
  nedo_vision_worker/services/DatasetSourceClient.py,sha256=O5a7onxFl0z47zXaMXWxHAMPuuc-i_vzkd2w5fwrukc,3319
  nedo_vision_worker/services/DirectDeviceToRTMPStreamer.py,sha256=M5ei0cd3_KDhHZp6EkrOowhAY-hAHfAQh9YDVjQtbQI,22278
  nedo_vision_worker/services/FileToRTMPServer.py,sha256=yUJxrouoTLSq9XZ88dhDYhP-px10jLoHopkPoy4lQxk,2663
- nedo_vision_worker/services/GrpcClientBase.py,sha256=9tNOGFfcm1Vy7ELgiA78KmGYCT13d4nBzpZkRkhsFKI,7385
+ nedo_vision_worker/services/GrpcClientBase.py,sha256=bRNeajiPGcJZtNofD_HU7JhLHVPbnuGacqv5Dp62GC0,7400
  nedo_vision_worker/services/GrpcClientManager.py,sha256=DLXekmxlQogLo8V9-TNDXtyHT_UG-BaggqwsIups55k,5568
  nedo_vision_worker/services/ImageUploadClient.py,sha256=T353YsRfm74G7Mh-eWr5nvdQHXTfpKwHJFmNW8HyjT8,3019
  nedo_vision_worker/services/PPEDetectionClient.py,sha256=CC-b0LRAgrftfIKp6TFKpeBkTYefe-C6Z1oz_X3HArQ,4345
@@ -65,34 +65,34 @@ nedo_vision_worker/services/SystemWideDeviceCoordinator.py,sha256=9zBJMCbTMZS7gw
  nedo_vision_worker/services/VideoSharingDaemon.py,sha256=hYMjUIKNUVT1qSxuUuHN-7Bd85MDkxfqslxDLe2PBYQ,29721
  nedo_vision_worker/services/VideoStreamClient.py,sha256=QSgUV3LijYrNdnBG1ylABOdUaSatQamfXaqJhAiol9M,7260
  nedo_vision_worker/services/WorkerSourceClient.py,sha256=vDZeCuHL5QQ2-knZ4TOSA59jzmbbThGIwFKKLEZ72Ws,9198
- nedo_vision_worker/services/WorkerSourcePipelineClient.py,sha256=amhZOua0lmlWl7ZkU-SSbT2E1Y5D-uKde9bDCVxOqM4,17924
- nedo_vision_worker/services/WorkerSourceUpdater.py,sha256=MsUsKL75sXj2odCcbupkFDW0KXg9LSu6-67iMWpYkHs,7679
+ nedo_vision_worker/services/WorkerSourcePipelineClient.py,sha256=qaBx9T2gWMzpqZaeQdbIeklsXNwzWD5kqgB41rrSkBI,17135
+ nedo_vision_worker/services/WorkerSourceUpdater.py,sha256=r_pCL1NiUlgPUFrntE1DWFG-KJygZPK51lAUGPwlzxo,7758
  nedo_vision_worker/services/WorkerStatusClient.py,sha256=7kC5EZjEBwWtHOE6UQ29OPCpYnv_6HSuH7Tc0alK_2Q,2531
  nedo_vision_worker/services/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
- nedo_vision_worker/util/FFmpegUtil.py,sha256=_TRA_Q2CAKatrbhOMKG5Uhhk6FrMJ6sbwMnd0Qwq0co,4086
+ nedo_vision_worker/util/FFmpegUtil.py,sha256=QnQrzurmllzGb7SlAAYCrzKBUblweoFU-0h-X-32IYg,1829
  nedo_vision_worker/util/HardwareID.py,sha256=rSW8-6stm7rjXEdkYGqXMUn56gyw62YiWnSwZQVCCLM,4315
  nedo_vision_worker/util/ImageUploader.py,sha256=2xipN3fwpKgFmbvoGIdElpGn5ARJyrgR4dXtbRf73hw,3764
  nedo_vision_worker/util/Networking.py,sha256=uOtL8HkKZXJp02ZZIHWYMAvAsaYb7BsAPTncfdvJx2c,3241
  nedo_vision_worker/util/PlatformDetector.py,sha256=-iLPrKs7hp_oltkCI3hESJQkC2uRyu1-8mAbZrvgWx0,1501
  nedo_vision_worker/util/SystemMonitor.py,sha256=2MWYaEXoL91UANT_-SuDWrFMq1ajPorh8co6Py9PV_c,11300
- nedo_vision_worker/util/VideoProbeUtil.py,sha256=1ViUjt6NSMilquapHf6XC0h93OwhA40LwGSnYOtpMh0,13362
+ nedo_vision_worker/util/VideoProbeUtil.py,sha256=cF-vJ7hIDlXfEJby2a0s9tqwkPGVz_6B3Vv4D5pMmIw,12876
  nedo_vision_worker/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  nedo_vision_worker/worker/CoreActionWorker.py,sha256=lb7zPY3yui6I3F4rX4Ii7JwpWZahLEO72rh3iWOgFmg,5441
- nedo_vision_worker/worker/DataSenderWorker.py,sha256=o32MT28EqYwAPmd9NKhX3rfNlDKekNOI2n8mZ6s8CpU,7162
+ nedo_vision_worker/worker/DataSenderWorker.py,sha256=9FudRRItiMOcQx5UfVyu4p0Enb9BbgwZZ5EgX6Ho2U4,7160
  nedo_vision_worker/worker/DataSyncWorker.py,sha256=WvYfi3bG4mOKHU09J_MavfjFPrVgmxrrZYtrlQ-bnio,6265
  nedo_vision_worker/worker/DatasetFrameSender.py,sha256=1SFYj8LJFNi-anBTapsbq8U_NGMM7mnoMKg9NeFAHys,8087
  nedo_vision_worker/worker/DatasetFrameWorker.py,sha256=Ni5gPeDPk9Rz4_cbg63u7Y6LVw_-Bz24OvfeY-6Yp44,19320
  nedo_vision_worker/worker/PPEDetectionManager.py,sha256=fAolWlrsY5SQAWygvjNBNU56IlC0HLlhPgpz7shL-gk,3588
  nedo_vision_worker/worker/PipelineActionWorker.py,sha256=xgvryjKtEsMj4BKqWzDIaK_lFny-DfMCj5Y2DxHnWww,5651
- nedo_vision_worker/worker/PipelineImageWorker.py,sha256=c8_cTasgN-NJABD_qHSRb3hatg81sY_rV3lAAnuW49U,5627
+ nedo_vision_worker/worker/PipelineImageWorker.py,sha256=J8VBUG0cwcH3qOJp2zTl30B-XhmPFyvJLjxitKJYq0E,5642
  nedo_vision_worker/worker/RabbitMQListener.py,sha256=9gR49MDplgpyb-D5HOH0K77-DJQFvhS2E7biL92SjSU,6950
  nedo_vision_worker/worker/RestrictedAreaManager.py,sha256=3yoXgQ459tV1bOa5choEzR9gE6LklrtHR_e0472U3L0,3521
  nedo_vision_worker/worker/SystemUsageManager.py,sha256=StutV4UyLUfduYfK20de4SbPd7wqkR7io0gsOajxWSU,4509
  nedo_vision_worker/worker/VideoStreamWorker.py,sha256=5n6v1PNO7IB-jj_McALLkUP-cBjJoIEw4UiSAs3vTb0,7606
  nedo_vision_worker/worker/WorkerManager.py,sha256=T0vMfhOd7YesgQ9o2w6soeJ6n9chUAcuwcGe7p31xr0,5298
  nedo_vision_worker/worker/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
- nedo_vision_worker-1.2.1.dist-info/METADATA,sha256=GUzjnngaGQ7i48oY3BMpdPB701o8_n2LxwlayNJb3uw,14591
- nedo_vision_worker-1.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nedo_vision_worker-1.2.1.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
- nedo_vision_worker-1.2.1.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
- nedo_vision_worker-1.2.1.dist-info/RECORD,,
+ nedo_vision_worker-1.2.2.dist-info/METADATA,sha256=w8bht7PkcQq3kajb3eqOK4_c8bBqZTM_QG_ZI2ZnQw8,14661
+ nedo_vision_worker-1.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nedo_vision_worker-1.2.2.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
+ nedo_vision_worker-1.2.2.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
+ nedo_vision_worker-1.2.2.dist-info/RECORD,,