nedo-vision-worker 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. nedo_vision_worker/__init__.py +1 -1
  2. nedo_vision_worker/database/DatabaseManager.py +17 -1
  3. nedo_vision_worker/initializer/AppInitializer.py +60 -0
  4. nedo_vision_worker/models/worker_source_pipeline.py +2 -1
  5. nedo_vision_worker/services/DirectDeviceToRTMPStreamer.py +12 -6
  6. nedo_vision_worker/services/FileToRTMPServer.py +7 -3
  7. nedo_vision_worker/services/GrpcClientBase.py +23 -34
  8. nedo_vision_worker/services/GrpcConnection.py +147 -0
  9. nedo_vision_worker/services/RTSPtoRTMPStreamer.py +10 -8
  10. nedo_vision_worker/services/SystemUsageClient.py +2 -1
  11. nedo_vision_worker/services/WorkerSourcePipelineClient.py +25 -7
  12. nedo_vision_worker/util/EncoderSelector.py +109 -0
  13. nedo_vision_worker/util/SystemMonitor.py +3 -0
  14. nedo_vision_worker/worker/DataSyncWorker.py +2 -2
  15. nedo_vision_worker/worker/DatasetFrameWorker.py +1 -1
  16. nedo_vision_worker/worker/PipelinePreviewWorker.py +160 -0
  17. nedo_vision_worker/worker/SystemUsageManager.py +22 -3
  18. nedo_vision_worker/worker/WorkerManager.py +4 -0
  19. nedo_vision_worker/worker_service.py +10 -0
  20. {nedo_vision_worker-1.2.6.dist-info → nedo_vision_worker-1.2.8.dist-info}/METADATA +4 -3
  21. {nedo_vision_worker-1.2.6.dist-info → nedo_vision_worker-1.2.8.dist-info}/RECORD +24 -21
  22. {nedo_vision_worker-1.2.6.dist-info → nedo_vision_worker-1.2.8.dist-info}/WHEEL +0 -0
  23. {nedo_vision_worker-1.2.6.dist-info → nedo_vision_worker-1.2.8.dist-info}/entry_points.txt +0 -0
  24. {nedo_vision_worker-1.2.6.dist-info → nedo_vision_worker-1.2.8.dist-info}/top_level.txt +0 -0
@@ -6,5 +6,5 @@ A library for running worker agents in the Nedo Vision platform.
6
6
 
7
7
  from .worker_service import WorkerService
8
8
 
9
- __version__ = "1.2.6"
9
+ __version__ = "1.2.8"
10
10
  __all__ = ["WorkerService"]
@@ -104,7 +104,23 @@ class DatabaseManager:
104
104
  # Initialize engines and session factories for each database
105
105
  for name, path in DB_PATHS.items():
106
106
  path.parent.mkdir(parents=True, exist_ok=True) # Ensure directory exists
107
- engine = create_engine(f"sqlite:///{path.as_posix()}")
107
+
108
+ # Configure connection pool for multi-threaded usage
109
+ # pool_size: Max connections to keep open
110
+ # max_overflow: Additional connections that can be created temporarily
111
+ # pool_pre_ping: Test connections before using (prevents stale connections)
112
+ # pool_recycle: Recycle connections after N seconds (prevents long-lived stale connections)
113
+ engine = create_engine(
114
+ f"sqlite:///{path.as_posix()}",
115
+ pool_size=20, # Base pool size for persistent connections
116
+ max_overflow=30, # Allow up to 30 additional temporary connections
117
+ pool_pre_ping=True, # Verify connection health before use
118
+ pool_recycle=3600, # Recycle connections after 1 hour
119
+ connect_args={
120
+ "check_same_thread": False, # Required for SQLite with multiple threads
121
+ "timeout": 30.0 # Connection timeout
122
+ }
123
+ )
108
124
  ENGINES[name] = engine
109
125
  SESSION_FACTORIES[name] = scoped_session(sessionmaker(bind=engine)) # Use scoped sessions
110
126
  DatabaseManager.synchronize(name)
@@ -76,3 +76,63 @@ class AppInitializer:
76
76
  logging.error(f"Grpc Error: {ge}")
77
77
  except Exception as e:
78
78
  logging.error(f"Unexpected error during initialization: {e}")
79
+
80
+ @staticmethod
81
+ def update_connection_info(server_host: str, server_port: int, token: str):
82
+ """
83
+ Fetch and update connection information (RabbitMQ credentials) from the server.
84
+ This should be called on startup to ensure credentials are up-to-date.
85
+
86
+ Args:
87
+ server_host: The server hostname or IP address
88
+ server_port: The gRPC server port
89
+ token: Authentication token for the worker
90
+
91
+ Returns:
92
+ bool: True if update was successful, False otherwise
93
+ """
94
+ try:
95
+ # Validate server host
96
+ AppInitializer.validate_server_host(server_host)
97
+
98
+ # Get connection info using the ConnectionInfoClient
99
+ connection_client = ConnectionInfoClient(server_host, server_port, token)
100
+ connection_result = connection_client.get_connection_info()
101
+
102
+ if not connection_result["success"]:
103
+ logging.error(f"Failed to fetch connection info: {connection_result['message']}")
104
+ return False
105
+
106
+ # Check if any RabbitMQ credentials have changed
107
+ current_config = ConfigurationManager.get_all_configs()
108
+ config_updated = False
109
+
110
+ rabbitmq_fields = {
111
+ 'rabbitmq_host': connection_result['rabbitmq_host'],
112
+ 'rabbitmq_port': str(connection_result['rabbitmq_port']),
113
+ 'rabbitmq_username': connection_result['rabbitmq_username'],
114
+ 'rabbitmq_password': connection_result['rabbitmq_password']
115
+ }
116
+
117
+ for field, new_value in rabbitmq_fields.items():
118
+ if current_config.get(field) != new_value:
119
+ ConfigurationManager.set_config(field, new_value)
120
+ config_updated = True
121
+ logging.info(f"✅ [APP] Updated {field}")
122
+
123
+ if config_updated:
124
+ logging.info("✅ [APP] RabbitMQ connection info updated successfully")
125
+ else:
126
+ logging.info("✅ [APP] RabbitMQ connection info is up-to-date")
127
+
128
+ return True
129
+
130
+ except ValueError as ve:
131
+ logging.error(f"Validation error: {ve}")
132
+ return False
133
+ except grpc.RpcError as ge:
134
+ logging.error(f"gRPC Error: {ge}")
135
+ return False
136
+ except Exception as e:
137
+ logging.error(f"Unexpected error updating connection info: {e}")
138
+ return False
@@ -1,4 +1,4 @@
1
- from sqlalchemy import Column, String
1
+ from sqlalchemy import Column, String, DateTime
2
2
  from sqlalchemy.orm import relationship
3
3
  from ..database.DatabaseManager import Base
4
4
 
@@ -13,6 +13,7 @@ class WorkerSourcePipelineEntity(Base):
13
13
  ai_model_id = Column(String, nullable=True)
14
14
  pipeline_status_code = Column(String, nullable=False)
15
15
  location_name = Column(String, nullable=True)
16
+ last_preview_request_at = Column(DateTime, nullable=True)
16
17
 
17
18
  worker_source_pipeline_configs = relationship(
18
19
  "WorkerSourcePipelineConfigEntity",
@@ -8,6 +8,7 @@ import cv2
8
8
  import os
9
9
  from .VideoSharingDaemon import VideoSharingClient
10
10
  from ..database.DatabaseManager import get_storage_path
11
+ from ..util.EncoderSelector import EncoderSelector
11
12
 
12
13
 
13
14
  class DirectDeviceToRTMPStreamer:
@@ -68,6 +69,10 @@ class DirectDeviceToRTMPStreamer:
68
69
 
69
70
  def _start_ffmpeg_stream(self):
70
71
  """Starts an FFmpeg process to stream frames to the RTMP server silently."""
72
+ # Get optimal encoder for hardware
73
+ encoder_args, encoder_name = EncoderSelector.get_encoder_args()
74
+ logging.info(f"🎬 [APP] Using encoder: {encoder_name}")
75
+
71
76
  ffmpeg_command = [
72
77
  "ffmpeg",
73
78
  "-y",
@@ -79,16 +84,17 @@ class DirectDeviceToRTMPStreamer:
79
84
  "-video_size", f"{self.width}x{self.height}",
80
85
  "-framerate", str(self.fps),
81
86
  "-i", "-",
82
- "-c:v", "libx264",
83
- "-preset", "ultrafast",
84
- "-tune", "zerolatency",
87
+
88
+ # Video encoding with optimal encoder
89
+ *encoder_args,
85
90
  "-b:v", self.bitrate,
86
- # Disable Audio (Avoid unnecessary encoding overhead)
87
- "-an",
88
91
  "-maxrate", "2500k",
89
92
  "-bufsize", "5000k",
93
+
94
+ # Disable Audio
95
+ "-an",
96
+
90
97
  "-f", "flv",
91
- # Remove duration limit - let application control duration
92
98
  self.rtmp_url,
93
99
  ]
94
100
 
@@ -1,6 +1,7 @@
1
1
  import subprocess
2
2
  import logging
3
3
  import os
4
+ from ..util.EncoderSelector import EncoderSelector
4
5
 
5
6
  class FileToRTMPStreamer:
6
7
  def __init__(self, video_path, rtmp_url, stream_key, fps=30, resolution="1280x720", loop=False):
@@ -31,6 +32,10 @@ class FileToRTMPStreamer:
31
32
 
32
33
  logging.info(f"📼 [APP] Starting file stream: {self.video_path} → {self.rtmp_url}")
33
34
 
35
+ # Get optimal encoder for hardware
36
+ encoder_args, encoder_name = EncoderSelector.get_encoder_args()
37
+ logging.info(f"🎬 [APP] Using encoder: {encoder_name}")
38
+
34
39
  # FFmpeg command
35
40
  ffmpeg_command = [
36
41
  "ffmpeg",
@@ -38,9 +43,8 @@ class FileToRTMPStreamer:
38
43
  "-stream_loop", "-1" if self.loop else "0", # Loop if needed
39
44
  "-i", self.video_path,
40
45
 
41
- "-c:v", "libx264",
42
- "-preset", "ultrafast",
43
- "-tune", "zerolatency",
46
+ # Video encoding with optimal encoder
47
+ *encoder_args,
44
48
  "-r", str(self.fps),
45
49
  "-b:v", "1500k",
46
50
  "-maxrate", "2000k",
@@ -3,6 +3,7 @@ import logging
3
3
  import time
4
4
  from grpc import StatusCode
5
5
  from typing import Callable, Optional, Any, Dict
6
+ from .GrpcConnection import GrpcConnection
6
7
 
7
8
  logger = logging.getLogger(__name__)
8
9
 
@@ -42,42 +43,21 @@ class GrpcClientBase:
42
43
  def __init__(self, server_host: str, server_port: int = 50051, max_retries: int = 3):
43
44
  self.server_address = f"{server_host}:{server_port}"
44
45
  self.channel: Optional[grpc.Channel] = None
45
- self.stub: Optional[Any] = None
46
46
  self.connected = False
47
47
  self.max_retries = max_retries
48
48
 
49
+ self.connection = GrpcConnection(server_host, server_port)
50
+
49
51
  def connect(self, stub_class, retry_interval: int = 2) -> bool:
50
- attempts = 0
51
- while attempts < self.max_retries and not self.connected:
52
- try:
53
- if self.channel:
54
- self._close_channel()
55
-
56
- self.channel = grpc.insecure_channel(self.server_address)
57
-
58
- future = grpc.channel_ready_future(self.channel)
59
- future.result(timeout=30)
60
-
61
- self.stub = stub_class(self.channel)
62
- self.connected = True
63
- logger.info(f"🚀 Connected to gRPC server at {self.server_address}")
64
- return True
65
-
66
- except (grpc.RpcError, grpc.FutureTimeoutError, Exception) as e:
67
- attempts += 1
68
- self.connected = False
69
- error_msg = str(e)
70
-
71
- logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {error_msg}", exc_info=True)
72
-
73
- if attempts < self.max_retries:
74
- sleep_time = retry_interval * (2 ** (attempts - 1))
75
- logger.info(f"⏳ Retrying in {sleep_time}s...")
76
- time.sleep(sleep_time)
77
- else:
78
- logger.critical("❌ Max retries reached. Connection failed.")
79
-
80
- return False
52
+ conn = self.connection.get_connection()
53
+ if conn is None:
54
+ return False
55
+ requested_stub = stub_class(conn)
56
+
57
+ self.stub = requested_stub
58
+ self.connected = True
59
+
60
+ return True
81
61
 
82
62
  def _close_channel(self) -> None:
83
63
  try:
@@ -89,6 +69,7 @@ class GrpcClientBase:
89
69
  self.channel = None
90
70
  self.stub = None
91
71
 
72
+ # MARK:
92
73
  def close(self) -> None:
93
74
  self._close_channel()
94
75
  self.connected = False
@@ -104,6 +85,11 @@ class GrpcClientBase:
104
85
  except grpc.RpcError as e:
105
86
  return self._handle_grpc_error(e, rpc_call, *args, **kwargs)
106
87
  except Exception as e:
88
+ print(e)
89
+ print(str(e) == "Cannot invoke RPC on closed channel!")
90
+ if str(e) == "Cannot invoke RPC on closed channel!":
91
+ self.connect(type(self.stub))
92
+
107
93
  logger.error(f"💥 Unexpected RPC error: {e}")
108
94
  return None
109
95
 
@@ -127,8 +113,11 @@ class GrpcClientBase:
127
113
 
128
114
  return None
129
115
 
116
+ # MARK:
117
+ # Should request for reconnection two times. Notify grpc connection to do reconnect
130
118
  def _handle_unavailable(self, rpc_call: Callable, *args, **kwargs) -> Optional[Any]:
131
- self.connected = False
119
+ # self.connected = False
120
+ self.connection.try_reconnect()
132
121
 
133
122
  if self.stub:
134
123
  stub_class = type(self.stub)
@@ -154,7 +143,7 @@ class GrpcClientBase:
154
143
  return error_message.split("debug_error_string")[0].strip()
155
144
 
156
145
  def is_connected(self) -> bool:
157
- return self.connected and self.channel and self.stub
146
+ return self.connected and self.connection.get_connection() is not None and self.stub is not None
158
147
 
159
148
  def get_connection_info(self) -> Dict[str, Any]:
160
149
  return {
@@ -0,0 +1,147 @@
import grpc
import logging
import time
import threading
from grpc import StatusCode
from typing import Optional, Any, Dict

logger = logging.getLogger(__name__)


class GrpcConnection:
    """
    gRPC connection management. Owns the single channel shared by every gRPC
    client in the project.

    Implemented as a singleton: only the constructor arguments of the FIRST
    instantiation take effect; later instantiations silently reuse the
    existing connection regardless of the host/port passed in.
    """

    _instance = None
    _init_done = False
    _lock = threading.Lock()
    _reconnect_lock = threading.Lock()
    _reconnecting = False

    def __new__(cls, *args, **kwargs):
        # Double-checked locking: concurrent first calls create one instance.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, server_host: str, server_port: int = 50051, max_retries: int = 3):
        # Guard initialization under the class lock so two threads racing on
        # first construction cannot both run the body.
        with self.__class__._lock:
            if self.__class__._init_done:
                return  # prevent re-initialization of the singleton
            self.__class__._init_done = True

        self.server_address = f"{server_host}:{server_port}"
        self.channel: Optional[grpc.Channel] = None
        self.connected = False
        self.max_retries = max_retries
        # NOTE: connecting in __init__ blocks the constructor for up to
        # max_retries * 30s if the server is unreachable.
        self.connect()

    def _open_channel(self, timeout: float = 30.0) -> None:
        """Open a fresh channel and block until it is ready (raises on failure)."""
        if self.channel:
            self._close_channel()
        self.channel = grpc.insecure_channel(self.server_address)
        grpc.channel_ready_future(self.channel).result(timeout=timeout)
        self.connected = True
        logger.info(f"🚀 Connected to gRPC server at {self.server_address}")

    def connect(self, retry_interval: int = 2) -> bool:
        """
        Establish the channel with bounded retries and exponential backoff.

        Args:
            retry_interval: Base delay in seconds; doubles on each failed attempt.

        Returns:
            bool: True on success, False once max_retries is exhausted.
        """
        attempts = 0
        while attempts < self.max_retries and not self.connected:
            try:
                self._open_channel()
                return True
            except Exception as e:
                attempts += 1
                self.connected = False
                logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {e}")

                if attempts < self.max_retries:
                    sleep_time = retry_interval * (2 ** (attempts - 1))
                    logger.info(f"⏳ Retrying in {sleep_time}s...")
                    time.sleep(sleep_time)
                else:
                    logger.critical("❌ Max retries reached. Connection failed.")
        return False

    def get_connection(self):
        """Return the shared channel, or None while disconnected or reconnecting."""
        if self._reconnecting or not self.connected:
            return None
        return self.channel

    def _reconnect(self):
        """
        Retry indefinitely until the channel is ready again.

        Caller must hold _reconnect_lock; the lock is released on success.
        NOTE: this blocks the calling thread until the server comes back.
        """
        logger.info("⏳ Reconnecting...")
        attempts = 0

        while not self.connected:
            try:
                self._open_channel()
                self._reconnecting = False
                self._reconnect_lock.release()
            except Exception as e:
                attempts += 1
                self.connected = False
                logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {e}")

                # Exponential backoff, uncapped attempt count by design.
                sleep_time = 2 * (2 ** (attempts - 1))
                logger.info(f"⏳ Retrying in {sleep_time}s...")
                time.sleep(sleep_time)

    def try_reconnect(self):
        """Start a blocking reconnect unless one is already in progress."""
        if self._reconnecting:
            return

        # Non-blocking acquire: exactly one caller performs the reconnect,
        # all concurrent callers return immediately.
        if self._reconnect_lock.acquire(blocking=False):
            self._reconnecting = True
            self.connected = False
            self._reconnect()

    def _close_channel(self) -> None:
        """Close and drop the channel; always leaves state disconnected."""
        try:
            if self.channel:
                self.channel.close()
                logger.info("🔌 gRPC connection closed")
        except Exception as e:
            logger.warning(f"⚠️ Error closing channel: {e}")
        finally:
            self.channel = None
            self.connected = False

    def close(self) -> None:
        """Public shutdown: delegates to _close_channel for consistent cleanup."""
        self._close_channel()

    def is_connected(self) -> bool:
        return self.connected and self.channel is not None

    def get_connection_info(self) -> Dict[str, Any]:
        return {
            "server_address": self.server_address,
            "connected": self.connected,
            "max_retries": self.max_retries,
            "has_channel": self.channel is not None,
        }

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Complete the context-manager protocol (original defined only
        # __enter__, so `with GrpcConnection(...)` raised AttributeError).
        self.close()
        return False
@@ -3,6 +3,7 @@ import logging
3
3
  import time
4
4
  import os
5
5
  from urllib.parse import urlparse
6
+ from ..util.EncoderSelector import EncoderSelector
6
7
 
7
8
  class RTSPtoRTMPStreamer:
8
9
  def __init__(self, rtsp_url, rtmp_url, stream_key, fps=30, resolution="1280x720", duration=120):
@@ -38,6 +39,10 @@ class RTSPtoRTMPStreamer:
38
39
 
39
40
  logging.info(f"📡 [APP] Starting RTSP to RTMP stream: {self.rtsp_url} → {self.rtmp_url} for {self.duration} seconds")
40
41
 
42
+ # Get optimal encoder for hardware
43
+ encoder_args, encoder_name = EncoderSelector.get_encoder_args()
44
+ logging.info(f"🎬 [APP] Using encoder: {encoder_name}")
45
+
41
46
  # FFmpeg command
42
47
  ffmpeg_command = [
43
48
  "ffmpeg",
@@ -47,17 +52,14 @@ class RTSPtoRTMPStreamer:
47
52
  "-strict", "experimental",
48
53
  "-i", self.rtsp_url,
49
54
 
50
- # Video Encoding (Fastest possible)
51
- "-c:v", "libx264",
52
- "-preset", "ultrafast", # 🚀 Reduce CPU usage
53
- "-tune", "zerolatency", # 🚀 Optimize for real-time streaming
54
- "-x264-params", "keyint=40:min-keyint=40", # 🚀 Keyframe optimization
55
- "-r", "25", # ⏳ Limit FPS to 20 (prevents excessive encoding load)
56
- "-b:v", "1500k", # ✅ Lower bitrate to improve performance
55
+ # Video encoding with optimal encoder
56
+ *encoder_args,
57
+ "-r", "25", # Limit FPS to 25
58
+ "-b:v", "1500k", # Bitrate
57
59
  "-maxrate", "2000k", # ✅ Set max bitrate
58
60
  "-bufsize", "4000k", # ✅ Reduce buffer latency
59
61
  "-g", "25", # ✅ Reduce GOP size for faster keyframes
60
- "-vf", "scale='min(1024,iw)':-2", # ✅ Resize width to max 800px
62
+ "-vf", "scale='min(1024,iw)':-2", # ✅ Resize width to max 1024px
61
63
 
62
64
  # ❌ Disable Audio (Avoid unnecessary encoding overhead)
63
65
  "-an",
@@ -21,7 +21,7 @@ class SystemUsageClient(GrpcClientBase):
21
21
  logging.error(f"Failed to connect to gRPC server: {e}")
22
22
  self.stub = None
23
23
 
24
- def send_system_usage(self, device_id: str, cpu_usage: float, ram_usage: dict, gpu_usage: list, latency: float, token: str) -> dict:
24
+ def send_system_usage(self, device_id: str, cpu_usage: float, ram_usage: dict, gpu_usage: list, latency: float, cpu_temperature: float, token: str) -> dict:
25
25
  """
26
26
  Send system usage data to the server using token authentication.
27
27
 
@@ -44,6 +44,7 @@ class SystemUsageClient(GrpcClientBase):
44
44
  request = SystemUsageRequest(
45
45
  device_id=device_id,
46
46
  cpu_usage=cpu_usage,
47
+ cpu_temperature=cpu_temperature,
47
48
  ram_usage_percent=ram_usage.get("percent", 0.0),
48
49
  ram_total=ram_usage.get("total", 0),
49
50
  ram_used=ram_usage.get("used", 0),
@@ -377,7 +377,15 @@ class WorkerSourcePipelineClient(GrpcClientBase):
377
377
  debug_entries = self.debug_repo.get_debug_entries_with_data()
378
378
 
379
379
  for debug_entry in debug_entries:
380
- image_binary = self.read_image_as_binary(debug_entry.image_path)
380
+ try:
381
+ image_binary = self.read_image_as_binary(debug_entry.image_path)
382
+ except FileNotFoundError:
383
+ logging.warning(f"Image file not found: {debug_entry.image_path}, deleting entry {debug_entry.id}")
384
+ self.debug_repo.delete_entry_by_id(debug_entry.id)
385
+ continue
386
+ except Exception as e:
387
+ logging.error(f"Error reading image {debug_entry.image_path}: {e}")
388
+ continue
381
389
 
382
390
  request = SendPipelineDebugRequest(
383
391
  worker_source_pipeline_id=debug_entry.worker_source_pipeline_id,
@@ -391,12 +399,13 @@ class WorkerSourcePipelineClient(GrpcClientBase):
391
399
  if response and response.success:
392
400
  self.debug_repo.delete_entry_by_id(debug_entry.id)
393
401
  else:
394
- return {"success": False, "message": response.message if response else "Unknown error"}
402
+ logging.warning(f"Failed to sync debug entry {debug_entry.id}: {response.message if response else 'Unknown error'}")
395
403
 
396
404
  return {"success": True, "message": "Successfully synced debug entries"}
397
405
 
398
406
  except Exception as e:
399
- logging.error(f"Error syncing pipeline debug: {e}")
407
+ logging.error(f"Error syncing pipeline debug: {e}", exc_info=True)
408
+ return {"success": False, "message": f"Exception: {str(e)}"}
400
409
 
401
410
  def sync_pipeline_detection(self, token: str):
402
411
  if not self.stub:
@@ -406,7 +415,15 @@ class WorkerSourcePipelineClient(GrpcClientBase):
406
415
  entries = self.detection_repo.get_entries()
407
416
 
408
417
  for entry in entries:
409
- image_binary = self.read_image_as_binary(entry.image_path)
418
+ try:
419
+ image_binary = self.read_image_as_binary(entry.image_path)
420
+ except FileNotFoundError:
421
+ logging.warning(f"Image file not found: {entry.image_path}, deleting entry {entry.id}")
422
+ self.detection_repo.delete_entry_by_id(entry.id)
423
+ continue
424
+ except Exception as e:
425
+ logging.error(f"Error reading image {entry.image_path}: {e}")
426
+ continue
410
427
 
411
428
  request = SendPipelineDetectionDataRequest(
412
429
  worker_source_pipeline_id=entry.worker_source_pipeline_id,
@@ -420,9 +437,10 @@ class WorkerSourcePipelineClient(GrpcClientBase):
420
437
  if response and response.success:
421
438
  self.detection_repo.delete_entry_by_id(entry.id)
422
439
  else:
423
- return {"success": False, "message": response.message if response else "Unknown error"}
440
+ logging.warning(f"Failed to sync detection entry {entry.id}: {response.message if response else 'Unknown error'}")
424
441
 
425
- return {"success": True, "message": "Successfully synced debug entries"}
442
+ return {"success": True, "message": "Successfully synced detection entries"}
426
443
 
427
444
  except Exception as e:
428
- logging.error(f"Error syncing pipeline debug: {e}")
445
+ logging.error(f"Error syncing pipeline detection: {e}", exc_info=True)
446
+ return {"success": False, "message": f"Exception: {str(e)}"}
@@ -0,0 +1,109 @@
"""
Utility for selecting the best available video encoder (GPU or CPU fallback).
"""
import os
import sys
import logging
from typing import List, Tuple
from .PlatformDetector import PlatformDetector

logger = logging.getLogger(__name__)


class EncoderSelector:
    """Selects optimal video encoder based on available hardware."""

    _platform = PlatformDetector()

    @classmethod
    def get_encoder_args(cls, force_cpu: bool = False) -> Tuple[List[str], str]:
        """
        Get FFmpeg encoder arguments.

        Selection order: explicit override (RTMP_ENCODER env var), Jetson,
        macOS VideoToolbox, discrete NVIDIA GPU, then libx264 CPU fallback.

        Args:
            force_cpu: Force CPU encoding even if GPU is available

        Returns:
            Tuple of (encoder_args_list, encoder_name)
        """
        if force_cpu:
            return cls._get_cpu_encoder()

        # Environment variable override takes precedence over autodetection.
        force_encoder = os.environ.get("RTMP_ENCODER", "").lower()
        if force_encoder in ("cpu", "libx264"):
            return cls._get_cpu_encoder()
        if force_encoder == "nvenc":
            return cls._get_nvenc_encoder()

        # Jetson platform (integrated NVENC)
        if cls._platform.is_jetson():
            return cls._get_jetson_encoder()

        # macOS hardware encoder
        if sys.platform == "darwin":
            return cls._get_videotoolbox_encoder()

        # Discrete NVIDIA GPU
        if cls._has_nvidia_gpu():
            return cls._get_nvenc_encoder()

        # Fallback to CPU
        return cls._get_cpu_encoder()

    @staticmethod
    def _has_nvidia_gpu() -> bool:
        """Heuristic NVIDIA check: container env var or host driver proc file."""
        return (
            os.environ.get("NVIDIA_VISIBLE_DEVICES") is not None or
            os.path.exists("/proc/driver/nvidia/version")
        )

    @staticmethod
    def _nvenc_low_latency_args() -> List[str]:
        """NVENC flags shared by discrete GPUs and Jetson (single source of truth)."""
        return [
            "-c:v", "h264_nvenc",
            "-preset", "p1",          # p1 = fastest preset
            "-tune", "ull",           # ultra-low latency
            "-rc:v", "cbr",           # constant bitrate
            "-rc-lookahead", "0",     # disable lookahead for lower latency
            "-delay", "0",            # zero delay
            "-zerolatency", "1",      # zero latency mode
            "-profile:v", "main",
        ]

    @classmethod
    def _get_nvenc_encoder(cls) -> Tuple[List[str], str]:
        """NVIDIA NVENC encoder for discrete GPUs, pinned to the first GPU."""
        return cls._nvenc_low_latency_args() + ["-gpu", "0"], "h264_nvenc"

    @classmethod
    def _get_jetson_encoder(cls) -> Tuple[List[str], str]:
        """Jetson NVENC encoder (no -gpu flag; Jetson exposes a single iGPU)."""
        return cls._nvenc_low_latency_args(), "h264_nvenc"

    @staticmethod
    def _get_cpu_encoder() -> Tuple[List[str], str]:
        """CPU encoder (libx264) tuned for low-latency streaming."""
        return [
            "-c:v", "libx264",
            "-preset", "ultrafast",
            "-tune", "zerolatency",
            "-profile:v", "main",
        ], "libx264"

    @staticmethod
    def _get_videotoolbox_encoder() -> Tuple[List[str], str]:
        """macOS hardware encoder via VideoToolbox."""
        return [
            "-c:v", "h264_videotoolbox",
            "-profile:v", "main",
            "-realtime", "1",
        ], "h264_videotoolbox"
@@ -78,6 +78,9 @@ class SystemMonitor:
78
78
  return [temp.current for temp in core_temps if hasattr(temp, "current")]
79
79
  elif "cpu-thermal" in sensors:
80
80
  return [sensors["cpu-thermal"][0].current]
81
+ elif "k10temp" in sensors and len(sensors["k10temp"]) > 0:
82
+ temp = sensors["k10temp"][0]
83
+ return float(temp.current)
81
84
  else:
82
85
  return {"error": "CPU temperature sensor not found"}
83
86
  except Exception as e:
@@ -138,7 +138,7 @@ class DataSyncWorker:
138
138
 
139
139
  if not response or not response.get("success"):
140
140
  error_message = GrpcClientBase.get_error_message(response)
141
- logger.error(f"❌ [APP] Failed to sync dataset sources: {error_message}")
141
+ logger.error(f"❌ [APP] Failed to sync worker source pipelines detection: {error_message}")
142
142
 
143
143
  except Exception as e:
144
- logger.error("🚨 [APP] Error syncing worker source pipelines detection.", exc_info=True)
144
+ logger.error(f"🚨 [APP] Error syncing worker source pipelines detection: {e}", exc_info=True)
@@ -284,7 +284,7 @@ class DatasetFrameWorker:
284
284
  self.last_sync_time = time.time()
285
285
  else:
286
286
  error_message = response.get("message", "Unknown error") if response else "Unknown error"
287
- logger.error(f"❌ [APP] Failed to sync dataset sources: {error_message}")
287
+ logger.error(f"❌ [APP] Failed to sync dataset sources: {error_message}", exc_info=True)
288
288
 
289
289
  except Exception as e:
290
290
  logger.error("🚨 [APP] Error syncing dataset sources.", exc_info=True)
@@ -0,0 +1,160 @@
1
+ import threading
2
+ import logging
3
+ import json
4
+ from datetime import datetime
5
+ from ..database.DatabaseManager import DatabaseManager
6
+ from ..models.worker_source_pipeline import WorkerSourcePipelineEntity
7
+ from .RabbitMQListener import RabbitMQListener
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
def safe_join_thread(thread, timeout=5):
    """Safely join a thread, avoiding RuntimeError when joining current thread.

    Args:
        thread: Thread to join; None is a no-op.
        timeout: Maximum seconds to wait for the thread to finish.
    """
    if thread is None:
        return
    # Use identity comparison: joining the current thread raises RuntimeError.
    if thread is threading.current_thread():
        logging.info("🛑 [APP] Thread stopping from within itself, skipping join.")
        return
    thread.join(timeout=timeout)
17
+
18
class PipelinePreviewWorker:
    """
    Listens for pipeline preview requests over RabbitMQ and records the
    request time in the local database.

    The worker core checks ``last_preview_request_at`` to decide whether to
    publish RTMP preview streams, so this worker only stamps the timestamp —
    it never starts a stream itself.
    """

    def __init__(self, config: dict):
        """
        Initialize Pipeline Preview Worker.

        Args:
            config (dict): Configuration object containing settings; must
                include 'worker_id'.

        Raises:
            ValueError: If config is not a dict or lacks 'worker_id'.
        """
        if not isinstance(config, dict):
            raise ValueError("⚠️ [APP] config must be a dictionary.")

        self.config = config
        self.worker_id = self.config.get("worker_id")

        if not self.worker_id:
            raise ValueError("⚠️ [APP] Configuration is missing 'worker_id'.")

        self.thread = None
        self.stop_event = threading.Event()
        self.lock = threading.Lock()  # guards start/stop against concurrent calls

        # RabbitMQ listener delivering raw preview messages to our callback.
        self.listener = RabbitMQListener(
            self.config, self.worker_id, self.stop_event, self._process_pipeline_preview_message
        )

    def start(self):
        """Start the Pipeline Preview Worker (idempotent while running)."""
        with self.lock:
            if self.thread and self.thread.is_alive():
                logger.warning("⚠️ [APP] Pipeline Preview Worker is already running.")
                return

            self.stop_event.clear()
            self.thread = threading.Thread(target=self._run, daemon=True)
            self.thread.start()
            logger.info(f"🚀 [APP] Pipeline Preview Worker started (Device: {self.worker_id}).")

    def stop(self):
        """Stop the Pipeline Preview Worker and its listener."""
        with self.lock:
            if not self.thread or not self.thread.is_alive():
                logger.warning("⚠️ [APP] Pipeline Preview Worker is not running.")
                return

            self.stop_event.set()
            self.listener.stop_listening()

            safe_join_thread(self.thread)
            self.thread = None
            logger.info(f"🛑 [APP] Pipeline Preview Worker stopped (Device: {self.worker_id}).")

    def _run(self):
        """Main loop: keep the RabbitMQ listener alive, reconnecting on drops."""
        try:
            while not self.stop_event.is_set():
                logger.info("📡 [APP] Starting pipeline preview message listener...")
                self.listener.start_listening(
                    exchange_name="nedo.worker.pipeline.preview",
                    queue_name=f"nedo.worker.pipeline.preview.{self.worker_id}"
                )

                # Block until the listener thread dies (connection lost) or a
                # stop is requested; short join timeout keeps us responsive.
                while not self.stop_event.is_set() and self.listener.listener_thread and self.listener.listener_thread.is_alive():
                    self.listener.listener_thread.join(timeout=5)

                if not self.stop_event.is_set():
                    logger.warning("⚠️ [APP] Pipeline preview listener disconnected. Attempting to reconnect in 10 seconds...")
                    self.stop_event.wait(10)
                else:
                    logger.info("📡 [APP] Pipeline preview listener stopped.")
                    break

        except Exception:
            logger.error("🚨 [APP] Unexpected error in Pipeline Preview Worker loop.", exc_info=True)

    def _process_pipeline_preview_message(self, message):
        """
        Process a pipeline preview message.

        Validates the payload and updates last_preview_request_at for the
        referenced pipeline.
        """
        try:
            data = json.loads(message)
            worker_id = data.get("workerId")
            pipeline_id = data.get("pipelineId")

            logger.info(f"📡 [APP] Received pipeline preview message ({data})")

            if not pipeline_id:
                logger.error("⚠️ [APP] Missing pipelineId in message")
                return

            # Ignore messages routed to another worker's queue.
            if worker_id != self.worker_id:
                logger.warning(f"⚠️ [APP] Worker ID mismatch: expected {self.worker_id}, got {worker_id}")
                return

            self._update_pipeline_preview_timestamp(pipeline_id)

        except json.JSONDecodeError:
            logger.error("⚠️ [APP] Invalid JSON message format.")
        except Exception:
            logger.error("🚨 [APP] Error processing pipeline preview message.", exc_info=True)

    def _update_pipeline_preview_timestamp(self, pipeline_id: str):
        """
        Update last_preview_request_at for the specified pipeline.

        Args:
            pipeline_id (str): The ID of the pipeline to update.
        """
        session = None
        try:
            session = DatabaseManager.get_session("config")

            # Scope the lookup to this worker so a foreign pipeline id is rejected.
            pipeline = session.query(WorkerSourcePipelineEntity).filter_by(
                id=pipeline_id,
                worker_id=self.worker_id
            ).first()

            if not pipeline:
                logger.error(f"⚠️ [APP] Pipeline not found: {pipeline_id}")
                return

            # Naive UTC timestamp — matches the existing DB convention.
            # NOTE(review): datetime.utcnow() is deprecated in 3.12; consider
            # timezone-aware datetimes once all readers are migrated.
            pipeline.last_preview_request_at = datetime.utcnow()
            session.commit()

            # NOTE(review): assumes the entity exposes a `name` attribute;
            # an AttributeError here is caught below but logged as an error.
            logger.info(f"✅ [APP] Updated preview timestamp for pipeline {pipeline_id} ({pipeline.name})")

        except Exception as e:
            if session:
                session.rollback()
            logger.error(f"🚨 [APP] Error updating pipeline preview timestamp: {e}", exc_info=True)
        finally:
            if session:
                session.close()
@@ -74,8 +74,28 @@ class SystemUsageManager:
74
74
  try:
75
75
  usage = self.system_monitor.get_system_usage()
76
76
  cpu_usage = usage["cpu"]["usage_percent"]
77
+ cpu_temperature_raw = usage["cpu"]["temperature_celsius"]
77
78
  ram_usage = usage["ram"]
78
- gpu_usage = usage.get("gpu", [])
79
+ gpu_usage_raw = usage.get("gpu", [])
80
+
81
+ if isinstance(cpu_temperature_raw, (int, float)):
82
+ cpu_temperature = float(cpu_temperature_raw)
83
+ elif isinstance(cpu_temperature_raw, list):
84
+ if cpu_temperature_raw:
85
+ cpu_temperature = float(sum(cpu_temperature_raw) / len(cpu_temperature_raw))
86
+ else:
87
+ cpu_temperature = 0.0
88
+ elif isinstance(cpu_temperature_raw, dict):
89
+ cpu_temperature = 0.0
90
+ else:
91
+ cpu_temperature = 0.0
92
+
93
+ if isinstance(gpu_usage_raw, dict):
94
+ gpu_usage = []
95
+ elif isinstance(gpu_usage_raw, list):
96
+ gpu_usage = gpu_usage_raw
97
+ else:
98
+ gpu_usage = []
79
99
 
80
100
  with self.latency_lock:
81
101
  latency = self.latency if self.latency is not None else -1
@@ -85,6 +105,7 @@ class SystemUsageManager:
85
105
  cpu_usage=cpu_usage,
86
106
  ram_usage=ram_usage,
87
107
  gpu_usage=gpu_usage,
108
+ cpu_temperature=cpu_temperature,
88
109
  latency=latency,
89
110
  token=self.token,
90
111
  )
@@ -92,8 +113,6 @@ class SystemUsageManager:
92
113
  if not response or not response.get("success"):
93
114
  error_message = GrpcClientBase.get_error_message(response)
94
115
  logger.error(f"❌ [APP] Failed to send system usage: {error_message}")
95
- #else:
96
- # logger.info("✅ [APP] System usage sent successfully.")
97
116
 
98
117
  except Exception as e:
99
118
  logger.error("🚨 [APP] Error sending system usage.", exc_info=True)
@@ -7,6 +7,7 @@ from .DataSyncWorker import DataSyncWorker
7
7
  from .DataSenderWorker import DataSenderWorker
8
8
  from .PipelineImageWorker import PipelineImageWorker
9
9
  from .VideoStreamWorker import VideoStreamWorker
10
+ from .PipelinePreviewWorker import PipelinePreviewWorker
10
11
  from .CoreActionWorker import CoreActionWorker
11
12
  from .DatasetFrameWorker import DatasetFrameWorker
12
13
 
@@ -38,6 +39,7 @@ class WorkerManager:
38
39
  self.data_sync_worker = DataSyncWorker(config, sync_interval=10)
39
40
  self.data_sender_worker = DataSenderWorker(config, send_interval=10)
40
41
  self.video_stream_worker = VideoStreamWorker(config)
42
+ self.pipeline_preview_worker = PipelinePreviewWorker(config)
41
43
  self.pipeline_image_worker = PipelineImageWorker(config)
42
44
  self.pipeline_action_worker = PipelineActionWorker(config)
43
45
  self.core_action_worker = CoreActionWorker(config, self._start_workers, self._stop_workers)
@@ -47,6 +49,7 @@ class WorkerManager:
47
49
  """Start processing workers while keeping monitoring workers running."""
48
50
  try:
49
51
  self.video_stream_worker.start()
52
+ self.pipeline_preview_worker.start()
50
53
  self.pipeline_image_worker.start()
51
54
  self.data_sender_worker.start_updating()
52
55
  self.dataset_frame_worker.start()
@@ -60,6 +63,7 @@ class WorkerManager:
60
63
  """Stop processing workers while keeping monitoring workers running."""
61
64
  try:
62
65
  self.video_stream_worker.stop()
66
+ self.pipeline_preview_worker.stop()
63
67
  self.pipeline_image_worker.stop()
64
68
  self.data_sender_worker.stop_updating()
65
69
  self.dataset_frame_worker.stop()
@@ -151,6 +151,16 @@ class WorkerService:
151
151
  self.logger.info("✅ [APP] Configuration updated successfully")
152
152
  else:
153
153
  self.logger.info("✅ [APP] Configuration found. No changes needed.")
154
+
155
+ # Always fetch connection info on startup to check for updates
156
+ self.logger.info("🔄 [APP] Checking for connection info updates...")
157
+ token_to_use = self.token if self.token else config.get('token')
158
+ if token_to_use:
159
+ AppInitializer.update_connection_info(server_host, self.server_port, token_to_use)
160
+ # Reload config after potential updates
161
+ config = ConfigurationManager.get_all_configs()
162
+ else:
163
+ self.logger.warning("⚠️ [APP] No token available to fetch connection info updates")
154
164
 
155
165
  # Add runtime parameters to config
156
166
  config['rtmp_server'] = self.rtmp_server
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nedo-vision-worker
3
- Version: 1.2.6
3
+ Version: 1.2.8
4
4
  Summary: Nedo Vision Worker Service Library for AI Vision Processing
5
5
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
6
6
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -40,9 +40,10 @@ Requires-Dist: protobuf>=3.20.0
40
40
  Requires-Dist: psutil>=5.9.0
41
41
  Requires-Dist: requests>=2.28.0
42
42
  Requires-Dist: SQLAlchemy>=1.4.0
43
- Requires-Dist: opencv-python>=4.6.0; platform_machine not in "aarch64 armv7l"
44
- Requires-Dist: opencv-python-headless>=4.6.0; platform_machine in "aarch64 armv7l"
45
43
  Requires-Dist: pynvml>=11.4.1; platform_system != "Darwin" or platform_machine != "arm64"
44
+ Provides-Extra: opencv
45
+ Requires-Dist: opencv-python>=4.6.0; platform_machine not in "aarch64 armv7l" and extra == "opencv"
46
+ Requires-Dist: opencv-python-headless>=4.6.0; platform_machine in "aarch64 armv7l" and extra == "opencv"
46
47
  Provides-Extra: dev
47
48
  Requires-Dist: pytest>=7.0.0; extra == "dev"
48
49
  Requires-Dist: black>=22.0.0; extra == "dev"
@@ -1,12 +1,12 @@
1
- nedo_vision_worker/__init__.py,sha256=3uSTVntcW747CFtMZ_qyKrptoNgHDvLNjg0pdgb4otM,203
1
+ nedo_vision_worker/__init__.py,sha256=RU8uEjcEodK2R_F4Fg2qBYmhhJpvf-vQBA-Cjo9Dvf8,203
2
2
  nedo_vision_worker/cli.py,sha256=ddWspJmSgVkcUYvRdkvTtMNuMTDvNCqLLuMVU9KE3Ik,7457
3
3
  nedo_vision_worker/doctor.py,sha256=wNkpe8gLVd76Y_ViyK2h1ZFdqeSl37MnzZN5frWKu30,48410
4
- nedo_vision_worker/worker_service.py,sha256=rXUVmyxcJPGhQEZ4UQvjQS5UqlnLBYudHQZCj0dQDxo,10421
4
+ nedo_vision_worker/worker_service.py,sha256=9zz8hKwDwqwpfS0KPQfftGJtRci0uj_wiwcr_TGf-E0,11039
5
5
  nedo_vision_worker/config/ConfigurationManager.py,sha256=QrQaQ9Cdjpkcr2JE_miyrWJIZmMgZwJYBz-wE45Zzes,8011
6
6
  nedo_vision_worker/config/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
7
- nedo_vision_worker/database/DatabaseManager.py,sha256=BZAK3VzbAo_xWUNCni4O8rCLzCjY7mE02PI2HlEpqg4,8505
7
+ nedo_vision_worker/database/DatabaseManager.py,sha256=j2koXo1fnMmAyQnY4sv4txfZR8qIzrPyev-sQ4HBaOQ,9478
8
8
  nedo_vision_worker/database/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
9
- nedo_vision_worker/initializer/AppInitializer.py,sha256=iGw8-7Eg2aNT-nFaxiRIhWHU8iwecLuwuvjWlt1-V0Y,3163
9
+ nedo_vision_worker/initializer/AppInitializer.py,sha256=6UVdjiuayziPYZ7JkQ436z7-9sHj7J3jtp6lfQsu-DU,5698
10
10
  nedo_vision_worker/initializer/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
11
11
  nedo_vision_worker/models/__init__.py,sha256=6ZH2W1Jcy4o6xBPqFPcyxRU2UJ5Zvw_kfO38yLLGtHA,796
12
12
  nedo_vision_worker/models/ai_model.py,sha256=9muyZL9AxtX417-tYUiw8bgvFPtqdXgEAq-hm_mLxGY,2277
@@ -19,7 +19,7 @@ nedo_vision_worker/models/ppe_detection_label.py,sha256=qON7a0fuDv5cK8phGH0gGTzq
19
19
  nedo_vision_worker/models/restricted_area_violation.py,sha256=0enCi3tv15YMy3NaI6FwqhmLYHbbVX4nWTh46qKxrWc,829
20
20
  nedo_vision_worker/models/user.py,sha256=SnLUz2nS7j17bIP-gElMEaR-jWdnNQ0fTpRminVKY60,294
21
21
  nedo_vision_worker/models/worker_source.py,sha256=FB8irZ26LhCKNHBcpIIb5Mi3SoSNm9-q25VIkO5jQWg,793
22
- nedo_vision_worker/models/worker_source_pipeline.py,sha256=xCD4i9pHr8Qy5B_h1dH0Q7V7faS2lAou2UNEzx24oIw,752
22
+ nedo_vision_worker/models/worker_source_pipeline.py,sha256=CGA_nz5wywsJcBPm-5kd0v_-h59f8Iu7uEeX3C91eT4,824
23
23
  nedo_vision_worker/models/worker_source_pipeline_config.py,sha256=dGYTpcTFFu6pmGBufuWBHjv3Xs4RGAQwZn6jp6Ondvs,876
24
24
  nedo_vision_worker/models/worker_source_pipeline_debug.py,sha256=6S7TkN37FrAT4VwsEB38DWSad7QfvNhsOGtSEK8D1Qs,594
25
25
  nedo_vision_worker/models/worker_source_pipeline_detection.py,sha256=p6CJsiVCKprTYrNxJsiTB8njXdHkjZKVEyBceRVE6fY,560
@@ -50,49 +50,52 @@ nedo_vision_worker/repositories/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKr
50
50
  nedo_vision_worker/services/AIModelClient.py,sha256=lxRNax6FR-pV0G1NpJnlaqjbQeu3kRolIUNSw1RkoZA,15406
51
51
  nedo_vision_worker/services/ConnectionInfoClient.py,sha256=toC9zuY2Hrx1Cwq8Gycy_iFlaG1DvFT4qewlLlitpEQ,2214
52
52
  nedo_vision_worker/services/DatasetSourceClient.py,sha256=O5a7onxFl0z47zXaMXWxHAMPuuc-i_vzkd2w5fwrukc,3319
53
- nedo_vision_worker/services/DirectDeviceToRTMPStreamer.py,sha256=M5ei0cd3_KDhHZp6EkrOowhAY-hAHfAQh9YDVjQtbQI,22278
54
- nedo_vision_worker/services/FileToRTMPServer.py,sha256=yUJxrouoTLSq9XZ88dhDYhP-px10jLoHopkPoy4lQxk,2663
55
- nedo_vision_worker/services/GrpcClientBase.py,sha256=bRNeajiPGcJZtNofD_HU7JhLHVPbnuGacqv5Dp62GC0,7400
53
+ nedo_vision_worker/services/DirectDeviceToRTMPStreamer.py,sha256=K0n7iyR7jdce8IWdNuqa1Im-R00QnxBcb7u2KM2Wjbc,22423
54
+ nedo_vision_worker/services/FileToRTMPServer.py,sha256=0hY5pmeAzLw_d3uPR2Qp6gSAYb4rJHiAunuNe08OvkM,2870
55
+ nedo_vision_worker/services/GrpcClientBase.py,sha256=hPyxOGw3aGSW1FhmY3wp3Iq8U1MArXBmvEMdmd63NZ4,6827
56
56
  nedo_vision_worker/services/GrpcClientManager.py,sha256=DLXekmxlQogLo8V9-TNDXtyHT_UG-BaggqwsIups55k,5568
57
+ nedo_vision_worker/services/GrpcConnection.py,sha256=UNjaUC4ZcXuteHQx8AAAL5ymYkT1OpoIvyCYPUc3tCI,4915
57
58
  nedo_vision_worker/services/ImageUploadClient.py,sha256=T353YsRfm74G7Mh-eWr5nvdQHXTfpKwHJFmNW8HyjT8,3019
58
59
  nedo_vision_worker/services/PPEDetectionClient.py,sha256=CC-b0LRAgrftfIKp6TFKpeBkTYefe-C6Z1oz_X3HArQ,4345
59
- nedo_vision_worker/services/RTSPtoRTMPStreamer.py,sha256=hkPRX6iGxUtivsOA36JWBopHEd1RlmgIP8SLqN4d2TU,3863
60
+ nedo_vision_worker/services/RTSPtoRTMPStreamer.py,sha256=LtfrWDHNcm-Ky6nZLnFCF8xgqIm7VQmsWIenK2yKNfo,3804
60
61
  nedo_vision_worker/services/RestrictedAreaClient.py,sha256=AE9SOcVQca4zn5V96boho56EgN5BCIpV-8grvFBBnGo,3853
61
62
  nedo_vision_worker/services/SharedDirectDeviceClient.py,sha256=dylMhqpMsfK_UKLWIVL-ApJRP4g-NCP_55xvlGYBiwo,10760
62
63
  nedo_vision_worker/services/SharedVideoStreamServer.py,sha256=WMKVxkzMoyfbgYiJ0fQOT-Ujz9btz6FLlaDP738yfoY,11601
63
- nedo_vision_worker/services/SystemUsageClient.py,sha256=PbRuwDWKnMarcnkGtOKfYB5nA-3DeKv7V5_hZr8EDEo,3200
64
+ nedo_vision_worker/services/SystemUsageClient.py,sha256=Yf77dooQeNh6CDL5FkWVrX9543OVz1wc3exCAg6GlWw,3273
64
65
  nedo_vision_worker/services/SystemWideDeviceCoordinator.py,sha256=9zBJMCbTMZS7gwN67rFpoUiTr82El2rpIO7DKFzeMjM,9417
65
66
  nedo_vision_worker/services/VideoSharingDaemon.py,sha256=hYMjUIKNUVT1qSxuUuHN-7Bd85MDkxfqslxDLe2PBYQ,29721
66
67
  nedo_vision_worker/services/VideoStreamClient.py,sha256=QSgUV3LijYrNdnBG1ylABOdUaSatQamfXaqJhAiol9M,7260
67
68
  nedo_vision_worker/services/WorkerSourceClient.py,sha256=vDZeCuHL5QQ2-knZ4TOSA59jzmbbThGIwFKKLEZ72Ws,9198
68
- nedo_vision_worker/services/WorkerSourcePipelineClient.py,sha256=qaBx9T2gWMzpqZaeQdbIeklsXNwzWD5kqgB41rrSkBI,17135
69
+ nedo_vision_worker/services/WorkerSourcePipelineClient.py,sha256=cjev_NRGUZrC9tuMm8s5ov92fknQno4vLEt-yMFrUCY,18241
69
70
  nedo_vision_worker/services/WorkerSourceUpdater.py,sha256=4t_CEHBLGDRvvuQS6eEPMivTI11ZuzusKKto6t9tPIk,9115
70
71
  nedo_vision_worker/services/WorkerStatusClient.py,sha256=7kC5EZjEBwWtHOE6UQ29OPCpYnv_6HSuH7Tc0alK_2Q,2531
71
72
  nedo_vision_worker/services/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
73
+ nedo_vision_worker/util/EncoderSelector.py,sha256=-9lZwVmiKzJr1cELeuCXi-jRonty2bpociZq4KDScmA,3399
72
74
  nedo_vision_worker/util/FFmpegUtil.py,sha256=QnQrzurmllzGb7SlAAYCrzKBUblweoFU-0h-X-32IYg,1829
73
75
  nedo_vision_worker/util/HardwareID.py,sha256=rSW8-6stm7rjXEdkYGqXMUn56gyw62YiWnSwZQVCCLM,4315
74
76
  nedo_vision_worker/util/ImageUploader.py,sha256=2xipN3fwpKgFmbvoGIdElpGn5ARJyrgR4dXtbRf73hw,3764
75
77
  nedo_vision_worker/util/Networking.py,sha256=uOtL8HkKZXJp02ZZIHWYMAvAsaYb7BsAPTncfdvJx2c,3241
76
78
  nedo_vision_worker/util/PlatformDetector.py,sha256=-iLPrKs7hp_oltkCI3hESJQkC2uRyu1-8mAbZrvgWx0,1501
77
- nedo_vision_worker/util/SystemMonitor.py,sha256=2MWYaEXoL91UANT_-SuDWrFMq1ajPorh8co6Py9PV_c,11300
79
+ nedo_vision_worker/util/SystemMonitor.py,sha256=2kkqj9mOlywAS2fHdN1TaIXSXvCApcIHj0IO2T9k3Yw,11471
78
80
  nedo_vision_worker/util/VideoProbeUtil.py,sha256=cF-vJ7hIDlXfEJby2a0s9tqwkPGVz_6B3Vv4D5pMmIw,12876
79
81
  nedo_vision_worker/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
80
82
  nedo_vision_worker/worker/CoreActionWorker.py,sha256=lb7zPY3yui6I3F4rX4Ii7JwpWZahLEO72rh3iWOgFmg,5441
81
83
  nedo_vision_worker/worker/DataSenderWorker.py,sha256=9FudRRItiMOcQx5UfVyu4p0Enb9BbgwZZ5EgX6Ho2U4,7160
82
- nedo_vision_worker/worker/DataSyncWorker.py,sha256=WvYfi3bG4mOKHU09J_MavfjFPrVgmxrrZYtrlQ-bnio,6265
84
+ nedo_vision_worker/worker/DataSyncWorker.py,sha256=LmDPt2J1frmXwuR46L6b0MjlFOHfgG-4_0MGQa78zF4,6288
83
85
  nedo_vision_worker/worker/DatasetFrameSender.py,sha256=1SFYj8LJFNi-anBTapsbq8U_NGMM7mnoMKg9NeFAHys,8087
84
- nedo_vision_worker/worker/DatasetFrameWorker.py,sha256=Ni5gPeDPk9Rz4_cbg63u7Y6LVw_-Bz24OvfeY-6Yp44,19320
86
+ nedo_vision_worker/worker/DatasetFrameWorker.py,sha256=Hh_wZuMjwovxsEKFqXSuTRin9eYRBZCbcFKm3CKLMbE,19335
85
87
  nedo_vision_worker/worker/PPEDetectionManager.py,sha256=fAolWlrsY5SQAWygvjNBNU56IlC0HLlhPgpz7shL-gk,3588
86
88
  nedo_vision_worker/worker/PipelineActionWorker.py,sha256=xgvryjKtEsMj4BKqWzDIaK_lFny-DfMCj5Y2DxHnWww,5651
87
89
  nedo_vision_worker/worker/PipelineImageWorker.py,sha256=J8VBUG0cwcH3qOJp2zTl30B-XhmPFyvJLjxitKJYq0E,5642
90
+ nedo_vision_worker/worker/PipelinePreviewWorker.py,sha256=owFiBbktcOZkdImQeykZSeBIR2-mpt6HNkmYIkLRKzE,6397
88
91
  nedo_vision_worker/worker/RabbitMQListener.py,sha256=9gR49MDplgpyb-D5HOH0K77-DJQFvhS2E7biL92SjSU,6950
89
92
  nedo_vision_worker/worker/RestrictedAreaManager.py,sha256=3yoXgQ459tV1bOa5choEzR9gE6LklrtHR_e0472U3L0,3521
90
- nedo_vision_worker/worker/SystemUsageManager.py,sha256=StutV4UyLUfduYfK20de4SbPd7wqkR7io0gsOajxWSU,4509
93
+ nedo_vision_worker/worker/SystemUsageManager.py,sha256=mkh4sT-HkIEY1CJHMEG6LP9ATu39YXvLRLyf995OkoQ,5315
91
94
  nedo_vision_worker/worker/VideoStreamWorker.py,sha256=5n6v1PNO7IB-jj_McALLkUP-cBjJoIEw4UiSAs3vTb0,7606
92
- nedo_vision_worker/worker/WorkerManager.py,sha256=T0vMfhOd7YesgQ9o2w6soeJ6n9chUAcuwcGe7p31xr0,5298
95
+ nedo_vision_worker/worker/WorkerManager.py,sha256=2bxXi19fp3p1qjYBStYRdVVgko8dnevXx1_M_sqH5og,5521
93
96
  nedo_vision_worker/worker/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
94
- nedo_vision_worker-1.2.6.dist-info/METADATA,sha256=irbHs4_-uj182kff6PpW0YND59Q_UwhsPVLb5wI5NNQ,14661
95
- nedo_vision_worker-1.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
96
- nedo_vision_worker-1.2.6.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
97
- nedo_vision_worker-1.2.6.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
98
- nedo_vision_worker-1.2.6.dist-info/RECORD,,
97
+ nedo_vision_worker-1.2.8.dist-info/METADATA,sha256=7MmzgVArAQxzCvxpcCr-LzPrFa0prCfOkaYCPLkmH_0,14728
98
+ nedo_vision_worker-1.2.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
99
+ nedo_vision_worker-1.2.8.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
100
+ nedo_vision_worker-1.2.8.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
101
+ nedo_vision_worker-1.2.8.dist-info/RECORD,,