nedo-vision-worker 1.2.6__py3-none-any.whl → 1.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,5 +6,5 @@ A library for running worker agents in the Nedo Vision platform.
6
6
 
7
7
  from .worker_service import WorkerService
8
8
 
9
- __version__ = "1.2.6"
9
+ __version__ = "1.2.7"
10
10
  __all__ = ["WorkerService"]
@@ -104,7 +104,23 @@ class DatabaseManager:
104
104
  # Initialize engines and session factories for each database
105
105
  for name, path in DB_PATHS.items():
106
106
  path.parent.mkdir(parents=True, exist_ok=True) # Ensure directory exists
107
- engine = create_engine(f"sqlite:///{path.as_posix()}")
107
+
108
+ # Configure connection pool for multi-threaded usage
109
+ # pool_size: Max connections to keep open
110
+ # max_overflow: Additional connections that can be created temporarily
111
+ # pool_pre_ping: Test connections before using (prevents stale connections)
112
+ # pool_recycle: Recycle connections after N seconds (prevents long-lived stale connections)
113
+ engine = create_engine(
114
+ f"sqlite:///{path.as_posix()}",
115
+ pool_size=20, # Base pool size for persistent connections
116
+ max_overflow=30, # Allow up to 30 additional temporary connections
117
+ pool_pre_ping=True, # Verify connection health before use
118
+ pool_recycle=3600, # Recycle connections after 1 hour
119
+ connect_args={
120
+ "check_same_thread": False, # Required for SQLite with multiple threads
121
+ "timeout": 30.0 # Connection timeout
122
+ }
123
+ )
108
124
  ENGINES[name] = engine
109
125
  SESSION_FACTORIES[name] = scoped_session(sessionmaker(bind=engine)) # Use scoped sessions
110
126
  DatabaseManager.synchronize(name)
@@ -1,4 +1,4 @@
1
- from sqlalchemy import Column, String
1
+ from sqlalchemy import Column, String, DateTime
2
2
  from sqlalchemy.orm import relationship
3
3
  from ..database.DatabaseManager import Base
4
4
 
@@ -13,6 +13,7 @@ class WorkerSourcePipelineEntity(Base):
13
13
  ai_model_id = Column(String, nullable=True)
14
14
  pipeline_status_code = Column(String, nullable=False)
15
15
  location_name = Column(String, nullable=True)
16
+ last_preview_request_at = Column(DateTime, nullable=True)
16
17
 
17
18
  worker_source_pipeline_configs = relationship(
18
19
  "WorkerSourcePipelineConfigEntity",
@@ -3,6 +3,7 @@ import logging
3
3
  import time
4
4
  from grpc import StatusCode
5
5
  from typing import Callable, Optional, Any, Dict
6
+ from .GrpcConnection import GrpcConnection
6
7
 
7
8
  logger = logging.getLogger(__name__)
8
9
 
@@ -42,42 +43,21 @@ class GrpcClientBase:
42
43
def __init__(self, server_host: str, server_port: int = 50051, max_retries: int = 3):
    """Create a gRPC client bound to the shared GrpcConnection singleton.

    Args:
        server_host: Hostname/IP of the gRPC server.
        server_port: Server port (default 50051).
        max_retries: Retained for backward compatibility with the previous
            per-client retry loop; retries are now handled by GrpcConnection.
    """
    self.server_address = f"{server_host}:{server_port}"
    self.channel: Optional[grpc.Channel] = None
    # BUG FIX: `self.stub` must be initialized here — close() and
    # is_connected() read it, and without this line any call made before
    # connect() raises AttributeError.
    self.stub: Optional[Any] = None
    self.connected = False
    self.max_retries = max_retries

    # Process-wide shared channel manager (singleton) used by all clients.
    self.connection = GrpcConnection(server_host, server_port)
49
51
def connect(self, stub_class, retry_interval: int = 2) -> bool:
    """Bind a stub of ``stub_class`` to the shared channel.

    ``retry_interval`` is kept for backward compatibility with the old
    per-client retry loop; connection retries now live in GrpcConnection.

    Returns:
        True when a stub was created on a live channel, False when the
        shared connection is unavailable.
    """
    channel = self.connection.get_connection()
    if channel is None:
        return False

    self.stub = stub_class(channel)
    self.connected = True
    return True
81
61
 
82
62
  def _close_channel(self) -> None:
83
63
  try:
@@ -89,6 +69,7 @@ class GrpcClientBase:
89
69
  self.channel = None
90
70
  self.stub = None
91
71
 
72
+ # MARK:
92
73
  def close(self) -> None:
93
74
  self._close_channel()
94
75
  self.connected = False
@@ -104,6 +85,11 @@ class GrpcClientBase:
104
85
  except grpc.RpcError as e:
105
86
  return self._handle_grpc_error(e, rpc_call, *args, **kwargs)
106
87
  except Exception as e:
88
+ print(e)
89
+ print(str(e) == "Cannot invoke RPC on closed channel!")
90
+ if str(e) == "Cannot invoke RPC on closed channel!":
91
+ self.connect(type(self.stub))
92
+
107
93
  logger.error(f"💥 Unexpected RPC error: {e}")
108
94
  return None
109
95
 
@@ -127,8 +113,11 @@ class GrpcClientBase:
127
113
 
128
114
  return None
129
115
 
116
+ # MARK:
117
+ # Should request for reconnection two times. Notify grpc connection to do reconnect
130
118
  def _handle_unavailable(self, rpc_call: Callable, *args, **kwargs) -> Optional[Any]:
131
- self.connected = False
119
+ # self.connected = False
120
+ self.connection.try_reconnect()
132
121
 
133
122
  if self.stub:
134
123
  stub_class = type(self.stub)
@@ -154,7 +143,7 @@ class GrpcClientBase:
154
143
  return error_message.split("debug_error_string")[0].strip()
155
144
 
156
145
  def is_connected(self) -> bool:
157
- return self.connected and self.channel and self.stub
146
+ return self.connected and self.connection.get_connection() is not None and self.stub is not None
158
147
 
159
148
  def get_connection_info(self) -> Dict[str, Any]:
160
149
  return {
@@ -0,0 +1,147 @@
1
import grpc
import logging
import time
import threading
from grpc import StatusCode
from typing import Optional, Any, Dict

logger = logging.getLogger(__name__)


class GrpcConnection:
    """
    Grpc connection management. Responsible for initiating connection with the gRPC
    server that will be used across gRPC clients in the project.

    Implemented as a process-wide singleton: every instantiation returns the same
    object, and only the first constructor's host/port take effect — later calls
    with different arguments are silently ignored (NOTE: intentional, but callers
    should be aware of it).
    """

    _instance = None
    _init_done = False
    _lock = threading.Lock()            # guards singleton creation
    _reconnectLock = threading.Lock()   # ensures at most one reconnect loop
    _reconnecting = False

    def __new__(cls, *args, **kwargs):
        # Double-checked locking so concurrent first-time constructions
        # still produce exactly one instance.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, server_host: str, server_port: int = 50051, max_retries: int = 3):
        if self.__class__._init_done:
            return  # prevent re-initialization of the singleton
        self.__class__._init_done = True

        self.server_address = f"{server_host}:{server_port}"
        self.channel: Optional[grpc.Channel] = None
        self.connected = False
        self.max_retries = max_retries
        self.connect()

    def _open_channel(self) -> None:
        """Open a fresh insecure channel and block until ready (30 s timeout).

        Raises on failure; on success sets ``self.connected`` and logs.
        Shared by connect() and _reconnect() so the two paths cannot drift.
        """
        if self.channel:
            self._close_channel()

        self.channel = grpc.insecure_channel(self.server_address)
        grpc.channel_ready_future(self.channel).result(timeout=30)

        self.connected = True
        logger.info(f"🚀 Connected to gRPC server at {self.server_address}")

    def connect(self, retry_interval: int = 2) -> bool:
        """Try to connect up to ``max_retries`` times with exponential backoff.

        Returns:
            True on success, False once all retries are exhausted.
        """
        attempts = 0
        while attempts < self.max_retries and not self.connected:
            try:
                self._open_channel()
                return True
            # grpc.RpcError / grpc.FutureTimeoutError are Exception subclasses,
            # so a single `except Exception` covers the original tuple.
            except Exception as e:
                attempts += 1
                self.connected = False
                logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {e}")

                if attempts < self.max_retries:
                    sleep_time = retry_interval * (2 ** (attempts - 1))
                    logger.info(f"⏳ Retrying in {sleep_time}s...")
                    time.sleep(sleep_time)
                else:
                    logger.critical("❌ Max retries reached. Connection failed.")

        return False

    def get_connection(self) -> Optional[grpc.Channel]:
        """Return the shared channel, or None while disconnected/reconnecting."""
        if self._reconnecting or not self.connected:
            return None
        return self.channel

    def _reconnect(self) -> None:
        """Reconnect loop run by the single caller that won ``_reconnectLock``.

        Retries until connected, with exponential backoff capped at 60 s
        (previously unbounded). The lock is released in ``finally`` so an
        abnormal exit can never leave it held — the original released it only
        on the success path, via the deprecated ``release_lock()`` alias.
        """
        logger.info("⏳ Reconnecting...")
        attempts = 0
        try:
            while not self.connected:
                try:
                    self._open_channel()
                except Exception as e:
                    attempts += 1
                    self.connected = False
                    logger.error(f"⚠️ Connection failed ({attempts}/{self.max_retries}): {e}")

                    # Cap the backoff so the delay cannot grow without bound.
                    sleep_time = min(60, 2 * (2 ** (attempts - 1)))
                    logger.info(f"⏳ Retrying in {sleep_time}s...")
                    time.sleep(sleep_time)
        finally:
            self._reconnecting = False
            self._reconnectLock.release()

    def try_reconnect(self) -> None:
        """Start a reconnect unless one is already in progress.

        The caller that acquires the lock runs the reconnect synchronously;
        concurrent callers return immediately (non-blocking acquire).
        """
        if self._reconnecting:
            return

        # acquire() replaces the deprecated acquire_lock() alias.
        if self._reconnectLock.acquire(blocking=False):
            self._reconnecting = True
            self.connected = False
            self._reconnect()

    def _close_channel(self) -> None:
        """Close and drop the current channel, swallowing close errors."""
        try:
            if self.channel:
                self.channel.close()
                logger.info("🔌 gRPC connection closed")
        except Exception as e:
            logger.warning(f"⚠️ Error closing channel: {e}")
        finally:
            self.channel = None
            self.connected = False

    def close(self) -> None:
        """Close the shared channel and mark the connection as down."""
        if self.channel:
            self.channel.close()
        self.connected = False
        logger.info("🔌 gRPC connection closed")

    def is_connected(self) -> bool:
        """Return True when a channel exists and the last connect succeeded."""
        return self.connected and self.channel is not None

    def get_connection_info(self) -> Dict[str, Any]:
        """Return a diagnostic snapshot of the connection state."""
        return {
            "server_address": self.server_address,
            "connected": self.connected,
            "max_retries": self.max_retries,
            "has_channel": self.channel is not None,
        }

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # Completes the context-manager protocol — the original defined
        # only __enter__, so `with GrpcConnection(...)` raised AttributeError.
        self.close()
        return False
@@ -21,7 +21,7 @@ class SystemUsageClient(GrpcClientBase):
21
21
  logging.error(f"Failed to connect to gRPC server: {e}")
22
22
  self.stub = None
23
23
 
24
- def send_system_usage(self, device_id: str, cpu_usage: float, ram_usage: dict, gpu_usage: list, latency: float, token: str) -> dict:
24
+ def send_system_usage(self, device_id: str, cpu_usage: float, ram_usage: dict, gpu_usage: list, latency: float, cpu_temperature: float, token: str) -> dict:
25
25
  """
26
26
  Send system usage data to the server using token authentication.
27
27
 
@@ -44,6 +44,7 @@ class SystemUsageClient(GrpcClientBase):
44
44
  request = SystemUsageRequest(
45
45
  device_id=device_id,
46
46
  cpu_usage=cpu_usage,
47
+ cpu_temperature=cpu_temperature,
47
48
  ram_usage_percent=ram_usage.get("percent", 0.0),
48
49
  ram_total=ram_usage.get("total", 0),
49
50
  ram_used=ram_usage.get("used", 0),
@@ -78,6 +78,9 @@ class SystemMonitor:
78
78
  return [temp.current for temp in core_temps if hasattr(temp, "current")]
79
79
  elif "cpu-thermal" in sensors:
80
80
  return [sensors["cpu-thermal"][0].current]
81
+ elif "k10temp" in sensors and len(sensors["k10temp"]) > 0:
82
+ temp = sensors["k10temp"][0]
83
+ return float(temp.current)
81
84
  else:
82
85
  return {"error": "CPU temperature sensor not found"}
83
86
  except Exception as e:
@@ -0,0 +1,160 @@
1
+ import threading
2
+ import logging
3
+ import json
4
+ from datetime import datetime
5
+ from ..database.DatabaseManager import DatabaseManager
6
+ from ..models.worker_source_pipeline import WorkerSourcePipelineEntity
7
+ from .RabbitMQListener import RabbitMQListener
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
def safe_join_thread(thread, timeout=5):
    """Safely join a thread, avoiding RuntimeError when joining current thread."""
    if thread is None:
        # Nothing to join.
        return
    if thread == threading.current_thread():
        # Joining oneself would raise RuntimeError — log and skip instead.
        logging.info("🛑 [APP] Thread stopping from within itself, skipping join.")
        return
    thread.join(timeout=timeout)
+
18
class PipelinePreviewWorker:
    def __init__(self, config: dict):
        """
        Initialize Pipeline Preview Worker.

        This worker listens for pipeline preview requests and updates the
        last_preview_request_at timestamp in the database. The worker core
        will check this timestamp to decide whether to publish RTMP streams.

        Args:
            config (dict): Configuration object containing settings.

        Raises:
            ValueError: If config is not a dict or lacks a 'worker_id' entry.
        """
        if not isinstance(config, dict):
            raise ValueError("⚠️ [APP] config must be a dictionary.")

        self.config = config
        self.worker_id = self.config.get("worker_id")

        if not self.worker_id:
            raise ValueError("⚠️ [APP] Configuration is missing 'worker_id'.")

        self.thread = None                   # background thread running _run()
        self.stop_event = threading.Event()  # signals _run() and the listener to stop
        self.lock = threading.Lock()         # guards start()/stop() against races

        # Initialize RabbitMQ listener; messages are delivered to
        # _process_pipeline_preview_message.
        self.listener = RabbitMQListener(
            self.config, self.worker_id, self.stop_event, self._process_pipeline_preview_message
        )

    def start(self):
        """Start the Pipeline Preview Worker (no-op if already running)."""
        with self.lock:
            if self.thread and self.thread.is_alive():
                logger.warning("⚠️ [APP] Pipeline Preview Worker is already running.")
                return

            self.stop_event.clear()
            self.thread = threading.Thread(target=self._run, daemon=True)
            self.thread.start()
            logger.info(f"🚀 [APP] Pipeline Preview Worker started (Device: {self.worker_id}).")

    def stop(self):
        """Stop the Pipeline Preview Worker (no-op if not running)."""
        with self.lock:
            if not self.thread or not self.thread.is_alive():
                logger.warning("⚠️ [APP] Pipeline Preview Worker is not running.")
                return

            self.stop_event.set()
            self.listener.stop_listening()

            # safe_join_thread avoids RuntimeError if stop() is ever invoked
            # from within the worker thread itself.
            safe_join_thread(self.thread)
            self.thread = None
            logger.info(f"🛑 [APP] Pipeline Preview Worker stopped (Device: {self.worker_id}).")

    def _run(self):
        """Main loop: keep the RabbitMQ listener alive, reconnecting on disconnect."""
        try:
            while not self.stop_event.is_set():
                logger.info("📡 [APP] Starting pipeline preview message listener...")
                self.listener.start_listening(
                    exchange_name="nedo.worker.pipeline.preview",
                    queue_name=f"nedo.worker.pipeline.preview.{self.worker_id}"
                )

                # Wait for the listener thread to finish (connection lost or stop requested)
                while not self.stop_event.is_set() and self.listener.listener_thread and self.listener.listener_thread.is_alive():
                    self.listener.listener_thread.join(timeout=5)

                if not self.stop_event.is_set():
                    logger.warning("⚠️ [APP] Pipeline preview listener disconnected. Attempting to reconnect in 10 seconds...")
                    self.stop_event.wait(10)
                else:
                    logger.info("📡 [APP] Pipeline preview listener stopped.")
                    break

        except Exception:
            logger.error("🚨 [APP] Unexpected error in Pipeline Preview Worker loop.", exc_info=True)

    def _process_pipeline_preview_message(self, message):
        """
        Process messages related to pipeline preview streaming.
        Updates the last_preview_request_at timestamp for the specified pipeline.
        """
        try:
            data = json.loads(message)
            worker_id = data.get("workerId")
            pipeline_id = data.get("pipelineId")

            logger.info(f"📡 [APP] Received pipeline preview message ({data})")

            # Validate required fields
            if not pipeline_id:
                logger.error("⚠️ [APP] Missing pipelineId in message")
                return

            # Ignore messages addressed to a different worker.
            if worker_id != self.worker_id:
                logger.warning(f"⚠️ [APP] Worker ID mismatch: expected {self.worker_id}, got {worker_id}")
                return

            # Update the last_preview_request_at timestamp in database
            self._update_pipeline_preview_timestamp(pipeline_id)

        except json.JSONDecodeError:
            logger.error("⚠️ [APP] Invalid JSON message format.")
        except Exception:
            logger.error("🚨 [APP] Error processing pipeline preview message.", exc_info=True)

    def _update_pipeline_preview_timestamp(self, pipeline_id: str):
        """
        Update the last_preview_request_at timestamp for the specified pipeline.

        Stores a naive UTC datetime — the same value datetime.utcnow() produced,
        but without the utcnow() deprecation (deprecated since Python 3.12).

        Args:
            pipeline_id (str): The ID of the pipeline to update.
        """
        # Local import: the module top imports only `datetime` from datetime.
        from datetime import timezone

        session = None
        try:
            session = DatabaseManager.get_session("config")

            # Find the pipeline, scoped to this worker's rows.
            pipeline = session.query(WorkerSourcePipelineEntity).filter_by(
                id=pipeline_id,
                worker_id=self.worker_id
            ).first()

            if not pipeline:
                logger.error(f"⚠️ [APP] Pipeline not found: {pipeline_id}")
                return

            # Update timestamp (naive UTC, preserving the utcnow() convention).
            pipeline.last_preview_request_at = datetime.now(timezone.utc).replace(tzinfo=None)
            session.commit()

            # NOTE(review): assumes the entity exposes a `name` attribute — not
            # visible in this file; confirm against the model definition.
            logger.info(f"✅ [APP] Updated preview timestamp for pipeline {pipeline_id} ({pipeline.name})")

        except Exception as e:
            if session:
                session.rollback()
            logger.error(f"🚨 [APP] Error updating pipeline preview timestamp: {e}", exc_info=True)
        finally:
            if session:
                session.close()
@@ -74,8 +74,28 @@ class SystemUsageManager:
74
74
  try:
75
75
  usage = self.system_monitor.get_system_usage()
76
76
  cpu_usage = usage["cpu"]["usage_percent"]
77
+ cpu_temperature_raw = usage["cpu"]["temperature_celsius"]
77
78
  ram_usage = usage["ram"]
78
- gpu_usage = usage.get("gpu", [])
79
+ gpu_usage_raw = usage.get("gpu", [])
80
+
81
+ if isinstance(cpu_temperature_raw, (int, float)):
82
+ cpu_temperature = float(cpu_temperature_raw)
83
+ elif isinstance(cpu_temperature_raw, list):
84
+ if cpu_temperature_raw:
85
+ cpu_temperature = float(sum(cpu_temperature_raw) / len(cpu_temperature_raw))
86
+ else:
87
+ cpu_temperature = 0.0
88
+ elif isinstance(cpu_temperature_raw, dict):
89
+ cpu_temperature = 0.0
90
+ else:
91
+ cpu_temperature = 0.0
92
+
93
+ if isinstance(gpu_usage_raw, dict):
94
+ gpu_usage = []
95
+ elif isinstance(gpu_usage_raw, list):
96
+ gpu_usage = gpu_usage_raw
97
+ else:
98
+ gpu_usage = []
79
99
 
80
100
  with self.latency_lock:
81
101
  latency = self.latency if self.latency is not None else -1
@@ -85,6 +105,7 @@ class SystemUsageManager:
85
105
  cpu_usage=cpu_usage,
86
106
  ram_usage=ram_usage,
87
107
  gpu_usage=gpu_usage,
108
+ cpu_temperature=cpu_temperature,
88
109
  latency=latency,
89
110
  token=self.token,
90
111
  )
@@ -92,8 +113,6 @@ class SystemUsageManager:
92
113
  if not response or not response.get("success"):
93
114
  error_message = GrpcClientBase.get_error_message(response)
94
115
  logger.error(f"❌ [APP] Failed to send system usage: {error_message}")
95
- #else:
96
- # logger.info("✅ [APP] System usage sent successfully.")
97
116
 
98
117
  except Exception as e:
99
118
  logger.error("🚨 [APP] Error sending system usage.", exc_info=True)
@@ -7,6 +7,7 @@ from .DataSyncWorker import DataSyncWorker
7
7
  from .DataSenderWorker import DataSenderWorker
8
8
  from .PipelineImageWorker import PipelineImageWorker
9
9
  from .VideoStreamWorker import VideoStreamWorker
10
+ from .PipelinePreviewWorker import PipelinePreviewWorker
10
11
  from .CoreActionWorker import CoreActionWorker
11
12
  from .DatasetFrameWorker import DatasetFrameWorker
12
13
 
@@ -38,6 +39,7 @@ class WorkerManager:
38
39
  self.data_sync_worker = DataSyncWorker(config, sync_interval=10)
39
40
  self.data_sender_worker = DataSenderWorker(config, send_interval=10)
40
41
  self.video_stream_worker = VideoStreamWorker(config)
42
+ self.pipeline_preview_worker = PipelinePreviewWorker(config)
41
43
  self.pipeline_image_worker = PipelineImageWorker(config)
42
44
  self.pipeline_action_worker = PipelineActionWorker(config)
43
45
  self.core_action_worker = CoreActionWorker(config, self._start_workers, self._stop_workers)
@@ -47,6 +49,7 @@ class WorkerManager:
47
49
  """Start processing workers while keeping monitoring workers running."""
48
50
  try:
49
51
  self.video_stream_worker.start()
52
+ self.pipeline_preview_worker.start()
50
53
  self.pipeline_image_worker.start()
51
54
  self.data_sender_worker.start_updating()
52
55
  self.dataset_frame_worker.start()
@@ -60,6 +63,7 @@ class WorkerManager:
60
63
  """Stop processing workers while keeping monitoring workers running."""
61
64
  try:
62
65
  self.video_stream_worker.stop()
66
+ self.pipeline_preview_worker.stop()
63
67
  self.pipeline_image_worker.stop()
64
68
  self.data_sender_worker.stop_updating()
65
69
  self.dataset_frame_worker.stop()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nedo-vision-worker
3
- Version: 1.2.6
3
+ Version: 1.2.7
4
4
  Summary: Nedo Vision Worker Service Library for AI Vision Processing
5
5
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
6
6
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -40,9 +40,10 @@ Requires-Dist: protobuf>=3.20.0
40
40
  Requires-Dist: psutil>=5.9.0
41
41
  Requires-Dist: requests>=2.28.0
42
42
  Requires-Dist: SQLAlchemy>=1.4.0
43
- Requires-Dist: opencv-python>=4.6.0; platform_machine not in "aarch64 armv7l"
44
- Requires-Dist: opencv-python-headless>=4.6.0; platform_machine in "aarch64 armv7l"
45
43
  Requires-Dist: pynvml>=11.4.1; platform_system != "Darwin" or platform_machine != "arm64"
44
+ Provides-Extra: opencv
45
+ Requires-Dist: opencv-python>=4.6.0; platform_machine not in "aarch64 armv7l" and extra == "opencv"
46
+ Requires-Dist: opencv-python-headless>=4.6.0; platform_machine in "aarch64 armv7l" and extra == "opencv"
46
47
  Provides-Extra: dev
47
48
  Requires-Dist: pytest>=7.0.0; extra == "dev"
48
49
  Requires-Dist: black>=22.0.0; extra == "dev"
@@ -1,10 +1,10 @@
1
- nedo_vision_worker/__init__.py,sha256=3uSTVntcW747CFtMZ_qyKrptoNgHDvLNjg0pdgb4otM,203
1
+ nedo_vision_worker/__init__.py,sha256=1QtTEMaMlbF3PH-GmXuZzF1PvxRe_CUCme72WXqFci0,203
2
2
  nedo_vision_worker/cli.py,sha256=ddWspJmSgVkcUYvRdkvTtMNuMTDvNCqLLuMVU9KE3Ik,7457
3
3
  nedo_vision_worker/doctor.py,sha256=wNkpe8gLVd76Y_ViyK2h1ZFdqeSl37MnzZN5frWKu30,48410
4
4
  nedo_vision_worker/worker_service.py,sha256=rXUVmyxcJPGhQEZ4UQvjQS5UqlnLBYudHQZCj0dQDxo,10421
5
5
  nedo_vision_worker/config/ConfigurationManager.py,sha256=QrQaQ9Cdjpkcr2JE_miyrWJIZmMgZwJYBz-wE45Zzes,8011
6
6
  nedo_vision_worker/config/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
7
- nedo_vision_worker/database/DatabaseManager.py,sha256=BZAK3VzbAo_xWUNCni4O8rCLzCjY7mE02PI2HlEpqg4,8505
7
+ nedo_vision_worker/database/DatabaseManager.py,sha256=j2koXo1fnMmAyQnY4sv4txfZR8qIzrPyev-sQ4HBaOQ,9478
8
8
  nedo_vision_worker/database/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
9
9
  nedo_vision_worker/initializer/AppInitializer.py,sha256=iGw8-7Eg2aNT-nFaxiRIhWHU8iwecLuwuvjWlt1-V0Y,3163
10
10
  nedo_vision_worker/initializer/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
@@ -19,7 +19,7 @@ nedo_vision_worker/models/ppe_detection_label.py,sha256=qON7a0fuDv5cK8phGH0gGTzq
19
19
  nedo_vision_worker/models/restricted_area_violation.py,sha256=0enCi3tv15YMy3NaI6FwqhmLYHbbVX4nWTh46qKxrWc,829
20
20
  nedo_vision_worker/models/user.py,sha256=SnLUz2nS7j17bIP-gElMEaR-jWdnNQ0fTpRminVKY60,294
21
21
  nedo_vision_worker/models/worker_source.py,sha256=FB8irZ26LhCKNHBcpIIb5Mi3SoSNm9-q25VIkO5jQWg,793
22
- nedo_vision_worker/models/worker_source_pipeline.py,sha256=xCD4i9pHr8Qy5B_h1dH0Q7V7faS2lAou2UNEzx24oIw,752
22
+ nedo_vision_worker/models/worker_source_pipeline.py,sha256=CGA_nz5wywsJcBPm-5kd0v_-h59f8Iu7uEeX3C91eT4,824
23
23
  nedo_vision_worker/models/worker_source_pipeline_config.py,sha256=dGYTpcTFFu6pmGBufuWBHjv3Xs4RGAQwZn6jp6Ondvs,876
24
24
  nedo_vision_worker/models/worker_source_pipeline_debug.py,sha256=6S7TkN37FrAT4VwsEB38DWSad7QfvNhsOGtSEK8D1Qs,594
25
25
  nedo_vision_worker/models/worker_source_pipeline_detection.py,sha256=p6CJsiVCKprTYrNxJsiTB8njXdHkjZKVEyBceRVE6fY,560
@@ -52,15 +52,16 @@ nedo_vision_worker/services/ConnectionInfoClient.py,sha256=toC9zuY2Hrx1Cwq8Gycy_
52
52
  nedo_vision_worker/services/DatasetSourceClient.py,sha256=O5a7onxFl0z47zXaMXWxHAMPuuc-i_vzkd2w5fwrukc,3319
53
53
  nedo_vision_worker/services/DirectDeviceToRTMPStreamer.py,sha256=M5ei0cd3_KDhHZp6EkrOowhAY-hAHfAQh9YDVjQtbQI,22278
54
54
  nedo_vision_worker/services/FileToRTMPServer.py,sha256=yUJxrouoTLSq9XZ88dhDYhP-px10jLoHopkPoy4lQxk,2663
55
- nedo_vision_worker/services/GrpcClientBase.py,sha256=bRNeajiPGcJZtNofD_HU7JhLHVPbnuGacqv5Dp62GC0,7400
55
+ nedo_vision_worker/services/GrpcClientBase.py,sha256=hPyxOGw3aGSW1FhmY3wp3Iq8U1MArXBmvEMdmd63NZ4,6827
56
56
  nedo_vision_worker/services/GrpcClientManager.py,sha256=DLXekmxlQogLo8V9-TNDXtyHT_UG-BaggqwsIups55k,5568
57
+ nedo_vision_worker/services/GrpcConnection.py,sha256=UNjaUC4ZcXuteHQx8AAAL5ymYkT1OpoIvyCYPUc3tCI,4915
57
58
  nedo_vision_worker/services/ImageUploadClient.py,sha256=T353YsRfm74G7Mh-eWr5nvdQHXTfpKwHJFmNW8HyjT8,3019
58
59
  nedo_vision_worker/services/PPEDetectionClient.py,sha256=CC-b0LRAgrftfIKp6TFKpeBkTYefe-C6Z1oz_X3HArQ,4345
59
60
  nedo_vision_worker/services/RTSPtoRTMPStreamer.py,sha256=hkPRX6iGxUtivsOA36JWBopHEd1RlmgIP8SLqN4d2TU,3863
60
61
  nedo_vision_worker/services/RestrictedAreaClient.py,sha256=AE9SOcVQca4zn5V96boho56EgN5BCIpV-8grvFBBnGo,3853
61
62
  nedo_vision_worker/services/SharedDirectDeviceClient.py,sha256=dylMhqpMsfK_UKLWIVL-ApJRP4g-NCP_55xvlGYBiwo,10760
62
63
  nedo_vision_worker/services/SharedVideoStreamServer.py,sha256=WMKVxkzMoyfbgYiJ0fQOT-Ujz9btz6FLlaDP738yfoY,11601
63
- nedo_vision_worker/services/SystemUsageClient.py,sha256=PbRuwDWKnMarcnkGtOKfYB5nA-3DeKv7V5_hZr8EDEo,3200
64
+ nedo_vision_worker/services/SystemUsageClient.py,sha256=Yf77dooQeNh6CDL5FkWVrX9543OVz1wc3exCAg6GlWw,3273
64
65
  nedo_vision_worker/services/SystemWideDeviceCoordinator.py,sha256=9zBJMCbTMZS7gwN67rFpoUiTr82El2rpIO7DKFzeMjM,9417
65
66
  nedo_vision_worker/services/VideoSharingDaemon.py,sha256=hYMjUIKNUVT1qSxuUuHN-7Bd85MDkxfqslxDLe2PBYQ,29721
66
67
  nedo_vision_worker/services/VideoStreamClient.py,sha256=QSgUV3LijYrNdnBG1ylABOdUaSatQamfXaqJhAiol9M,7260
@@ -74,7 +75,7 @@ nedo_vision_worker/util/HardwareID.py,sha256=rSW8-6stm7rjXEdkYGqXMUn56gyw62YiWnS
74
75
  nedo_vision_worker/util/ImageUploader.py,sha256=2xipN3fwpKgFmbvoGIdElpGn5ARJyrgR4dXtbRf73hw,3764
75
76
  nedo_vision_worker/util/Networking.py,sha256=uOtL8HkKZXJp02ZZIHWYMAvAsaYb7BsAPTncfdvJx2c,3241
76
77
  nedo_vision_worker/util/PlatformDetector.py,sha256=-iLPrKs7hp_oltkCI3hESJQkC2uRyu1-8mAbZrvgWx0,1501
77
- nedo_vision_worker/util/SystemMonitor.py,sha256=2MWYaEXoL91UANT_-SuDWrFMq1ajPorh8co6Py9PV_c,11300
78
+ nedo_vision_worker/util/SystemMonitor.py,sha256=2kkqj9mOlywAS2fHdN1TaIXSXvCApcIHj0IO2T9k3Yw,11471
78
79
  nedo_vision_worker/util/VideoProbeUtil.py,sha256=cF-vJ7hIDlXfEJby2a0s9tqwkPGVz_6B3Vv4D5pMmIw,12876
79
80
  nedo_vision_worker/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
80
81
  nedo_vision_worker/worker/CoreActionWorker.py,sha256=lb7zPY3yui6I3F4rX4Ii7JwpWZahLEO72rh3iWOgFmg,5441
@@ -85,14 +86,15 @@ nedo_vision_worker/worker/DatasetFrameWorker.py,sha256=Ni5gPeDPk9Rz4_cbg63u7Y6LV
85
86
  nedo_vision_worker/worker/PPEDetectionManager.py,sha256=fAolWlrsY5SQAWygvjNBNU56IlC0HLlhPgpz7shL-gk,3588
86
87
  nedo_vision_worker/worker/PipelineActionWorker.py,sha256=xgvryjKtEsMj4BKqWzDIaK_lFny-DfMCj5Y2DxHnWww,5651
87
88
  nedo_vision_worker/worker/PipelineImageWorker.py,sha256=J8VBUG0cwcH3qOJp2zTl30B-XhmPFyvJLjxitKJYq0E,5642
89
+ nedo_vision_worker/worker/PipelinePreviewWorker.py,sha256=owFiBbktcOZkdImQeykZSeBIR2-mpt6HNkmYIkLRKzE,6397
88
90
  nedo_vision_worker/worker/RabbitMQListener.py,sha256=9gR49MDplgpyb-D5HOH0K77-DJQFvhS2E7biL92SjSU,6950
89
91
  nedo_vision_worker/worker/RestrictedAreaManager.py,sha256=3yoXgQ459tV1bOa5choEzR9gE6LklrtHR_e0472U3L0,3521
90
- nedo_vision_worker/worker/SystemUsageManager.py,sha256=StutV4UyLUfduYfK20de4SbPd7wqkR7io0gsOajxWSU,4509
92
+ nedo_vision_worker/worker/SystemUsageManager.py,sha256=mkh4sT-HkIEY1CJHMEG6LP9ATu39YXvLRLyf995OkoQ,5315
91
93
  nedo_vision_worker/worker/VideoStreamWorker.py,sha256=5n6v1PNO7IB-jj_McALLkUP-cBjJoIEw4UiSAs3vTb0,7606
92
- nedo_vision_worker/worker/WorkerManager.py,sha256=T0vMfhOd7YesgQ9o2w6soeJ6n9chUAcuwcGe7p31xr0,5298
94
+ nedo_vision_worker/worker/WorkerManager.py,sha256=2bxXi19fp3p1qjYBStYRdVVgko8dnevXx1_M_sqH5og,5521
93
95
  nedo_vision_worker/worker/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
94
- nedo_vision_worker-1.2.6.dist-info/METADATA,sha256=irbHs4_-uj182kff6PpW0YND59Q_UwhsPVLb5wI5NNQ,14661
95
- nedo_vision_worker-1.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
96
- nedo_vision_worker-1.2.6.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
97
- nedo_vision_worker-1.2.6.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
98
- nedo_vision_worker-1.2.6.dist-info/RECORD,,
96
+ nedo_vision_worker-1.2.7.dist-info/METADATA,sha256=xcM1OD1cAy83Rz4OaFYnHsHO8-DgNBrP0zEX9-_uX3I,14728
97
+ nedo_vision_worker-1.2.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
98
+ nedo_vision_worker-1.2.7.dist-info/entry_points.txt,sha256=LrglS-8nCi8C_PL_pa6uxdgCe879hBETHDVXAckvs-8,60
99
+ nedo_vision_worker-1.2.7.dist-info/top_level.txt,sha256=vgilhlkyD34YsEKkaBabmhIpcKSvF3XpzD2By68L-XI,19
100
+ nedo_vision_worker-1.2.7.dist-info/RECORD,,