nedo-vision-worker 1.0.0 (nedo_vision_worker-1.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nedo_vision_worker/__init__.py +10 -0
- nedo_vision_worker/cli.py +195 -0
- nedo_vision_worker/config/ConfigurationManager.py +196 -0
- nedo_vision_worker/config/__init__.py +1 -0
- nedo_vision_worker/database/DatabaseManager.py +219 -0
- nedo_vision_worker/database/__init__.py +1 -0
- nedo_vision_worker/doctor.py +453 -0
- nedo_vision_worker/initializer/AppInitializer.py +78 -0
- nedo_vision_worker/initializer/__init__.py +1 -0
- nedo_vision_worker/models/__init__.py +15 -0
- nedo_vision_worker/models/ai_model.py +29 -0
- nedo_vision_worker/models/auth.py +14 -0
- nedo_vision_worker/models/config.py +9 -0
- nedo_vision_worker/models/dataset_source.py +30 -0
- nedo_vision_worker/models/logs.py +9 -0
- nedo_vision_worker/models/ppe_detection.py +39 -0
- nedo_vision_worker/models/ppe_detection_label.py +20 -0
- nedo_vision_worker/models/restricted_area_violation.py +20 -0
- nedo_vision_worker/models/user.py +10 -0
- nedo_vision_worker/models/worker_source.py +19 -0
- nedo_vision_worker/models/worker_source_pipeline.py +21 -0
- nedo_vision_worker/models/worker_source_pipeline_config.py +24 -0
- nedo_vision_worker/models/worker_source_pipeline_debug.py +15 -0
- nedo_vision_worker/models/worker_source_pipeline_detection.py +14 -0
- nedo_vision_worker/protos/AIModelService_pb2.py +46 -0
- nedo_vision_worker/protos/AIModelService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/DatasetSourceService_pb2.py +46 -0
- nedo_vision_worker/protos/DatasetSourceService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/HumanDetectionService_pb2.py +44 -0
- nedo_vision_worker/protos/HumanDetectionService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/PPEDetectionService_pb2.py +46 -0
- nedo_vision_worker/protos/PPEDetectionService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/VisionWorkerService_pb2.py +72 -0
- nedo_vision_worker/protos/VisionWorkerService_pb2_grpc.py +471 -0
- nedo_vision_worker/protos/WorkerSourcePipelineService_pb2.py +64 -0
- nedo_vision_worker/protos/WorkerSourcePipelineService_pb2_grpc.py +312 -0
- nedo_vision_worker/protos/WorkerSourceService_pb2.py +50 -0
- nedo_vision_worker/protos/WorkerSourceService_pb2_grpc.py +183 -0
- nedo_vision_worker/protos/__init__.py +1 -0
- nedo_vision_worker/repositories/AIModelRepository.py +44 -0
- nedo_vision_worker/repositories/DatasetSourceRepository.py +150 -0
- nedo_vision_worker/repositories/PPEDetectionRepository.py +112 -0
- nedo_vision_worker/repositories/RestrictedAreaRepository.py +88 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineDebugRepository.py +90 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineDetectionRepository.py +48 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineRepository.py +174 -0
- nedo_vision_worker/repositories/WorkerSourceRepository.py +46 -0
- nedo_vision_worker/repositories/__init__.py +1 -0
- nedo_vision_worker/services/AIModelClient.py +362 -0
- nedo_vision_worker/services/ConnectionInfoClient.py +57 -0
- nedo_vision_worker/services/DatasetSourceClient.py +88 -0
- nedo_vision_worker/services/FileToRTMPServer.py +78 -0
- nedo_vision_worker/services/GrpcClientBase.py +155 -0
- nedo_vision_worker/services/GrpcClientManager.py +141 -0
- nedo_vision_worker/services/ImageUploadClient.py +82 -0
- nedo_vision_worker/services/PPEDetectionClient.py +108 -0
- nedo_vision_worker/services/RTSPtoRTMPStreamer.py +98 -0
- nedo_vision_worker/services/RestrictedAreaClient.py +100 -0
- nedo_vision_worker/services/SystemUsageClient.py +77 -0
- nedo_vision_worker/services/VideoStreamClient.py +161 -0
- nedo_vision_worker/services/WorkerSourceClient.py +215 -0
- nedo_vision_worker/services/WorkerSourcePipelineClient.py +393 -0
- nedo_vision_worker/services/WorkerSourceUpdater.py +134 -0
- nedo_vision_worker/services/WorkerStatusClient.py +65 -0
- nedo_vision_worker/services/__init__.py +1 -0
- nedo_vision_worker/util/HardwareID.py +104 -0
- nedo_vision_worker/util/ImageUploader.py +92 -0
- nedo_vision_worker/util/Networking.py +94 -0
- nedo_vision_worker/util/PlatformDetector.py +50 -0
- nedo_vision_worker/util/SystemMonitor.py +299 -0
- nedo_vision_worker/util/VideoProbeUtil.py +120 -0
- nedo_vision_worker/util/__init__.py +1 -0
- nedo_vision_worker/worker/CoreActionWorker.py +125 -0
- nedo_vision_worker/worker/DataSenderWorker.py +168 -0
- nedo_vision_worker/worker/DataSyncWorker.py +143 -0
- nedo_vision_worker/worker/DatasetFrameSender.py +208 -0
- nedo_vision_worker/worker/DatasetFrameWorker.py +412 -0
- nedo_vision_worker/worker/PPEDetectionManager.py +86 -0
- nedo_vision_worker/worker/PipelineActionWorker.py +129 -0
- nedo_vision_worker/worker/PipelineImageWorker.py +116 -0
- nedo_vision_worker/worker/RabbitMQListener.py +170 -0
- nedo_vision_worker/worker/RestrictedAreaManager.py +85 -0
- nedo_vision_worker/worker/SystemUsageManager.py +111 -0
- nedo_vision_worker/worker/VideoStreamWorker.py +139 -0
- nedo_vision_worker/worker/WorkerManager.py +155 -0
- nedo_vision_worker/worker/__init__.py +1 -0
- nedo_vision_worker/worker_service.py +264 -0
- nedo_vision_worker-1.0.0.dist-info/METADATA +563 -0
- nedo_vision_worker-1.0.0.dist-info/RECORD +92 -0
- nedo_vision_worker-1.0.0.dist-info/WHEEL +5 -0
- nedo_vision_worker-1.0.0.dist-info/entry_points.txt +2 -0
- nedo_vision_worker-1.0.0.dist-info/top_level.txt +1 -0
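
Taken together, the listing shows a layered worker agent: ORM-style models and repositories over a local database, generated gRPC stubs under protos/ with thin client wrappers in services/, long-running threads in worker/, and a worker_service.py module that wires everything together. As a quick orientation, a minimal programmatic start might look like the sketch below; it relies only on the WorkerService constructor shown later in this diff, and the token and RTMP values are placeholders.

    # Minimal sketch, assuming a valid worker token and a reachable RTMP server;
    # the token and RTMP values below are placeholders, not defaults from the package.
    from nedo_vision_worker.worker_service import WorkerService

    service = WorkerService(
        server_host="be.vision.sindika.co.id",    # default host used by the package
        token="<token obtained from the frontend>",
        rtmp_server="rtmp://media.example/live",  # required; WorkerService raises without it
        storage_path="data",
    )
    service.run()  # initialize() then start(); blocks until the service is stopped
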
@@ -0,0 +1,111 @@ nedo_vision_worker/worker/SystemUsageManager.py
import logging
import threading
import time
from ..util.SystemMonitor import SystemMonitor
from ..services.SystemUsageClient import SystemUsageClient
from ..services.GrpcClientBase import GrpcClientBase
from ..util.Networking import Networking

logger = logging.getLogger(__name__)

def safe_join_thread(thread, timeout=5):
    """Safely join a thread, avoiding RuntimeError when joining current thread."""
    if thread and thread != threading.current_thread():
        thread.join(timeout=timeout)
    elif thread == threading.current_thread():
        logging.info("🛑 [APP] Thread stopping from within itself, skipping join.")

class SystemUsageManager:
    def __init__(self, server_host: str, device_id: str, token: str):
        """
        Handles system usage monitoring, latency tracking, and reporting.

        Args:
            server_host (str): The gRPC server host.
            device_id (str): Unique ID of the device (passed externally).
            token (str): Authentication token for the worker.
        """
        if not device_id:
            raise ValueError("⚠️ [APP] 'device_id' cannot be empty.")
        if not token:
            raise ValueError("⚠️ [APP] 'token' cannot be empty.")

        self.system_monitor = SystemMonitor()
        self.system_usage_client = SystemUsageClient(server_host)
        self.server_host = server_host
        self.device_id = device_id
        self.token = token
        self.latency = None
        self.latency_lock = threading.Lock()
        self.stop_event = threading.Event()
        self.latency_thread = None

        self._start_latency_monitoring()

    def _start_latency_monitoring(self):
        """Starts a background thread to monitor network latency."""
        if self.latency_thread and self.latency_thread.is_alive():
            logger.warning("⚠️ [APP] Latency monitoring thread is already running.")
            return

        self.latency_thread = threading.Thread(target=self._monitor_latency, daemon=True)
        self.latency_thread.start()
        logger.info("📡 [APP] Latency monitoring started.")

    def _monitor_latency(self):
        """Periodically checks the network latency using gRPC and updates the latency variable."""
        server_port = 50051  # Default gRPC port

        while not self.stop_event.is_set():
            try:
                latency_value = Networking.check_grpc_latency(self.server_host, server_port)
                with self.latency_lock:
                    self.latency = latency_value
                # logger.info(f"🔄 [APP] Updated network latency: {latency_value} ms")

            except Exception as e:
                logger.error("🚨 [APP] Error checking gRPC latency.", exc_info=True)
                with self.latency_lock:
                    self.latency = None
            time.sleep(10)

    def process_system_usage(self):
        """Collect and send system usage data to the server, including network latency."""
        try:
            usage = self.system_monitor.get_system_usage()
            cpu_usage = usage["cpu"]["usage_percent"]
            ram_usage = usage["ram"]
            gpu_usage = usage.get("gpu", [])

            with self.latency_lock:
                latency = self.latency if self.latency is not None else -1

            response = self.system_usage_client.send_system_usage(
                device_id=self.device_id,
                cpu_usage=cpu_usage,
                ram_usage=ram_usage,
                gpu_usage=gpu_usage,
                latency=latency,
                token=self.token,
            )

            if not response or not response.get("success"):
                error_message = GrpcClientBase.get_error_message(response)
                logger.error(f"❌ [APP] Failed to send system usage: {error_message}")
            # else:
            #     logger.info("✅ [APP] System usage sent successfully.")

        except Exception as e:
            logger.error("🚨 [APP] Error sending system usage.", exc_info=True)

    def close(self):
        """Closes the system usage client and stops the latency thread."""
        self.stop_event.set()

        if self.latency_thread and self.latency_thread.is_alive():
            safe_join_thread(self.latency_thread)
            logger.info("🔌 [APP] Latency monitoring thread stopped.")

        if self.system_usage_client:
            self.system_usage_client.close_client()
            logger.info("✅ [APP] SystemUsageClient closed.")
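
The class above owns the latency thread but leaves the send cadence to its caller, and the actual caller inside the package is not part of this hunk. A minimal driver sketch, assuming a 30-second reporting interval and placeholder credentials:

    # Hypothetical driver loop; the device_id/token values are placeholders and the
    # real scheduling lives elsewhere in the package (e.g. a worker thread).
    import time
    from nedo_vision_worker.worker.SystemUsageManager import SystemUsageManager

    manager = SystemUsageManager(
        server_host="be.vision.sindika.co.id",
        device_id="worker-1234",
        token="<worker token>",
    )
    try:
        while True:
            manager.process_system_usage()  # snapshot CPU/RAM/GPU plus latest latency, send via gRPC
            time.sleep(30)
    except KeyboardInterrupt:
        pass
    finally:
        manager.close()  # stop the latency thread and close the gRPC client
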
@@ -0,0 +1,139 @@ nedo_vision_worker/worker/VideoStreamWorker.py
import os
import threading
import logging
import json
from ..database.DatabaseManager import get_storage_path
from ..services.FileToRTMPServer import FileToRTMPStreamer
from .RabbitMQListener import RabbitMQListener
from ..services.RTSPtoRTMPStreamer import RTSPtoRTMPStreamer

logger = logging.getLogger(__name__)

def safe_join_thread(thread, timeout=5):
    """Safely join a thread, avoiding RuntimeError when joining current thread."""
    if thread and thread != threading.current_thread():
        thread.join(timeout=timeout)
    elif thread == threading.current_thread():
        logging.info("🛑 [APP] Thread stopping from within itself, skipping join.")

class VideoStreamWorker:
    def __init__(self, config: dict, stream_duration=300):
        """
        Initialize Video Stream Worker.

        Args:
            config (dict): Configuration object containing settings.
            stream_duration (int): Default stream duration in seconds.
        """
        if not isinstance(config, dict):
            raise ValueError("⚠️ [APP] config must be a dictionary.")

        self.config = config
        self.worker_id = self.config.get("worker_id")
        self.source_file_path = get_storage_path("files") / "source_files"

        if not self.worker_id:
            raise ValueError("⚠️ [APP] Configuration is missing 'worker_id'.")

        self.stream_duration = stream_duration
        self.rtmp_server = self.config.get("rtmp_server")

        if not self.rtmp_server:
            raise ValueError("⚠️ [APP] RTMP server URL is required but not provided in configuration.")

        self.thread = None
        self.stop_event = threading.Event()
        self.lock = threading.Lock()

        # Initialize RabbitMQ listener
        self.listener = RabbitMQListener(
            self.config, self.worker_id, self.stop_event, self._process_video_preview_message
        )

    def start(self):
        """Start the Video Stream Worker."""
        with self.lock:
            if self.thread and self.thread.is_alive():
                logger.warning("⚠️ [APP] Stream Worker is already running.")
                return

            self.stop_event.clear()
            self.thread = threading.Thread(target=self._run, daemon=True)  # ✅ Run as daemon
            self.thread.start()
            logger.info(f"🚀 [APP] Stream Worker started (Device: {self.worker_id}).")

    def stop(self):
        """Stop the Video Stream Worker."""
        with self.lock:
            if not self.thread or not self.thread.is_alive():
                logger.warning("⚠️ [APP] Stream Worker is not running.")
                return

            self.stop_event.set()
            self.listener.stop_listening()

            safe_join_thread(self.thread)  # Ensures the thread stops gracefully
            self.thread = None
            logger.info(f"🛑 [APP] Stream Worker stopped (Device: {self.worker_id}).")

    def _run(self):
        """Main loop to manage RabbitMQ listener."""
        try:
            while not self.stop_event.is_set():
                logger.info("📡 [APP] Waiting for video stream messages...")
                self.listener.start_listening(exchange_name="nedo.worker.stream.preview", queue_name=f"nedo.worker.preview.{self.worker_id}")
                safe_join_thread(self.listener.listener_thread)
        except Exception as e:
            logger.error("🚨 [APP] Unexpected error in Stream Worker loop.", exc_info=True)

    def _process_video_preview_message(self, message):
        """Process messages related to video preview streaming."""
        try:
            data = json.loads(message)
            worker_id = data.get("workerId")
            url = data.get("url")
            uuid = data.get("uuid")
            stream_duration = int(data.get("duration", self.stream_duration))

            logger.info(f"📡 [APP] Received video preview message ({data})")

            if not url or (not url.startswith("rtsp://") and not url.startswith("worker-source/")):
                logger.error(f"⚠️ [APP] Invalid URL: {url}")
                return

            if stream_duration <= 0:
                logger.warning(f"⚠️ [APP] Invalid stream duration {stream_duration}. Using default {self.stream_duration}s.")
                stream_duration = self.stream_duration

            logger.info(f"📡 [APP] Forwarding to RTMP (Worker: {worker_id}, UUID: {uuid})")

            # Start a streaming thread
            threading.Thread(
                target=self._start_stream,
                args=(url, self.rtmp_server, uuid, stream_duration, worker_id),
                daemon=True,
            ).start()

        except json.JSONDecodeError:
            logger.error("⚠️ [APP] Invalid JSON message format.")
        except Exception as e:
            logger.error("🚨 [APP] Error processing video preview message.", exc_info=True)

    def _start_stream(self, url, rtmp_server, stream_key, stream_duration, worker_id):
        """Runs RTSP-to-RTMP streaming in a separate thread."""
        try:
            logger.info(f"🎥 [APP] Starting RTMP stream (Worker: {worker_id})")

            if url.startswith("worker-source/"):
                streamer = FileToRTMPStreamer(self.source_file_path / os.path.basename(url), rtmp_server, stream_key, stream_duration)
            else:
                streamer = RTSPtoRTMPStreamer(url, rtmp_server, stream_key, stream_duration)

            streamer.start_stream()

            # Schedule stream stop
            threading.Timer(stream_duration, streamer.stop_stream).start()
            logger.info(f"⏳ [APP] Stopping RTMP stream in {stream_duration}s.")

        except Exception as e:
            logger.error("🚨 [APP] Error in stream worker.", exc_info=True)
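
For reference, _process_video_preview_message reads workerId, url, uuid, and an optional duration from each JSON message, and _run binds to the exchange and queue named below. The publisher lives outside this package, so the following is only an illustration of the expected payload shape with placeholder values:

    # Illustrative payload only; field names come from _process_video_preview_message,
    # the values are placeholders, and the publishing side is not part of this package.
    import json

    preview_message = json.dumps({
        "workerId": "worker-1234",             # included in the message, used for logging
        "url": "rtsp://camera.local/stream1",  # or "worker-source/<file>" for a stored source file
        "uuid": "preview-3f2c5e9a",            # used as the RTMP stream key
        "duration": 120,                       # seconds; falls back to stream_duration if missing or invalid
    })
    # Published to exchange "nedo.worker.stream.preview" and consumed from
    # queue f"nedo.worker.preview.{worker_id}".
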
@@ -0,0 +1,155 @@ nedo_vision_worker/worker/WorkerManager.py
import logging
from ..services.WorkerStatusClient import WorkerStatusClient
from ..services.GrpcClientManager import GrpcClientManager
from ..services.GrpcClientBase import GrpcClientBase
from .PipelineActionWorker import PipelineActionWorker
from .DataSyncWorker import DataSyncWorker
from .DataSenderWorker import DataSenderWorker
from .PipelineImageWorker import PipelineImageWorker
from .VideoStreamWorker import VideoStreamWorker
from .CoreActionWorker import CoreActionWorker
from .DatasetFrameWorker import DatasetFrameWorker

logger = logging.getLogger(__name__)

class WorkerManager:
    def __init__(self, config):
        """Initialize all worker threads with the given config."""
        self.config = config
        self.worker_id = self.config.get("worker_id")
        self.server_host = self.config.get("server_host")
        self.token = self.config.get("token")

        if not self.worker_id:
            raise ValueError("⚠️ [APP] Configuration is missing 'worker_id'.")
        if not self.server_host:
            raise ValueError("⚠️ [APP] Configuration is missing 'server_host'.")
        if not self.token:
            raise ValueError("⚠️ [APP] Configuration is missing 'token'.")

        # Configure the centralized gRPC client manager
        self.client_manager = GrpcClientManager.get_instance()
        self.client_manager.configure(self.server_host)

        # Get shared client instance
        self.status_client = self.client_manager.get_client(WorkerStatusClient)

        self.data_sync_worker = DataSyncWorker(config, sync_interval=10)
        self.data_sender_worker = DataSenderWorker(config, send_interval=10)
        self.video_stream_worker = VideoStreamWorker(config)
        self.pipeline_image_worker = PipelineImageWorker(config)
        self.pipeline_action_worker = PipelineActionWorker(config)
        self.core_action_worker = CoreActionWorker(config, self._start_workers, self._stop_workers)
        self.dataset_frame_worker = DatasetFrameWorker(config)

    def _start_workers(self):
        """Start processing workers while keeping monitoring workers running."""
        try:
            self.video_stream_worker.start()
            logger.info("🚀 [APP] Video Stream Worker started.")

            self.pipeline_image_worker.start()
            logger.info("🚀 [APP] Pipeline Image Worker started.")

            self.data_sender_worker.start_updating()
            logger.info("🚀 [APP] Data Sender Worker started updating.")

            self.dataset_frame_worker.start()
            logger.info("🚀 [APP] Dataset Frame Worker started.")

            self._update_status("run")

        except Exception as e:
            logger.error("🚨 [APP] Failed to start processing workers.", exc_info=True)

    def _stop_workers(self):
        """Stop processing workers while keeping monitoring workers running."""
        try:
            self.video_stream_worker.stop()
            logger.info("🛑 [APP] Video Stream Worker stopped.")

            self.pipeline_image_worker.stop()
            logger.info("🛑 [APP] Pipeline Image Worker stopped.")

            self.data_sender_worker.stop_updating()
            logger.info("🛑 [APP] Data Sender Worker stopped updating.")

            self.dataset_frame_worker.stop()
            logger.info("🛑 [APP] Dataset Frame Worker stopped.")

            self._update_status("stop")

        except Exception as e:
            logger.error("🚨 [APP] Failed to stop processing workers.", exc_info=True)

    def start_all(self):
        """Start all workers including monitoring workers."""
        try:
            # Start monitoring workers first
            self.core_action_worker.start()
            logging.info("🚀 [APP] Core Action Worker started and listening for commands.")

            self.data_sync_worker.start()
            logger.info("🚀 [APP] Data Sync Worker started.")

            self.data_sender_worker.start()
            logger.info("🚀 [APP] Data Sender Worker started.")

            self.pipeline_action_worker.start()
            logger.info("🚀 [APP] Pipeline Action Worker started.")

            self._start_workers()

            logger.info("✅ [APP] All workers started successfully.")

        except Exception as e:
            logger.error("🚨 [APP] Failed to start all workers.", exc_info=True)

    def stop_all(self):
        """Stop all workers including monitoring workers."""
        try:
            self.core_action_worker.stop()
            logger.info("🛑 [APP] Core Action Worker stopped.")

            self.data_sync_worker.stop()
            logger.info("🛑 [APP] Data Sync Worker stopped.")

            self.data_sender_worker.stop()
            logger.info("🛑 [APP] Data Sender Worker stopped.")

            self.pipeline_action_worker.stop()
            logger.info("🛑 [APP] Pipeline Action Worker stopped.")

            self._stop_workers()

            logger.info("✅ [APP] All workers stopped successfully.")

        except Exception as e:
            logger.error("🚨 [APP] Failed to stop all workers.", exc_info=True)
        finally:
            # Cleanup: close gRPC clients when workers are stopped
            try:
                logger.info("🔌 [APP] Closing gRPC client connections...")
                self.client_manager.close_all_clients()
            except Exception as e:
                logger.warning(f"⚠️ [APP] Error closing gRPC clients: {e}")

    def _update_status(self, status_code):
        """
        Update the worker status via gRPC.

        Args:
            status_code (str): Status code to report to the server
        """
        try:
            logger.info(f"📡 [APP] Updating worker status to {status_code}")
            result = self.status_client.update_worker_status(self.worker_id, status_code, self.token)

            if result["success"]:
                logger.info(f"✅ [APP] Status update successful: {result['message']}")
            else:
                error_message = GrpcClientBase.get_error_message(result)
                logger.warning(f"⚠️ [APP] Status update failed: {error_message}")

        except Exception as e:
            logger.error(f"🚨 [APP] Error updating worker status: {str(e)}")
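
WorkerManager validates worker_id, server_host, and token itself, and the child workers add their own requirements (VideoStreamWorker, for instance, needs rtmp_server); any further keys the other workers expect are not visible in this hunk. A standalone sketch with placeholder values, where the hand-built dict stands in for the config normally assembled by ConfigurationManager and WorkerService:

    # Standalone sketch; in the package the config dict is produced by
    # ConfigurationManager/WorkerService rather than written by hand.
    from nedo_vision_worker.worker.WorkerManager import WorkerManager

    config = {
        "worker_id": "worker-1234",                  # validated in WorkerManager.__init__
        "server_host": "be.vision.sindika.co.id",    # used to configure the shared GrpcClientManager
        "token": "<worker token>",                   # sent with worker status updates
        "rtmp_server": "rtmp://media.example/live",  # required by VideoStreamWorker
    }

    manager = WorkerManager(config)
    manager.start_all()   # monitoring workers first, then processing workers, then status "run"
    # ... run until shutdown ...
    manager.stop_all()    # stop everything, report status "stop", close shared gRPC clients
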
@@ -0,0 +1 @@ nedo_vision_worker/worker/__init__.py (adds a single blank line)
@@ -0,0 +1,264 @@ nedo_vision_worker/worker_service.py
import time
import multiprocessing
import signal
import sys
import logging

# Set multiprocessing start method to 'spawn' for CUDA compatibility
try:
    multiprocessing.set_start_method('spawn', force=True)
except RuntimeError:
    # Method already set, ignore
    pass

from .initializer.AppInitializer import AppInitializer
from .worker.WorkerManager import WorkerManager
from .config.ConfigurationManager import ConfigurationManager
from .util.HardwareID import HardwareID
from .services.GrpcClientBase import set_auth_failure_callback
from .database.DatabaseManager import set_storage_path
from . import models  # Ensure all models are registered before DB init


class WorkerService:
    """
    Main worker service class that manages the worker agent lifecycle.
    Uses hardware ID-based authentication and configuration management.
    """

    def __init__(
        self,
        server_host: str = "be.vision.sindika.co.id",
        token: str = None,
        system_usage_interval: int = 30,
        rtmp_server: str = None,
        storage_path: str = "data",
    ):
        """
        Initialize the worker service.

        Args:
            server_host: Manager server host (default: 'be.vision.sindika.co.id')
            token: Authentication token for the worker (obtained from frontend)
            system_usage_interval: Interval for system usage reporting (default: 30)
            rtmp_server: RTMP server URL for video streaming (required)
            storage_path: Storage path for databases and files (default: 'data')
        """
        # Set the global storage path before any database operations
        set_storage_path(storage_path)

        self.logger = self._setup_logging()
        self.worker_manager = None
        self.running = False
        self.server_host = server_host
        self.token = token
        self.system_usage_interval = system_usage_interval
        self.rtmp_server = rtmp_server
        self.storage_path = storage_path
        self.config = None
        self.auth_failure_detected = False

        # Validate required parameters
        if not rtmp_server:
            raise ValueError("RTMP server URL is required. Please provide --rtmp-server parameter.")

        # Register authentication failure callback
        set_auth_failure_callback(self._on_authentication_failure)

        # Setup signal handlers
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

    def _on_authentication_failure(self):
        """Called when an authentication failure is detected."""
        if not self.auth_failure_detected:
            self.auth_failure_detected = True
            self.logger.error("🔑 [APP] Authentication failure detected. Shutting down service...")
            self.stop()

    def _setup_logging(self):
        """Configure logging settings (allows inline emojis)."""
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s [%(levelname)s] %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        # Only show warnings and errors
        logging.getLogger("sqlalchemy").setLevel(logging.WARNING)
        logging.getLogger("pika").setLevel(logging.WARNING)
        logging.getLogger("grpc").setLevel(logging.FATAL)
        logging.getLogger("ffmpeg").setLevel(logging.FATAL)
        logging.getLogger("subprocess").setLevel(logging.FATAL)

        return logging.getLogger(__name__)

    def _initialize_configuration(self):
        """Initialize the application configuration."""
        self.logger.info("🚀 [APP] Initializing application...")

        # Initialize database
        ConfigurationManager.init_database()

        # Load all configurations at once
        config = ConfigurationManager.get_all_configs()

        # Use the server_host parameter directly
        server_host = self.server_host
        self.logger.info(f"🌐 [APP] Using server host: {server_host}")

        # Check if configuration exists
        if not config:
            self.logger.info("⚙️ [APP] Configuration not found. Performing first-time setup...")

            # Get hardware ID
            hardware_id = HardwareID.get_unique_id()

            self.logger.info(f"🖥️ [APP] Detected Hardware ID: {hardware_id}")
            self.logger.info(f"🌐 [APP] Using Server Host: {server_host}")

            # Check if token is provided
            if not self.token:
                raise ValueError("Token is required for worker initialization. Please provide a token obtained from the frontend.")

            # Initialize with token
            AppInitializer.initialize_configuration(hardware_id, server_host, self.token)

            # Get configuration
            config = ConfigurationManager.get_all_configs()
        else:
            # Check if server_host or token has changed and update if needed
            config_updated = False

            if config['server_host'] != server_host:
                ConfigurationManager.set_config("server_host", server_host)
                config_updated = True
                self.logger.info(f"✅ [APP] Updated server host to: {server_host}")

            # Check if token has changed and update if needed
            if self.token and config.get('token') != self.token:
                ConfigurationManager.set_config("token", self.token)
                config_updated = True
                self.logger.info("✅ [APP] Updated authentication token")

            if config_updated:
                config = ConfigurationManager.get_all_configs()
                self.logger.info("✅ [APP] Configuration updated successfully")
            else:
                self.logger.info("✅ [APP] Configuration found. No changes needed.")

        # Add runtime parameters to config
        config['rtmp_server'] = self.rtmp_server

        return config

    def initialize(self) -> bool:
        """
        Initialize the worker service components.

        Returns:
            bool: True if initialization successful, False otherwise
        """
        try:
            self.logger.info("Worker service initialization started")

            # Initialize configuration
            self.config = self._initialize_configuration()

            if not self.config:
                raise RuntimeError("Failed to initialize configuration")

            # Initialize WorkerManager
            self.worker_manager = WorkerManager(self.config)

            self.logger.info("Worker service initialization completed successfully")
            return True
        except Exception as e:
            self.logger.error(f"Failed to initialize worker service: {e}")
            import traceback
            self.logger.error(traceback.format_exc())
            return False

    def start(self):
        """Start the worker service"""
        if not self.running:
            self.running = True
            self.logger.info("Worker service started")
            try:
                # Start all workers via WorkerManager
                self.worker_manager.start_all()
                # Block main thread to keep process alive
                while self.running and not self.auth_failure_detected:
                    time.sleep(1)

                # If authentication failure was detected, exit with error code
                if self.auth_failure_detected:
                    self.logger.error("🔑 [APP] Service terminated due to authentication failure")
                    sys.exit(1)
            except Exception as e:
                self.logger.error(f"Error in worker service: {e}")
                import traceback
                self.logger.error(traceback.format_exc())
                self.stop()
        else:
            self.logger.info("Service already running.")

    def stop(self):
        """Stop the worker service"""
        if self.running:
            self.running = False
            self.logger.info("Worker service stopping...")
            try:
                # Stop all workers via WorkerManager
                if hasattr(self, 'worker_manager'):
                    self.worker_manager.stop_all()
                self.logger.info("Worker service stopped")
            except Exception as e:
                self.logger.error(f"Error stopping worker service: {e}")
                import traceback
                self.logger.error(traceback.format_exc())
        else:
            self.logger.info("Service already stopped.")

    def _signal_handler(self, signum, frame):
        """Handle system signals for graceful shutdown"""
        self.logger.info(f"Received signal {signum}, shutting down...")
        self.stop()
        sys.exit(0)

    def run(self):
        """Run the worker service"""
        if self.initialize():
            self.start()
        else:
            self.logger.error("Failed to initialize worker service")
            sys.exit(1)


def main():
    """Main entry point for the worker service"""
    import argparse

    parser = argparse.ArgumentParser(description="Nedo Vision Worker Service")
    parser.add_argument(
        "--server-host",
        default="be.vision.sindika.co.id",
        help="Manager server host (default: be.vision.sindika.co.id)"
    )
    parser.add_argument(
        "--system-usage-interval",
        type=int,
        default=30,
        help="System usage reporting interval in seconds (default: 30)"
    )
    args = parser.parse_args()

    # Create and run worker service
    service = WorkerService(
        server_host=args.server_host,
        system_usage_interval=args.system_usage_interval
    )
    service.run()


if __name__ == "__main__":
    main()
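
Because of the __main__ guard, the module can be executed directly, but main() only exposes --server-host and --system-usage-interval, while WorkerService requires rtmp_server (and, on a first run, token); as written, a fresh direct invocation stops at the ValueError in the constructor, so the packaged cli.py presumably supplies these values instead. A hypothetical extended launcher, not part of the package, might look like this:

    # Hypothetical launcher sketch (not shipped with the package): it adds the
    # --token and --rtmp-server flags that WorkerService requires but main() omits.
    import argparse
    from nedo_vision_worker.worker_service import WorkerService

    parser = argparse.ArgumentParser(description="Nedo Vision Worker Service (extended launcher)")
    parser.add_argument("--server-host", default="be.vision.sindika.co.id")
    parser.add_argument("--system-usage-interval", type=int, default=30)
    parser.add_argument("--token", required=True, help="Worker token obtained from the frontend")
    parser.add_argument("--rtmp-server", required=True, help="RTMP server URL for video streaming")
    args = parser.parse_args()

    WorkerService(
        server_host=args.server_host,
        token=args.token,
        system_usage_interval=args.system_usage_interval,
        rtmp_server=args.rtmp_server,
    ).run()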