nedo-vision-worker 1.0.0 (nedo_vision_worker-1.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nedo_vision_worker/__init__.py +10 -0
- nedo_vision_worker/cli.py +195 -0
- nedo_vision_worker/config/ConfigurationManager.py +196 -0
- nedo_vision_worker/config/__init__.py +1 -0
- nedo_vision_worker/database/DatabaseManager.py +219 -0
- nedo_vision_worker/database/__init__.py +1 -0
- nedo_vision_worker/doctor.py +453 -0
- nedo_vision_worker/initializer/AppInitializer.py +78 -0
- nedo_vision_worker/initializer/__init__.py +1 -0
- nedo_vision_worker/models/__init__.py +15 -0
- nedo_vision_worker/models/ai_model.py +29 -0
- nedo_vision_worker/models/auth.py +14 -0
- nedo_vision_worker/models/config.py +9 -0
- nedo_vision_worker/models/dataset_source.py +30 -0
- nedo_vision_worker/models/logs.py +9 -0
- nedo_vision_worker/models/ppe_detection.py +39 -0
- nedo_vision_worker/models/ppe_detection_label.py +20 -0
- nedo_vision_worker/models/restricted_area_violation.py +20 -0
- nedo_vision_worker/models/user.py +10 -0
- nedo_vision_worker/models/worker_source.py +19 -0
- nedo_vision_worker/models/worker_source_pipeline.py +21 -0
- nedo_vision_worker/models/worker_source_pipeline_config.py +24 -0
- nedo_vision_worker/models/worker_source_pipeline_debug.py +15 -0
- nedo_vision_worker/models/worker_source_pipeline_detection.py +14 -0
- nedo_vision_worker/protos/AIModelService_pb2.py +46 -0
- nedo_vision_worker/protos/AIModelService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/DatasetSourceService_pb2.py +46 -0
- nedo_vision_worker/protos/DatasetSourceService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/HumanDetectionService_pb2.py +44 -0
- nedo_vision_worker/protos/HumanDetectionService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/PPEDetectionService_pb2.py +46 -0
- nedo_vision_worker/protos/PPEDetectionService_pb2_grpc.py +140 -0
- nedo_vision_worker/protos/VisionWorkerService_pb2.py +72 -0
- nedo_vision_worker/protos/VisionWorkerService_pb2_grpc.py +471 -0
- nedo_vision_worker/protos/WorkerSourcePipelineService_pb2.py +64 -0
- nedo_vision_worker/protos/WorkerSourcePipelineService_pb2_grpc.py +312 -0
- nedo_vision_worker/protos/WorkerSourceService_pb2.py +50 -0
- nedo_vision_worker/protos/WorkerSourceService_pb2_grpc.py +183 -0
- nedo_vision_worker/protos/__init__.py +1 -0
- nedo_vision_worker/repositories/AIModelRepository.py +44 -0
- nedo_vision_worker/repositories/DatasetSourceRepository.py +150 -0
- nedo_vision_worker/repositories/PPEDetectionRepository.py +112 -0
- nedo_vision_worker/repositories/RestrictedAreaRepository.py +88 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineDebugRepository.py +90 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineDetectionRepository.py +48 -0
- nedo_vision_worker/repositories/WorkerSourcePipelineRepository.py +174 -0
- nedo_vision_worker/repositories/WorkerSourceRepository.py +46 -0
- nedo_vision_worker/repositories/__init__.py +1 -0
- nedo_vision_worker/services/AIModelClient.py +362 -0
- nedo_vision_worker/services/ConnectionInfoClient.py +57 -0
- nedo_vision_worker/services/DatasetSourceClient.py +88 -0
- nedo_vision_worker/services/FileToRTMPServer.py +78 -0
- nedo_vision_worker/services/GrpcClientBase.py +155 -0
- nedo_vision_worker/services/GrpcClientManager.py +141 -0
- nedo_vision_worker/services/ImageUploadClient.py +82 -0
- nedo_vision_worker/services/PPEDetectionClient.py +108 -0
- nedo_vision_worker/services/RTSPtoRTMPStreamer.py +98 -0
- nedo_vision_worker/services/RestrictedAreaClient.py +100 -0
- nedo_vision_worker/services/SystemUsageClient.py +77 -0
- nedo_vision_worker/services/VideoStreamClient.py +161 -0
- nedo_vision_worker/services/WorkerSourceClient.py +215 -0
- nedo_vision_worker/services/WorkerSourcePipelineClient.py +393 -0
- nedo_vision_worker/services/WorkerSourceUpdater.py +134 -0
- nedo_vision_worker/services/WorkerStatusClient.py +65 -0
- nedo_vision_worker/services/__init__.py +1 -0
- nedo_vision_worker/util/HardwareID.py +104 -0
- nedo_vision_worker/util/ImageUploader.py +92 -0
- nedo_vision_worker/util/Networking.py +94 -0
- nedo_vision_worker/util/PlatformDetector.py +50 -0
- nedo_vision_worker/util/SystemMonitor.py +299 -0
- nedo_vision_worker/util/VideoProbeUtil.py +120 -0
- nedo_vision_worker/util/__init__.py +1 -0
- nedo_vision_worker/worker/CoreActionWorker.py +125 -0
- nedo_vision_worker/worker/DataSenderWorker.py +168 -0
- nedo_vision_worker/worker/DataSyncWorker.py +143 -0
- nedo_vision_worker/worker/DatasetFrameSender.py +208 -0
- nedo_vision_worker/worker/DatasetFrameWorker.py +412 -0
- nedo_vision_worker/worker/PPEDetectionManager.py +86 -0
- nedo_vision_worker/worker/PipelineActionWorker.py +129 -0
- nedo_vision_worker/worker/PipelineImageWorker.py +116 -0
- nedo_vision_worker/worker/RabbitMQListener.py +170 -0
- nedo_vision_worker/worker/RestrictedAreaManager.py +85 -0
- nedo_vision_worker/worker/SystemUsageManager.py +111 -0
- nedo_vision_worker/worker/VideoStreamWorker.py +139 -0
- nedo_vision_worker/worker/WorkerManager.py +155 -0
- nedo_vision_worker/worker/__init__.py +1 -0
- nedo_vision_worker/worker_service.py +264 -0
- nedo_vision_worker-1.0.0.dist-info/METADATA +563 -0
- nedo_vision_worker-1.0.0.dist-info/RECORD +92 -0
- nedo_vision_worker-1.0.0.dist-info/WHEEL +5 -0
- nedo_vision_worker-1.0.0.dist-info/entry_points.txt +2 -0
- nedo_vision_worker-1.0.0.dist-info/top_level.txt +1 -0
nedo_vision_worker/services/GrpcClientBase.py
@@ -0,0 +1,155 @@
import grpc
import logging
import time
from grpc import StatusCode

logger = logging.getLogger(__name__)

# Global callback for authentication failures
_auth_failure_callback = None

def set_auth_failure_callback(callback):
    """Set a global callback to be called when authentication failures occur."""
    global _auth_failure_callback
    _auth_failure_callback = callback

def _notify_auth_failure():
    """Notify the registered callback about authentication failure."""
    global _auth_failure_callback
    if _auth_failure_callback:
        _auth_failure_callback()

class GrpcClientBase:
    def __init__(self, server_host: str, server_port: int = 50051, max_retries: int = 3):
        """
        Initialize the gRPC client base.

        Args:
            server_host (str): The server hostname or IP address.
            server_port (int): The server port. Default is 50051.
            max_retries (int): Maximum number of reconnection attempts.
        """
        self.server_address = f"{server_host}:{server_port}"
        self.channel = None
        self.stub = None
        self.connected = False
        self.max_retries = max_retries

    def connect(self, stub_class, retry_interval: int = 2):
        """
        Create a gRPC channel and stub, with retry logic if the server is unavailable.

        Args:
            stub_class: The gRPC stub class for the service.
            retry_interval (int): Initial time in seconds between reconnection attempts.
        """
        attempts = 0
        while attempts < self.max_retries and not self.connected:
            try:
                self.channel = grpc.insecure_channel(self.server_address)
                future = grpc.channel_ready_future(self.channel)
                try:
                    future.result(timeout=30)
                except grpc.FutureTimeoutError:
                    raise grpc.RpcError("gRPC connection timed out.")

                self.stub = stub_class(self.channel)
                self.connected = True
                logger.info("🚀 [APP] Successfully connected to gRPC server at %s", self.server_address)
                return  # Exit if successful

            except grpc.RpcError as e:
                attempts += 1
                self.connected = False

                error_message = getattr(e, "details", lambda: str(e))()
                logger.error("⚠️ [APP] Failed to connect (%d/%d): %s", attempts, self.max_retries, error_message)

                if attempts < self.max_retries:
                    sleep_time = retry_interval * (2 ** (attempts - 1))  # Exponential backoff
                    logger.info("⏳ [APP] Retrying in %d seconds...", sleep_time)
                    time.sleep(sleep_time)
                else:
                    logger.critical("❌ [APP] Maximum retries reached. Could not connect to gRPC server.")

            except Exception as e:
                logger.critical("🚨 [APP] Unexpected error during gRPC initialization: %s", str(e))
                break  # Stop retrying if an unexpected error occurs

    def close(self):
        """
        Close the gRPC channel.
        """
        if self.channel:
            self.channel.close()
            self.connected = False
            logger.info("🔌 [APP] gRPC channel closed.")

    def handle_rpc(self, rpc_call, *args, **kwargs):
        """
        Handle an RPC call with error handling.

        Args:
            rpc_call: The RPC method to call.
            *args: Positional arguments for the RPC call.
            **kwargs: Keyword arguments for the RPC call.

        Returns:
            The RPC response or None if an error occurs.
        """
        try:
            response = rpc_call(*args, **kwargs)
            return response

        except grpc.RpcError as e:
            status_code = e.code()

            # ✅ Extract only the meaningful part of the error message
            error_message = getattr(e, "details", lambda: str(e))()
            error_clean = error_message.split("debug_error_string")[0].strip()

            self.connected = False  # Mark as disconnected for reconnection

            if status_code == StatusCode.UNAVAILABLE:
                logger.warning("⚠️ [APP] Server unavailable. Attempting to reconnect... (Error: %s)", error_clean)
                self.connect(type(self.stub))  # Attempt to reconnect
            elif status_code == StatusCode.DEADLINE_EXCEEDED:
                logger.error("⏳ [APP] RPC timeout error. (Error: %s)", error_clean)
            elif status_code == StatusCode.PERMISSION_DENIED:
                logger.error("🚫 [APP] RPC call failed: Permission denied. (Error: %s)", error_clean)
            elif status_code == StatusCode.UNAUTHENTICATED:
                logger.error("🔑 [APP] Authentication failed. (Error: %s)", error_clean)
                _notify_auth_failure()  # Notify about authentication failure
            elif status_code == StatusCode.INVALID_ARGUMENT:
                logger.error("⚠️ [APP] Invalid argument in RPC call. (Error: %s)", error_clean)
            elif status_code == StatusCode.NOT_FOUND:
                logger.error("🔍 [APP] Requested resource not found. (Error: %s)", error_clean)
            elif status_code == StatusCode.INTERNAL:
                logger.error("💥 [APP] Internal server error encountered. (Error: %s)", error_clean)
            else:
                logger.error("❌ [APP] Unhandled gRPC error: %s (Code: %s)", error_clean, status_code)

            return None  # Ensure the caller handles the failure

    @staticmethod
    def get_error_message(response):
        """
        Extract only the meaningful part of the error message.

        Args:
            response: The RPC response.

        Returns:
            str: The error message.
        """
        if response and response.get("success"):
            return None

        message = response.get("message", "Unknown error") if response else "Unknown error"

        # Check for authentication failure in the message
        if message and ("Invalid authentication token" in message or "authentication" in message.lower()):
            logger.error("🔑 [APP] Authentication failure detected in response: %s", message)
            _notify_auth_failure()

        return message

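A minimal usage sketch (not part of the wheel) of how a service client built on GrpcClientBase is typically wired up; ExampleClient, its Ping stub method, and the on_auth_failure handler are hypothetical placeholders:

from nedo_vision_worker.services.GrpcClientBase import GrpcClientBase, set_auth_failure_callback

def on_auth_failure():
    # Hypothetical handler: e.g. trigger a token refresh or stop the workers.
    print("Authentication failure reported by a gRPC client.")

set_auth_failure_callback(on_auth_failure)

class ExampleClient(GrpcClientBase):
    """Hypothetical client; a real subclass passes its generated *Stub class."""
    def ping(self, stub_class, request):
        self.connect(stub_class)                          # retries with exponential backoff
        return self.handle_rpc(self.stub.Ping, request)   # returns None on gRPC errors
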
nedo_vision_worker/services/GrpcClientManager.py
@@ -0,0 +1,141 @@
import logging
import threading
from typing import Dict, Type, Optional
from .GrpcClientBase import GrpcClientBase

logger = logging.getLogger(__name__)

class GrpcClientManager:
    """
    Centralized gRPC client manager that reuses connections and provides singleton access to clients.
    This optimizes resource usage by sharing connections among multiple workers.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        """Singleton pattern implementation."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super(GrpcClientManager, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        """Initialize the client manager."""
        if not hasattr(self, '_initialized'):
            self._clients: Dict[str, GrpcClientBase] = {}
            self._clients_lock = threading.RLock()
            self._server_host = None
            self._server_port = 50051
            self._initialized = True

    def configure(self, server_host: str, server_port: int = 50051):
        """
        Configure the manager with server connection details.

        Args:
            server_host (str): The gRPC server host
            server_port (int): The gRPC server port
        """
        with self._clients_lock:
            self._server_host = server_host
            self._server_port = server_port
            logger.info(f"🔧 [GrpcClientManager] Configured for server: {server_host}:{server_port}")

    def get_client(self, client_class: Type[GrpcClientBase], client_key: Optional[str] = None) -> GrpcClientBase:
        """
        Get a shared client instance, creating it if it doesn't exist.

        Args:
            client_class: The client class to instantiate
            client_key: Optional unique key for the client (defaults to class name)

        Returns:
            GrpcClientBase: The shared client instance
        """
        if not self._server_host:
            raise ValueError("GrpcClientManager not configured. Call configure() first.")

        key = client_key or client_class.__name__

        with self._clients_lock:
            if key not in self._clients:
                logger.info(f"🚀 [GrpcClientManager] Creating new shared client: {key}")
                client = client_class(self._server_host, self._server_port)
                self._clients[key] = client
            else:
                logger.debug(f"♻️ [GrpcClientManager] Reusing existing client: {key}")

            return self._clients[key]

    def close_client(self, client_key: str):
        """
        Close and remove a specific client.

        Args:
            client_key (str): The key of the client to close
        """
        with self._clients_lock:
            if client_key in self._clients:
                try:
                    self._clients[client_key].close()
                    logger.info(f"🔌 [GrpcClientManager] Closed client: {client_key}")
                except Exception as e:
                    logger.warning(f"⚠️ [GrpcClientManager] Error closing client {client_key}: {e}")
                finally:
                    del self._clients[client_key]

    def close_all_clients(self):
        """Close all managed clients."""
        with self._clients_lock:
            for key in list(self._clients.keys()):
                self.close_client(key)
            logger.info("🔌 [GrpcClientManager] All clients closed")

    def get_active_clients(self) -> Dict[str, str]:
        """
        Get information about active clients.

        Returns:
            Dict[str, str]: Dictionary mapping client keys to their class names
        """
        with self._clients_lock:
            return {key: client.__class__.__name__ for key, client in self._clients.items()}

    def reconnect_all_clients(self):
        """Reconnect all managed clients (useful after network issues)."""
        with self._clients_lock:
            reconnected = 0
            for key, client in self._clients.items():
                try:
                    if hasattr(client, 'connect') and hasattr(client, 'stub'):
                        # Get the stub class from the existing client
                        stub_class = type(client.stub)
                        client.connect(stub_class)
                        reconnected += 1
                        logger.info(f"🔄 [GrpcClientManager] Reconnected client: {key}")
                except Exception as e:
                    logger.warning(f"⚠️ [GrpcClientManager] Failed to reconnect client {key}: {e}")

            logger.info(f"🔄 [GrpcClientManager] Reconnected {reconnected}/{len(self._clients)} clients")

    @classmethod
    def get_instance(cls) -> 'GrpcClientManager':
        """Get the singleton instance."""
        return cls()

    @classmethod
    def get_shared_client(cls, client_class: Type[GrpcClientBase], client_key: Optional[str] = None) -> GrpcClientBase:
        """
        Convenience method to get a shared client without explicitly getting the manager instance.

        Args:
            client_class: The client class to instantiate
            client_key: Optional unique key for the client

        Returns:
            GrpcClientBase: The shared client instance
        """
        return cls.get_instance().get_client(client_class, client_key)

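A short sketch of how the singleton manager is meant to be driven (assumes the package is installed and a gRPC server is reachable; the host name below is a placeholder):

from nedo_vision_worker.services.GrpcClientManager import GrpcClientManager
from nedo_vision_worker.services.ImageUploadClient import ImageUploadClient

# Configure once, e.g. at application start-up.
GrpcClientManager.get_instance().configure("grpc.example.local", 50051)

# Workers then request shared clients by class; repeated calls return the same instance.
client_a = GrpcClientManager.get_shared_client(ImageUploadClient)
client_b = GrpcClientManager.get_shared_client(ImageUploadClient)
assert client_a is client_b

# Shut everything down when the process exits.
GrpcClientManager.get_instance().close_all_clients()
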
nedo_vision_worker/services/ImageUploadClient.py
@@ -0,0 +1,82 @@
from .GrpcClientBase import GrpcClientBase
from ..protos.VisionWorkerService_pb2_grpc import ImageServiceStub
from ..protos.VisionWorkerService_pb2 import LastImageDateRequest, UploadImageRequest


class ImageUploadClient(GrpcClientBase):
    def __init__(self, server_host: str, server_port: int = 50051):
        """
        Initialize the image upload client.

        Args:
            server_host (str): The server hostname or IP address.
            server_port (int): The server port. Default is 50051.
        """
        super().__init__(server_host, server_port)

    def get_last_uploaded_date(self, device_id: str):
        """
        Retrieve the last uploaded image date from the server.

        Args:
            device_id (str): The unique device ID.

        Returns:
            dict: A dictionary containing the last upload date and additional information.
        """
        self.connect(ImageServiceStub)  # Ensure connection and stub are established

        try:
            # Prepare the request with the device_id
            request = LastImageDateRequest(device_id=device_id)
            response = self.handle_rpc(self.stub.GetLastImageDate, request)

            if response and response.success:
                return {
                    "success": True,
                    "last_uploaded_date": response.last_uploaded_date,
                    "message": response.message,
                }
            return {
                "success": False,
                "message": response.message if response else "Unknown error",
            }
        except Exception as e:
            return {"success": False, "message": str(e)}

    def upload_image(self, device_id: str, metadata: str, image_path: str):
        """
        Upload an image to the server.

        Args:
            device_id (str): The unique device ID.
            metadata (str): Metadata as a JSON string.
            image_path (str): Path to the image file.

        Returns:
            dict: A dictionary containing the result of the upload operation.
        """
        self.connect(ImageServiceStub)  # Ensure connection and stub are established

        try:
            # Read the image file
            with open(image_path, "rb") as file:
                image_data = file.read()

            # Create the request
            request = UploadImageRequest(
                device_id=device_id,
                metadata=metadata,
                image_data=image_data,
            )

            # Call the RPC
            response = self.handle_rpc(self.stub.UploadImage, request)

            if response and response.success:
                return {"success": True, "message": response.message}
            return {"success": False, "message": response.message if response else "Unknown error"}
        except FileNotFoundError:
            return {"success": False, "message": f"File not found: {image_path}"}
        except Exception as e:
            return {"success": False, "message": str(e)}

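A usage sketch for the upload flow; the device ID, metadata payload, host, and file path are placeholders:

import json
from nedo_vision_worker.services.ImageUploadClient import ImageUploadClient

client = ImageUploadClient("grpc.example.local", 50051)

last = client.get_last_uploaded_date("device-001")
if last["success"]:
    print("Last upload:", last["last_uploaded_date"])

result = client.upload_image(
    device_id="device-001",
    metadata=json.dumps({"source": "demo"}),   # metadata travels as a JSON string
    image_path="/tmp/frame.jpg",
)
print(result["message"])
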
nedo_vision_worker/services/PPEDetectionClient.py
@@ -0,0 +1,108 @@
import logging
from .GrpcClientBase import GrpcClientBase
from ..protos.PPEDetectionService_pb2_grpc import PPEDetectionGRPCServiceStub
from ..protos.PPEDetectionService_pb2 import UpsertPPEDetectionBatchRequest, UpsertPPEDetectionRequest, PPEDetectionLabelRequest
from ..repositories.PPEDetectionRepository import PPEDetectionRepository
import json
import os

logger = logging.getLogger(__name__)

class PPEDetectionClient(GrpcClientBase):
    def __init__(self, server_host: str, server_port: int = 50051):
        """
        Initialize the PPE Detection Batch Client.

        Args:
            server_host (str): The server hostname or IP address.
            server_port (int): The server port. Default is 50051.
        """
        super().__init__(server_host, server_port)

        try:
            self.connect(PPEDetectionGRPCServiceStub)
        except Exception as e:
            logger.error(f"Failed to connect to gRPC server: {e}")
            self.stub = None

        self.repository = PPEDetectionRepository()

    @staticmethod
    def read_image_as_binary(image_path: str) -> bytes:
        """
        Reads an image file and returns its binary content.

        Args:
            image_path (str): Path to the image file.

        Returns:
            bytes: Binary content of the image.
        """
        with open(image_path, 'rb') as image_file:
            return image_file.read()

    def send_upsert_batch(self, worker_id: str, worker_source_id: str, detection_data: list, token: str) -> dict:
        """
        Sends a batch of PPE detection requests to the server using token authentication.

        Args:
            worker_id (str): The worker ID for the detection.
            worker_source_id (str): The worker source ID for the detection.
            detection_data (list): A list of dictionaries containing PPE detection data.
            token (str): Authentication token for the worker.

        Returns:
            dict: A dictionary containing the result of sending the batch request.
        """
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            # Prepare the list of UpsertPPEDetectionRequest messages
            ppe_detection_requests = []
            for data in detection_data:
                image_binary = self.read_image_as_binary(data['image'])
                image_tile_binary = self.read_image_as_binary(data['image_tile'])

                ppe_detection_labels = [
                    PPEDetectionLabelRequest(
                        code=label['code'],
                        confidence_score=label['confidence_score'],
                        b_box_x1=label['b_box_x1'],
                        b_box_y1=label['b_box_y1'],
                        b_box_x2=label['b_box_x2'],
                        b_box_y2=label['b_box_y2'],
                    )
                    for label in data['ppe_detection_labels']
                ]
                source = data['worker_source_id']
                request = UpsertPPEDetectionRequest(
                    person_id=data['person_id'],
                    worker_id=worker_id,
                    worker_source_id=source,
                    image=image_binary,
                    image_tile=image_tile_binary,
                    worker_timestamp=data['worker_timestamp'],
                    ppe_detection_labels=ppe_detection_labels,
                    token=token
                )
                ppe_detection_requests.append(request)

            # Create the UpsertPPEDetectionBatchRequest
            batch_request = UpsertPPEDetectionBatchRequest(
                ppe_detection_requests=ppe_detection_requests,
                token=token
            )

            # Call the UpsertBatch RPC
            response = self.handle_rpc(self.stub.UpsertBatch, batch_request)

            if response and response.success:
                self.repository.delete_records_from_db(detection_data)
                return {"success": True, "message": response.message}

            return {"success": False, "message": response.message if response else "Unknown error"}

        except Exception as e:
            logger.error(f"Error sending batch PPE detection: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}

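For reference, the detection_data items consumed by send_upsert_batch are plain dictionaries whose keys match the fields read in the loop above. A minimal illustrative payload, with every value a placeholder (label code, timestamp format, and paths are not specified by the package):

detection_data = [
    {
        "person_id": "person-123",
        "worker_source_id": "source-456",
        "image": "/data/frames/full.jpg",        # path read via read_image_as_binary
        "image_tile": "/data/frames/tile.jpg",   # cropped tile image, also read from disk
        "worker_timestamp": "2024-01-01T12:00:00Z",
        "ppe_detection_labels": [
            {
                "code": "HELMET",
                "confidence_score": 0.92,
                "b_box_x1": 10, "b_box_y1": 20, "b_box_x2": 110, "b_box_y2": 220,
            }
        ],
    }
]
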
nedo_vision_worker/services/RTSPtoRTMPStreamer.py
@@ -0,0 +1,98 @@
import subprocess
import logging
import time
import os
from urllib.parse import urlparse

class RTSPtoRTMPStreamer:
    def __init__(self, rtsp_url, rtmp_url, stream_key, fps=30, resolution="1280x720", duration=120):
        """
        Initialize the streamer.

        Args:
            rtsp_url (str): The RTSP stream URL (e.g., from an IP camera).
            rtmp_url (str): The RTMP server URL (without stream key).
            stream_key (str): The unique stream key for RTMP.
            fps (int): Frames per second for output stream.
            resolution (str): Resolution of the output stream.
            duration (int): Duration in seconds to stream.
        """
        self.rtsp_url = rtsp_url
        self.rtmp_url = f"{rtmp_url}/{stream_key}"
        self.fps = fps
        self.resolution = resolution
        self.duration = duration
        self.stream_key = stream_key
        self.process = None

    def _detect_stream_type(self, url):
        """Detect the type of input stream."""
        parsed_url = urlparse(url)
        return "rtsp" if parsed_url.scheme == "rtsp" else "unknown"

    def start_stream(self):
        """Start streaming RTSP to RTMP using FFmpeg without logs."""
        if self._detect_stream_type(self.rtsp_url) == "unknown":
            logging.error(f"❌ [APP] Invalid RTSP URL: {self.rtsp_url}")
            return

        logging.info(f"📡 [APP] Starting RTSP to RTMP stream: {self.rtsp_url} → {self.rtmp_url} for {self.duration} seconds")

        # FFmpeg command
        ffmpeg_command = [
            "ffmpeg",
            "-rtsp_transport", "tcp",    # 🚀 Use TCP for the RTSP transport
            "-fflags", "nobuffer",       # 🚀 Reduce internal buffering
            "-flags", "low_delay",       # 🚀 Enable low-latency mode
            "-strict", "experimental",
            "-i", self.rtsp_url,

            # ✅ Video encoding (fastest possible)
            "-c:v", "libx264",
            "-preset", "ultrafast",      # 🚀 Reduce CPU usage
            "-tune", "zerolatency",      # 🚀 Optimize for real-time streaming
            "-x264-params", "keyint=40:min-keyint=40",  # 🚀 Keyframe optimization
            "-r", "25",                  # ⏳ Limit output to 25 FPS (prevents excessive encoding load)
            "-b:v", "1500k",             # ✅ Lower bitrate to improve performance
            "-maxrate", "2000k",         # ✅ Set max bitrate
            "-bufsize", "4000k",         # ✅ Reduce buffer latency
            "-g", "25",                  # ✅ Reduce GOP size for faster keyframes
            "-vf", "scale='min(1024,iw)':-2",  # ✅ Resize width to at most 1024px

            # ❌ Disable audio (avoid unnecessary encoding overhead)
            "-an",

            # ✅ Output RTMP stream
            "-f", "flv",
            self.rtmp_url
        ]

        try:
            with open(os.devnull, "w") as devnull:
                self.process = subprocess.Popen(
                    ffmpeg_command,
                    stdout=devnull,  # Redirect stdout to null
                    stderr=devnull,  # Redirect stderr to null
                    text=True
                )

            logging.info("✅ [APP] FFmpeg process started successfully.")

            start_time = time.time()
            while self.process.poll() is None:
                if time.time() - start_time > self.duration:
                    logging.info(f"⏳ [APP] Streaming duration {self.duration}s reached. Stopping stream...")
                    self.stop_stream()
                    break
                time.sleep(1)

        except Exception as e:
            logging.error(f"🚨 [APP] Failed to start FFmpeg: {e}")
            self.stop_stream()

    def stop_stream(self):
        """Stop the streaming process."""
        if self.process:
            self.process.terminate()
            self.process.wait()
            logging.info("FFmpeg process terminated.")

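A usage sketch for the streamer (requires FFmpeg on PATH; the camera URL, RTMP endpoint, and stream key are placeholders):

from nedo_vision_worker.services.RTSPtoRTMPStreamer import RTSPtoRTMPStreamer

streamer = RTSPtoRTMPStreamer(
    rtsp_url="rtsp://camera.local:554/stream1",
    rtmp_url="rtmp://media.example.local/live",
    stream_key="site-a-cam-1",
    duration=60,   # stop after 60 seconds
)
streamer.start_stream()   # blocks until the duration elapses or FFmpeg exits
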
nedo_vision_worker/services/RestrictedAreaClient.py
@@ -0,0 +1,100 @@
import logging
from ..repositories.RestrictedAreaRepository import RestrictedAreaRepository
from .GrpcClientBase import GrpcClientBase
from ..protos.HumanDetectionService_pb2_grpc import HumanDetectionGRPCServiceStub
from ..protos.HumanDetectionService_pb2 import (
    UpsertHumanDetectionBatchRequest,
    UpsertHumanDetectionRequest,
)

logger = logging.getLogger(__name__)

class RestrictedAreaClient(GrpcClientBase):
    def __init__(self, server_host: str, server_port: int = 50051):
        """
        Initialize the Restricted Area Violation Client.

        Args:
            server_host (str): The server hostname or IP address.
            server_port (int): The server port. Default is 50051.
        """
        super().__init__(server_host, server_port)

        try:
            self.connect(HumanDetectionGRPCServiceStub)
        except Exception as e:
            logger.error(f"Failed to connect to gRPC server: {e}")
            self.stub = None

        self.repository = RestrictedAreaRepository()

    @staticmethod
    def read_image_as_binary(image_path: str) -> bytes:
        """
        Reads an image file and returns its binary content.

        Args:
            image_path (str): Path to the image file.

        Returns:
            bytes: Binary content of the image.
        """
        with open(image_path, 'rb') as image_file:
            return image_file.read()

    def send_upsert_batch(self, worker_id: str, worker_source_id: str, violation_data: list, token: str) -> dict:
        """
        Sends a batch of restricted area violation records to the server using token authentication.

        Args:
            worker_id (str): The worker ID for the violation.
            worker_source_id (str): The worker source ID for the violation.
            violation_data (list): A list of dictionaries representing the violations.
            token (str): Authentication token for the worker.

        Returns:
            dict: A dictionary containing the result of the batch request.
        """
        if not self.stub:
            return {"success": False, "message": "gRPC connection is not established."}

        try:
            human_detection_requests = []

            for data in violation_data:
                image_binary = self.read_image_as_binary(data['image'])
                image_tile_binary = self.read_image_as_binary(data['image_tile'])

                request = UpsertHumanDetectionRequest(
                    person_id=data['person_id'],
                    worker_id=worker_id,
                    worker_source_id=data['worker_source_id'],
                    image=image_binary,
                    image_tile=image_tile_binary,
                    worker_timestamp=data['worker_timestamp'],
                    confidence_score=data['confidence_score'],
                    b_box_x1=data['b_box_x1'],
                    b_box_y1=data['b_box_y1'],
                    b_box_x2=data['b_box_x2'],
                    b_box_y2=data['b_box_y2'],
                    token=token,
                )

                human_detection_requests.append(request)

            batch_request = UpsertHumanDetectionBatchRequest(
                human_detection_requests=human_detection_requests,
                token=token
            )

            response = self.handle_rpc(self.stub.UpsertBatch, batch_request)

            if response and response.success:
                self.repository.delete_records_from_db(violation_data)
                return {"success": True, "message": response.message}

            return {"success": False, "message": response.message if response else "Unknown error"}

        except Exception as e:
            logger.error(f"Error sending batch restricted area violation data: {e}")
            return {"success": False, "message": f"Error occurred: {e}"}