nedo-vision-worker-core 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nedo-vision-worker-core might be problematic.
- nedo_vision_worker_core/__init__.py +1 -1
- nedo_vision_worker_core/detection/RFDETRDetector.py +3 -0
- nedo_vision_worker_core/pipeline/ModelManager.py +139 -0
- nedo_vision_worker_core/pipeline/PipelineManager.py +16 -3
- nedo_vision_worker_core/pipeline/PipelineProcessor.py +39 -31
- nedo_vision_worker_core/pipeline/PipelineSyncThread.py +57 -108
- nedo_vision_worker_core/repositories/AIModelRepository.py +21 -1
- nedo_vision_worker_core/streams/RTMPStreamer.py +178 -233
- nedo_vision_worker_core/streams/SharedVideoDeviceManager.py +5 -1
- nedo_vision_worker_core/streams/StreamSyncThread.py +51 -24
- nedo_vision_worker_core/streams/VideoStream.py +201 -262
- nedo_vision_worker_core/streams/VideoStreamManager.py +90 -38
- nedo_vision_worker_core/util/PlatformDetector.py +100 -0
- {nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/METADATA +1 -1
- {nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/RECORD +18 -17
- nedo_vision_worker_core/detection/DetectionManager.py +0 -83
- {nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/WHEEL +0 -0
- {nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/entry_points.txt +0 -0
- {nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/top_level.txt +0 -0
nedo_vision_worker_core/streams/VideoStreamManager.py

@@ -1,7 +1,6 @@
 import logging
 import time
 import threading
-import cv2
 from typing import Any, Dict, Optional
 
 from .VideoStream import VideoStream
@@ -18,6 +17,11 @@ class VideoStreamManager:
         self.direct_device_streams: Dict[Any, Dict[str, Any]] = {}
         # Per-direct-device locks: {worker_source_id: threading.Lock}
         self.direct_device_locks: Dict[Any, threading.Lock] = {}
+
+        # Reference counting for lazy loading: {worker_source_id: set of pipeline_ids}
+        self.stream_references: Dict[Any, set] = {}
+        # Store URLs for streams that aren't started yet: {worker_source_id: url}
+        self.pending_streams: Dict[Any, str] = {}
 
         self.shared_device_manager = SharedVideoDeviceManager()
 
@@ -38,16 +42,71 @@ class VideoStreamManager:
     # -----------------------
     # Public API
     # -----------------------
-    def
-        """
+    def register_stream(self, worker_source_id, url):
+        """Register a stream URL without starting it (lazy loading)."""
         with self._lock:
+            if worker_source_id not in self.pending_streams:
+                self.pending_streams[worker_source_id] = url
+                logging.debug(f"📝 Registered stream {worker_source_id} for lazy loading")
+
+    def unregister_stream(self, worker_source_id):
+        """Unregister a stream that's no longer available in the database."""
+        with self._lock:
+            # If it's pending, remove it
+            if worker_source_id in self.pending_streams:
+                del self.pending_streams[worker_source_id]
+                logging.debug(f"🗑️ Unregistered pending stream {worker_source_id}")
+
+            # If it's active and has no references, remove it
+            if worker_source_id in self.stream_references:
+                if len(self.stream_references[worker_source_id]) == 0:
+                    self._stop_stream(worker_source_id)
+
+    def acquire_stream(self, worker_source_id, pipeline_id):
+        """Request access to a stream for a pipeline. Starts the stream if not already running."""
+        with self._lock:
+            # Initialize reference set if needed
+            if worker_source_id not in self.stream_references:
+                self.stream_references[worker_source_id] = set()
+
+            # Add pipeline reference
+            self.stream_references[worker_source_id].add(pipeline_id)
+            logging.info(f"🔗 Pipeline {pipeline_id} acquired stream {worker_source_id} (refs: {len(self.stream_references[worker_source_id])})")
+
+            # If stream is already running, we're done
             if worker_source_id in self.streams or worker_source_id in self.direct_device_streams:
-
+                return True
+
+            # Get URL from pending streams
+            url = self.pending_streams.get(worker_source_id)
+            if not url:
+                logging.error(f"❌ Cannot acquire stream {worker_source_id}: URL not registered")
+                return False
+
+        # Start the stream (outside lock to avoid blocking)
+        return self._start_stream(worker_source_id, url)
+
+    def release_stream(self, worker_source_id, pipeline_id):
+        """Release a stream reference from a pipeline. Stops the stream if no more references."""
+        with self._lock:
+            if worker_source_id not in self.stream_references:
                 return
-
+
+            # Remove pipeline reference
+            self.stream_references[worker_source_id].discard(pipeline_id)
+            ref_count = len(self.stream_references[worker_source_id])
+            logging.info(f"🔓 Pipeline {pipeline_id} released stream {worker_source_id} (refs: {ref_count})")
+
+            # If no more references, stop the stream
+            if ref_count == 0:
+                logging.info(f"💤 Stream {worker_source_id} has no more references, stopping...")
+                self._stop_stream(worker_source_id)
+
+    def _start_stream(self, worker_source_id, url):
+        """Internal method to actually start a stream."""
         if self._is_direct_device(url):
             self._add_direct_device_stream(worker_source_id, url)
-            return
+            return True
 
         # Regular stream
         stream = VideoStream(url)
@@ -55,18 +114,19 @@ class VideoStreamManager:
             stream.start()  # start thread
             with self._lock:
                 self.streams[worker_source_id] = stream
-            logging.info("✅
+            logging.info("✅ Started video stream: %s", worker_source_id)
+            return True
         except Exception as e:
             logging.error("❌ Failed to start regular stream %s: %s", worker_source_id, e)
-
-
-
-
-            return
-
+            return False
+
+    def _stop_stream(self, worker_source_id):
+        """Internal method to stop a stream."""
         # Direct device?
         with self._lock:
             is_direct = worker_source_id in self.direct_device_streams
+            # Clean up references
+            self.stream_references.pop(worker_source_id, None)
 
         if is_direct:
             self._remove_direct_device_stream(worker_source_id)
@@ -77,20 +137,16 @@ class VideoStreamManager:
         stream = self.streams.pop(worker_source_id, None)
 
         if stream is None:
-            logging.warning("⚠️ Stream %s not found in manager.", worker_source_id)
             return
 
-        logging.info("🛑
+        logging.info("🛑 Stopping video stream: %s", worker_source_id)
         try:
-            # Expectation: VideoStream.stop() should signal and join internally.
             stream.stop()
         except Exception as e:
            logging.error("❌ Error stopping stream %s: %s", worker_source_id, e)
         finally:
             stream = None
 
-        logging.info("✅ Stream %s removed successfully.", worker_source_id)
-
     def start_all(self):
         """Starts all regular streams that are not alive. (Direct devices are publisher-driven.)"""
         logging.info("🔄 Starting all video streams...")
@@ -114,51 +170,47 @@ class VideoStreamManager:
 
         for wid in regular_ids:
             try:
-                self.
+                self._stop_stream(wid)
             except Exception as e:
                 logging.error("Error stopping regular stream %s: %s", wid, e)
 
         for wid in direct_ids:
             try:
-                self.
+                self._stop_stream(wid)
             except Exception as e:
                 logging.error("Error stopping direct device stream %s: %s", wid, e)
 
         self._running_evt.clear()
 
+    def wait_for_stream_ready(self, worker_source_id, timeout: float = 10.0) -> bool:
+        """Waits for a specific stream to signal that it has received its first frame."""
+        with self._lock:
+            stream = self.streams.get(worker_source_id)
+
+        if stream and hasattr(stream, 'wait_first_frame'):
+            return stream.wait_first_frame(timeout)
+
+        return False
+
     def get_frame(self, worker_source_id):
-        """Returns the latest frame for the stream, or None if not available.
-        Non-blocking. No sleeps. Short lock scopes.
-        """
-        # Direct device path
+        """Returns the latest frame for the stream, or None if not available."""
         with self._lock:
             if worker_source_id in self.direct_device_streams:
-
-                pass
+                stream = None
             else:
-                # Regular stream path
                 stream = self.streams.get(worker_source_id)
 
-        # Direct device?
         if worker_source_id in self.direct_device_streams:
             return self._get_direct_device_frame(worker_source_id)
 
-
-        if stream is None or not getattr(stream, "running", False):
+        if stream is None or not hasattr(stream, 'running') or not stream.running:
            return None
 
         try:
-
-            start_time = getattr(stream, "start_time", None)
-            if start_time is not None and (time.time() - start_time) < 5.0:
-                return None
-
-            # If it's a file and ended, do not sleep here; let the producer handle restarts.
-            if getattr(stream, "is_file", False) and stream.is_video_ended():
+            if hasattr(stream, 'is_file') and stream.is_file and hasattr(stream, 'is_video_ended') and stream.is_video_ended():
                 logging.debug("Video file %s ended; waiting for producer to restart.", worker_source_id)
                 return None
 
-            # Must return a copy (VideoStream.get_frame() expected to handle copying)
             return stream.get_frame()
         except Exception as e:
             logging.error("Error getting frame from stream %s: %s", worker_source_id, e)
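The net effect of this change is that VideoStream objects are no longer opened eagerly when a source appears in the database: a sync-thread-style caller registers the URL, and the first pipeline that acquires the source actually starts the capture, with reference counting deciding when to stop it. A minimal usage sketch of the new public API, based only on the method signatures visible in the diff above; the no-argument constructor, the IDs, and the URL are illustrative assumptions, not taken from the package:

from nedo_vision_worker_core.streams.VideoStreamManager import VideoStreamManager

manager = VideoStreamManager()  # assumed no-arg constructor; not shown in the diff

# Sync thread records the source without opening it (lazy loading).
manager.register_stream(worker_source_id=7, url="rtsp://camera.example/stream")

# The first pipeline reference actually starts the VideoStream.
if manager.acquire_stream(worker_source_id=7, pipeline_id="ppe-pipeline"):
    if manager.wait_for_stream_ready(7, timeout=10.0):
        frame = manager.get_frame(7)  # latest frame, or None if not ready

# Dropping the last reference stops the underlying stream.
manager.release_stream(worker_source_id=7, pipeline_id="ppe-pipeline")

# A source removed from the database is unregistered; an active stream is
# only torn down once it has no remaining pipeline references.
manager.unregister_stream(7)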
nedo_vision_worker_core/util/PlatformDetector.py

@@ -0,0 +1,100 @@
+import os
+import platform
+import sys
+import torch
+
+
+class PlatformDetector:
+    """
+    Detects platform and multimedia stack capabilities.
+    """
+
+    @staticmethod
+    def is_linux() -> bool:
+        return sys.platform.startswith("linux")
+
+    @staticmethod
+    def is_windows() -> bool:
+        return sys.platform.startswith("win")
+
+    @staticmethod
+    def is_macos() -> bool:
+        return sys.platform == "darwin"
+
+    @staticmethod
+    def is_jetson() -> bool:
+        """
+        Determines if the platform is an NVIDIA Jetson device.
+        """
+        try:
+            # Device-tree model (most reliable)
+            if os.path.exists("/proc/device-tree/model"):
+                with open("/proc/device-tree/model", "r") as f:
+                    model = f.read().strip()
+                    if "NVIDIA Jetson" in model:
+                        return True
+
+            # Jetson-specific libs/paths
+            jetson_libraries = ["/usr/lib/aarch64-linux-gnu/tegra", "/etc/nv_tegra_release", "/etc/tegra-release"]
+            if any(os.path.exists(p) for p in jetson_libraries):
+                return True
+
+            # Arch alone is not definitive, but is a signal
+            if platform.machine() == "aarch64" and os.path.exists("/dev/nvhost-ctrl"):
+                return True
+
+        except Exception:
+            pass
+
+        return False
+
+    @staticmethod
+    def get_platform_type() -> str:
+        """
+        Returns a coarse platform type: 'jetson' | 'mac' | 'windows' | 'linux'
+        """
+        if PlatformDetector.is_jetson():
+            return "jetson"
+        if PlatformDetector.is_macos():
+            return "mac"
+        if PlatformDetector.is_windows():
+            return "windows"
+        return "linux"
+
+    @staticmethod
+    def has_gstreamer() -> bool:
+        """
+        Check if OpenCV was built with GStreamer support.
+        """
+        try:
+            import cv2
+            info = cv2.getBuildInformation()
+            return "GStreamer: YES" in info
+        except Exception:
+            return False
+
+    @staticmethod
+    def has_nvidia_gpu() -> bool:
+        """
+        Heuristic for NVIDIA dGPU presence (desktop/server).
+        """
+        if PlatformDetector.is_windows():
+            return bool(os.environ.get("NVIDIA_VISIBLE_DEVICES", ""))  # WSL/Docker hint
+        if PlatformDetector.is_linux():
+            if os.path.exists("/proc/driver/nvidia/version"):
+                return True
+            if os.environ.get("NVIDIA_VISIBLE_DEVICES", "") not in ("", "none"):
+                return True
+        return False
+
+    @staticmethod
+    def get_device() -> str:
+        """
+        Check for GPU availability and return the appropriate device.
+        """
+        if torch.cuda.is_available():
+            return "cuda"
+        # Add checks for other devices like MPS if needed
+        # elif torch.backends.mps.is_available():
+        #     return "mps"
+        return "cpu"
{nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nedo-vision-worker-core
-Version: 0.3.3
+Version: 0.3.5
 Summary: Nedo Vision Worker Core Library for AI Vision Processing
 Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
 Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
{nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-nedo_vision_worker_core/__init__.py,sha256=
+nedo_vision_worker_core/__init__.py,sha256=GmU149mopLuJKQ3rEY_-OetVB1aD4BIryIFVa7thDAw,1924
 nedo_vision_worker_core/cli.py,sha256=8YuKWsIgICUYXE_QtwyU3WzGhVjTWiAo5uzpFOmjNc8,5766
 nedo_vision_worker_core/core_service.py,sha256=dnHNjbslOeyeWqHDFnk_yKdfTICYzLyRIcuZNwF0Zf4,11323
 nedo_vision_worker_core/doctor.py,sha256=K_-hVV2-mdEefZ4Cfu5hMCiOxBiI1aXY8VtkkpK80Lc,10651
@@ -14,8 +14,7 @@ nedo_vision_worker_core/config/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrF
 nedo_vision_worker_core/database/DatabaseManager.py,sha256=o2e9jcqBbtf0Zd9TYuyQhXyKj9rjnNYbSShr7CxuAPk,9341
 nedo_vision_worker_core/database/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/detection/BaseDetector.py,sha256=bReQCTy4tEA1itvw3kkjBMQnx3Jn4MjnDkzdtPwmNPQ,757
-nedo_vision_worker_core/detection/
-nedo_vision_worker_core/detection/RFDETRDetector.py,sha256=FYZX0wh_y9R3wRYz5vratHcx-pCDnZUkeL2YI__UCPI,2908
+nedo_vision_worker_core/detection/RFDETRDetector.py,sha256=3T3zTFZW0pBv9E-pSpY4JP7wI0LOTM4hxzanvrEXMcE,3093
 nedo_vision_worker_core/detection/YOLODetector.py,sha256=oMCW4KanQCDbtz-ttMiCY5lP1rIgVH_LpfvZvi270j0,2290
 nedo_vision_worker_core/detection/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py,sha256=wqf4hliR_CPkeoeRbBB3PEpQsmasC4mASJ4WyYjNyPE,948
@@ -58,17 +57,18 @@ nedo_vision_worker_core/models/worker_source_pipeline.py,sha256=xCD4i9pHr8Qy5B_h
 nedo_vision_worker_core/models/worker_source_pipeline_config.py,sha256=dGYTpcTFFu6pmGBufuWBHjv3Xs4RGAQwZn6jp6Ondvs,876
 nedo_vision_worker_core/models/worker_source_pipeline_debug.py,sha256=6S7TkN37FrAT4VwsEB38DWSad7QfvNhsOGtSEK8D1Qs,594
 nedo_vision_worker_core/models/worker_source_pipeline_detection.py,sha256=p6CJsiVCKprTYrNxJsiTB8njXdHkjZKVEyBceRVE6fY,560
+nedo_vision_worker_core/pipeline/ModelManager.py,sha256=K7lmVOo-KL7bnWtyafilZs23bzd6loCgfUz7xuAmlVw,6195
 nedo_vision_worker_core/pipeline/PipelineConfigManager.py,sha256=X55i9GyXcW9ylO6cj2UMAZFSxxPViacL4H4DZl60CAY,1157
-nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=
+nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=GGW3fBmDQwNcPJn5yU_BNz7hsR_H7rKoGMImCtC4T9s,6154
 nedo_vision_worker_core/pipeline/PipelinePrepocessor.py,sha256=cCiVSHHqsKCtKYURdYoEjHJX2GnT6zd8kQ6ZukjQ3V0,1271
-nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=
-nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=
+nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=4pAcyVpiPM8KClm6hjENQgjDbPH9_na93-ehBDGw_5U,26446
+nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=CAn-4F95J_VAv_7_ClrT-z9226U8ATrOGg5jOYqNuyM,8601
 nedo_vision_worker_core/pipeline/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/preprocessing/ImageResizer.py,sha256=RvOazxe6dJQuiy0ZH4lIGbdFfiu0FLUVCHoMvxkDNT4,1324
 nedo_vision_worker_core/preprocessing/ImageRoi.py,sha256=iO7oQ-SdUSA_kTIVBuq_mdycXsiJNfiFD3J7-VTxiQ4,2141
 nedo_vision_worker_core/preprocessing/Preprocessor.py,sha256=uYIh0Ld4T1zEEHtKVLbUVBcF0kUwj5zCfPXn__bKwwU,477
 nedo_vision_worker_core/preprocessing/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
-nedo_vision_worker_core/repositories/AIModelRepository.py,sha256=
+nedo_vision_worker_core/repositories/AIModelRepository.py,sha256=SzBLwUrjDnc4QayZOA2zL0Jl8YQ-x6S1rde_zCBu6LQ,1591
 nedo_vision_worker_core/repositories/PPEDetectionRepository.py,sha256=C_0QL2sHiSlM9rPmhLmfs6hdZk9FDazy-aVLcznN5w0,6623
 nedo_vision_worker_core/repositories/RestrictedAreaRepository.py,sha256=umZ7IrgoEFqAa9ZZlH7KPUIbmp5yhBc0FSDUOY6UFag,4283
 nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py,sha256=_z-UyaYtg1Q5nqSbs_16ngYesyM3aji4VrP1ZBHm6Jk,2987
@@ -79,11 +79,11 @@ nedo_vision_worker_core/repositories/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8
 nedo_vision_worker_core/services/SharedVideoStreamServer.py,sha256=rhCineMKPG3GQbrMHlSHP4xhXaGZ6Rn1oqIajW5xpaY,9827
 nedo_vision_worker_core/services/VideoSharingDaemon.py,sha256=iY6afEKTOsphfHvmZTL0grezka2DS9DDq-1EIpVMy0Y,28524
 nedo_vision_worker_core/services/VideoSharingDaemonManager.py,sha256=sc8VZo5iwoOdR8uTiel5BKz6-eZ7wwLy3IwV_3tsAu0,10340
-nedo_vision_worker_core/streams/RTMPStreamer.py,sha256=
-nedo_vision_worker_core/streams/SharedVideoDeviceManager.py,sha256=
-nedo_vision_worker_core/streams/StreamSyncThread.py,sha256=
-nedo_vision_worker_core/streams/VideoStream.py,sha256=
-nedo_vision_worker_core/streams/VideoStreamManager.py,sha256=
+nedo_vision_worker_core/streams/RTMPStreamer.py,sha256=Dblfutc1UVHj159KUHFYZ8xFEVhHVknZn_nAqKR6uCs,8695
+nedo_vision_worker_core/streams/SharedVideoDeviceManager.py,sha256=vSslwxbhKH6FPndR1HcSFIVWtF-iiOQMlSa4VvFa6M4,16265
+nedo_vision_worker_core/streams/StreamSyncThread.py,sha256=ETT0N_P90ksn6Q5pb7NvMadqCuoicz_g52lcDkHIp88,5382
+nedo_vision_worker_core/streams/VideoStream.py,sha256=nGtJ4FAZ1Ek-8hVRopEt0bLWLpa10OtyUwdDEuXLObQ,13343
+nedo_vision_worker_core/streams/VideoStreamManager.py,sha256=g5cz-YXPewSubBXxCg4mfzsuGKoOHXu-SrMxaGjYPHw,14956
 nedo_vision_worker_core/streams/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/tracker/SFSORT.py,sha256=0kggw0l4yPZ55AKHdqVX6mu9ehHmJed7jcJ3JQoC4sk,14061
 nedo_vision_worker_core/tracker/TrackerManager.py,sha256=xtDMI657W2s7HM2lMGtwU0x5Hq74BZpLHd-5xk-278I,6152
@@ -93,10 +93,11 @@ nedo_vision_worker_core/util/DrawingUtils.py,sha256=sLptmzVaJakP_ZgbZsLL03RMH_9N
 nedo_vision_worker_core/util/ModelReadinessChecker.py,sha256=ywHvt_d7UlY3DyFEJrO4Iyl0zx3SaLKb-Qab5l5Q8n4,6548
 nedo_vision_worker_core/util/PersonAttributeMatcher.py,sha256=PhYTPYSF62Nfuc7dage03K6icw_bBBdpvXvnlzCbS30,2773
 nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py,sha256=iuzCU32BQKaZ3dIy0QHNg2yoWJA-XhTRwwYqCvFdDgg,1711
+nedo_vision_worker_core/util/PlatformDetector.py,sha256=GGL8UfeMQITR22EMYIRWnuOEnSqo7Dr5mb0PaFrl8AM,3006
 nedo_vision_worker_core/util/TablePrinter.py,sha256=wzLGgb1GFMeIbAP6HmKcZD33j4D-IlyqlyeR7C5yD7w,1137
 nedo_vision_worker_core/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
-nedo_vision_worker_core-0.3.
-nedo_vision_worker_core-0.3.
-nedo_vision_worker_core-0.3.
-nedo_vision_worker_core-0.3.
-nedo_vision_worker_core-0.3.
+nedo_vision_worker_core-0.3.5.dist-info/METADATA,sha256=IkDINswFijRo2j9RTaziXJdaivHiTXDe5M6X5fBAh3o,14370
+nedo_vision_worker_core-0.3.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nedo_vision_worker_core-0.3.5.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
+nedo_vision_worker_core-0.3.5.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
+nedo_vision_worker_core-0.3.5.dist-info/RECORD,,
nedo_vision_worker_core/detection/DetectionManager.py

@@ -1,83 +0,0 @@
-import logging
-try:
-    import torch
-    TORCH_AVAILABLE = True
-except ImportError:
-    TORCH_AVAILABLE = False
-from .BaseDetector import BaseDetector
-from .RFDETRDetector import RFDETRDetector
-from .YOLODetector import YOLODetector
-
-
-class DetectionManager:
-    def __init__(self, model=None):
-        self.detector: BaseDetector | None = None
-        self.model_metadata = None
-
-        if model:
-            self.load_model(model)
-
-    def load_model(self, model):
-        """
-        Loads a new model at runtime and replaces current detector if successful.
-        Checks download status before attempting to load the model.
-        """
-        if not model:
-            if self.detector:
-                logging.info("🧹 Model unloaded")
-            self.detector = None
-            self.model_metadata = None
-            return
-
-        # Check download status before loading
-        if not model.is_ready_for_use():
-            if model.is_downloading():
-                logging.warning(f"⏳ Model {model.name} is still downloading. Skipping load.")
-                self.detector = None
-                self.model_metadata = None
-                return
-            elif model.has_download_failed():
-                logging.error(f"❌ Model {model.name} download failed: {model.download_error}")
-                self.detector = None
-                self.model_metadata = None
-                return
-            else:
-                logging.warning(f"⚠️ Model {model.name} is not ready for use (status: {model.download_status})")
-                self.detector = None
-                self.model_metadata = None
-                return
-
-        detector_type = model.type.lower()
-
-        try:
-            if detector_type == "yolo":
-                detector = YOLODetector(model)
-            elif detector_type == "rf_detr":
-                detector = RFDETRDetector(model)
-            else:
-                raise ValueError(f"Unsupported model type: {detector_type}")
-
-            if detector.model is not None:
-                self.detector = detector
-                self.model_metadata = model
-                # Log device info
-                if TORCH_AVAILABLE:
-                    device = "GPU" if torch.cuda.is_available() else "CPU"
-                else:
-                    device = "CPU (torch not installed)"
-                logging.info(f"🚀 Model {model.name} loaded on {device}")
-                logging.info(f"📥 Model {model.name} with {detector_type} detector loaded")
-            else:
-                logging.error(f"❌ Error loading model: {model.name} with {detector_type} detector")
-                self.detector = None
-                self.model_metadata = None
-
-        except Exception as e:
-            logging.error(f"❌ Error loading model: {model.name} with {detector_type} detector: {e}")
-            self.detector = None
-            self.model_metadata = None
-
-    def detect_objects(self, frame, confidence_threshold=0.7, class_thresholds=None):
-        if not self.detector:
-            return []
-        return self.detector.detect_objects(frame, confidence_threshold, class_thresholds)
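The deleted DetectionManager guarded every load behind the model's download status; the file list shows pipeline/ModelManager.py arriving in 0.3.5, which presumably takes over model lifecycle handling. As a self-contained illustration of the guard pattern that was removed (the names below are stand-ins, not the package's API):

import logging
from dataclasses import dataclass

@dataclass
class ModelRecord:  # stand-in for the package's AI model entity
    name: str
    download_status: str  # "completed" | "downloading" | "failed"
    download_error: str | None = None

    def is_ready_for_use(self): return self.download_status == "completed"
    def is_downloading(self): return self.download_status == "downloading"
    def has_download_failed(self): return self.download_status == "failed"

def can_load(model: ModelRecord) -> bool:
    """Mirror of the removed check: only fully downloaded models are loaded."""
    if model.is_ready_for_use():
        return True
    if model.is_downloading():
        logging.warning("⏳ Model %s is still downloading. Skipping load.", model.name)
    elif model.has_download_failed():
        logging.error("❌ Model %s download failed: %s", model.name, model.download_error)
    else:
        logging.warning("⚠️ Model %s is not ready (status: %s)", model.name, model.download_status)
    return False

print(can_load(ModelRecord("ppe-yolo", "downloading")))  # False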
{nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/WHEEL
RENAMED
File without changes

{nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/entry_points.txt
RENAMED
File without changes

{nedo_vision_worker_core-0.3.3.dist-info → nedo_vision_worker_core-0.3.5.dist-info}/top_level.txt
RENAMED
File without changes