nedo-vision-worker-core 0.3.7-py3-none-any.whl → 0.3.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nedo-vision-worker-core might be problematic.
- nedo_vision_worker_core/__init__.py +1 -1
- nedo_vision_worker_core/ai/VideoDebugger.py +60 -40
- nedo_vision_worker_core/core_service.py +8 -2
- nedo_vision_worker_core/detection/RFDETRDetector.py +37 -3
- nedo_vision_worker_core/pipeline/ModelManager.py +40 -5
- nedo_vision_worker_core/pipeline/PipelineManager.py +82 -75
- nedo_vision_worker_core/pipeline/PipelineProcessor.py +141 -29
- nedo_vision_worker_core/pipeline/PipelineSyncThread.py +1 -1
- {nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/METADATA +4 -4
- {nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/RECORD +13 -13
- {nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/WHEEL +0 -0
- {nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/entry_points.txt +0 -0
- {nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/top_level.txt +0 -0
nedo_vision_worker_core/__init__.py
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
 from .core_service import CoreService
 from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
 
-__version__ = "0.3.7"
+__version__ = "0.3.9"
 __all__ = [
     "CoreService",
     "DetectionType",
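A quick way to verify the bump after installing the new wheel, as a minimal sketch that assumes the package is importable in the active environment:

    # Minimal check: the package-level __version__ and the distribution metadata
    # should both report the new release.
    from importlib.metadata import version
    import nedo_vision_worker_core

    print(nedo_vision_worker_core.__version__)    # expected: "0.3.9"
    print(version("nedo-vision-worker-core"))     # same value from dist metadata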
nedo_vision_worker_core/ai/VideoDebugger.py
@@ -2,68 +2,88 @@ import cv2
 import threading
 import time
 from collections import defaultdict
+import logging
 
-# TODO: fix timer error (because of threading)
 class VideoDebugger:
-    """
+    """Real-time visualization of video streams with detections."""
 
     def __init__(self, enable_visualization=True):
-        """
-        Initializes the VideoDebugger with frame drawing and visualization capabilities.
-
-        Args:
-            enable_visualization (bool): Whether to display frames.
-        """
         self.enable_visualization = enable_visualization
-        self.windows = {}
-        self.lock = threading.Lock()
+        self.windows = {}
+        self.lock = threading.Lock()
         self.fps_tracker = defaultdict(lambda: {"start_time": time.time(), "frame_count": 0})
+        self._cv_lock = threading.Lock()  # Prevent OpenCV segfaults
 
     def show_frame(self, pipeline_id, worker_source_id, frame):
-        """
-        Displays a frame with FPS overlay.
-
-        Args:
-            pipeline_id (str/int): Identifier for the pipeline.
-            worker_source_id (str): Identifier for the worker/source.
-            frame: The frame to display.
-        """
+        """Display frame with FPS overlay."""
         if not self.enable_visualization or frame is None:
             return
 
         window_name = f"Pipeline {pipeline_id} - {worker_source_id}"
-        …
+
+        try:
+            with self.lock:
+                if window_name not in self.fps_tracker:
+                    self.fps_tracker[window_name] = {"start_time": time.time(), "frame_count": 0}
 
-        …
+                self.fps_tracker[window_name]["frame_count"] += 1
+                elapsed_time = time.time() - self.fps_tracker[window_name]["start_time"]
+                fps = self.fps_tracker[window_name]["frame_count"] / max(elapsed_time, 1e-5)
 
-        …
-        cv2.putText(frame, f"FPS: {fps:.2f}", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
+            cv2.putText(frame, f"FPS: {fps:.2f}", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
 
-        …
-        self.windows[window_name] = True  # Register window
+            if window_name not in self.windows:
+                self.windows[window_name] = True
 
-        cv2…
+            # Serialize cv2 calls to prevent segfaults
+            with self._cv_lock:
+                try:
+                    cv2.imshow(window_name, frame)
+                    key = cv2.waitKey(1) & 0xFF
+
+                    if key == ord('q'):
+                        self.close_window(window_name)
+                except Exception as e:
+                    logging.error(f"Error displaying frame for {window_name}: {e}")
+
+        except Exception as e:
+            logging.error(f"Error in show_frame for {window_name}: {e}")
 
     def close_window(self, window_name):
-        """
+        """Close specific window."""
         with self.lock:
             if window_name in self.windows:
-                …
+                with self._cv_lock:
+                    try:
+                        cv2.destroyWindow(window_name)
+                    except Exception as e:
+                        logging.error(f"Error closing window {window_name}: {e}")
                 del self.windows[window_name]
+
+    def is_window_open(self, pipeline_id):
+        """Check if a window is open for a given pipeline."""
+        with self.lock:
+            # Check if any window exists for this pipeline
+            for window_name in self.windows.keys():
+                if f"Pipeline {pipeline_id}" in window_name:
+                    return True
+            return False
 
     def close_all(self):
-        """
+        """Close all windows."""
+        with self.lock:
+            window_list = list(self.windows.keys())
+
+        with self._cv_lock:
+            try:
+                for window in window_list:
+                    try:
+                        cv2.destroyWindow(window)
+                    except Exception as e:
+                        logging.debug(f"Error destroying window {window}: {e}")
+                cv2.waitKey(1)
+            except Exception as e:
+                logging.error(f"Error in close_all: {e}")
+
         with self.lock:
-            for window in list(self.windows.keys()):
-                cv2.destroyWindow(window)
             self.windows.clear()
-            cv2.waitKey(1)
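The rewritten VideoDebugger guards every cv2.imshow/waitKey/destroyWindow call with a dedicated lock because OpenCV's HighGUI functions are not thread-safe when several pipeline threads render windows. A standalone sketch of that pattern (the helper below is illustrative, not the library's API):

    # Sketch: serialize HighGUI access behind one lock shared by all render threads.
    import threading
    import cv2

    _cv_lock = threading.Lock()

    def show(window_name, frame):
        with _cv_lock:                    # one thread at a time touches the GUI backend
            cv2.imshow(window_name, frame)
            return cv2.waitKey(1) & 0xFF  # pressed key, or 255 if none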
nedo_vision_worker_core/core_service.py
@@ -32,7 +32,8 @@ class CoreService:
                  log_level: str = "INFO",
                  storage_path: str = "data",
                  rtmp_server: str = "rtmp://live.vision.sindika.co.id:1935/live",
-                 enable_video_sharing_daemon: bool = True
+                 enable_video_sharing_daemon: bool = True,
+                 max_pipeline_workers: int = None):
         """
         Initialize the Core Service.
 

@@ -42,12 +43,14 @@
             storage_path: Storage path for databases and files (default: data)
             rtmp_server: RTMP server URL for video streaming (default: rtmp://localhost:1935/live)
             enable_video_sharing_daemon: Enable automatic video sharing daemon management (default: True)
+            max_pipeline_workers: Maximum concurrent pipeline workers (default: auto-detect based on CPU cores)
         """
         self.running = True
         self.video_manager = None
         self.stream_sync_thread = None
         self.pipeline_sync_thread = None
         self.enable_video_sharing_daemon = enable_video_sharing_daemon
+        self.max_pipeline_workers = max_pipeline_workers
 
         # Initialize callback manager if not already done
         if CoreService._callback_manager is None:

@@ -230,7 +233,10 @@
         self.stream_sync_thread.start()
 
         # Start pipeline synchronization thread (AI processing)
-        self.pipeline_sync_thread = PipelineSyncThread(
+        self.pipeline_sync_thread = PipelineSyncThread(
+            self.video_manager,
+            max_workers=self.max_pipeline_workers
+        )
         self.pipeline_sync_thread.start()
 
         logging.info("✅ Nedo Vision Core initialized and running.")
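The new max_pipeline_workers argument is stored on the service and handed to PipelineSyncThread when processing starts. A hedged usage sketch (only keyword arguments visible in this diff are confirmed; any required arguments outside the shown hunks are omitted here):

    # Usage sketch: cap the number of concurrent AI pipeline workers.
    from nedo_vision_worker_core import CoreService

    service = CoreService(
        log_level="INFO",
        storage_path="data",
        enable_video_sharing_daemon=True,
        max_pipeline_workers=6,   # None (default) auto-detects from CPU cores
    )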
nedo_vision_worker_core/detection/RFDETRDetector.py
@@ -1,11 +1,11 @@
 import cv2
 import logging
 try:
-    from rfdetr import RFDETRBase
+    from rfdetr import RFDETRNano, RFDETRSmall, RFDETRMedium, RFDETRBase, RFDETRLarge
     RFDETR_AVAILABLE = True
 except ImportError:
     RFDETR_AVAILABLE = False
-    RFDETRBase = None
+    RFDETRNano = RFDETRSmall = RFDETRMedium = RFDETRBase = RFDETRLarge = None
 
 from ..database.DatabaseManager import DatabaseManager
 from ..models.ai_model import AIModelEntity

@@ -32,6 +32,32 @@ class RFDETRDetector(BaseDetector):
         if model:
             self.load_model(model)
 
+    def _detect_model_variant(self, model_path: str):
+        """
+        Automatically detect the correct RF-DETR variant by trying to load the weights.
+        Returns the appropriate RF-DETR class or None if all attempts fail.
+        """
+        variants = [
+            ("Nano", RFDETRNano),
+            ("Small", RFDETRSmall),
+            ("Medium", RFDETRMedium),
+            ("Base", RFDETRBase),
+            ("Large", RFDETRLarge)
+        ]
+
+        for variant_name, variant_class in variants:
+            try:
+                logging.info(f"🔍 Trying RF-DETR {variant_name} variant...")
+                temp_model = variant_class(pretrain_weights=model_path)
+                logging.info(f"✅ Successfully loaded RF-DETR {variant_name} variant")
+                return temp_model, variant_name
+            except Exception as e:
+                # Only log at debug level to avoid cluttering logs
+                logging.debug(f"RF-DETR {variant_name} variant failed: {e}")
+                continue
+
+        return None, None
+
     def load_model(self, model: AIModelEntity):
         if not isinstance(model, AIModelEntity):
             raise TypeError("model must be an instance of AIModelEntity")

@@ -44,8 +70,16 @@
             return False
 
         try:
-            …
+            loaded_model, variant_name = self._detect_model_variant(path.as_posix())
+
+            if loaded_model is None:
+                logging.error(f"❌ Could not load model with any RF-DETR variant")
+                self.model = None
+                return False
+
+            self.model = loaded_model
             self.model.optimize_for_inference()
+            logging.info(f"✅ Loaded {model.name} using RF-DETR {variant_name}")
             return True
         except Exception as e:
             logging.error(f"❌ Error loading RFDETR model {model.name}: {e}")
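The loader no longer assumes RFDETRBase; it tries each size class against the checkpoint until one accepts it. A distilled standalone version of that fallback (the class names and the pretrain_weights keyword come from the diff; everything else is illustrative):

    # Try each RF-DETR variant until one can load the given weights file.
    import logging
    from rfdetr import RFDETRNano, RFDETRSmall, RFDETRMedium, RFDETRBase, RFDETRLarge

    def load_any_variant(weights_path):
        for name, cls in [("Nano", RFDETRNano), ("Small", RFDETRSmall),
                          ("Medium", RFDETRMedium), ("Base", RFDETRBase),
                          ("Large", RFDETRLarge)]:
            try:
                return name, cls(pretrain_weights=weights_path)  # raises if the checkpoint doesn't match
            except Exception as exc:
                logging.debug("RF-DETR %s rejected %s: %s", name, weights_path, exc)
        return None, None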
nedo_vision_worker_core/pipeline/ModelManager.py
@@ -122,16 +122,51 @@ class ModelManager:
             return False
 
     def sync_cache(self, active_model_ids: Set[str]):
-        """
-        Removes detectors from the cache if their corresponding models are no longer in the database
-        or are not being used by any active pipeline.
-        """
+        """Remove unused detectors from cache."""
         cached_ids = set(self._detector_cache.keys())
         stale_ids = cached_ids - active_model_ids
 
         for model_id in stale_ids:
-            …
+            detector = self._detector_cache.pop(model_id, None)
+            if detector:
+                self._cleanup_detector(detector)
             logging.info(f"🧹 Removed unused detector for model {model_id} from cache.")
+
+    def _cleanup_detector(self, detector: BaseDetector):
+        """Free detector resources and GPU memory."""
+        try:
+            if hasattr(detector, 'model') and detector.model is not None:
+                # Move model to CPU if possible
+                if hasattr(detector.model, 'cpu'):
+                    try:
+                        detector.model.cpu()
+                    except Exception as e:
+                        logging.debug(f"Error moving model to CPU: {e}")
+
+                if hasattr(detector.model, 'eval'):
+                    try:
+                        detector.model.eval()
+                    except Exception:
+                        pass
+
+                detector.model = None
+
+            detector.metadata = None
+
+            # Force garbage collection and clear GPU cache
+            import gc
+            gc.collect()
+
+            try:
+                import torch
+                if torch.cuda.is_available():
+                    torch.cuda.empty_cache()
+                    logging.debug("🧹 GPU cache cleared")
+            except ImportError:
+                pass
+
+        except Exception as e:
+            logging.error(f"Error cleaning up detector: {e}")
 
     def clear_cache(self):
         """Clears the detector cache."""
|
|
|
1
1
|
import logging
|
|
2
|
+
import time
|
|
2
3
|
import threading
|
|
3
4
|
from typing import Dict
|
|
5
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
4
6
|
from .PipelineProcessor import PipelineProcessor
|
|
5
7
|
from ..streams.VideoStreamManager import VideoStreamManager
|
|
6
8
|
|
|
7
9
|
class PipelineManager:
|
|
8
|
-
"""Manages AI pipeline execution
|
|
9
|
-
|
|
10
|
-
def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=
|
|
10
|
+
"""Manages AI pipeline execution with thread pooling for scalability."""
|
|
11
|
+
|
|
12
|
+
def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=None):
|
|
13
|
+
# Auto-detect optimal worker count if not specified
|
|
14
|
+
if max_workers is None:
|
|
15
|
+
import os
|
|
16
|
+
cpu_count = os.cpu_count() or 4
|
|
17
|
+
# Reserve 2 cores for system/video streams, use rest for pipelines
|
|
18
|
+
max_workers = max(4, cpu_count - 2)
|
|
19
|
+
|
|
11
20
|
self.max_workers = max_workers
|
|
12
|
-
self.
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
21
|
+
self.executor = ThreadPoolExecutor(
|
|
22
|
+
max_workers=max_workers,
|
|
23
|
+
thread_name_prefix="pipeline-worker"
|
|
24
|
+
)
|
|
25
|
+
self.pipeline_futures = {} # {pipeline_id: Future}
|
|
26
|
+
self.pipeline_metadata = {} # {pipeline_id: metadata}
|
|
27
|
+
self.video_manager = video_manager
|
|
28
|
+
self.processors: Dict[str, PipelineProcessor] = {} # {pipeline_id: PipelineProcessor}
|
|
16
29
|
self.running = True
|
|
17
|
-
self._stopping_pipelines = set()
|
|
18
|
-
self._stop_lock = threading.Lock()
|
|
30
|
+
self._stopping_pipelines = set()
|
|
31
|
+
self._stop_lock = threading.Lock()
|
|
19
32
|
self.on_pipeline_stopped = on_pipeline_stopped
|
|
33
|
+
|
|
34
|
+
logging.info(f"🚀 PipelineManager initialized with {max_workers} worker threads")
|
|
20
35
|
|
|
21
36
|
def start_pipeline(self, pipeline, detector):
|
|
22
|
-
"""
|
|
23
|
-
Start a pipeline processing.
|
|
24
|
-
Args:
|
|
25
|
-
pipeline: The pipeline object (contains id, worker_source_id, name, etc.)
|
|
26
|
-
detector: The detector instance to use for processing.
|
|
27
|
-
"""
|
|
37
|
+
"""Start a pipeline processing."""
|
|
28
38
|
pipeline_id = pipeline.id
|
|
29
39
|
worker_source_id = pipeline.worker_source_id
|
|
30
40
|
|
|
@@ -38,64 +48,67 @@ class PipelineManager:
|
|
|
38
48
|
|
|
39
49
|
logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")
|
|
40
50
|
|
|
41
|
-
# Acquire
|
|
51
|
+
# Acquire video stream
|
|
42
52
|
if not self.video_manager.acquire_stream(worker_source_id, pipeline_id):
|
|
43
53
|
logging.error(f"❌ Failed to acquire stream {worker_source_id} for pipeline {pipeline_id}")
|
|
44
54
|
return
|
|
45
55
|
|
|
46
56
|
processor = PipelineProcessor(pipeline, detector, False)
|
|
47
57
|
processor.frame_drawer.location_name = pipeline.location_name
|
|
48
|
-
self.processors[pipeline_id] = processor
|
|
58
|
+
self.processors[pipeline_id] = processor
|
|
49
59
|
|
|
50
|
-
active_count = len([
|
|
51
|
-
logging.info(f"📋
|
|
60
|
+
active_count = len([f for f in self.pipeline_futures.values() if not f.done()])
|
|
61
|
+
logging.info(f"📋 Submitting pipeline {pipeline_id} to thread pool (active: {active_count}/{self.max_workers})")
|
|
52
62
|
|
|
53
63
|
try:
|
|
54
|
-
#
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
except Exception as e:
|
|
60
|
-
logging.error(f"❌ Unhandled error in pipeline {pipeline_id} thread: {e}", exc_info=True)
|
|
61
|
-
finally:
|
|
62
|
-
# Ensure cleanup callback is called
|
|
63
|
-
self._handle_pipeline_completion(pipeline_id)
|
|
64
|
-
|
|
65
|
-
# Create and start thread directly
|
|
66
|
-
thread = threading.Thread(
|
|
67
|
-
target=_safe_process_pipeline,
|
|
68
|
-
name=f"pipeline-{pipeline_id[:8]}",
|
|
69
|
-
daemon=True
|
|
64
|
+
# Submit to thread pool instead of creating dedicated thread
|
|
65
|
+
future = self.executor.submit(
|
|
66
|
+
self._pipeline_worker,
|
|
67
|
+
pipeline_id,
|
|
68
|
+
processor
|
|
70
69
|
)
|
|
71
70
|
|
|
72
|
-
|
|
71
|
+
# Add completion callback
|
|
72
|
+
future.add_done_callback(lambda f: self._handle_pipeline_completion(pipeline_id, f))
|
|
73
|
+
|
|
74
|
+
self.pipeline_futures[pipeline_id] = future
|
|
73
75
|
self.pipeline_metadata[pipeline_id] = pipeline
|
|
74
76
|
|
|
75
|
-
logging.info(f"
|
|
76
|
-
thread.start()
|
|
77
|
-
logging.info(f"✅ Pipeline {pipeline_id} thread started successfully")
|
|
77
|
+
logging.info(f"✅ Pipeline {pipeline_id} submitted to thread pool")
|
|
78
78
|
|
|
79
79
|
except Exception as e:
|
|
80
|
-
logging.error(f"❌ Failed to
|
|
81
|
-
# Clean up on failure
|
|
80
|
+
logging.error(f"❌ Failed to submit pipeline {pipeline_id} to thread pool: {e}", exc_info=True)
|
|
82
81
|
self.processors.pop(pipeline_id, None)
|
|
83
82
|
self.video_manager.release_stream(worker_source_id, pipeline_id)
|
|
84
83
|
raise
|
|
84
|
+
|
|
85
|
+
def _pipeline_worker(self, pipeline_id: str, processor: PipelineProcessor):
|
|
86
|
+
"""Worker function executed in thread pool."""
|
|
87
|
+
try:
|
|
88
|
+
logging.info(f"🏁 Pipeline {pipeline_id} worker starting...")
|
|
89
|
+
processor.process_pipeline(self.video_manager)
|
|
90
|
+
except Exception as e:
|
|
91
|
+
logging.error(f"❌ Unhandled error in pipeline {pipeline_id} worker: {e}", exc_info=True)
|
|
92
|
+
finally:
|
|
93
|
+
logging.info(f"🏁 Pipeline {pipeline_id} worker finished")
|
|
85
94
|
|
|
86
|
-
def _handle_pipeline_completion(self, pipeline_id: str):
|
|
87
|
-
"""
|
|
88
|
-
Handles cleanup when a pipeline finishes processing.
|
|
89
|
-
"""
|
|
95
|
+
def _handle_pipeline_completion(self, pipeline_id: str, future=None):
|
|
96
|
+
"""Handle cleanup when pipeline finishes."""
|
|
90
97
|
with self._stop_lock:
|
|
91
98
|
if pipeline_id in self._stopping_pipelines:
|
|
92
|
-
return
|
|
99
|
+
return
|
|
93
100
|
|
|
94
101
|
try:
|
|
95
102
|
logging.info(f"🏁 Pipeline {pipeline_id} completed execution")
|
|
103
|
+
|
|
104
|
+
# Log any exception from the future
|
|
105
|
+
if future and not future.cancelled():
|
|
106
|
+
try:
|
|
107
|
+
future.result(timeout=0)
|
|
108
|
+
except Exception as e:
|
|
109
|
+
logging.error(f"Pipeline {pipeline_id} ended with exception: {e}")
|
|
96
110
|
except Exception as e:
|
|
97
111
|
logging.error(f"⚠️ Error in handling pipeline {pipeline_id} completion: {e}")
|
|
98
|
-
|
|
99
112
|
finally:
|
|
100
113
|
self.on_pipeline_stopped(pipeline_id)
|
|
101
114
|
|
|
@@ -108,28 +121,29 @@ class PipelineManager:
|
|
|
108
121
|
self._stopping_pipelines.add(pipeline_id)
|
|
109
122
|
|
|
110
123
|
try:
|
|
111
|
-
# Get worker_source_id before removing metadata
|
|
112
124
|
pipeline = self.pipeline_metadata.get(pipeline_id)
|
|
113
125
|
worker_source_id = pipeline.worker_source_id if pipeline else None
|
|
114
126
|
|
|
115
|
-
# Stop
|
|
127
|
+
# Stop processor first to signal threads
|
|
116
128
|
processor = self.processors.pop(pipeline_id, None)
|
|
117
129
|
if processor:
|
|
118
130
|
processor.stop()
|
|
119
131
|
|
|
120
|
-
#
|
|
121
|
-
|
|
122
|
-
if
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
132
|
+
# Cancel future if still pending/running
|
|
133
|
+
future = self.pipeline_futures.pop(pipeline_id, None)
|
|
134
|
+
if future and not future.done():
|
|
135
|
+
logging.debug(f"Cancelling future for pipeline {pipeline_id}")
|
|
136
|
+
future.cancel()
|
|
137
|
+
|
|
138
|
+
# Wait briefly for graceful shutdown
|
|
139
|
+
try:
|
|
140
|
+
future.result(timeout=1.0)
|
|
141
|
+
except Exception as e:
|
|
142
|
+
logging.debug(f"Pipeline {pipeline_id} future ended: {e}")
|
|
128
143
|
|
|
129
|
-
# Remove metadata
|
|
130
144
|
self.pipeline_metadata.pop(pipeline_id, None)
|
|
131
145
|
|
|
132
|
-
# Release
|
|
146
|
+
# Release video stream
|
|
133
147
|
if worker_source_id:
|
|
134
148
|
self.video_manager.release_stream(worker_source_id, pipeline_id)
|
|
135
149
|
|
|
@@ -147,31 +161,24 @@ class PipelineManager:
|
|
|
147
161
|
return list(self.pipeline_metadata.keys())
|
|
148
162
|
|
|
149
163
|
def get_pipeline(self, pipeline_id):
|
|
150
|
-
"""Returns the
|
|
164
|
+
"""Returns the pipeline metadata."""
|
|
151
165
|
return self.pipeline_metadata.get(pipeline_id, None)
|
|
152
166
|
|
|
153
167
|
def is_running(self, pipeline_id):
|
|
154
|
-
"""
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
Args:
|
|
158
|
-
pipeline_id (str): The ID of the pipeline to check.
|
|
159
|
-
|
|
160
|
-
Returns:
|
|
161
|
-
bool: True if the pipeline is running, False otherwise.
|
|
162
|
-
"""
|
|
163
|
-
thread = self.pipeline_threads.get(pipeline_id)
|
|
164
|
-
return thread is not None and thread.is_alive()
|
|
168
|
+
"""Check if pipeline is currently running."""
|
|
169
|
+
future = self.pipeline_futures.get(pipeline_id)
|
|
170
|
+
return future is not None and not future.done()
|
|
165
171
|
|
|
166
172
|
def shutdown(self):
|
|
167
173
|
"""Shuts down the pipeline manager gracefully."""
|
|
168
174
|
logging.info("🛑 Shutting down PipelineManager...")
|
|
169
175
|
self.running = False
|
|
170
176
|
|
|
171
|
-
|
|
177
|
+
# Stop all pipelines
|
|
178
|
+
for pipeline_id in list(self.pipeline_futures.keys()):
|
|
172
179
|
self.stop_pipeline(pipeline_id)
|
|
173
180
|
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
self.executor.shutdown(wait=True)
|
|
181
|
+
# Shutdown thread pool
|
|
182
|
+
logging.info("🛑 Shutting down thread pool executor...")
|
|
183
|
+
self.executor.shutdown(wait=True)
|
|
177
184
|
logging.info("✅ PipelineManager stopped.")
|
|
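0.3.9 replaces one daemon thread per pipeline with a shared ThreadPoolExecutor; each pipeline becomes a Future with a done-callback that triggers cleanup. A standalone sketch of that submit/callback/cancel lifecycle (run_pipeline and on_done are illustrative names, not part of the library):

    import logging
    from concurrent.futures import ThreadPoolExecutor

    executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="pipeline-worker")
    futures = {}

    def run_pipeline(pipeline_id):
        logging.info("pipeline %s running", pipeline_id)   # long-lived processing loop goes here

    def on_done(pipeline_id, future):
        logging.info("pipeline %s finished (error=%s)", pipeline_id, future.exception())

    fut = executor.submit(run_pipeline, "demo-1")
    fut.add_done_callback(lambda f: on_done("demo-1", f))
    futures["demo-1"] = fut

    # cancel() only prevents tasks that have not started yet; a running pipeline
    # must notice its own stop flag, which is why the processors also expose stop().
    futures["demo-1"].cancel()
    executor.shutdown(wait=True)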
nedo_vision_worker_core/pipeline/PipelineProcessor.py
@@ -45,7 +45,7 @@ class PipelineProcessor:
         self.rtmp_streamer = None
         self.rtmp_streaming_active = False
         self.last_preview_check_time = 0
-        self.preview_check_interval =…
+        self.preview_check_interval = 10.0  # Check every 10 seconds (reduced from 5s to save CPU)
         self.pipeline_repo = WorkerSourcePipelineRepository()
 
         self.detection_processor_codes = [

@@ -67,6 +67,10 @@
         self.hevc_error_count = 0
         self.last_hevc_recovery = 0
         self.hevc_recovery_cooldown = 30.0  # 30 seconds between HEVC recovery attempts
+
+        self.base_detection_interval = 1.0 / 3.0
+        self.detection_interval = self.base_detection_interval
+        self.is_fps_user_configured = False
 
     def update_config(self, pipeline):
         """Updates the pipeline configuration."""

@@ -122,7 +126,8 @@
     def _update_config_internal(self):
         self.config_manager.update(self.pipeline_id)
         self.preprocessor.update(self.config_manager)
-        self.…
+        self.base_detection_interval, self.is_fps_user_configured = self._get_detection_interval()
+        self.detection_interval = self.base_detection_interval
         self._update_detection_processor()
 
         # Reset failure counters on config update

@@ -154,8 +159,14 @@
         if initial_frame is None:
             logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
             return
+
+        # Auto-adjust FPS based on resolution when user hasn't configured FPS
+        if not self.is_fps_user_configured:
+            self.detection_interval = self._auto_adjust_detection_interval_for_resolution(initial_frame)
+            logging.info(f"📊 Pipeline {pipeline_id}: Auto-adjusted FPS to {1.0/self.detection_interval:.1f} based on {initial_frame.shape[1]}x{initial_frame.shape[0]} resolution (no user config)")
+        else:
+            logging.info(f"📊 Pipeline {pipeline_id}: Using user-configured FPS {1.0/self.detection_interval:.1f} for {initial_frame.shape[1]}x{initial_frame.shape[0]} stream")
 
-        # Start detection thread
         self.detection_thread = threading.Thread(
             target=self._detection_worker,
             name=f"detection-{pipeline_id}",

@@ -163,36 +174,44 @@
         )
         self.detection_thread.start()
 
+        target_render_fps = 25.0
+        target_frame_time = 1.0 / target_render_fps
+
         try:
             while self.running:
+                loop_start = time.time()
+
                 frame = video_manager.get_frame(worker_source_id)
 
                 if frame is None:
                     if not self._handle_frame_failure(video_manager, worker_source_id):
                         break
                     # no frame this tick—just continue (the streamer will repeat last good frame)
+                    time.sleep(0.04)
                     continue
 
-                # cv2.imshow("AA", frame)
-                # cv2.waitKey(1)
-                # continue
-
                 # successful frame
                 self.consecutive_frame_failures = 0
                 self.last_successful_frame_time = time.time()
                 self.frame_counter += 1
 
-                …
-                self.…
-                …
+                should_draw = self.rtmp_streaming_active or self.debug_flag or self.video_debugger.is_window_open(pipeline_id)
+
+                if should_draw:
+                    # draw annotations
+                    try:
+                        self.frame_drawer.draw_polygons(frame)
+                        frame_to_draw = frame.copy() if self.debug_flag else frame
+                        drawn_frame = self.frame_drawer.draw_frame(
+                            frame_to_draw,
+                            self.tracked_objects_render,
+                            with_trails=True,
+                            trail_length=int(max(1, 2 / self.detection_interval))
+                        )
+                    except Exception as e:
+                        logging.error(f"❌ Draw failed, using raw frame: {e}")
+                        drawn_frame = frame
+                else:
                     drawn_frame = frame
 
                 # debug snapshot if requested

@@ -201,7 +220,7 @@
                     try:
                         self.debug_repo.update_debug_entries_by_pipeline_id(
                             self.pipeline_id,
-                            self.frame_drawer.draw_frame(frame…
+                            self.frame_drawer.draw_frame(frame, tracked_objects_render),
                             tracked_objects_render
                         )
                     except Exception as e:

@@ -260,13 +279,14 @@
                     except queue.Full:
                         pass
 
-                # visualize
                 try:
                     self.video_debugger.show_frame(pipeline_id, worker_source_id, drawn_frame)
                 except Exception as e:
                     logging.error(f"⚠️ Failed to render frame for pipeline {pipeline_id}: {e}")
 
-                time.…
+                loop_elapsed = time.time() - loop_start
+                sleep_time = max(0.001, target_frame_time - loop_elapsed)
+                time.sleep(sleep_time)
 
         except Exception as e:
             logging.error(f"❌ Error in pipeline {pipeline_id}: {e}", exc_info=True)

@@ -309,6 +329,12 @@
         while self.running:
             try:
                 frame = self.frame_queue.get(block=True, timeout=1)
+
+                # Check for poison pill (None = stop signal)
+                if frame is None:
+                    logging.debug(f"Detection worker for {pipeline_id} received stop signal")
+                    break
+
                 current_time = time.time()
 
                 # Update config periodically

@@ -320,6 +346,9 @@
                 try:
                     while True:
                         newer = self.frame_queue.get_nowait()
+                        if newer is None:  # Stop signal
+                            logging.debug(f"Detection worker for {pipeline_id} received stop signal")
+                            return
                         frame = newer
                 except queue.Empty:
                     pass
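The detection worker now treats a None taken from the frame queue as a stop signal, so stop() can wake a consumer blocked in Queue.get() immediately instead of waiting out the timeout. A small standalone sketch of the poison-pill pattern (all names are illustrative):

    import queue
    import threading

    frames = queue.Queue(maxsize=8)

    def worker():
        while True:
            item = frames.get(block=True)
            if item is None:          # poison pill: exit right away
                break
            # ... run detection on item ...

    t = threading.Thread(target=worker, daemon=True)
    t.start()

    def stop():
        try:
            frames.put_nowait(None)   # wake the blocked consumer
        except queue.Full:
            try:
                frames.get_nowait()   # drop one stale frame to make room
            except queue.Empty:
                pass
            frames.put_nowait(None)
        t.join(timeout=1.0)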
@@ -505,12 +534,23 @@
             return False
 
     def stop(self):
-        """
+        """Stop processor and cleanup resources."""
         if not self.running:
             return
         logging.info("🛑 Stopping PipelineProcessor...")
         self.running = False
 
+        # Wake up detection thread immediately with poison pill
+        try:
+            self.frame_queue.put_nowait(None)
+        except queue.Full:
+            try:
+                self.frame_queue.get_nowait()
+                self.frame_queue.put_nowait(None)
+            except:
+                pass
+
+        # Stop RTMP streamer
         if hasattr(self, 'rtmp_streamer') and self.rtmp_streamer:
             try:
                 self.rtmp_streamer.stop_stream()

@@ -519,6 +559,7 @@
             finally:
                 self.rtmp_streamer = None
 
+        # Clear frame queue
         try:
             while True:
                 try:

@@ -528,32 +569,103 @@
         except Exception as e:
             logging.error(f"Error clearing frame queue: {e}")
 
+        # Wait for detection thread (should exit quickly with poison pill)
         if self.detection_thread and self.detection_thread.is_alive():
             try:
-                self.detection_thread.join(timeout=…
+                self.detection_thread.join(timeout=1.0)
                 if self.detection_thread.is_alive():
-                    logging.warning("Detection thread did not terminate cleanly")
+                    logging.warning("Detection thread did not terminate cleanly within 1s")
             except Exception as e:
                 logging.error(f"Error joining detection thread: {e}")
             finally:
                 self.detection_thread = None
 
-        …
+        # Unload detector immediately to free GPU memory
+        if hasattr(self, 'detector') and self.detector:
+            try:
+                if hasattr(self.detector, 'model') and self.detector.model:
+                    if hasattr(self.detector.model, 'cpu'):
+                        self.detector.model.cpu()
+                    self.detector.model = None
+            except Exception as e:
+                logging.debug(f"Error clearing detector: {e}")
 
+        # Clear tracking data
+        if hasattr(self, 'tracked_objects_render'):
+            self.tracked_objects_render.clear()
+
+        # Clear tracker state
+        if hasattr(self, 'tracker_manager') and self.tracker_manager:
+            try:
+                if hasattr(self.tracker_manager, 'track_uuid_map'):
+                    self.tracker_manager.track_uuid_map.clear()
+                if hasattr(self.tracker_manager, 'track_count_map'):
+                    self.tracker_manager.track_count_map.clear()
+                if hasattr(self.tracker_manager, 'track_attributes_presence'):
+                    self.tracker_manager.track_attributes_presence.clear()
+                if hasattr(self.tracker_manager, 'track_last_seen'):
+                    self.tracker_manager.track_last_seen.clear()
+            except Exception as e:
+                logging.error(f"Error clearing tracker state: {e}")
+
+        # Close debugger
         try:
             if hasattr(self, 'video_debugger'):
                 self.video_debugger.close_all()
         except Exception as e:
             logging.error(f"Error closing video debugger: {e}")
+
+        # Force garbage collection
+        import gc
+        gc.collect()
 
         logging.info("✅ PipelineProcessor stopped successfully")
 
     def _get_detection_interval(self):
+        """Returns (interval, is_user_configured) tuple."""
         config = self.config_manager.get_feature_config("processing_speed")
-        fps = config.get("decimal",…
-        …
+        fps = config.get("decimal", None)
+
+        # Check if user explicitly configured FPS
+        is_user_configured = fps is not None
+
+        if fps is None or fps <= 0:
+            fps = 3.0  # default 3 fps when not configured
+
+        return 1.0 / fps, is_user_configured
+
+    def _auto_adjust_detection_interval_for_resolution(self, frame):
+        """
+        Auto-adjust detection interval based on frame resolution.
+        Automatically applies when user hasn't configured processing_speed FPS.
+        User-configured FPS always takes precedence.
+        """
+        if frame is None:
+            return self.base_detection_interval
+
+        height, width = frame.shape[:2]
+        total_pixels = height * width
+
+        # Define resolution tiers
+        PIXELS_4K = 3840 * 2160  # ~8.3M pixels
+        PIXELS_2K = 2560 * 1440  # ~3.7M pixels
+
+        base_interval = self.base_detection_interval
+
+        # Adjust based on resolution
+        if total_pixels >= PIXELS_4K:
+            # 4K: Reduce FPS by 50% (double the interval)
+            adjusted_interval = base_interval * 2.0
+            logging.debug(f"📊 4K stream detected ({width}x{height}): FPS reduced to {1.0/adjusted_interval:.1f}")
+        elif total_pixels >= PIXELS_2K:
+            # 2K: Reduce FPS by 25% (increase interval by 1.33x)
+            adjusted_interval = base_interval * 1.33
+            logging.debug(f"📊 2K stream detected ({width}x{height}): FPS reduced to {1.0/adjusted_interval:.1f}")
+        else:
+            # 1080p and below: Use full configured FPS
+            adjusted_interval = base_interval
+
+        return adjusted_interval
 
     def enable_debug(self):
         self.debug_flag = True
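With the 3 FPS default (interval 1/3 s) and no user-configured processing_speed, the resolution tiers above work out as follows; a short worked example:

    # Worked example of the resolution tiers with the 3 FPS default.
    base_interval = 1.0 / 3.0

    def adjusted_interval(width, height):
        pixels = width * height
        if pixels >= 3840 * 2160:      # 4K: halve the FPS
            return base_interval * 2.0
        if pixels >= 2560 * 1440:      # 2K: ~25% fewer detections per second
            return base_interval * 1.33
        return base_interval           # 1080p and below: unchanged

    print(round(1 / adjusted_interval(1920, 1080), 2))  # 3.0
    print(round(1 / adjusted_interval(2560, 1440), 2))  # 2.26
    print(round(1 / adjusted_interval(3840, 2160), 2))  # 1.5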
nedo_vision_worker_core/pipeline/PipelineSyncThread.py
@@ -13,7 +13,7 @@ from ..streams.VideoStreamManager import VideoStreamManager
 class PipelineSyncThread(threading.Thread):
     """Thread responsible for synchronizing worker source pipelines from the database in real-time."""
 
-    def __init__(self, video_manager: VideoStreamManager, polling_interval=5, max_workers=…
+    def __init__(self, video_manager: VideoStreamManager, polling_interval=5, max_workers=None):
         super().__init__(daemon=True)  # Runs as a daemon
         self.video_manager = video_manager
         self.polling_interval = polling_interval
{nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nedo-vision-worker-core
-Version: 0.3.7
+Version: 0.3.9
 Summary: Nedo Vision Worker Core Library for AI Vision Processing
 Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
 Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -30,7 +30,7 @@ Classifier: Environment :: No Input/Output (Daemon)
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 Requires-Dist: alembic>=1.8.0
-Requires-Dist: numpy
+Requires-Dist: numpy<2.0.0,>=1.21.0
 Requires-Dist: pillow>=8.0.0
 Requires-Dist: psutil>=5.9.0
 Requires-Dist: scipy>=1.9.0
@@ -40,8 +40,6 @@ Requires-Dist: torch>=1.9.0
 Requires-Dist: torchvision>=0.10.0
 Requires-Dist: ultralytics>=8.0.0
 Requires-Dist: rfdetr<2.0.0,>=1.2.0
-Provides-Extra: opencv
-Requires-Dist: opencv-python>=4.6.0; extra == "opencv"
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: black>=22.0.0; extra == "dev"
@@ -49,6 +47,8 @@ Requires-Dist: isort>=5.10.0; extra == "dev"
 Requires-Dist: mypy>=0.950; extra == "dev"
 Requires-Dist: flake8>=4.0.0; extra == "dev"
 Requires-Dist: pre-commit>=2.17.0; extra == "dev"
+Provides-Extra: opencv
+Requires-Dist: opencv-python<5.0.0,>=4.6.0; extra == "opencv"
 
 # Nedo Vision Worker Core
 
{nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/RECORD
RENAMED
@@ -1,10 +1,10 @@
-nedo_vision_worker_core/__init__.py,sha256=…
+nedo_vision_worker_core/__init__.py,sha256=QryRnDxJa6xTUFbrfTu0WKF5JjXqKErB4K1nEZVLgjQ,1924
 nedo_vision_worker_core/cli.py,sha256=8YuKWsIgICUYXE_QtwyU3WzGhVjTWiAo5uzpFOmjNc8,5766
-nedo_vision_worker_core/core_service.py,sha256=…
+nedo_vision_worker_core/core_service.py,sha256=q8-GuGW_l5l6wTWQDqc7BDdhM7zKC-mMLZ5wIHu9xV0,11628
 nedo_vision_worker_core/doctor.py,sha256=K_-hVV2-mdEefZ4Cfu5hMCiOxBiI1aXY8VtkkpK80Lc,10651
 nedo_vision_worker_core/ai/FrameDrawer.py,sha256=lj83WFaE70BQfkEc6AHcMBXaiEm8l3s_zJZG9C0NkAs,5286
 nedo_vision_worker_core/ai/ImageDebugger.py,sha256=5FwgNGZrxO2eT7hxdxp7N2gQ0oyyYDZChJ3PJapKu-w,4612
-nedo_vision_worker_core/ai/VideoDebugger.py,sha256=…
+nedo_vision_worker_core/ai/VideoDebugger.py,sha256=NQQ8O9gMR0CLmJ8gQ1N3F32qaQWTfbbgH-sb9pNC-Ng,3437
 nedo_vision_worker_core/ai/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/callbacks/DetectionCallbackManager.py,sha256=Ogoco3JD_o5IMccruau1ly69bDWnsckJyVtzCw259JQ,13379
 nedo_vision_worker_core/callbacks/DetectionCallbackTypes.py,sha256=U7Qb0dCMtOHuZi_HNjapKjPqsCNM9ucHQosjHk9vPJ8,5057
@@ -14,7 +14,7 @@ nedo_vision_worker_core/config/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrF
 nedo_vision_worker_core/database/DatabaseManager.py,sha256=EDSz6auDx3i-DofHJBZdcEWyDHXqCwFB54WTBu9ExME,10314
 nedo_vision_worker_core/database/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/detection/BaseDetector.py,sha256=bReQCTy4tEA1itvw3kkjBMQnx3Jn4MjnDkzdtPwmNPQ,757
-nedo_vision_worker_core/detection/RFDETRDetector.py,sha256=…
+nedo_vision_worker_core/detection/RFDETRDetector.py,sha256=nimnCELy3FLi-6VRinw_id1pAk2jMDKrTfKINSM200k,4602
 nedo_vision_worker_core/detection/YOLODetector.py,sha256=oMCW4KanQCDbtz-ttMiCY5lP1rIgVH_LpfvZvi270j0,2290
 nedo_vision_worker_core/detection/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py,sha256=wqf4hliR_CPkeoeRbBB3PEpQsmasC4mASJ4WyYjNyPE,948
@@ -57,12 +57,12 @@ nedo_vision_worker_core/models/worker_source_pipeline.py,sha256=CGA_nz5wywsJcBPm
 nedo_vision_worker_core/models/worker_source_pipeline_config.py,sha256=dGYTpcTFFu6pmGBufuWBHjv3Xs4RGAQwZn6jp6Ondvs,876
 nedo_vision_worker_core/models/worker_source_pipeline_debug.py,sha256=6S7TkN37FrAT4VwsEB38DWSad7QfvNhsOGtSEK8D1Qs,594
 nedo_vision_worker_core/models/worker_source_pipeline_detection.py,sha256=p6CJsiVCKprTYrNxJsiTB8njXdHkjZKVEyBceRVE6fY,560
-nedo_vision_worker_core/pipeline/ModelManager.py,sha256=…
+nedo_vision_worker_core/pipeline/ModelManager.py,sha256=2DoQiIdF-PAqU7nT_u6bj-DY0aT2FHb8kt24okGGCRc,7449
 nedo_vision_worker_core/pipeline/PipelineConfigManager.py,sha256=X55i9GyXcW9ylO6cj2UMAZFSxxPViacL4H4DZl60CAY,1157
-nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=…
+nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=3I9UBJu_rRfTEctwj8i4hO4MHjpBtYpfh-rIi64qgEw,7638
 nedo_vision_worker_core/pipeline/PipelinePrepocessor.py,sha256=cCiVSHHqsKCtKYURdYoEjHJX2GnT6zd8kQ6ZukjQ3V0,1271
-nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=…
-nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=…
+nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=CyuMRsddb-86p8HF-pOgeNnCtH4o-je0apYo-qEpwJk,34656
+nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=HkW6wj0eDr6M1K3Y25IlB2V6tpIZsKA34AM49AXvcQk,8707
 nedo_vision_worker_core/pipeline/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/preprocessing/ImageResizer.py,sha256=RvOazxe6dJQuiy0ZH4lIGbdFfiu0FLUVCHoMvxkDNT4,1324
 nedo_vision_worker_core/preprocessing/ImageRoi.py,sha256=iO7oQ-SdUSA_kTIVBuq_mdycXsiJNfiFD3J7-VTxiQ4,2141
@@ -98,8 +98,8 @@ nedo_vision_worker_core/util/PipelinePreviewChecker.py,sha256=XxlSMlrDlRrzfV8_Y-
 nedo_vision_worker_core/util/PlatformDetector.py,sha256=GGL8UfeMQITR22EMYIRWnuOEnSqo7Dr5mb0PaFrl8AM,3006
 nedo_vision_worker_core/util/TablePrinter.py,sha256=wzLGgb1GFMeIbAP6HmKcZD33j4D-IlyqlyeR7C5yD7w,1137
 nedo_vision_worker_core/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
-nedo_vision_worker_core-0.3.7.dist-info/METADATA,sha256=…
-nedo_vision_worker_core-0.3.7.dist-info/WHEEL,sha256=…
-nedo_vision_worker_core-0.3.7.dist-info/entry_points.txt,sha256=…
-nedo_vision_worker_core-0.3.7.dist-info/top_level.txt,sha256=…
-nedo_vision_worker_core-0.3.7.dist-info/RECORD,,
+nedo_vision_worker_core-0.3.9.dist-info/METADATA,sha256=VLdUQm-NwmGPkccYFQCDrEXrLnJW8x4WemX-39gTs5U,14426
+nedo_vision_worker_core-0.3.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nedo_vision_worker_core-0.3.9.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
+nedo_vision_worker_core-0.3.9.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
+nedo_vision_worker_core-0.3.9.dist-info/RECORD,,
{nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/WHEEL
RENAMED
File without changes
{nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/entry_points.txt
RENAMED
File without changes
{nedo_vision_worker_core-0.3.7.dist-info → nedo_vision_worker_core-0.3.9.dist-info}/top_level.txt
RENAMED
File without changes