nedo-vision-worker-core 0.3.6-py3-none-any.whl → 0.3.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

nedo_vision_worker_core/__init__.py
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
 from .core_service import CoreService
 from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
 
-__version__ = "0.3.6"
+__version__ = "0.3.8"
 __all__ = [
     "CoreService",
     "DetectionType",

nedo_vision_worker_core/ai/VideoDebugger.py
@@ -2,68 +2,79 @@ import cv2
 import threading
 import time
 from collections import defaultdict
+import logging
 
-# TODO: fix timer error (because of threading)
 class VideoDebugger:
-    """Handles real-time visualization of video streams with object detections."""
+    """Real-time visualization of video streams with detections."""
 
     def __init__(self, enable_visualization=True):
-        """
-        Initializes the VideoDebugger with frame drawing and visualization capabilities.
-
-        Args:
-            enable_visualization (bool): Whether to display frames.
-        """
         self.enable_visualization = enable_visualization
-        self.windows = {}  # Tracks OpenCV windows
-        self.lock = threading.Lock()  # Thread-safe updates
+        self.windows = {}
+        self.lock = threading.Lock()
         self.fps_tracker = defaultdict(lambda: {"start_time": time.time(), "frame_count": 0})
+        self._cv_lock = threading.Lock()  # Prevent OpenCV segfaults
 
     def show_frame(self, pipeline_id, worker_source_id, frame):
-        """
-        Displays a frame with FPS overlay.
-
-        Args:
-            pipeline_id (str/int): Identifier for the pipeline.
-            worker_source_id (str): Identifier for the worker/source.
-            frame: The frame to display.
-        """
+        """Display frame with FPS overlay."""
         if not self.enable_visualization or frame is None:
             return
 
         window_name = f"Pipeline {pipeline_id} - {worker_source_id}"
-        with self.lock:
-            if window_name not in self.fps_tracker:
-                self.fps_tracker[window_name] = {"start_time": time.time(), "frame_count": 0}
+
+        try:
+            with self.lock:
+                if window_name not in self.fps_tracker:
+                    self.fps_tracker[window_name] = {"start_time": time.time(), "frame_count": 0}
 
-            self.fps_tracker[window_name]["frame_count"] += 1
-            elapsed_time = time.time() - self.fps_tracker[window_name]["start_time"]
-            fps = self.fps_tracker[window_name]["frame_count"] / max(elapsed_time, 1e-5)
+                self.fps_tracker[window_name]["frame_count"] += 1
+                elapsed_time = time.time() - self.fps_tracker[window_name]["start_time"]
+                fps = self.fps_tracker[window_name]["frame_count"] / max(elapsed_time, 1e-5)
 
-            # Display FPS on the frame
-            cv2.putText(frame, f"FPS: {fps:.2f}", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
+                cv2.putText(frame, f"FPS: {fps:.2f}", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
 
-            # Show the window
-            if window_name not in self.windows:
-                self.windows[window_name] = True  # Register window
+                if window_name not in self.windows:
+                    self.windows[window_name] = True
 
-            cv2.imshow(window_name, frame)
-
-            # Close on 'q' press
-            if cv2.waitKey(1) & 0xFF == ord('q'):
-                self.close_window(window_name)
+            # Serialize cv2 calls to prevent segfaults
+            with self._cv_lock:
+                try:
+                    cv2.imshow(window_name, frame)
+                    key = cv2.waitKey(1) & 0xFF
+
+                    if key == ord('q'):
+                        self.close_window(window_name)
+                except Exception as e:
+                    logging.error(f"Error displaying frame for {window_name}: {e}")
+
+        except Exception as e:
+            logging.error(f"Error in show_frame for {window_name}: {e}")
 
     def close_window(self, window_name):
-        """Closes a specific OpenCV window."""
+        """Close specific window."""
         with self.lock:
             if window_name in self.windows:
-                cv2.destroyWindow(window_name)
+                with self._cv_lock:
+                    try:
+                        cv2.destroyWindow(window_name)
+                    except Exception as e:
+                        logging.error(f"Error closing window {window_name}: {e}")
                 del self.windows[window_name]
 
     def close_all(self):
-        """Closes all OpenCV windows."""
+        """Close all windows."""
+        with self.lock:
+            window_list = list(self.windows.keys())
+
+        with self._cv_lock:
+            try:
+                for window in window_list:
+                    try:
+                        cv2.destroyWindow(window)
+                    except Exception as e:
+                        logging.debug(f"Error destroying window {window}: {e}")
+                cv2.waitKey(1)
+            except Exception as e:
+                logging.error(f"Error in close_all: {e}")
+
         with self.lock:
-            for window in list(self.windows.keys()):
-                cv2.destroyWindow(window)
             self.windows.clear()
-            cv2.waitKey(1)
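
The point of the new `_cv_lock` is that OpenCV's HighGUI calls (`imshow`, `waitKey`, `destroyWindow`) are not thread-safe, so every call is now funnelled through a single lock instead of racing across pipeline threads. A minimal sketch of the same pattern outside the package (`safe_imshow` is a hypothetical helper name, not part of the library):

import threading
import cv2

_cv_lock = threading.Lock()  # one process-wide lock for all HighGUI calls

def safe_imshow(window_name, frame):
    # Serialize imshow/waitKey so two pipeline threads never enter HighGUI
    # at the same time (concurrent calls can crash the whole process).
    with _cv_lock:
        cv2.imshow(window_name, frame)
        return cv2.waitKey(1) & 0xFF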

nedo_vision_worker_core/core_service.py
@@ -32,7 +32,8 @@ class CoreService:
                  log_level: str = "INFO",
                  storage_path: str = "data",
                  rtmp_server: str = "rtmp://live.vision.sindika.co.id:1935/live",
-                 enable_video_sharing_daemon: bool = True):
+                 enable_video_sharing_daemon: bool = True,
+                 max_pipeline_workers: int = None):
         """
         Initialize the Core Service.
 
@@ -42,12 +43,14 @@ class CoreService:
             storage_path: Storage path for databases and files (default: data)
             rtmp_server: RTMP server URL for video streaming (default: rtmp://localhost:1935/live)
             enable_video_sharing_daemon: Enable automatic video sharing daemon management (default: True)
+            max_pipeline_workers: Maximum concurrent pipeline workers (default: auto-detect based on CPU cores)
         """
         self.running = True
         self.video_manager = None
         self.stream_sync_thread = None
         self.pipeline_sync_thread = None
         self.enable_video_sharing_daemon = enable_video_sharing_daemon
+        self.max_pipeline_workers = max_pipeline_workers
 
         # Initialize callback manager if not already done
         if CoreService._callback_manager is None:
@@ -230,7 +233,10 @@ class CoreService:
         self.stream_sync_thread.start()
 
         # Start pipeline synchronization thread (AI processing)
-        self.pipeline_sync_thread = PipelineSyncThread(self.video_manager)
+        self.pipeline_sync_thread = PipelineSyncThread(
+            self.video_manager,
+            max_workers=self.max_pipeline_workers
+        )
         self.pipeline_sync_thread.start()
 
         logging.info("✅ Nedo Vision Core initialized and running.")
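
For callers, the visible change is the new `max_pipeline_workers` knob, which flows through `PipelineSyncThread` into the pipeline thread pool. A hedged usage sketch — only the arguments visible in this hunk are shown, and if the constructor takes additional required arguments outside this hunk, they would need to be supplied as well:

from nedo_vision_worker_core import CoreService

# Cap AI pipeline concurrency at 8 worker threads. Omitting the argument
# (or passing None) keeps the new auto-detect behaviour, which derives the
# worker count from CPU cores further down in PipelineManager.
service = CoreService(
    log_level="INFO",
    storage_path="data",
    max_pipeline_workers=8,
)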

nedo_vision_worker_core/detection/RFDETRDetector.py
@@ -1,11 +1,11 @@
 import cv2
 import logging
 try:
-    from rfdetr import RFDETRBase
+    from rfdetr import RFDETRNano, RFDETRSmall, RFDETRMedium, RFDETRBase, RFDETRLarge
     RFDETR_AVAILABLE = True
 except ImportError:
     RFDETR_AVAILABLE = False
-    RFDETRBase = None
+    RFDETRNano = RFDETRSmall = RFDETRMedium = RFDETRBase = RFDETRLarge = None
 
 from ..database.DatabaseManager import DatabaseManager
 from ..models.ai_model import AIModelEntity
@@ -32,6 +32,32 @@ class RFDETRDetector(BaseDetector):
         if model:
             self.load_model(model)
 
+    def _detect_model_variant(self, model_path: str):
+        """
+        Automatically detect the correct RF-DETR variant by trying to load the weights.
+        Returns the appropriate RF-DETR class or None if all attempts fail.
+        """
+        variants = [
+            ("Nano", RFDETRNano),
+            ("Small", RFDETRSmall),
+            ("Medium", RFDETRMedium),
+            ("Base", RFDETRBase),
+            ("Large", RFDETRLarge)
+        ]
+
+        for variant_name, variant_class in variants:
+            try:
+                logging.info(f"🔍 Trying RF-DETR {variant_name} variant...")
+                temp_model = variant_class(pretrain_weights=model_path)
+                logging.info(f"✅ Successfully loaded RF-DETR {variant_name} variant")
+                return temp_model, variant_name
+            except Exception as e:
+                # Only log at debug level to avoid cluttering logs
+                logging.debug(f"RF-DETR {variant_name} variant failed: {e}")
+                continue
+
+        return None, None
+
     def load_model(self, model: AIModelEntity):
         if not isinstance(model, AIModelEntity):
             raise TypeError("model must be an instance of AIModelEntity")
@@ -44,8 +70,16 @@ class RFDETRDetector(BaseDetector):
             return False
 
         try:
-            self.model = RFDETRBase(pretrain_weights=path.as_posix())
+            loaded_model, variant_name = self._detect_model_variant(path.as_posix())
+
+            if loaded_model is None:
+                logging.error(f"❌ Could not load model with any RF-DETR variant")
+                self.model = None
+                return False
+
+            self.model = loaded_model
             self.model.optimize_for_inference()
+            logging.info(f"✅ Loaded {model.name} using RF-DETR {variant_name}")
             return True
         except Exception as e:
             logging.error(f"❌ Error loading RFDETR model {model.name}: {e}")
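
The technique here is trial loading: since the checkpoint does not declare its variant, each RF-DETR class is asked to load the weights until one accepts them, relying on a mismatched variant raising rather than loading silently. The idea reduces to a small generic sketch (`load_first_matching` is a hypothetical name, not the package's API):

def load_first_matching(model_path, candidates):
    # candidates: ordered (name, class) pairs such as the RF-DETR variants
    # imported above. Returns (loaded_model, name) or (None, None).
    for name, cls in candidates:
        if cls is None:  # rfdetr not installed
            continue
        try:
            return cls(pretrain_weights=model_path), name
        except Exception:
            continue  # wrong variant for these weights; try the next one
    return None, None

The Nano-to-Large ordering means the smaller (and typically cheaper to construct) variants are tried first, so mismatches fail fast before the larger classes are attempted.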

nedo_vision_worker_core/models/worker_source_pipeline.py
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, String
+from sqlalchemy import Column, String, DateTime
 from sqlalchemy.orm import relationship
 from ..database.DatabaseManager import Base
 
@@ -13,6 +13,7 @@ class WorkerSourcePipelineEntity(Base):
     ai_model_id = Column(String, nullable=True)
     pipeline_status_code = Column(String, nullable=False)
     location_name = Column(String, nullable=True)
+    last_preview_request_at = Column(DateTime, nullable=True)
 
     worker_source_pipeline_configs = relationship(
         "WorkerSourcePipelineConfigEntity",

nedo_vision_worker_core/pipeline/ModelManager.py
@@ -122,16 +122,51 @@ class ModelManager:
         return False
 
     def sync_cache(self, active_model_ids: Set[str]):
-        """
-        Removes detectors from the cache if their corresponding models are no longer in the database
-        or are not being used by any active pipeline.
-        """
+        """Remove unused detectors from cache."""
         cached_ids = set(self._detector_cache.keys())
         stale_ids = cached_ids - active_model_ids
 
         for model_id in stale_ids:
-            del self._detector_cache[model_id]
+            detector = self._detector_cache.pop(model_id, None)
+            if detector:
+                self._cleanup_detector(detector)
             logging.info(f"🧹 Removed unused detector for model {model_id} from cache.")
+
+    def _cleanup_detector(self, detector: BaseDetector):
+        """Free detector resources and GPU memory."""
+        try:
+            if hasattr(detector, 'model') and detector.model is not None:
+                # Move model to CPU if possible
+                if hasattr(detector.model, 'cpu'):
+                    try:
+                        detector.model.cpu()
+                    except Exception as e:
+                        logging.debug(f"Error moving model to CPU: {e}")
+
+                if hasattr(detector.model, 'eval'):
+                    try:
+                        detector.model.eval()
+                    except Exception:
+                        pass
+
+                detector.model = None
+
+            detector.metadata = None
+
+            # Force garbage collection and clear GPU cache
+            import gc
+            gc.collect()
+
+            try:
+                import torch
+                if torch.cuda.is_available():
+                    torch.cuda.empty_cache()
+                    logging.debug("🧹 GPU cache cleared")
+            except ImportError:
+                pass
+
+        except Exception as e:
+            logging.error(f"Error cleaning up detector: {e}")
 
     def clear_cache(self):
         """Clears the detector cache."""

nedo_vision_worker_core/pipeline/PipelineManager.py
@@ -1,30 +1,40 @@
 import logging
+import time
 import threading
 from typing import Dict
+from concurrent.futures import ThreadPoolExecutor
 from .PipelineProcessor import PipelineProcessor
 from ..streams.VideoStreamManager import VideoStreamManager
 
 class PipelineManager:
-    """Manages AI pipeline execution and video stream processing."""
-
-    def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=50):
+    """Manages AI pipeline execution with thread pooling for scalability."""
+
+    def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=None):
+        # Auto-detect optimal worker count if not specified
+        if max_workers is None:
+            import os
+            cpu_count = os.cpu_count() or 4
+            # Reserve 2 cores for system/video streams, use rest for pipelines
+            max_workers = max(4, cpu_count - 2)
+
         self.max_workers = max_workers
-        self.pipeline_threads = {}  # Stores Thread objects {pipeline_id: Thread}
-        self.pipeline_metadata = {}  # Stores actual pipeline data {pipeline_id: metadata}
-        self.video_manager = video_manager  # Manages video streams
-        self.processors: Dict[str, PipelineProcessor] = {}  # Stores PipelineProcessor instances per pipeline
+        self.executor = ThreadPoolExecutor(
+            max_workers=max_workers,
+            thread_name_prefix="pipeline-worker"
+        )
+        self.pipeline_futures = {}  # {pipeline_id: Future}
+        self.pipeline_metadata = {}  # {pipeline_id: metadata}
+        self.video_manager = video_manager
+        self.processors: Dict[str, PipelineProcessor] = {}  # {pipeline_id: PipelineProcessor}
         self.running = True
-        self._stopping_pipelines = set()  # Track pipelines being stopped
-        self._stop_lock = threading.Lock()  # Lock for thread-safe pipeline stopping
+        self._stopping_pipelines = set()
+        self._stop_lock = threading.Lock()
         self.on_pipeline_stopped = on_pipeline_stopped
+
+        logging.info(f"🚀 PipelineManager initialized with {max_workers} worker threads")
 
     def start_pipeline(self, pipeline, detector):
-        """
-        Start a pipeline processing.
-        Args:
-            pipeline: The pipeline object (contains id, worker_source_id, name, etc.)
-            detector: The detector instance to use for processing.
-        """
+        """Start a pipeline processing."""
         pipeline_id = pipeline.id
         worker_source_id = pipeline.worker_source_id
 
@@ -38,64 +48,67 @@ class PipelineManager:
 
         logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")
 
-        # Acquire the video stream (starts it if not already running)
+        # Acquire video stream
         if not self.video_manager.acquire_stream(worker_source_id, pipeline_id):
             logging.error(f"❌ Failed to acquire stream {worker_source_id} for pipeline {pipeline_id}")
             return
 
         processor = PipelineProcessor(pipeline, detector, False)
         processor.frame_drawer.location_name = pipeline.location_name
-        self.processors[pipeline_id] = processor  # Store processor instance
+        self.processors[pipeline_id] = processor
 
-        active_count = len([t for t in self.pipeline_threads.values() if t.is_alive()])
-        logging.info(f"📋 Starting pipeline {pipeline_id} thread (active threads: {active_count})")
+        active_count = len([f for f in self.pipeline_futures.values() if not f.done()])
+        logging.info(f"📋 Submitting pipeline {pipeline_id} to thread pool (active: {active_count}/{self.max_workers})")
 
         try:
-            # Wrap the execution to catch any early errors
-            def _safe_process_pipeline():
-                try:
-                    logging.info(f"🏁 Pipeline {pipeline_id} thread execution beginning...")
-                    processor.process_pipeline(self.video_manager)
-                except Exception as e:
-                    logging.error(f"❌ Unhandled error in pipeline {pipeline_id} thread: {e}", exc_info=True)
-                finally:
-                    # Ensure cleanup callback is called
-                    self._handle_pipeline_completion(pipeline_id)
-
-            # Create and start thread directly
-            thread = threading.Thread(
-                target=_safe_process_pipeline,
-                name=f"pipeline-{pipeline_id[:8]}",
-                daemon=True
+            # Submit to thread pool instead of creating dedicated thread
+            future = self.executor.submit(
+                self._pipeline_worker,
+                pipeline_id,
+                processor
             )
 
-            self.pipeline_threads[pipeline_id] = thread
+            # Add completion callback
+            future.add_done_callback(lambda f: self._handle_pipeline_completion(pipeline_id, f))
+
+            self.pipeline_futures[pipeline_id] = future
             self.pipeline_metadata[pipeline_id] = pipeline
 
-            logging.info(f"⚙️ Starting thread for pipeline {pipeline_id}")
-            thread.start()
-            logging.info(f"✅ Pipeline {pipeline_id} thread started successfully")
+            logging.info(f" Pipeline {pipeline_id} submitted to thread pool")
 
         except Exception as e:
-            logging.error(f"❌ Failed to start pipeline {pipeline_id} thread: {e}", exc_info=True)
-            # Clean up on failure
+            logging.error(f"❌ Failed to submit pipeline {pipeline_id} to thread pool: {e}", exc_info=True)
            self.processors.pop(pipeline_id, None)
            self.video_manager.release_stream(worker_source_id, pipeline_id)
            raise
+
+    def _pipeline_worker(self, pipeline_id: str, processor: PipelineProcessor):
+        """Worker function executed in thread pool."""
+        try:
+            logging.info(f"🏁 Pipeline {pipeline_id} worker starting...")
+            processor.process_pipeline(self.video_manager)
+        except Exception as e:
+            logging.error(f"❌ Unhandled error in pipeline {pipeline_id} worker: {e}", exc_info=True)
+        finally:
+            logging.info(f"🏁 Pipeline {pipeline_id} worker finished")
 
-    def _handle_pipeline_completion(self, pipeline_id: str):
-        """
-        Handles cleanup when a pipeline finishes processing.
-        """
+    def _handle_pipeline_completion(self, pipeline_id: str, future=None):
+        """Handle cleanup when pipeline finishes."""
         with self._stop_lock:
             if pipeline_id in self._stopping_pipelines:
-                return  # If it's already being stopped manually, don't trigger again
+                return
 
         try:
             logging.info(f"🏁 Pipeline {pipeline_id} completed execution")
+
+            # Log any exception from the future
+            if future and not future.cancelled():
+                try:
+                    future.result(timeout=0)
+                except Exception as e:
+                    logging.error(f"Pipeline {pipeline_id} ended with exception: {e}")
         except Exception as e:
             logging.error(f"⚠️ Error in handling pipeline {pipeline_id} completion: {e}")
-
         finally:
             self.on_pipeline_stopped(pipeline_id)
 
@@ -108,28 +121,29 @@ class PipelineManager:
         self._stopping_pipelines.add(pipeline_id)
 
         try:
-            # Get worker_source_id before removing metadata
             pipeline = self.pipeline_metadata.get(pipeline_id)
             worker_source_id = pipeline.worker_source_id if pipeline else None
 
-            # Stop AI processing
+            # Stop processor first to signal threads
             processor = self.processors.pop(pipeline_id, None)
             if processor:
                 processor.stop()
 
-            # Stop execution thread (thread will terminate naturally)
-            thread = self.pipeline_threads.pop(pipeline_id, None)
-            if thread and thread.is_alive():
-                # Thread is daemon, will stop when processor.running becomes False
-                logging.debug(f"Waiting for pipeline {pipeline_id} thread to terminate...")
-                thread.join(timeout=5.0)
-                if thread.is_alive():
-                    logging.warning(f"Pipeline {pipeline_id} thread did not terminate cleanly")
+            # Cancel future if still pending/running
+            future = self.pipeline_futures.pop(pipeline_id, None)
+            if future and not future.done():
+                logging.debug(f"Cancelling future for pipeline {pipeline_id}")
+                future.cancel()
+
+                # Wait briefly for graceful shutdown
+                try:
+                    future.result(timeout=1.0)
+                except Exception as e:
+                    logging.debug(f"Pipeline {pipeline_id} future ended: {e}")
 
-            # Remove metadata
             self.pipeline_metadata.pop(pipeline_id, None)
 
-            # Release the video stream (stops it if no more pipelines use it)
+            # Release video stream
             if worker_source_id:
                 self.video_manager.release_stream(worker_source_id, pipeline_id)
 
@@ -147,31 +161,24 @@ class PipelineManager:
         return list(self.pipeline_metadata.keys())
 
     def get_pipeline(self, pipeline_id):
-        """Returns the actual pipeline metadata (not the Future object)."""
+        """Returns the pipeline metadata."""
         return self.pipeline_metadata.get(pipeline_id, None)
 
     def is_running(self, pipeline_id):
-        """
-        Checks if a pipeline is currently running.
-
-        Args:
-            pipeline_id (str): The ID of the pipeline to check.
-
-        Returns:
-            bool: True if the pipeline is running, False otherwise.
-        """
-        thread = self.pipeline_threads.get(pipeline_id)
-        return thread is not None and thread.is_alive()
+        """Check if pipeline is currently running."""
+        future = self.pipeline_futures.get(pipeline_id)
+        return future is not None and not future.done()
 
     def shutdown(self):
        """Shuts down the pipeline manager gracefully."""
        logging.info("🛑 Shutting down PipelineManager...")
        self.running = False
 
-        for pipeline_id in list(self.pipeline_threads.keys()):
+        # Stop all pipelines
+        for pipeline_id in list(self.pipeline_futures.keys()):
            self.stop_pipeline(pipeline_id)
 
-        logging.info("✅ PipelineManager stopped.")
-
-        self.executor.shutdown(wait=True)  # Wait for all threads to finish
+        # Shutdown thread pool
+        logging.info("🛑 Shutting down thread pool executor...")
+        self.executor.shutdown(wait=True)
        logging.info("✅ PipelineManager stopped.")
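
The structural change here is from one dedicated `threading.Thread` per pipeline to a shared `ThreadPoolExecutor`, which bounds concurrency and provides completion callbacks. One caveat the diff handles cooperatively: `Future.cancel()` only stops tasks still queued; a task that is already running keeps going until `processor.stop()` flips its running flag. A standalone skeleton of the same pattern, with placeholder task names:

from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="pipeline-worker")
futures = {}

def worker(task_id):
    print(f"{task_id} running")   # stands in for processor.process_pipeline()

def on_done(task_id, future):
    # Invoked by the pool when the task finishes, errors, or is cancelled.
    print(f"{task_id} done, cancelled={future.cancelled()}")

task_id = "pipeline-1"
future = executor.submit(worker, task_id)
future.add_done_callback(lambda f: on_done(task_id, f))
futures[task_id] = future

# cancel() is a no-op for tasks already executing; those must observe a
# cooperative stop flag, which is what processor.stop() provides above.
futures[task_id].cancel()
executor.shutdown(wait=True)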

nedo_vision_worker_core/pipeline/PipelineProcessor.py
@@ -8,12 +8,14 @@ from .PipelineConfigManager import PipelineConfigManager
 from .PipelinePrepocessor import PipelinePrepocessor
 from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
 from ..repositories.WorkerSourcePipelineDetectionRepository import WorkerSourcePipelineDetectionRepository
+from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
 from ..streams.VideoStreamManager import VideoStreamManager
 from ..ai.VideoDebugger import VideoDebugger
 from ..ai.FrameDrawer import FrameDrawer
 from ..tracker.TrackerManager import TrackerManager
 from ..detection.BaseDetector import BaseDetector
 from ..streams.RTMPStreamer import RTMPStreamer
+from ..util.PipelinePreviewChecker import PipelinePreviewChecker
 
 
 class PipelineProcessor:
@@ -41,6 +43,10 @@ class PipelineProcessor:
         self.worker_source_id = pipeline.worker_source_id
 
         self.rtmp_streamer = None
+        self.rtmp_streaming_active = False
+        self.last_preview_check_time = 0
+        self.preview_check_interval = 5.0  # Check every 5 seconds
+        self.pipeline_repo = WorkerSourcePipelineRepository()
 
         self.detection_processor_codes = [
             PPEDetectionProcessor.code,
@@ -61,6 +67,10 @@ class PipelineProcessor:
         self.hevc_error_count = 0
         self.last_hevc_recovery = 0
         self.hevc_recovery_cooldown = 30.0  # 30 seconds between HEVC recovery attempts
+
+        self.base_detection_interval = 1.0 / 3.0
+        self.detection_interval = self.base_detection_interval
+        self.is_fps_user_configured = False
 
     def update_config(self, pipeline):
         """Updates the pipeline configuration."""
@@ -116,7 +126,8 @@ class PipelineProcessor:
     def _update_config_internal(self):
         self.config_manager.update(self.pipeline_id)
         self.preprocessor.update(self.config_manager)
-        self.detection_interval = self._get_detection_interval()
+        self.base_detection_interval, self.is_fps_user_configured = self._get_detection_interval()
+        self.detection_interval = self.base_detection_interval
         self._update_detection_processor()
 
         # Reset failure counters on config update
@@ -148,8 +159,14 @@ class PipelineProcessor:
         if initial_frame is None:
             logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
             return
+
+        # Auto-adjust FPS based on resolution when user hasn't configured FPS
+        if not self.is_fps_user_configured:
+            self.detection_interval = self._auto_adjust_detection_interval_for_resolution(initial_frame)
+            logging.info(f"📊 Pipeline {pipeline_id}: Auto-adjusted FPS to {1.0/self.detection_interval:.1f} based on {initial_frame.shape[1]}x{initial_frame.shape[0]} resolution (no user config)")
+        else:
+            logging.info(f"📊 Pipeline {pipeline_id}: Using user-configured FPS {1.0/self.detection_interval:.1f} for {initial_frame.shape[1]}x{initial_frame.shape[0]} stream")
 
-        # Start detection thread
         self.detection_thread = threading.Thread(
             target=self._detection_worker,
             name=f"detection-{pipeline_id}",
@@ -167,10 +184,6 @@ class PipelineProcessor:
                     # no frame this tick—just continue (the streamer will repeat last good frame)
                     continue
 
-                # cv2.imshow("AA", frame)
-                # cv2.waitKey(1)
-                # continue
-
                 # successful frame
                 self.consecutive_frame_failures = 0
                 self.last_successful_frame_time = time.time()
@@ -179,8 +192,9 @@ class PipelineProcessor:
                 # draw annotations
                 try:
                     self.frame_drawer.draw_polygons(frame)
+                    frame_to_draw = frame.copy() if self.debug_flag else frame
                     drawn_frame = self.frame_drawer.draw_frame(
-                        frame.copy(),
+                        frame_to_draw,
                        self.tracked_objects_render,
                        with_trails=True,
                        trail_length=int(max(1, 2 / self.detection_interval))
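
Two small renderer tweaks ride along here: the per-frame `frame.copy()` is now skipped unless a debug snapshot will need the frame again, and the trail length scales with the detection rate so trails always cover roughly two seconds of motion. A quick check of that arithmetic:

# trail_length = int(max(1, 2 / detection_interval)) keeps ~2 s of trail
# regardless of the configured detection FPS:
for fps in (1.0, 3.0, 10.0):
    detection_interval = 1.0 / fps
    print(fps, int(max(1, 2 / detection_interval)))  # -> 2, 6, 20 trail points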

@@ -195,33 +209,49 @@
                 try:
                     self.debug_repo.update_debug_entries_by_pipeline_id(
                         self.pipeline_id,
-                        self.frame_drawer.draw_frame(frame.copy(), tracked_objects_render),
+                        self.frame_drawer.draw_frame(frame, tracked_objects_render),
                         tracked_objects_render
                     )
                 except Exception as e:
                     logging.warning(f"Debug save failed: {e}")
                 self.debug_flag = False
 
-                # Push frame to RTMP stream
-                # RTMPStreamer handles its own restarts internally
-                if self.rtmp_streamer is None:
-                    try:
-                        self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
-                        logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id}")
-                    except Exception as e:
-                        logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
-                        self.rtmp_streamer = None
+                # Check if RTMP streaming should be active based on preview requests
+                current_time = time.time()
+                if current_time - self.last_preview_check_time >= self.preview_check_interval:
+                    self._check_and_update_rtmp_streaming()
+                    self.last_preview_check_time = current_time
 
-                if self.rtmp_streamer:
-                    try:
-                        self.rtmp_streamer.push_frame(drawn_frame)
-                    except Exception as e:
-                        logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
-                        if "initialization_failed" in str(e).lower():
-                            try:
-                                self.rtmp_streamer.stop_stream()
-                            except Exception:
-                                pass
+                # Push frame to RTMP stream if preview is active
+                if self.rtmp_streaming_active:
+                    if self.rtmp_streamer is None:
+                        try:
+                            self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
+                            logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id} (preview requested)")
+                        except Exception as e:
+                            logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
+                            self.rtmp_streamer = None
+
+                    if self.rtmp_streamer:
+                        try:
+                            self.rtmp_streamer.push_frame(drawn_frame)
+                        except Exception as e:
+                            logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
+                            if "initialization_failed" in str(e).lower():
+                                try:
+                                    self.rtmp_streamer.stop_stream()
+                                except Exception:
+                                    pass
+                                self.rtmp_streamer = None
+                else:
+                    # Stop RTMP streaming if preview is no longer active
+                    if self.rtmp_streamer is not None:
+                        try:
+                            logging.info(f"🛑 Stopping RTMP streamer for pipeline {pipeline_id} (preview expired)")
+                            self.rtmp_streamer.stop_stream()
+                        except Exception as e:
+                            logging.error(f"❌ Error stopping RTMP streamer: {e}")
+                        finally:
                             self.rtmp_streamer = None
 
                 # feed detection worker with latest-only behavior
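
The render loop now consults the database at most once per `preview_check_interval` (5 s) rather than on every frame — a plain time-based rate limiter. Reduced to its core, with hypothetical names:

import time

last_check = 0.0
CHECK_INTERVAL = 5.0  # seconds between database polls

def due_for_check(now=None):
    # Returns True at most once per CHECK_INTERVAL; the caller then
    # refreshes rtmp_streaming_active from the database on True.
    global last_check
    now = time.time() if now is None else now
    if now - last_check >= CHECK_INTERVAL:
        last_check = now
        return True
    return False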

@@ -244,7 +274,9 @@ class PipelineProcessor:
                 except Exception as e:
                     logging.error(f"⚠️ Failed to render frame for pipeline {pipeline_id}: {e}")
 
-                time.sleep(0.1)
+                # Dynamic sleep based on detection interval to reduce CPU
+                sleep_time = min(0.01, self.detection_interval / 10)
+                time.sleep(sleep_time)
 
         except Exception as e:
             logging.error(f"❌ Error in pipeline {pipeline_id}: {e}", exc_info=True)
@@ -287,6 +319,12 @@ class PipelineProcessor:
         while self.running:
             try:
                 frame = self.frame_queue.get(block=True, timeout=1)
+
+                # Check for poison pill (None = stop signal)
+                if frame is None:
+                    logging.debug(f"Detection worker for {pipeline_id} received stop signal")
+                    break
+
                 current_time = time.time()
 
                 # Update config periodically
@@ -298,6 +336,9 @@ class PipelineProcessor:
                 try:
                     while True:
                         newer = self.frame_queue.get_nowait()
+                        if newer is None:  # Stop signal
+                            logging.debug(f"Detection worker for {pipeline_id} received stop signal")
+                            return
                         frame = newer
                 except queue.Empty:
                     pass
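
The `None` sentinel is a classic poison pill: it wakes the consumer out of its blocking `get()` immediately instead of making it wait out the 1-second timeout before re-checking `self.running`. Both halves of the pattern, extracted into a standalone sketch:

import queue

def consumer(frame_queue, handle_frame):
    # Consumer half: treat None as an immediate stop signal.
    while True:
        try:
            frame = frame_queue.get(block=True, timeout=1)
        except queue.Empty:
            continue
        if frame is None:   # poison pill
            break
        handle_frame(frame)

def signal_stop(frame_queue):
    # Producer half: if the queue is full, evict one stale frame so the
    # pill always fits (mirrors the stop() logic later in this diff).
    try:
        frame_queue.put_nowait(None)
    except queue.Full:
        try:
            frame_queue.get_nowait()
            frame_queue.put_nowait(None)
        except (queue.Empty, queue.Full):
            pass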

@@ -483,12 +524,23 @@ class PipelineProcessor:
         return False
 
     def stop(self):
-        """Stops the Pipeline processor and cleans up resources."""
+        """Stop processor and cleanup resources."""
         if not self.running:
             return
         logging.info("🛑 Stopping PipelineProcessor...")
         self.running = False
 
+        # Wake up detection thread immediately with poison pill
+        try:
+            self.frame_queue.put_nowait(None)
+        except queue.Full:
+            try:
+                self.frame_queue.get_nowait()
+                self.frame_queue.put_nowait(None)
+            except:
+                pass
+
+        # Stop RTMP streamer
         if hasattr(self, 'rtmp_streamer') and self.rtmp_streamer:
             try:
                 self.rtmp_streamer.stop_stream()
@@ -497,6 +549,7 @@ class PipelineProcessor:
             finally:
                 self.rtmp_streamer = None
 
+        # Clear frame queue
         try:
             while True:
                 try:
@@ -506,38 +559,136 @@ class PipelineProcessor:
         except Exception as e:
             logging.error(f"Error clearing frame queue: {e}")
 
+        # Wait for detection thread (should exit quickly with poison pill)
         if self.detection_thread and self.detection_thread.is_alive():
             try:
-                self.detection_thread.join(timeout=5.0)
+                self.detection_thread.join(timeout=1.0)
                 if self.detection_thread.is_alive():
-                    logging.warning("Detection thread did not terminate cleanly")
+                    logging.warning("Detection thread did not terminate cleanly within 1s")
             except Exception as e:
                 logging.error(f"Error joining detection thread: {e}")
             finally:
                 self.detection_thread = None
 
-        self.tracked_objects_render.clear()
+        # Unload detector immediately to free GPU memory
+        if hasattr(self, 'detector') and self.detector:
+            try:
+                if hasattr(self.detector, 'model') and self.detector.model:
+                    if hasattr(self.detector.model, 'cpu'):
+                        self.detector.model.cpu()
+                    self.detector.model = None
+            except Exception as e:
+                logging.debug(f"Error clearing detector: {e}")
 
+        # Clear tracking data
+        if hasattr(self, 'tracked_objects_render'):
+            self.tracked_objects_render.clear()
+
+        # Clear tracker state
+        if hasattr(self, 'tracker_manager') and self.tracker_manager:
+            try:
+                if hasattr(self.tracker_manager, 'track_uuid_map'):
+                    self.tracker_manager.track_uuid_map.clear()
+                if hasattr(self.tracker_manager, 'track_count_map'):
+                    self.tracker_manager.track_count_map.clear()
+                if hasattr(self.tracker_manager, 'track_attributes_presence'):
+                    self.tracker_manager.track_attributes_presence.clear()
+                if hasattr(self.tracker_manager, 'track_last_seen'):
+                    self.tracker_manager.track_last_seen.clear()
+            except Exception as e:
+                logging.error(f"Error clearing tracker state: {e}")
+
+        # Close debugger
         try:
             if hasattr(self, 'video_debugger'):
                 self.video_debugger.close_all()
         except Exception as e:
             logging.error(f"Error closing video debugger: {e}")
+
+        # Force garbage collection
+        import gc
+        gc.collect()
 
         logging.info("✅ PipelineProcessor stopped successfully")
 
     def _get_detection_interval(self):
+        """Returns (interval, is_user_configured) tuple."""
         config = self.config_manager.get_feature_config("processing_speed")
-        fps = config.get("decimal", 1.0)
-        if fps <= 0:
-            return 1.0 / 10.0  # default 10 fps
-        return 1.0 / fps
+        fps = config.get("decimal", None)
+
+        # Check if user explicitly configured FPS
+        is_user_configured = fps is not None
+
+        if fps is None or fps <= 0:
+            fps = 3.0  # default 3 fps when not configured
+
+        return 1.0 / fps, is_user_configured
+
+    def _auto_adjust_detection_interval_for_resolution(self, frame):
+        """
+        Auto-adjust detection interval based on frame resolution.
+        Automatically applies when user hasn't configured processing_speed FPS.
+        User-configured FPS always takes precedence.
+        """
+        if frame is None:
+            return self.base_detection_interval
+
+        height, width = frame.shape[:2]
+        total_pixels = height * width
+
+        # Define resolution tiers
+        PIXELS_4K = 3840 * 2160  # ~8.3M pixels
+        PIXELS_2K = 2560 * 1440  # ~3.7M pixels
+
+        base_interval = self.base_detection_interval
+
+        # Adjust based on resolution
+        if total_pixels >= PIXELS_4K:
+            # 4K: Reduce FPS by 50% (double the interval)
+            adjusted_interval = base_interval * 2.0
+            logging.debug(f"📊 4K stream detected ({width}x{height}): FPS reduced to {1.0/adjusted_interval:.1f}")
+        elif total_pixels >= PIXELS_2K:
+            # 2K: Reduce FPS by 25% (increase interval by 1.33x)
+            adjusted_interval = base_interval * 1.33
+            logging.debug(f"📊 2K stream detected ({width}x{height}): FPS reduced to {1.0/adjusted_interval:.1f}")
+        else:
+            # 1080p and below: Use full configured FPS
+            adjusted_interval = base_interval
+
+        return adjusted_interval
 
     def enable_debug(self):
         self.debug_flag = True
         self.consecutive_frame_failures = 0
         self.last_successful_frame_time = time.time()
 
+    def _check_and_update_rtmp_streaming(self):
+        """
+        Check if RTMP streaming should be active based on preview requests.
+        Updates the rtmp_streaming_active flag.
+        """
+        try:
+            # Get fresh pipeline data from database
+            pipeline = self.pipeline_repo.get_worker_source_pipeline(self.pipeline_id)
+
+            if not pipeline:
+                logging.warning(f"⚠️ Pipeline {self.pipeline_id} not found in database")
+                self.rtmp_streaming_active = False
+                return
+
+            # Check if preview is active using the utility
+            should_stream = PipelinePreviewChecker.should_stream_rtmp(
+                pipeline.last_preview_request_at,
+                preview_window_seconds=300  # 5 minutes
+            )
+
+            self.rtmp_streaming_active = should_stream
+
+        except Exception as e:
+            logging.error(f"❌ Error checking preview status for pipeline {self.pipeline_id}: {e}")
+            # On error, disable streaming to be safe
+            self.rtmp_streaming_active = False
+
     def reset_frame_failure_counters(self):
         logging.info(f"🔄 Resetting frame failure counters for pipeline {self.pipeline_id}")
         self.consecutive_frame_failures = 0
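
With the new default base of 3 FPS (interval = 1/3 s), the resolution tiers above work out as follows; a quick check of the arithmetic:

base = 1.0 / 3.0                      # default detection interval, 3 FPS

print(f"{1.0 / (base * 2.0):.2f}")    # 4K (>= 3840x2160 pixels): 1.50 FPS
print(f"{1.0 / (base * 1.33):.2f}")   # 2K (>= 2560x1440 pixels): 2.26 FPS
print(f"{1.0 / base:.2f}")            # 1080p and below:          3.00 FPS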

nedo_vision_worker_core/pipeline/PipelineSyncThread.py
@@ -13,7 +13,7 @@ from ..streams.VideoStreamManager import VideoStreamManager
 class PipelineSyncThread(threading.Thread):
     """Thread responsible for synchronizing worker source pipelines from the database in real-time."""
 
-    def __init__(self, video_manager: VideoStreamManager, polling_interval=5, max_workers=4):
+    def __init__(self, video_manager: VideoStreamManager, polling_interval=5, max_workers=None):
         super().__init__(daemon=True)  # Runs as a daemon
         self.video_manager = video_manager
         self.polling_interval = polling_interval

nedo_vision_worker_core/util/PipelinePreviewChecker.py (new file)
@@ -0,0 +1,50 @@
+from datetime import datetime
+
+class PipelinePreviewChecker:
+    """
+    Utility class to check if RTMP streaming should be enabled for a pipeline
+    based on the last preview request timestamp.
+    """
+
+    @staticmethod
+    def should_stream_rtmp(last_preview_request_at, preview_window_seconds=300):
+        """
+        Check if RTMP streaming should be enabled based on the last preview request.
+
+        Args:
+            last_preview_request_at: DateTime object or None representing the last preview request
+            preview_window_seconds: Time window in seconds to keep streaming after a request (default: 300 = 5 minutes)
+
+        Returns:
+            bool: True if streaming should be enabled, False otherwise
+        """
+        if last_preview_request_at is None:
+            return False
+
+        # Calculate the time difference
+        current_time = datetime.utcnow()
+        time_since_request = current_time - last_preview_request_at
+
+        # Check if we're within the preview window
+        return time_since_request.total_seconds() <= preview_window_seconds
+
+    @staticmethod
+    def get_remaining_preview_time(last_preview_request_at, preview_window_seconds=300):
+        """
+        Get the remaining time (in seconds) for the preview window.
+
+        Args:
+            last_preview_request_at: DateTime object or None representing the last preview request
+            preview_window_seconds: Time window in seconds (default: 300 = 5 minutes)
+
+        Returns:
+            int: Remaining seconds in the preview window, or 0 if expired/not requested
+        """
+        if last_preview_request_at is None:
+            return 0
+
+        current_time = datetime.utcnow()
+        time_since_request = current_time - last_preview_request_at
+        remaining = preview_window_seconds - time_since_request.total_seconds()
+
+        return max(0, int(remaining))
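
Usage of the new utility is straightforward; note that both methods compare against naive `datetime.utcnow()`, so the `last_preview_request_at` column added above must be stored as naive UTC (a timezone-aware value would raise a TypeError on subtraction). A small sketch, assuming the import path from the RECORD below:

from datetime import datetime, timedelta
from nedo_vision_worker_core.util.PipelinePreviewChecker import PipelinePreviewChecker

# A preview requested 2 minutes ago is inside the default 5-minute window.
requested_at = datetime.utcnow() - timedelta(minutes=2)

PipelinePreviewChecker.should_stream_rtmp(requested_at)          # True
PipelinePreviewChecker.get_remaining_preview_time(requested_at)  # ~180 seconds left
PipelinePreviewChecker.should_stream_rtmp(None)                  # False: never requested

# Hypothetical write side (not part of this diff): whatever records a
# preview request would stamp the column with naive UTC to match, e.g.
# pipeline.last_preview_request_at = datetime.utcnow()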

nedo_vision_worker_core-0.3.6.dist-info/METADATA → nedo_vision_worker_core-0.3.8.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nedo-vision-worker-core
-Version: 0.3.6
+Version: 0.3.8
 Summary: Nedo Vision Worker Core Library for AI Vision Processing
 Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
 Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>

nedo_vision_worker_core-0.3.6.dist-info/RECORD → nedo_vision_worker_core-0.3.8.dist-info/RECORD
@@ -1,10 +1,10 @@
-nedo_vision_worker_core/__init__.py,sha256=E4gmykhnzrU8ANXRIGP9LQG8TNbk6eAyuWqtAsFdj7Y,1924
+nedo_vision_worker_core/__init__.py,sha256=HAUgt6tA__5tRxc5EtEDSooC_CQI0Uyjp0RFJtSK754,1924
 nedo_vision_worker_core/cli.py,sha256=8YuKWsIgICUYXE_QtwyU3WzGhVjTWiAo5uzpFOmjNc8,5766
-nedo_vision_worker_core/core_service.py,sha256=dnHNjbslOeyeWqHDFnk_yKdfTICYzLyRIcuZNwF0Zf4,11323
+nedo_vision_worker_core/core_service.py,sha256=q8-GuGW_l5l6wTWQDqc7BDdhM7zKC-mMLZ5wIHu9xV0,11628
 nedo_vision_worker_core/doctor.py,sha256=K_-hVV2-mdEefZ4Cfu5hMCiOxBiI1aXY8VtkkpK80Lc,10651
 nedo_vision_worker_core/ai/FrameDrawer.py,sha256=lj83WFaE70BQfkEc6AHcMBXaiEm8l3s_zJZG9C0NkAs,5286
 nedo_vision_worker_core/ai/ImageDebugger.py,sha256=5FwgNGZrxO2eT7hxdxp7N2gQ0oyyYDZChJ3PJapKu-w,4612
-nedo_vision_worker_core/ai/VideoDebugger.py,sha256=M6XVuK2Lq2ceE5UdYj2GLaMbEU6THmGzgQlVkqs-lAc,2578
+nedo_vision_worker_core/ai/VideoDebugger.py,sha256=4BqOB_vszS4Prux1VFnZ3nGwQ3dyQOljtp5ud0XZBCU,3074
 nedo_vision_worker_core/ai/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/callbacks/DetectionCallbackManager.py,sha256=Ogoco3JD_o5IMccruau1ly69bDWnsckJyVtzCw259JQ,13379
 nedo_vision_worker_core/callbacks/DetectionCallbackTypes.py,sha256=U7Qb0dCMtOHuZi_HNjapKjPqsCNM9ucHQosjHk9vPJ8,5057
@@ -14,7 +14,7 @@ nedo_vision_worker_core/config/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrF
 nedo_vision_worker_core/database/DatabaseManager.py,sha256=EDSz6auDx3i-DofHJBZdcEWyDHXqCwFB54WTBu9ExME,10314
 nedo_vision_worker_core/database/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/detection/BaseDetector.py,sha256=bReQCTy4tEA1itvw3kkjBMQnx3Jn4MjnDkzdtPwmNPQ,757
-nedo_vision_worker_core/detection/RFDETRDetector.py,sha256=3T3zTFZW0pBv9E-pSpY4JP7wI0LOTM4hxzanvrEXMcE,3093
+nedo_vision_worker_core/detection/RFDETRDetector.py,sha256=nimnCELy3FLi-6VRinw_id1pAk2jMDKrTfKINSM200k,4602
 nedo_vision_worker_core/detection/YOLODetector.py,sha256=oMCW4KanQCDbtz-ttMiCY5lP1rIgVH_LpfvZvi270j0,2290
 nedo_vision_worker_core/detection/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py,sha256=wqf4hliR_CPkeoeRbBB3PEpQsmasC4mASJ4WyYjNyPE,948
@@ -53,16 +53,16 @@ nedo_vision_worker_core/models/ppe_detection_label.py,sha256=qON7a0fuDv5cK8phGH0
 nedo_vision_worker_core/models/restricted_area_violation.py,sha256=0enCi3tv15YMy3NaI6FwqhmLYHbbVX4nWTh46qKxrWc,829
 nedo_vision_worker_core/models/user.py,sha256=SnLUz2nS7j17bIP-gElMEaR-jWdnNQ0fTpRminVKY60,294
 nedo_vision_worker_core/models/worker_source.py,sha256=FB8irZ26LhCKNHBcpIIb5Mi3SoSNm9-q25VIkO5jQWg,793
-nedo_vision_worker_core/models/worker_source_pipeline.py,sha256=xCD4i9pHr8Qy5B_h1dH0Q7V7faS2lAou2UNEzx24oIw,752
+nedo_vision_worker_core/models/worker_source_pipeline.py,sha256=CGA_nz5wywsJcBPm-5kd0v_-h59f8Iu7uEeX3C91eT4,824
 nedo_vision_worker_core/models/worker_source_pipeline_config.py,sha256=dGYTpcTFFu6pmGBufuWBHjv3Xs4RGAQwZn6jp6Ondvs,876
 nedo_vision_worker_core/models/worker_source_pipeline_debug.py,sha256=6S7TkN37FrAT4VwsEB38DWSad7QfvNhsOGtSEK8D1Qs,594
 nedo_vision_worker_core/models/worker_source_pipeline_detection.py,sha256=p6CJsiVCKprTYrNxJsiTB8njXdHkjZKVEyBceRVE6fY,560
-nedo_vision_worker_core/pipeline/ModelManager.py,sha256=K7lmVOo-KL7bnWtyafilZs23bzd6loCgfUz7xuAmlVw,6195
+nedo_vision_worker_core/pipeline/ModelManager.py,sha256=2DoQiIdF-PAqU7nT_u6bj-DY0aT2FHb8kt24okGGCRc,7449
 nedo_vision_worker_core/pipeline/PipelineConfigManager.py,sha256=X55i9GyXcW9ylO6cj2UMAZFSxxPViacL4H4DZl60CAY,1157
-nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=S3QxTcJjDhOY2O8x9c62kYXotgjV4enlCJLcziZEIh8,7589
+nedo_vision_worker_core/pipeline/PipelineManager.py,sha256=3I9UBJu_rRfTEctwj8i4hO4MHjpBtYpfh-rIi64qgEw,7638
 nedo_vision_worker_core/pipeline/PipelinePrepocessor.py,sha256=cCiVSHHqsKCtKYURdYoEjHJX2GnT6zd8kQ6ZukjQ3V0,1271
-nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=KMdFDvQO2yI9NxjNDwRQU07St1Y01QgM97tgNuyelw0,26766
-nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=2tIqheE2BG-DAEqUgq9i4blz0vQWmnYu8MgHPLJkg3g,8704
+nedo_vision_worker_core/pipeline/PipelineProcessor.py,sha256=9gQ6AVOD_cC-c9VLqJ8WwaDb8HZNkAOT35Op0nHDROs,34194
+nedo_vision_worker_core/pipeline/PipelineSyncThread.py,sha256=HkW6wj0eDr6M1K3Y25IlB2V6tpIZsKA34AM49AXvcQk,8707
 nedo_vision_worker_core/pipeline/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 nedo_vision_worker_core/preprocessing/ImageResizer.py,sha256=RvOazxe6dJQuiy0ZH4lIGbdFfiu0FLUVCHoMvxkDNT4,1324
 nedo_vision_worker_core/preprocessing/ImageRoi.py,sha256=iO7oQ-SdUSA_kTIVBuq_mdycXsiJNfiFD3J7-VTxiQ4,2141
@@ -94,11 +94,12 @@ nedo_vision_worker_core/util/DrawingUtils.py,sha256=sLptmzVaJakP_ZgbZsLL03RMH_9N
 nedo_vision_worker_core/util/ModelReadinessChecker.py,sha256=ywHvt_d7UlY3DyFEJrO4Iyl0zx3SaLKb-Qab5l5Q8n4,6548
 nedo_vision_worker_core/util/PersonAttributeMatcher.py,sha256=PhYTPYSF62Nfuc7dage03K6icw_bBBdpvXvnlzCbS30,2773
 nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py,sha256=iuzCU32BQKaZ3dIy0QHNg2yoWJA-XhTRwwYqCvFdDgg,1711
+nedo_vision_worker_core/util/PipelinePreviewChecker.py,sha256=XxlSMlrDlRrzfV8_Y--40Xfk5N7FjGgkKHth3KKCZzU,1963
 nedo_vision_worker_core/util/PlatformDetector.py,sha256=GGL8UfeMQITR22EMYIRWnuOEnSqo7Dr5mb0PaFrl8AM,3006
 nedo_vision_worker_core/util/TablePrinter.py,sha256=wzLGgb1GFMeIbAP6HmKcZD33j4D-IlyqlyeR7C5yD7w,1137
 nedo_vision_worker_core/util/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
-nedo_vision_worker_core-0.3.6.dist-info/METADATA,sha256=bGu8iMZGYWUU3zPPN23EwAZenJNJlayPcGzE-AT6sl0,14412
-nedo_vision_worker_core-0.3.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nedo_vision_worker_core-0.3.6.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
-nedo_vision_worker_core-0.3.6.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
-nedo_vision_worker_core-0.3.6.dist-info/RECORD,,
+nedo_vision_worker_core-0.3.8.dist-info/METADATA,sha256=uClzBEF9GttUKm4thh8BpzA7VEhVBeZ8M6Yf1bGFl10,14412
+nedo_vision_worker_core-0.3.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nedo_vision_worker_core-0.3.8.dist-info/entry_points.txt,sha256=pIPafsvPnBw-fpBKBmc1NQCQ6PQY3ad8mZ6mn8_p5FI,70
+nedo_vision_worker_core-0.3.8.dist-info/top_level.txt,sha256=y8kusXjVYqtG8MSHYWTrk8bRrvjOrphKXYyzu943TTQ,24
+nedo_vision_worker_core-0.3.8.dist-info/RECORD,,