nedo-vision-worker-core 0.3.3-py3-none-any.whl → 0.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nedo-vision-worker-core might be problematic. Click here for more details.

@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
7
7
  from .core_service import CoreService
8
8
  from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
9
9
 
10
- __version__ = "0.3.3"
10
+ __version__ = "0.3.5"
11
11
  __all__ = [
12
12
  "CoreService",
13
13
  "DetectionType",
@@ -9,6 +9,7 @@ except ImportError:
9
9
 
10
10
  from ..database.DatabaseManager import DatabaseManager
11
11
  from ..models.ai_model import AIModelEntity
12
+ from ..util.PlatformDetector import PlatformDetector
12
13
  from .BaseDetector import BaseDetector
13
14
 
14
15
  logging.getLogger("ultralytics").setLevel(logging.WARNING)
@@ -25,6 +26,8 @@ class RFDETRDetector(BaseDetector):
25
26
  raise TypeError("model must be an instance of AIModelEntity")
26
27
  self.model = None
27
28
  self.metadata = None
29
+ self.device = PlatformDetector.get_device()
30
+ logging.info(f"ℹ️ RFDETRDetector will use '{self.device}' device.")
28
31
 
29
32
  if model:
30
33
  self.load_model(model)
@@ -0,0 +1,139 @@
1
+ import logging
2
+ from typing import Dict, Optional, Set
3
+
4
+ from ..repositories.AIModelRepository import AIModelRepository
5
+ from ..detection.BaseDetector import BaseDetector
6
+ from ..detection.YOLODetector import YOLODetector
7
+ from ..detection.RFDETRDetector import RFDETRDetector
8
+ from ..models.ai_model import AIModelEntity
9
+
10
+
11
+ class ModelManager:
12
+ """Manages loading and caching of AI models to avoid redundant loads."""
13
+
14
+ def __init__(self):
15
+ self._detector_cache: Dict[str, BaseDetector] = {}
16
+ self._model_repo = AIModelRepository()
17
+ logging.info("🤖 ModelManager initialized.")
18
+
19
+ def get_detector(self, model_id: str) -> Optional[BaseDetector]:
20
+ """
21
+ Retrieves a detector by its model ID.
22
+
23
+ This method implements a lazy-loading and cache-validation strategy:
24
+ 1. It fetches the latest model metadata from the database.
25
+ 2. If a detector is already cached, it validates its metadata against the DB version.
26
+ 3. If the cached version is stale (e.g., version or classes changed), it's evicted.
27
+ 4. If no detector is cached or the cache was stale, it loads the detector on-demand.
28
+ """
29
+ if not model_id:
30
+ return None
31
+
32
+ # 1. Fetch the current model state from the database
33
+ db_model: AIModelEntity = self._model_repo.get_model(model_id)
34
+ if not db_model:
35
+ # If the model doesn't exist in DB, ensure it's not in cache either
36
+ if model_id in self._detector_cache:
37
+ logging.info(f"🧹 Removing detector for deleted model {model_id} from cache.")
38
+ del self._detector_cache[model_id]
39
+ return None
40
+
41
+ # 2. Check if a detector is cached
42
+ if model_id in self._detector_cache:
43
+ cached_detector = self._detector_cache[model_id]
44
+
45
+ # 3. Check if the cached version is stale
46
+ if self._has_metadata_changed(cached_detector.metadata, db_model):
47
+ logging.info(f"Reloading detector for model {model_id} due to metadata changes.")
48
+ del self._detector_cache[model_id]
49
+ # Fall through to load the new version
50
+ else:
51
+ # Cache is fresh, return it
52
+ logging.debug(f"🧠 Detector for model {model_id} found in cache and is fresh.")
53
+ return cached_detector
54
+
55
+ # 4. If not cached or was stale, load it now
56
+ return self._load_and_cache_detector(model_id, db_model)
57
+
58
+ def _load_and_cache_detector(self, model_id: str, db_model: AIModelEntity) -> Optional[BaseDetector]:
59
+ """Creates a detector from a DB model entity and caches it."""
60
+ logging.info(f"🔄 Loading model {model_id} (version: {db_model.version}) from database to create detector...")
61
+
62
+ # Check model readiness before attempting to load
63
+ if not db_model.is_ready_for_use():
64
+ if db_model.is_downloading():
65
+ logging.warning(f"⏳ Model {model_id} is still downloading. Skipping detector load.")
66
+ elif db_model.has_download_failed():
67
+ logging.error(f"❌ Model {model_id} download failed: {db_model.download_error}")
68
+ else:
69
+ logging.warning(f"⚠️ Model {model_id} is not ready for use (status: {db_model.download_status})")
70
+ return None
71
+
72
+ detector_type = db_model.type.lower()
73
+ detector: Optional[BaseDetector] = None
74
+ try:
75
+ if detector_type == "yolo":
76
+ detector = YOLODetector(db_model)
77
+ elif detector_type == "rf_detr":
78
+ detector = RFDETRDetector(db_model)
79
+ else:
80
+ raise ValueError(f"Unsupported model type: {detector_type}")
81
+
82
+ if detector and detector.model is not None:
83
+ self._detector_cache[model_id] = detector
84
+ logging.info(f"✅ Detector for model {model_id} loaded and cached successfully.")
85
+ return detector
86
+ else:
87
+ logging.error(f"❌ Failed to load detector for model: {db_model.name}")
88
+ return None
89
+
90
+ except Exception as e:
91
+ logging.error(f"❌ Error creating detector for model {db_model.name}: {e}")
92
+ return None
93
+
94
+ def _has_metadata_changed(self, cached_model: AIModelEntity, db_model: AIModelEntity) -> bool:
95
+ """Check if critical model metadata has changed."""
96
+ if cached_model.version != db_model.version:
97
+ logging.info(
98
+ f"🔄 Model {db_model.id} version changed "
99
+ f"({cached_model.version} -> {db_model.version})."
100
+ )
101
+ return True
102
+
103
+ # Compare classes
104
+ cached_classes = set(cached_model.get_classes() or [])
105
+ db_classes = set(db_model.get_classes() or [])
106
+ if cached_classes != db_classes:
107
+ logging.info(f"🔄 Model {db_model.id} classes changed.")
108
+ return True
109
+
110
+ # Compare PPE class groups
111
+ cached_ppe_groups = cached_model.get_ppe_class_groups() or {}
112
+ db_ppe_groups = db_model.get_ppe_class_groups() or {}
113
+ if cached_ppe_groups != db_ppe_groups:
114
+ logging.info(f"🔄 Model {db_model.id} PPE groups changed.")
115
+ return True
116
+
117
+ # Compare main class
118
+ if cached_model.get_main_class() != db_model.get_main_class():
119
+ logging.info(f"🔄 Model {db_model.id} main class changed.")
120
+ return True
121
+
122
+ return False
123
+
124
+ def sync_cache(self, active_model_ids: Set[str]):
125
+ """
126
+ Removes detectors from the cache if their corresponding models are no longer in the database
127
+ or are not being used by any active pipeline.
128
+ """
129
+ cached_ids = set(self._detector_cache.keys())
130
+ stale_ids = cached_ids - active_model_ids
131
+
132
+ for model_id in stale_ids:
133
+ del self._detector_cache[model_id]
134
+ logging.info(f"🧹 Removed unused detector for model {model_id} from cache.")
135
+
136
+ def clear_cache(self):
137
+ """Clears the detector cache."""
138
+ logging.info("🧹 Clearing all detectors from cache.")
139
+ self._detector_cache.clear()
@@ -19,12 +19,12 @@ class PipelineManager:
19
19
  self._stop_lock = threading.Lock() # Lock for thread-safe pipeline stopping
20
20
  self.on_pipeline_stopped = on_pipeline_stopped
21
21
 
22
- def start_pipeline(self, pipeline, model):
22
+ def start_pipeline(self, pipeline, detector):
23
23
  """
24
24
  Start a pipeline processing.
25
25
  Args:
26
26
  pipeline: The pipeline object (contains id, worker_source_id, name, etc.)
27
- model: The AI model to use for processing.
27
+ detector: The detector instance to use for processing.
28
28
  """
29
29
  pipeline_id = pipeline.id
30
30
  worker_source_id = pipeline.worker_source_id
@@ -39,7 +39,12 @@ class PipelineManager:
39
39
 
40
40
  logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")
41
41
 
42
- processor = PipelineProcessor(pipeline_id, worker_source_id, model, False)
42
+ # Acquire the video stream (starts it if not already running)
43
+ if not self.video_manager.acquire_stream(worker_source_id, pipeline_id):
44
+ logging.error(f"❌ Failed to acquire stream {worker_source_id} for pipeline {pipeline_id}")
45
+ return
46
+
47
+ processor = PipelineProcessor(pipeline, detector, False)
43
48
  processor.frame_drawer.location_name = pipeline.location_name
44
49
  self.processors[pipeline_id] = processor # Store processor instance
45
50
 
@@ -79,6 +84,10 @@ class PipelineManager:
79
84
  self._stopping_pipelines.add(pipeline_id)
80
85
 
81
86
  try:
87
+ # Get worker_source_id before removing metadata
88
+ pipeline = self.pipeline_metadata.get(pipeline_id)
89
+ worker_source_id = pipeline.worker_source_id if pipeline else None
90
+
82
91
  # Stop AI processing
83
92
  processor = self.processors.pop(pipeline_id, None)
84
93
  if processor:
@@ -92,6 +101,10 @@ class PipelineManager:
92
101
  # Remove metadata
93
102
  self.pipeline_metadata.pop(pipeline_id, None)
94
103
 
104
+ # Release the video stream (stops it if no more pipelines use it)
105
+ if worker_source_id:
106
+ self.video_manager.release_stream(worker_source_id, pipeline_id)
107
+
95
108
  logging.info(f"✅ Pipeline {pipeline_id} stopped successfully.")
96
109
 
97
110
  except Exception as e:
@@ -12,18 +12,19 @@ from ..streams.VideoStreamManager import VideoStreamManager
12
12
  from ..ai.VideoDebugger import VideoDebugger
13
13
  from ..ai.FrameDrawer import FrameDrawer
14
14
  from ..tracker.TrackerManager import TrackerManager
15
- from ..detection.DetectionManager import DetectionManager
15
+ from ..detection.BaseDetector import BaseDetector
16
16
  from ..streams.RTMPStreamer import RTMPStreamer
17
17
 
18
18
 
19
19
  class PipelineProcessor:
20
20
  """Handles pipeline processing including preprocessing, AI model inference, tracking, and video stream processing."""
21
21
 
22
- def __init__(self, pipeline_id, worker_source_id, model, enable_visualization=True):
22
+ def __init__(self, pipeline, detector: BaseDetector, enable_visualization=True):
23
+ self._pipeline = pipeline
23
24
  self.running = True
24
25
  self.video_debugger = VideoDebugger(enable_visualization)
25
26
  self.tracker_manager = TrackerManager()
26
- self.detection_manager = DetectionManager(model)
27
+ self.detector = detector
27
28
  self.config_manager = PipelineConfigManager()
28
29
  self.preprocessor = PipelinePrepocessor()
29
30
  self.detection_processor = None
@@ -36,8 +37,8 @@ class PipelineProcessor:
36
37
  self.detection_thread = None
37
38
  self.frame_counter = 0
38
39
  self.frame_drawer = FrameDrawer()
39
- self.pipeline_id = pipeline_id
40
- self.worker_source_id = worker_source_id
40
+ self.pipeline_id = pipeline.id
41
+ self.worker_source_id = pipeline.worker_source_id
41
42
 
42
43
  self.rtmp_streamer = None
43
44
 
@@ -61,11 +62,16 @@ class PipelineProcessor:
61
62
  self.last_hevc_recovery = 0
62
63
  self.hevc_recovery_cooldown = 30.0 # 30 seconds between HEVC recovery attempts
63
64
 
64
- def load_model(self, model):
65
- logging.info(f"🔄 Loading new model for pipeline {self.pipeline_id}: {getattr(model, 'name', None) or 'Unknown'}")
66
- self.detection_manager.load_model(model)
65
+ def update_config(self, pipeline):
66
+ """Updates the pipeline configuration."""
67
+ self._pipeline = pipeline
68
+ self._update_config_internal()
69
+
70
+ def load_detector(self, detector: BaseDetector):
71
+ logging.info(f"🔄 Loading new detector for pipeline {self.pipeline_id}")
72
+ self.detector = detector
67
73
  self._update_detection_processor()
68
- logging.info(f"✅ Model updated for pipeline {self.pipeline_id}")
74
+ logging.info(f"✅ Detector updated for pipeline {self.pipeline_id}")
69
75
 
70
76
  def _get_detection_processor_code(self):
71
77
  for code in self.detection_processor_codes:
@@ -107,7 +113,7 @@ class PipelineProcessor:
107
113
  self.frame_drawer.update_config()
108
114
  self.tracker_manager.update_config([], [], [])
109
115
 
110
- def _update_config(self):
116
+ def _update_config_internal(self):
111
117
  self.config_manager.update(self.pipeline_id)
112
118
  self.preprocessor.update(self.config_manager)
113
119
  self.detection_interval = self._get_detection_interval()
@@ -117,7 +123,7 @@ class PipelineProcessor:
117
123
  self.consecutive_frame_failures = 0
118
124
  self.last_successful_frame_time = time.time()
119
125
 
120
- ai_model = self.detection_manager.model_metadata
126
+ ai_model = self.detector.metadata if self.detector else None
121
127
  if self.detection_processor:
122
128
  config = self.config_manager.get_feature_config(self.detection_processor.code)
123
129
  self.detection_processor.update(self.config_manager, ai_model)
@@ -133,7 +139,7 @@ class PipelineProcessor:
133
139
  worker_source_id = self.worker_source_id
134
140
  logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
135
141
 
136
- self._update_config()
142
+ self._update_config_internal()
137
143
  self.consecutive_frame_failures = 0
138
144
  self.last_successful_frame_time = time.time()
139
145
 
@@ -243,7 +249,7 @@ class PipelineProcessor:
243
249
  processed_frame = self.preprocessor.apply(frame)
244
250
 
245
251
  class_thresholds = {}
246
- ai_model = self.detection_manager.model_metadata
252
+ ai_model = self.detector.metadata if self.detector else None
247
253
 
248
254
  if self.detection_processor:
249
255
  if self.detection_processor.code == PPEDetectionProcessor.code:
@@ -253,7 +259,10 @@ class PipelineProcessor:
253
259
  if main_threshold and ai_model and ai_model.get_main_class():
254
260
  class_thresholds[ai_model.get_main_class()] = main_threshold
255
261
 
256
- detections = self.detection_manager.detect_objects(processed_frame, self.threshold, class_thresholds)
262
+ detections = []
263
+ if self.detector:
264
+ detections = self.detector.detect_objects(processed_frame, self.threshold, class_thresholds)
265
+
257
266
  detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
258
267
 
259
268
  if self.detection_processor:
@@ -277,9 +286,8 @@ class PipelineProcessor:
277
286
 
278
287
  # Update config periodically
279
288
  if (current_time - last_config_update_time) >= config_update_interval:
280
- self._update_config()
289
+ self._update_config_internal()
281
290
  last_config_update_time = current_time
282
- logging.info(f"🔄 Updated pipeline config for {pipeline_id}")
283
291
 
284
292
  # Keep only the latest frame if we fell behind
285
293
  try:
@@ -324,25 +332,24 @@ class PipelineProcessor:
324
332
  except Exception as e:
325
333
  logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
326
334
 
327
- def _wait_for_frame(self, video_manager, max_retries=10, sleep_time=3):
335
+ def _wait_for_frame(self, video_manager, max_wait_time=30.0):
328
336
  logging.info(f"⏳ Waiting for initial frame from {self.worker_source_id}...")
329
- for retry_count in range(max_retries):
337
+
338
+ is_ready = video_manager.wait_for_stream_ready(self.worker_source_id, timeout=max_wait_time)
339
+
340
+ if is_ready:
330
341
  frame = video_manager.get_frame(self.worker_source_id)
331
342
  if frame is not None:
332
343
  logging.info(f"✅ Initial frame received from {self.worker_source_id}")
333
344
  return frame
334
-
335
- if not video_manager.has_stream(self.worker_source_id):
336
- logging.error(f"❌ Stream {self.worker_source_id} not found in video manager")
337
- return None
338
-
339
- logging.warning(f"⚠️ Waiting for video stream {self.worker_source_id} (Attempt {retry_count + 1}/{max_retries})...")
340
- if retry_count >= 3:
345
+ else:
346
+ logging.error(f"❌ Stream {self.worker_source_id} reported ready, but the first frame could not be retrieved.")
341
347
  self._log_stream_diagnostics(video_manager, self.worker_source_id)
342
- time.sleep(sleep_time)
343
-
344
- logging.error(f"❌ Failed to get initial frame from {self.worker_source_id} after {max_retries} attempts")
345
- return None
348
+ return None
349
+ else:
350
+ logging.error(f"❌ Timed out after {max_wait_time}s waiting for first frame from {self.worker_source_id}.")
351
+ self._log_stream_diagnostics(video_manager, self.worker_source_id)
352
+ return None
346
353
 
347
354
  def _handle_frame_failure(self, video_manager, worker_source_id):
348
355
  """Handle frame retrieval failures with progressive backoff and recovery attempts."""
@@ -452,9 +459,10 @@ class PipelineProcessor:
452
459
  logging.error(f" Cannot get stream URL for {worker_source_id}")
453
460
  return False
454
461
 
455
- video_manager.remove_stream(worker_source_id)
462
+ # Use internal methods to restart the stream without affecting reference counting
463
+ video_manager._stop_stream(worker_source_id)
456
464
  time.sleep(1.0)
457
- video_manager.add_stream(worker_source_id, stream_url)
465
+ video_manager._start_stream(worker_source_id, stream_url)
458
466
  time.sleep(2.0)
459
467
 
460
468
  if not video_manager.has_stream(worker_source_id):
@@ -2,13 +2,12 @@ import json
2
2
  import logging
3
3
  import time
4
4
  import threading
5
- from typing import Dict, Set, Optional, Callable
6
- from ..repositories.AIModelRepository import AIModelRepository
5
+ from typing import Dict, Set, Optional
7
6
  from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
8
7
  from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
9
8
  from .PipelineManager import PipelineManager
9
+ from .ModelManager import ModelManager
10
10
  from ..streams.VideoStreamManager import VideoStreamManager
11
- from ..util.ModelReadinessChecker import ModelReadinessChecker
12
11
 
13
12
 
14
13
  class PipelineSyncThread(threading.Thread):
@@ -20,7 +19,7 @@ class PipelineSyncThread(threading.Thread):
20
19
  self.polling_interval = polling_interval
21
20
  self.pipeline_repo = WorkerSourcePipelineRepository()
22
21
  self.debug_repo = WorkerSourcePipelineDebugRepository()
23
- self.ai_model_repo = AIModelRepository()
22
+ self.model_manager = ModelManager()
24
23
  self.running = True
25
24
  self.pipeline_manager = PipelineManager(video_manager, self.on_pipeline_stopped, max_workers)
26
25
 
@@ -47,8 +46,6 @@ class PipelineSyncThread(threading.Thread):
47
46
  """Continuously updates pipelines based on database changes."""
48
47
  while self.running:
49
48
  try:
50
- # Cache model and pipeline data
51
- models = {m.id: m for m in self.ai_model_repo.get_models()}
52
49
  db_pipelines = {p.id: p for p in self.pipeline_repo.get_all_pipelines()}
53
50
 
54
51
  # Get pipeline IDs for comparison
@@ -57,25 +54,25 @@ class PipelineSyncThread(threading.Thread):
57
54
 
58
55
  restarted_pipeline = False
59
56
 
60
- # Helper function for model lookup
61
- def get_model(pipeline):
62
- return models.get(pipeline.ai_model_id)
63
-
64
57
  # Process pipeline changes
65
- self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines, get_model, restarted_pipeline)
58
+ self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines, restarted_pipeline)
66
59
  self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
67
- self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines, get_model)
60
+ self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines)
68
61
 
69
62
  if restarted_pipeline:
70
63
  self.pipeline_repo.session.commit()
71
64
 
65
+ # Sync the cache to remove unused detectors
66
+ active_model_ids = {p.ai_model_id for p in db_pipelines.values() if p.pipeline_status_code == 'run'}
67
+ self.model_manager.sync_cache(active_model_ids)
68
+
72
69
  except Exception as e:
73
70
  logging.error(f"⚠️ Error syncing pipelines from database: {e}", exc_info=True)
74
71
 
75
72
  time.sleep(self.polling_interval)
76
73
 
77
74
  def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object],
78
- get_model: Callable, restarted_pipeline: bool) -> None:
75
+ restarted_pipeline: bool) -> None:
79
76
  """Add new pipelines that exist in DB but not locally."""
80
77
  for pid in pipeline_ids:
81
78
  pipeline = db_pipelines[pid]
@@ -85,17 +82,14 @@ class PipelineSyncThread(threading.Thread):
85
82
  restarted_pipeline = True
86
83
 
87
84
  if pipeline.pipeline_status_code == 'run':
88
- model = get_model(pipeline)
85
+ detector = self.model_manager.get_detector(pipeline.ai_model_id)
89
86
 
90
- # Check if model is ready before starting pipeline
91
- if model:
92
- readiness = ModelReadinessChecker.check_model_readiness(model)
93
- if not readiness["ready"]:
94
- logging.warning(f"⚠️ Pipeline {pid} ({pipeline.name}): {readiness['reason']}. Skipping pipeline start.")
95
- continue
87
+ if not detector and pipeline.ai_model_id:
88
+ logging.warning(f"⚠️ Could not load detector for pipeline {pid} ({pipeline.name}). Skipping.")
89
+ continue
96
90
 
97
91
  logging.info(f"🟢 Adding new pipeline: {pid} ({pipeline.name})")
98
- self.pipeline_manager.start_pipeline(pipeline, model)
92
+ self.pipeline_manager.start_pipeline(pipeline, detector)
99
93
 
100
94
  def _remove_deleted_pipelines(self, pipeline_ids: Set[str]) -> None:
101
95
  """Remove pipelines that exist locally but not in DB."""
@@ -103,109 +97,64 @@ class PipelineSyncThread(threading.Thread):
103
97
  logging.info(f"🔴 Removing deleted pipeline: {pid}")
104
98
  self.pipeline_manager.stop_pipeline(pid)
105
99
 
106
- def _update_existing_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object],
107
- get_model: Callable) -> None:
100
+ def _update_existing_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> None:
108
101
  """Update existing pipelines that need changes."""
109
102
  debug_pipeline_ids = self.debug_repo.get_pipeline_ids_to_debug()
110
103
 
111
104
  for pid in pipeline_ids:
112
105
  db_pipeline = db_pipelines[pid]
113
- local_pipeline = self.pipeline_manager.get_pipeline(pid)
114
- processor = self.pipeline_manager.processors[pid]
115
- local_proc = processor.detection_manager
116
- db_model = get_model(db_pipeline)
117
-
118
- self.update_pipeline(pid, db_pipeline, local_pipeline, local_proc, db_model, local_proc.model_metadata)
106
+
107
+ # Check if pipeline should be stopped (status changed to stop/restart in DB)
108
+ if db_pipeline.pipeline_status_code in ['stop', 'restart']:
109
+ if self.pipeline_manager.is_running(pid):
110
+ logging.info(f"⏹️ Stopping pipeline due to status change: {pid}")
111
+ self.pipeline_manager.stop_pipeline(pid)
112
+ continue
113
+
114
+ processor = self.pipeline_manager.processors.get(pid)
115
+ if not processor:
116
+ # Pipeline exists in both sets but processor doesn't exist - shouldn't happen
117
+ # but if it does, try to start it if status is 'run'
118
+ if db_pipeline.pipeline_status_code == 'run':
119
+ logging.warning(f"⚠️ Pipeline {pid} exists locally but has no processor. Restarting...")
120
+ detector = self.model_manager.get_detector(db_pipeline.ai_model_id)
121
+ self.pipeline_manager.start_pipeline(db_pipeline, detector)
122
+ continue
123
+
124
+ local_detector = processor.detector
125
+
126
+ self.update_pipeline(pid, db_pipeline, local_detector)
119
127
  if pid in debug_pipeline_ids:
120
128
  processor.enable_debug()
121
129
 
122
- def update_pipeline(self, pid, db_pipeline, local_pipeline, local_proc, db_model, local_model):
123
- """Handles pipeline updates, ensuring correct model and status."""
130
+ def update_pipeline(self, pid: str, db_pipeline: object, local_detector: object) -> None:
131
+ """Updates a single pipeline if necessary (only called for running pipelines)."""
124
132
  processor = self.pipeline_manager.processors.get(pid)
125
- processor.frame_drawer.location_name = db_pipeline.location_name
133
+ if not processor:
134
+ return
126
135
 
127
- # Case 1: Pipeline should be running but isn't
128
- if db_pipeline.pipeline_status_code == "run" and not self.pipeline_manager.is_running(pid):
129
- logging.info(f"🟢 Starting pipeline {pid}: {db_pipeline.name} (status: RUNNING)")
130
- self.pipeline_manager.start_pipeline(db_pipeline, db_model)
136
+ # At this point, we know db_pipeline.pipeline_status_code == 'run' (checked in caller)
137
+ # Check for significant changes that require a restart
138
+ db_detector = self.model_manager.get_detector(db_pipeline.ai_model_id)
131
139
 
132
- # Case 2: Pipeline should be stopped but is running
133
- elif db_pipeline.pipeline_status_code == "stop" and self.pipeline_manager.is_running(pid):
134
- logging.info(f"🔴 Stopping pipeline {pid}: {db_pipeline.name} (status: STOPPED)")
140
+ requires_restart = any([
141
+ db_pipeline.ai_model_id != processor._pipeline.ai_model_id,
142
+ db_pipeline.worker_source_id != processor._pipeline.worker_source_id,
143
+ local_detector != db_detector
144
+ ])
145
+
146
+ if requires_restart:
147
+ logging.info(f"🔄 Restarting pipeline due to significant changes: {pid}")
135
148
  self.pipeline_manager.stop_pipeline(pid)
149
+ self.pipeline_manager.start_pipeline(db_pipeline, db_detector)
150
+ else:
151
+ # Update config for minor changes that don't require restart
152
+ processor.update_config(db_pipeline)
136
153
 
137
- # Case 3: Pipeline configuration has changed, needs restart
138
- elif self._has_pipeline_changed(local_pipeline, db_pipeline):
139
- logging.info(f"🟡 Updating pipeline {pid}: {db_pipeline.name} (status: RESTARTING)")
140
- if self.pipeline_manager.is_running(pid):
141
- self.pipeline_manager.stop_pipeline(pid)
142
- self.pipeline_manager.start_pipeline(db_pipeline, db_model)
143
-
144
- # Case 4: AI Model has changed
145
- elif local_model and db_model and local_model.id != db_model.id:
146
- if db_model:
147
- readiness = ModelReadinessChecker.check_model_readiness(db_model)
148
- if readiness["ready"]:
149
- local_proc.load_model(db_model)
150
- logging.info(f"🔄 Model updated for pipeline {pid}: {db_pipeline.name} "
151
- f"(version: {db_model.version if db_model else 'removed'})")
152
- else:
153
- logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")
154
-
155
- # Case 5: Local model exists but doesn't match DB model
156
- elif local_model and (not db_model or local_model.version != db_model.version):
157
- if not db_model or ModelReadinessChecker.check_model_readiness(db_model)["ready"]:
158
- local_proc.load_model(db_model)
159
- logging.info(f"🔄 Model updated for pipeline {pid}: {db_pipeline.name} "
160
- f"(version: {db_model.version if db_model else 'removed'})")
161
- else:
162
- readiness = ModelReadinessChecker.check_model_readiness(db_model)
163
- logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")
164
-
165
- # Case 6: DB model exists but local model doesn't
166
- elif db_model and not local_model:
167
- readiness = ModelReadinessChecker.check_model_readiness(db_model)
168
- if readiness["ready"]:
169
- logging.info(f"🔄 Added model for pipeline {pid}: {db_pipeline.name} (version: {db_model.version})")
170
- local_proc.load_model(db_model)
171
- else:
172
- logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")
173
-
174
- # Case 7: Model metadata has changed (same ID and version, but different properties)
175
- elif local_model and db_model and local_model.id == db_model.id and local_model.version == db_model.version:
176
- # Check if model metadata (classes, PPE groups, main_class) has changed
177
- if self._has_model_metadata_changed(local_model, db_model):
178
- readiness = ModelReadinessChecker.check_model_readiness(db_model)
179
- if readiness["ready"]:
180
- local_proc.load_model(db_model)
181
- logging.info(f"🔄 Model metadata updated for pipeline {pid}: {db_pipeline.name} "
182
- f"(same version {db_model.version}, updated properties)")
183
- else:
184
- logging.warning(f"⚠️ Pipeline {pid}: {readiness['reason']}")
185
-
186
- def _has_model_metadata_changed(self, local_model, db_model):
187
- """Check if model metadata has changed without version change."""
188
- # Compare classes
189
- local_classes = set(local_model.get_classes() or [])
190
- db_classes = set(db_model.get_classes() or [])
191
- if local_classes != db_classes:
192
- return True
193
-
194
- # Compare PPE class groups
195
- local_ppe_groups = local_model.get_ppe_class_groups() or {}
196
- db_ppe_groups = db_model.get_ppe_class_groups() or {}
197
- if local_ppe_groups != db_ppe_groups:
198
- return True
199
-
200
- # Compare main class
201
- if local_model.get_main_class() != db_model.get_main_class():
202
- return True
203
-
204
- return False
205
154
 
206
155
  def _has_pipeline_changed(self, local_pipeline, db_pipeline):
207
156
  """Checks if the pipeline configuration has changed."""
208
- if db_pipeline.pipeline_status_code == "restart":
157
+ if not local_pipeline or db_pipeline.pipeline_status_code == "restart":
209
158
  return True
210
159
 
211
160
  local_configs = local_pipeline.worker_source_pipeline_configs
@@ -28,4 +28,24 @@ class AIModelRepository:
28
28
  return models
29
29
  except SQLAlchemyError as e:
30
30
  logging.error(f"Error retrieving models: {e}")
31
- return []
31
+ return []
32
+
33
+ def get_model(self, model_id: str) -> AIModelEntity | None:
34
+ """
35
+ Retrieves a single AI model by its ID.
36
+
37
+ Args:
38
+ model_id: The ID of the model to retrieve.
39
+
40
+ Returns:
41
+ An AIModelEntity object or None if not found.
42
+ """
43
+ try:
44
+ self.session.expire_all()
45
+ model = self.session.query(AIModelEntity).filter_by(id=model_id).first()
46
+ if model:
47
+ self.session.expunge(model)
48
+ return model
49
+ except SQLAlchemyError as e:
50
+ logging.error(f"Error retrieving model {model_id}: {e}")
51
+ return None