nedo-vision-worker-core 0.3.2__tar.gz → 0.3.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nedo-vision-worker-core might be problematic. Click here for more details.
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/PKG-INFO +1 -1
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/__init__.py +1 -1
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/RFDETRDetector.py +3 -0
- nedo_vision_worker_core-0.3.4/nedo_vision_worker_core/pipeline/ModelManager.py +139 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/pipeline/PipelineManager.py +3 -3
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/pipeline/PipelineProcessor.py +36 -29
- nedo_vision_worker_core-0.3.4/nedo_vision_worker_core/pipeline/PipelineSyncThread.py +186 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/AIModelRepository.py +21 -1
- nedo_vision_worker_core-0.3.4/nedo_vision_worker_core/streams/RTMPStreamer.py +229 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/streams/SharedVideoDeviceManager.py +5 -1
- nedo_vision_worker_core-0.3.4/nedo_vision_worker_core/streams/VideoStream.py +361 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/streams/VideoStreamManager.py +14 -18
- nedo_vision_worker_core-0.3.4/nedo_vision_worker_core/util/PlatformDetector.py +100 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core.egg-info/PKG-INFO +1 -1
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core.egg-info/SOURCES.txt +2 -1
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/requirements.txt +1 -1
- nedo_vision_worker_core-0.3.2/nedo_vision_worker_core/detection/DetectionManager.py +0 -83
- nedo_vision_worker_core-0.3.2/nedo_vision_worker_core/pipeline/PipelineSyncThread.py +0 -234
- nedo_vision_worker_core-0.3.2/nedo_vision_worker_core/streams/RTMPStreamer.py +0 -284
- nedo_vision_worker_core-0.3.2/nedo_vision_worker_core/streams/VideoStream.py +0 -422
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/MANIFEST.in +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/README.md +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/ai/FrameDrawer.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/ai/ImageDebugger.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/ai/VideoDebugger.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/ai/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/callbacks/DetectionCallbackManager.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/callbacks/DetectionCallbackTypes.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/callbacks/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/cli.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/config/ConfigurationManager.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/config/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/core_service.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/database/DatabaseManager.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/database/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/BaseDetector.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/YOLODetector.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/detection/detection_processing/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/doctor.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/boots-green.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/boots-red.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/gloves-green.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/gloves-red.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/goggles-green.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/goggles-red.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/helmet-green.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/helmet-red.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/mask-red.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/vest-green.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/icons/vest-red.png +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/ai_model.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/auth.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/config.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/dataset_source.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/logs.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/ppe_detection.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/ppe_detection_label.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/restricted_area_violation.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/user.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/worker_source.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/worker_source_pipeline.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/worker_source_pipeline_config.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/worker_source_pipeline_debug.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/models/worker_source_pipeline_detection.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/pipeline/PipelineConfigManager.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/pipeline/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/preprocessing/ImageResizer.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/preprocessing/ImageRoi.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/preprocessing/Preprocessor.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/preprocessing/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/PPEDetectionRepository.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/WorkerSourceRepository.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/repositories/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/services/SharedVideoStreamServer.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/services/VideoSharingDaemon.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/services/VideoSharingDaemonManager.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/streams/StreamSyncThread.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/streams/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/tracker/SFSORT.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/tracker/TrackerManager.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/tracker/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/BoundingBoxMetrics.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/DrawingUtils.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/ModelReadinessChecker.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/PersonAttributeMatcher.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/TablePrinter.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/util/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core.egg-info/dependency_links.txt +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core.egg-info/entry_points.txt +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core.egg-info/requires.txt +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core.egg-info/top_level.txt +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/pyproject.toml +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/setup.cfg +0 -0
- {nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/setup.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: nedo-vision-worker-core
|
|
3
|
-
Version: 0.3.
|
|
3
|
+
Version: 0.3.4
|
|
4
4
|
Summary: Nedo Vision Worker Core Library for AI Vision Processing
|
|
5
5
|
Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
|
|
6
6
|
Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
|
{nedo_vision_worker_core-0.3.2 → nedo_vision_worker_core-0.3.4}/nedo_vision_worker_core/__init__.py
RENAMED
|
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
|
|
|
7
7
|
from .core_service import CoreService
|
|
8
8
|
from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
|
|
9
9
|
|
|
10
|
-
__version__ = "0.3.
|
|
10
|
+
__version__ = "0.3.4"
|
|
11
11
|
__all__ = [
|
|
12
12
|
"CoreService",
|
|
13
13
|
"DetectionType",
|
|
@@ -9,6 +9,7 @@ except ImportError:
|
|
|
9
9
|
|
|
10
10
|
from ..database.DatabaseManager import DatabaseManager
|
|
11
11
|
from ..models.ai_model import AIModelEntity
|
|
12
|
+
from ..util.PlatformDetector import PlatformDetector
|
|
12
13
|
from .BaseDetector import BaseDetector
|
|
13
14
|
|
|
14
15
|
logging.getLogger("ultralytics").setLevel(logging.WARNING)
|
|
@@ -25,6 +26,8 @@ class RFDETRDetector(BaseDetector):
|
|
|
25
26
|
raise TypeError("model must be an instance of AIModelEntity")
|
|
26
27
|
self.model = None
|
|
27
28
|
self.metadata = None
|
|
29
|
+
self.device = PlatformDetector.get_device()
|
|
30
|
+
logging.info(f"ℹ️ RFDETRDetector will use '{self.device}' device.")
|
|
28
31
|
|
|
29
32
|
if model:
|
|
30
33
|
self.load_model(model)
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Dict, Optional, Set
|
|
3
|
+
|
|
4
|
+
from ..repositories.AIModelRepository import AIModelRepository
|
|
5
|
+
from ..detection.BaseDetector import BaseDetector
|
|
6
|
+
from ..detection.YOLODetector import YOLODetector
|
|
7
|
+
from ..detection.RFDETRDetector import RFDETRDetector
|
|
8
|
+
from ..models.ai_model import AIModelEntity
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ModelManager:
|
|
12
|
+
"""Manages loading and caching of AI models to avoid redundant loads."""
|
|
13
|
+
|
|
14
|
+
def __init__(self):
|
|
15
|
+
self._detector_cache: Dict[str, BaseDetector] = {}
|
|
16
|
+
self._model_repo = AIModelRepository()
|
|
17
|
+
logging.info("🤖 ModelManager initialized.")
|
|
18
|
+
|
|
19
|
+
def get_detector(self, model_id: str) -> Optional[BaseDetector]:
|
|
20
|
+
"""
|
|
21
|
+
Retrieves a detector by its model ID.
|
|
22
|
+
|
|
23
|
+
This method implements a lazy-loading and cache-validation strategy:
|
|
24
|
+
1. It fetches the latest model metadata from the database.
|
|
25
|
+
2. If a detector is already cached, it validates its metadata against the DB version.
|
|
26
|
+
3. If the cached version is stale (e.g., version or classes changed), it's evicted.
|
|
27
|
+
4. If no detector is cached or the cache was stale, it loads the detector on-demand.
|
|
28
|
+
"""
|
|
29
|
+
if not model_id:
|
|
30
|
+
return None
|
|
31
|
+
|
|
32
|
+
# 1. Fetch the current model state from the database
|
|
33
|
+
db_model: AIModelEntity = self._model_repo.get_model(model_id)
|
|
34
|
+
if not db_model:
|
|
35
|
+
# If the model doesn't exist in DB, ensure it's not in cache either
|
|
36
|
+
if model_id in self._detector_cache:
|
|
37
|
+
logging.info(f"🧹 Removing detector for deleted model {model_id} from cache.")
|
|
38
|
+
del self._detector_cache[model_id]
|
|
39
|
+
return None
|
|
40
|
+
|
|
41
|
+
# 2. Check if a detector is cached
|
|
42
|
+
if model_id in self._detector_cache:
|
|
43
|
+
cached_detector = self._detector_cache[model_id]
|
|
44
|
+
|
|
45
|
+
# 3. Check if the cached version is stale
|
|
46
|
+
if self._has_metadata_changed(cached_detector.metadata, db_model):
|
|
47
|
+
logging.info(f"Reloading detector for model {model_id} due to metadata changes.")
|
|
48
|
+
del self._detector_cache[model_id]
|
|
49
|
+
# Fall through to load the new version
|
|
50
|
+
else:
|
|
51
|
+
# Cache is fresh, return it
|
|
52
|
+
logging.debug(f"🧠 Detector for model {model_id} found in cache and is fresh.")
|
|
53
|
+
return cached_detector
|
|
54
|
+
|
|
55
|
+
# 4. If not cached or was stale, load it now
|
|
56
|
+
return self._load_and_cache_detector(model_id, db_model)
|
|
57
|
+
|
|
58
|
+
def _load_and_cache_detector(self, model_id: str, db_model: AIModelEntity) -> Optional[BaseDetector]:
|
|
59
|
+
"""Creates a detector from a DB model entity and caches it."""
|
|
60
|
+
logging.info(f"🔄 Loading model {model_id} (version: {db_model.version}) from database to create detector...")
|
|
61
|
+
|
|
62
|
+
# Check model readiness before attempting to load
|
|
63
|
+
if not db_model.is_ready_for_use():
|
|
64
|
+
if db_model.is_downloading():
|
|
65
|
+
logging.warning(f"⏳ Model {model_id} is still downloading. Skipping detector load.")
|
|
66
|
+
elif db_model.has_download_failed():
|
|
67
|
+
logging.error(f"❌ Model {model_id} download failed: {db_model.download_error}")
|
|
68
|
+
else:
|
|
69
|
+
logging.warning(f"⚠️ Model {model_id} is not ready for use (status: {db_model.download_status})")
|
|
70
|
+
return None
|
|
71
|
+
|
|
72
|
+
detector_type = db_model.type.lower()
|
|
73
|
+
detector: Optional[BaseDetector] = None
|
|
74
|
+
try:
|
|
75
|
+
if detector_type == "yolo":
|
|
76
|
+
detector = YOLODetector(db_model)
|
|
77
|
+
elif detector_type == "rf_detr":
|
|
78
|
+
detector = RFDETRDetector(db_model)
|
|
79
|
+
else:
|
|
80
|
+
raise ValueError(f"Unsupported model type: {detector_type}")
|
|
81
|
+
|
|
82
|
+
if detector and detector.model is not None:
|
|
83
|
+
self._detector_cache[model_id] = detector
|
|
84
|
+
logging.info(f"✅ Detector for model {model_id} loaded and cached successfully.")
|
|
85
|
+
return detector
|
|
86
|
+
else:
|
|
87
|
+
logging.error(f"❌ Failed to load detector for model: {db_model.name}")
|
|
88
|
+
return None
|
|
89
|
+
|
|
90
|
+
except Exception as e:
|
|
91
|
+
logging.error(f"❌ Error creating detector for model {db_model.name}: {e}")
|
|
92
|
+
return None
|
|
93
|
+
|
|
94
|
+
def _has_metadata_changed(self, cached_model: AIModelEntity, db_model: AIModelEntity) -> bool:
|
|
95
|
+
"""Check if critical model metadata has changed."""
|
|
96
|
+
if cached_model.version != db_model.version:
|
|
97
|
+
logging.info(
|
|
98
|
+
f"🔄 Model {db_model.id} version changed "
|
|
99
|
+
f"({cached_model.version} -> {db_model.version})."
|
|
100
|
+
)
|
|
101
|
+
return True
|
|
102
|
+
|
|
103
|
+
# Compare classes
|
|
104
|
+
cached_classes = set(cached_model.get_classes() or [])
|
|
105
|
+
db_classes = set(db_model.get_classes() or [])
|
|
106
|
+
if cached_classes != db_classes:
|
|
107
|
+
logging.info(f"🔄 Model {db_model.id} classes changed.")
|
|
108
|
+
return True
|
|
109
|
+
|
|
110
|
+
# Compare PPE class groups
|
|
111
|
+
cached_ppe_groups = cached_model.get_ppe_class_groups() or {}
|
|
112
|
+
db_ppe_groups = db_model.get_ppe_class_groups() or {}
|
|
113
|
+
if cached_ppe_groups != db_ppe_groups:
|
|
114
|
+
logging.info(f"🔄 Model {db_model.id} PPE groups changed.")
|
|
115
|
+
return True
|
|
116
|
+
|
|
117
|
+
# Compare main class
|
|
118
|
+
if cached_model.get_main_class() != db_model.get_main_class():
|
|
119
|
+
logging.info(f"🔄 Model {db_model.id} main class changed.")
|
|
120
|
+
return True
|
|
121
|
+
|
|
122
|
+
return False
|
|
123
|
+
|
|
124
|
+
def sync_cache(self, active_model_ids: Set[str]):
|
|
125
|
+
"""
|
|
126
|
+
Removes detectors from the cache if their corresponding models are no longer in the database
|
|
127
|
+
or are not being used by any active pipeline.
|
|
128
|
+
"""
|
|
129
|
+
cached_ids = set(self._detector_cache.keys())
|
|
130
|
+
stale_ids = cached_ids - active_model_ids
|
|
131
|
+
|
|
132
|
+
for model_id in stale_ids:
|
|
133
|
+
del self._detector_cache[model_id]
|
|
134
|
+
logging.info(f"🧹 Removed unused detector for model {model_id} from cache.")
|
|
135
|
+
|
|
136
|
+
def clear_cache(self):
|
|
137
|
+
"""Clears the detector cache."""
|
|
138
|
+
logging.info("🧹 Clearing all detectors from cache.")
|
|
139
|
+
self._detector_cache.clear()
|
|
@@ -19,12 +19,12 @@ class PipelineManager:
|
|
|
19
19
|
self._stop_lock = threading.Lock() # Lock for thread-safe pipeline stopping
|
|
20
20
|
self.on_pipeline_stopped = on_pipeline_stopped
|
|
21
21
|
|
|
22
|
-
def start_pipeline(self, pipeline,
|
|
22
|
+
def start_pipeline(self, pipeline, detector):
|
|
23
23
|
"""
|
|
24
24
|
Start a pipeline processing.
|
|
25
25
|
Args:
|
|
26
26
|
pipeline: The pipeline object (contains id, worker_source_id, name, etc.)
|
|
27
|
-
|
|
27
|
+
detector: The detector instance to use for processing.
|
|
28
28
|
"""
|
|
29
29
|
pipeline_id = pipeline.id
|
|
30
30
|
worker_source_id = pipeline.worker_source_id
|
|
@@ -39,7 +39,7 @@ class PipelineManager:
|
|
|
39
39
|
|
|
40
40
|
logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")
|
|
41
41
|
|
|
42
|
-
processor = PipelineProcessor(
|
|
42
|
+
processor = PipelineProcessor(pipeline, detector, False)
|
|
43
43
|
processor.frame_drawer.location_name = pipeline.location_name
|
|
44
44
|
self.processors[pipeline_id] = processor # Store processor instance
|
|
45
45
|
|
|
@@ -12,18 +12,19 @@ from ..streams.VideoStreamManager import VideoStreamManager
|
|
|
12
12
|
from ..ai.VideoDebugger import VideoDebugger
|
|
13
13
|
from ..ai.FrameDrawer import FrameDrawer
|
|
14
14
|
from ..tracker.TrackerManager import TrackerManager
|
|
15
|
-
from ..detection.
|
|
15
|
+
from ..detection.BaseDetector import BaseDetector
|
|
16
16
|
from ..streams.RTMPStreamer import RTMPStreamer
|
|
17
17
|
|
|
18
18
|
|
|
19
19
|
class PipelineProcessor:
|
|
20
20
|
"""Handles pipeline processing including preprocessing, AI model inference, tracking, and video stream processing."""
|
|
21
21
|
|
|
22
|
-
def __init__(self,
|
|
22
|
+
def __init__(self, pipeline, detector: BaseDetector, enable_visualization=True):
|
|
23
|
+
self._pipeline = pipeline
|
|
23
24
|
self.running = True
|
|
24
25
|
self.video_debugger = VideoDebugger(enable_visualization)
|
|
25
26
|
self.tracker_manager = TrackerManager()
|
|
26
|
-
self.
|
|
27
|
+
self.detector = detector
|
|
27
28
|
self.config_manager = PipelineConfigManager()
|
|
28
29
|
self.preprocessor = PipelinePrepocessor()
|
|
29
30
|
self.detection_processor = None
|
|
@@ -36,8 +37,8 @@ class PipelineProcessor:
|
|
|
36
37
|
self.detection_thread = None
|
|
37
38
|
self.frame_counter = 0
|
|
38
39
|
self.frame_drawer = FrameDrawer()
|
|
39
|
-
self.pipeline_id =
|
|
40
|
-
self.worker_source_id = worker_source_id
|
|
40
|
+
self.pipeline_id = pipeline.id
|
|
41
|
+
self.worker_source_id = pipeline.worker_source_id
|
|
41
42
|
|
|
42
43
|
self.rtmp_streamer = None
|
|
43
44
|
|
|
@@ -61,11 +62,16 @@ class PipelineProcessor:
|
|
|
61
62
|
self.last_hevc_recovery = 0
|
|
62
63
|
self.hevc_recovery_cooldown = 30.0 # 30 seconds between HEVC recovery attempts
|
|
63
64
|
|
|
64
|
-
def
|
|
65
|
-
|
|
66
|
-
self.
|
|
65
|
+
def update_config(self, pipeline):
|
|
66
|
+
"""Updates the pipeline configuration."""
|
|
67
|
+
self._pipeline = pipeline
|
|
68
|
+
self._update_config_internal()
|
|
69
|
+
|
|
70
|
+
def load_detector(self, detector: BaseDetector):
|
|
71
|
+
logging.info(f"🔄 Loading new detector for pipeline {self.pipeline_id}")
|
|
72
|
+
self.detector = detector
|
|
67
73
|
self._update_detection_processor()
|
|
68
|
-
logging.info(f"✅
|
|
74
|
+
logging.info(f"✅ Detector updated for pipeline {self.pipeline_id}")
|
|
69
75
|
|
|
70
76
|
def _get_detection_processor_code(self):
|
|
71
77
|
for code in self.detection_processor_codes:
|
|
@@ -107,7 +113,7 @@ class PipelineProcessor:
|
|
|
107
113
|
self.frame_drawer.update_config()
|
|
108
114
|
self.tracker_manager.update_config([], [], [])
|
|
109
115
|
|
|
110
|
-
def
|
|
116
|
+
def _update_config_internal(self):
|
|
111
117
|
self.config_manager.update(self.pipeline_id)
|
|
112
118
|
self.preprocessor.update(self.config_manager)
|
|
113
119
|
self.detection_interval = self._get_detection_interval()
|
|
@@ -117,7 +123,7 @@ class PipelineProcessor:
|
|
|
117
123
|
self.consecutive_frame_failures = 0
|
|
118
124
|
self.last_successful_frame_time = time.time()
|
|
119
125
|
|
|
120
|
-
ai_model = self.
|
|
126
|
+
ai_model = self.detector.metadata if self.detector else None
|
|
121
127
|
if self.detection_processor:
|
|
122
128
|
config = self.config_manager.get_feature_config(self.detection_processor.code)
|
|
123
129
|
self.detection_processor.update(self.config_manager, ai_model)
|
|
@@ -133,7 +139,7 @@ class PipelineProcessor:
|
|
|
133
139
|
worker_source_id = self.worker_source_id
|
|
134
140
|
logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
|
|
135
141
|
|
|
136
|
-
self.
|
|
142
|
+
self._update_config_internal()
|
|
137
143
|
self.consecutive_frame_failures = 0
|
|
138
144
|
self.last_successful_frame_time = time.time()
|
|
139
145
|
|
|
@@ -243,7 +249,7 @@ class PipelineProcessor:
|
|
|
243
249
|
processed_frame = self.preprocessor.apply(frame)
|
|
244
250
|
|
|
245
251
|
class_thresholds = {}
|
|
246
|
-
ai_model = self.
|
|
252
|
+
ai_model = self.detector.metadata if self.detector else None
|
|
247
253
|
|
|
248
254
|
if self.detection_processor:
|
|
249
255
|
if self.detection_processor.code == PPEDetectionProcessor.code:
|
|
@@ -253,7 +259,10 @@ class PipelineProcessor:
|
|
|
253
259
|
if main_threshold and ai_model and ai_model.get_main_class():
|
|
254
260
|
class_thresholds[ai_model.get_main_class()] = main_threshold
|
|
255
261
|
|
|
256
|
-
detections =
|
|
262
|
+
detections = []
|
|
263
|
+
if self.detector:
|
|
264
|
+
detections = self.detector.detect_objects(processed_frame, self.threshold, class_thresholds)
|
|
265
|
+
|
|
257
266
|
detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
|
|
258
267
|
|
|
259
268
|
if self.detection_processor:
|
|
@@ -277,9 +286,8 @@ class PipelineProcessor:
|
|
|
277
286
|
|
|
278
287
|
# Update config periodically
|
|
279
288
|
if (current_time - last_config_update_time) >= config_update_interval:
|
|
280
|
-
self.
|
|
289
|
+
self._update_config_internal()
|
|
281
290
|
last_config_update_time = current_time
|
|
282
|
-
logging.info(f"🔄 Updated pipeline config for {pipeline_id}")
|
|
283
291
|
|
|
284
292
|
# Keep only the latest frame if we fell behind
|
|
285
293
|
try:
|
|
@@ -324,25 +332,24 @@ class PipelineProcessor:
|
|
|
324
332
|
except Exception as e:
|
|
325
333
|
logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
|
|
326
334
|
|
|
327
|
-
def _wait_for_frame(self, video_manager,
|
|
335
|
+
def _wait_for_frame(self, video_manager, max_wait_time=30.0):
|
|
328
336
|
logging.info(f"⏳ Waiting for initial frame from {self.worker_source_id}...")
|
|
329
|
-
|
|
337
|
+
|
|
338
|
+
is_ready = video_manager.wait_for_stream_ready(self.worker_source_id, timeout=max_wait_time)
|
|
339
|
+
|
|
340
|
+
if is_ready:
|
|
330
341
|
frame = video_manager.get_frame(self.worker_source_id)
|
|
331
342
|
if frame is not None:
|
|
332
343
|
logging.info(f"✅ Initial frame received from {self.worker_source_id}")
|
|
333
344
|
return frame
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
logging.error(f"❌ Stream {self.worker_source_id} not found in video manager")
|
|
337
|
-
return None
|
|
338
|
-
|
|
339
|
-
logging.warning(f"⚠️ Waiting for video stream {self.worker_source_id} (Attempt {retry_count + 1}/{max_retries})...")
|
|
340
|
-
if retry_count >= 3:
|
|
345
|
+
else:
|
|
346
|
+
logging.error(f"❌ Stream {self.worker_source_id} reported ready, but the first frame could not be retrieved.")
|
|
341
347
|
self._log_stream_diagnostics(video_manager, self.worker_source_id)
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
348
|
+
return None
|
|
349
|
+
else:
|
|
350
|
+
logging.error(f"❌ Timed out after {max_wait_time}s waiting for first frame from {self.worker_source_id}.")
|
|
351
|
+
self._log_stream_diagnostics(video_manager, self.worker_source_id)
|
|
352
|
+
return None
|
|
346
353
|
|
|
347
354
|
def _handle_frame_failure(self, video_manager, worker_source_id):
|
|
348
355
|
"""Handle frame retrieval failures with progressive backoff and recovery attempts."""
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import time
|
|
4
|
+
import threading
|
|
5
|
+
from typing import Dict, Set, Optional
|
|
6
|
+
from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
|
|
7
|
+
from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
|
|
8
|
+
from .PipelineManager import PipelineManager
|
|
9
|
+
from .ModelManager import ModelManager
|
|
10
|
+
from ..streams.VideoStreamManager import VideoStreamManager
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class PipelineSyncThread(threading.Thread):
    """Thread responsible for synchronizing worker source pipelines from the database in real-time.

    Every `polling_interval` seconds the thread diffs the pipelines stored in
    the database against the locally running ones, then starts, stops,
    restarts, or reconfigures processors through the PipelineManager.
    """

    def __init__(self, video_manager: "VideoStreamManager", polling_interval=5, max_workers=4):
        super().__init__(daemon=True)  # Daemon: never blocks interpreter shutdown
        self.video_manager = video_manager
        self.polling_interval = polling_interval
        self.pipeline_repo = WorkerSourcePipelineRepository()
        self.debug_repo = WorkerSourcePipelineDebugRepository()
        self.model_manager = ModelManager()
        self.running = True
        self.pipeline_manager = PipelineManager(video_manager, self.on_pipeline_stopped, max_workers)

    def _parse_json(self, value: str) -> Optional[dict]:
        """Attempt to parse *value* as JSON.

        Returns the parsed object or list, or None when the value is empty,
        does not look like a JSON object/array, or fails to parse.
        """
        if not value:
            return None

        value = value.strip()  # Remove leading/trailing spaces
        if (value.startswith("{") and value.endswith("}")) or (value.startswith("[") and value.endswith("]")):
            try:
                return json.loads(value)  # Parse JSON object or list
            except json.JSONDecodeError:
                pass  # Not valid JSON after all; fall through to None
        return None

    def on_pipeline_stopped(self, pipeline_id: str) -> None:
        """Persist a pipeline's stopped state in the database.

        A pipeline flagged 'restart' is flipped back to 'run' so the next sync
        cycle starts it again; any other status becomes 'stop'.
        """
        pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
        if pipeline is None:
            # The row may have been deleted while the pipeline was shutting down.
            logging.warning(f"Pipeline {pipeline_id} not found while marking it stopped.")
            return
        pipeline.pipeline_status_code = "run" if pipeline.pipeline_status_code == "restart" else "stop"
        self.pipeline_repo.session.commit()

    def run(self) -> None:
        """Continuously update pipelines based on database changes."""
        while self.running:
            try:
                db_pipelines = {p.id: p for p in self.pipeline_repo.get_all_pipelines()}

                # Get pipeline IDs for comparison
                local_pipeline_ids = set(self.pipeline_manager.get_active_pipelines())
                db_pipeline_ids = set(db_pipelines.keys())

                # BUGFIX: the flag is now RETURNED by _add_new_pipelines. The
                # previous code passed a bool in and rebound it inside the
                # callee, which never propagated back (bools are immutable),
                # so the commit below could never run and 'restart' status
                # changes were never persisted.
                restarted_pipeline = self._add_new_pipelines(
                    db_pipeline_ids - local_pipeline_ids, db_pipelines
                )
                self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
                self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines)

                if restarted_pipeline:
                    self.pipeline_repo.session.commit()

                # Sync the cache to remove detectors no running pipeline uses
                active_model_ids = {p.ai_model_id for p in db_pipelines.values() if p.pipeline_status_code == 'run'}
                self.model_manager.sync_cache(active_model_ids)

            except Exception as e:
                logging.error(f"⚠️ Error syncing pipelines from database: {e}", exc_info=True)

            time.sleep(self.polling_interval)

    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> bool:
        """Start pipelines that exist in the DB but not locally.

        Returns:
            True when at least one pipeline was switched from 'restart' to
            'run' (the caller must then commit the session).
        """
        restarted = False
        for pid in pipeline_ids:
            pipeline = db_pipelines[pid]

            if pipeline.pipeline_status_code == 'restart':
                pipeline.pipeline_status_code = 'run'
                restarted = True

            if pipeline.pipeline_status_code == 'run':
                detector = self.model_manager.get_detector(pipeline.ai_model_id)

                # A pipeline with a model id but no loadable detector cannot run.
                if not detector and pipeline.ai_model_id:
                    logging.warning(f"⚠️ Could not load detector for pipeline {pid} ({pipeline.name}). Skipping.")
                    continue

                logging.info(f"🟢 Adding new pipeline: {pid} ({pipeline.name})")
                self.pipeline_manager.start_pipeline(pipeline, detector)
        return restarted

    def _remove_deleted_pipelines(self, pipeline_ids: Set[str]) -> None:
        """Stop pipelines that exist locally but were removed from the DB."""
        for pid in pipeline_ids:
            logging.info(f"🔴 Removing deleted pipeline: {pid}")
            self.pipeline_manager.stop_pipeline(pid)

    def _update_existing_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> None:
        """Update existing pipelines that need changes, enabling debug where requested."""
        debug_pipeline_ids = self.debug_repo.get_pipeline_ids_to_debug()

        for pid in pipeline_ids:
            db_pipeline = db_pipelines[pid]
            processor = self.pipeline_manager.processors.get(pid)
            if not processor:
                # Processor may have stopped between the id snapshot and now.
                continue

            local_detector = processor.detector

            self.update_pipeline(pid, db_pipeline, local_detector)
            if pid in debug_pipeline_ids:
                processor.enable_debug()

    def update_pipeline(self, pid: str, db_pipeline: object, local_detector: object) -> None:
        """Update a single pipeline if necessary.

        Resumes/stops on status change, restarts when model/source/detector
        changed, and otherwise pushes the (possibly updated) config.
        """
        processor = self.pipeline_manager.processors.get(pid)
        if not processor:
            return

        # Stop/start based on status change
        if db_pipeline.pipeline_status_code != processor._pipeline.pipeline_status_code:
            if db_pipeline.pipeline_status_code == 'run':
                logging.info(f"▶️ Resuming pipeline: {pid}")
                self.pipeline_manager.start_pipeline(db_pipeline, self.model_manager.get_detector(db_pipeline.ai_model_id))
            elif db_pipeline.pipeline_status_code in ['stop', 'restart']:
                logging.info(f"⏹️ Stopping pipeline: {pid}")
                self.pipeline_manager.stop_pipeline(pid)
                if db_pipeline.pipeline_status_code == 'restart':
                    # This will be picked up by the 'add_new_pipelines' logic in the next cycle
                    return
            else:
                processor.update_config(db_pipeline)  # Update config for non-running pipelines
                return
        elif db_pipeline.pipeline_status_code != 'run':
            processor.update_config(db_pipeline)
            return

        # Check for significant changes that require a restart
        db_detector = self.model_manager.get_detector(db_pipeline.ai_model_id)

        requires_restart = any([
            db_pipeline.ai_model_id != processor._pipeline.ai_model_id,
            db_pipeline.worker_source_id != processor._pipeline.worker_source_id,
            local_detector != db_detector
        ])

        if requires_restart and db_pipeline.pipeline_status_code == 'run':
            logging.info(f"🔄 Restarting pipeline due to significant changes: {pid}")
            self.pipeline_manager.stop_pipeline(pid)
            self.pipeline_manager.start_pipeline(db_pipeline, db_detector)
        else:
            # Update config for minor changes that don't require restart
            processor.update_config(db_pipeline)

    def _has_pipeline_changed(self, local_pipeline, db_pipeline) -> bool:
        """Return True when the pipeline's config set differs between local and DB."""
        if not local_pipeline or db_pipeline.pipeline_status_code == "restart":
            return True

        def _config_key(config):
            # Every field that matters for equality; comparison is order-independent.
            return (config.pipeline_config_id, config.is_enabled, config.value,
                    config.pipeline_config_name, config.pipeline_config_code)

        local_config_values = [_config_key(c) for c in local_pipeline.worker_source_pipeline_configs]
        db_config_values = [_config_key(c) for c in db_pipeline.worker_source_pipeline_configs]

        return sorted(local_config_values) != sorted(db_config_values)

    def stop(self):
        """Stop the synchronization thread and shut down pipelines properly."""
        logging.info("🛑 Stopping PipelineSyncThread...")
        self.running = False
        self.video_manager.stop_all()
        self.pipeline_manager.shutdown()
|
|
@@ -28,4 +28,24 @@ class AIModelRepository:
|
|
|
28
28
|
return models
|
|
29
29
|
except SQLAlchemyError as e:
|
|
30
30
|
logging.error(f"Error retrieving models: {e}")
|
|
31
|
-
return []
|
|
31
|
+
return []
|
|
32
|
+
|
|
33
|
+
def get_model(self, model_id: str) -> AIModelEntity | None:
    """Look up a single AI model row by primary key.

    Args:
        model_id: Primary key of the model to fetch.

    Returns:
        The AIModelEntity detached from the session, or None when the row
        does not exist or a database error occurs.
    """
    try:
        # Invalidate cached state so the query reflects the latest DB contents.
        self.session.expire_all()
        record = self.session.query(AIModelEntity).filter_by(id=model_id).first()
        if record:
            # Detach so the caller can keep using the object after the
            # session moves on to other work.
            self.session.expunge(record)
        return record
    except SQLAlchemyError as e:
        logging.error(f"Error retrieving model {model_id}: {e}")
        return None
|