nedo-vision-worker-core 0.3.3__tar.gz → 0.3.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nedo-vision-worker-core was flagged as potentially problematic by automated analysis; consult the registry's advisory page for this package version for details.

Files changed (116)
  1. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/PKG-INFO +1 -1
  2. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/__init__.py +1 -1
  3. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/RFDETRDetector.py +3 -0
  4. nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/pipeline/ModelManager.py +139 -0
  5. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/pipeline/PipelineManager.py +16 -3
  6. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/pipeline/PipelineProcessor.py +39 -31
  7. nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/pipeline/PipelineSyncThread.py +183 -0
  8. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/AIModelRepository.py +21 -1
  9. nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/streams/RTMPStreamer.py +229 -0
  10. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/streams/SharedVideoDeviceManager.py +5 -1
  11. nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/streams/StreamSyncThread.py +107 -0
  12. nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/streams/VideoStream.py +361 -0
  13. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/streams/VideoStreamManager.py +90 -38
  14. nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/util/PlatformDetector.py +100 -0
  15. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core.egg-info/PKG-INFO +1 -1
  16. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core.egg-info/SOURCES.txt +2 -1
  17. nedo_vision_worker_core-0.3.3/nedo_vision_worker_core/detection/DetectionManager.py +0 -83
  18. nedo_vision_worker_core-0.3.3/nedo_vision_worker_core/pipeline/PipelineSyncThread.py +0 -234
  19. nedo_vision_worker_core-0.3.3/nedo_vision_worker_core/streams/RTMPStreamer.py +0 -284
  20. nedo_vision_worker_core-0.3.3/nedo_vision_worker_core/streams/StreamSyncThread.py +0 -80
  21. nedo_vision_worker_core-0.3.3/nedo_vision_worker_core/streams/VideoStream.py +0 -422
  22. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/MANIFEST.in +0 -0
  23. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/README.md +0 -0
  24. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/ai/FrameDrawer.py +0 -0
  25. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/ai/ImageDebugger.py +0 -0
  26. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/ai/VideoDebugger.py +0 -0
  27. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/ai/__init__.py +0 -0
  28. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/callbacks/DetectionCallbackManager.py +0 -0
  29. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/callbacks/DetectionCallbackTypes.py +0 -0
  30. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/callbacks/__init__.py +0 -0
  31. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/cli.py +0 -0
  32. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/config/ConfigurationManager.py +0 -0
  33. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/config/__init__.py +0 -0
  34. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/core_service.py +0 -0
  35. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/database/DatabaseManager.py +0 -0
  36. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/database/__init__.py +0 -0
  37. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/BaseDetector.py +0 -0
  38. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/YOLODetector.py +0 -0
  39. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/__init__.py +0 -0
  40. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +0 -0
  41. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +0 -0
  42. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +0 -0
  43. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/detection/detection_processing/__init__.py +0 -0
  44. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/doctor.py +0 -0
  45. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
  46. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
  47. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
  48. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
  49. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
  50. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
  51. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
  52. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
  53. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
  54. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
  55. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/boots-green.png +0 -0
  56. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/boots-red.png +0 -0
  57. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/gloves-green.png +0 -0
  58. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/gloves-red.png +0 -0
  59. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/goggles-green.png +0 -0
  60. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/goggles-red.png +0 -0
  61. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/helmet-green.png +0 -0
  62. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/helmet-red.png +0 -0
  63. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/mask-red.png +0 -0
  64. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/vest-green.png +0 -0
  65. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/icons/vest-red.png +0 -0
  66. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/__init__.py +0 -0
  67. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/ai_model.py +0 -0
  68. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/auth.py +0 -0
  69. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/config.py +0 -0
  70. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/dataset_source.py +0 -0
  71. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/logs.py +0 -0
  72. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/ppe_detection.py +0 -0
  73. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/ppe_detection_label.py +0 -0
  74. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/restricted_area_violation.py +0 -0
  75. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/user.py +0 -0
  76. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/worker_source.py +0 -0
  77. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/worker_source_pipeline.py +0 -0
  78. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/worker_source_pipeline_config.py +0 -0
  79. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/worker_source_pipeline_debug.py +0 -0
  80. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/models/worker_source_pipeline_detection.py +0 -0
  81. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/pipeline/PipelineConfigManager.py +0 -0
  82. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +0 -0
  83. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/pipeline/__init__.py +0 -0
  84. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/preprocessing/ImageResizer.py +0 -0
  85. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/preprocessing/ImageRoi.py +0 -0
  86. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/preprocessing/Preprocessor.py +0 -0
  87. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/preprocessing/__init__.py +0 -0
  88. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/PPEDetectionRepository.py +0 -0
  89. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +0 -0
  90. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +0 -0
  91. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +0 -0
  92. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +0 -0
  93. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/WorkerSourceRepository.py +0 -0
  94. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/repositories/__init__.py +0 -0
  95. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/services/SharedVideoStreamServer.py +0 -0
  96. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/services/VideoSharingDaemon.py +0 -0
  97. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/services/VideoSharingDaemonManager.py +0 -0
  98. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/streams/__init__.py +0 -0
  99. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/tracker/SFSORT.py +0 -0
  100. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/tracker/TrackerManager.py +0 -0
  101. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/tracker/__init__.py +0 -0
  102. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/BoundingBoxMetrics.py +0 -0
  103. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/DrawingUtils.py +0 -0
  104. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/ModelReadinessChecker.py +0 -0
  105. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/PersonAttributeMatcher.py +0 -0
  106. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +0 -0
  107. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/TablePrinter.py +0 -0
  108. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core/util/__init__.py +0 -0
  109. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core.egg-info/dependency_links.txt +0 -0
  110. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core.egg-info/entry_points.txt +0 -0
  111. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core.egg-info/requires.txt +0 -0
  112. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/nedo_vision_worker_core.egg-info/top_level.txt +0 -0
  113. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/pyproject.toml +0 -0
  114. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/requirements.txt +0 -0
  115. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/setup.cfg +0 -0
  116. {nedo_vision_worker_core-0.3.3 → nedo_vision_worker_core-0.3.5}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nedo-vision-worker-core
3
- Version: 0.3.3
3
+ Version: 0.3.5
4
4
  Summary: Nedo Vision Worker Core Library for AI Vision Processing
5
5
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
6
6
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
7
7
  from .core_service import CoreService
8
8
  from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
9
9
 
10
- __version__ = "0.3.3"
10
+ __version__ = "0.3.5"
11
11
  __all__ = [
12
12
  "CoreService",
13
13
  "DetectionType",
@@ -9,6 +9,7 @@ except ImportError:
9
9
 
10
10
  from ..database.DatabaseManager import DatabaseManager
11
11
  from ..models.ai_model import AIModelEntity
12
+ from ..util.PlatformDetector import PlatformDetector
12
13
  from .BaseDetector import BaseDetector
13
14
 
14
15
  logging.getLogger("ultralytics").setLevel(logging.WARNING)
@@ -25,6 +26,8 @@ class RFDETRDetector(BaseDetector):
25
26
  raise TypeError("model must be an instance of AIModelEntity")
26
27
  self.model = None
27
28
  self.metadata = None
29
+ self.device = PlatformDetector.get_device()
30
+ logging.info(f"ℹ️ RFDETRDetector will use '{self.device}' device.")
28
31
 
29
32
  if model:
30
33
  self.load_model(model)
@@ -0,0 +1,139 @@
1
+ import logging
2
+ from typing import Dict, Optional, Set
3
+
4
+ from ..repositories.AIModelRepository import AIModelRepository
5
+ from ..detection.BaseDetector import BaseDetector
6
+ from ..detection.YOLODetector import YOLODetector
7
+ from ..detection.RFDETRDetector import RFDETRDetector
8
+ from ..models.ai_model import AIModelEntity
9
+
10
+
11
+ class ModelManager:
12
+ """Manages loading and caching of AI models to avoid redundant loads."""
13
+
14
+ def __init__(self):
15
+ self._detector_cache: Dict[str, BaseDetector] = {}
16
+ self._model_repo = AIModelRepository()
17
+ logging.info("🤖 ModelManager initialized.")
18
+
19
+ def get_detector(self, model_id: str) -> Optional[BaseDetector]:
20
+ """
21
+ Retrieves a detector by its model ID.
22
+
23
+ This method implements a lazy-loading and cache-validation strategy:
24
+ 1. It fetches the latest model metadata from the database.
25
+ 2. If a detector is already cached, it validates its metadata against the DB version.
26
+ 3. If the cached version is stale (e.g., version or classes changed), it's evicted.
27
+ 4. If no detector is cached or the cache was stale, it loads the detector on-demand.
28
+ """
29
+ if not model_id:
30
+ return None
31
+
32
+ # 1. Fetch the current model state from the database
33
+ db_model: AIModelEntity = self._model_repo.get_model(model_id)
34
+ if not db_model:
35
+ # If the model doesn't exist in DB, ensure it's not in cache either
36
+ if model_id in self._detector_cache:
37
+ logging.info(f"🧹 Removing detector for deleted model {model_id} from cache.")
38
+ del self._detector_cache[model_id]
39
+ return None
40
+
41
+ # 2. Check if a detector is cached
42
+ if model_id in self._detector_cache:
43
+ cached_detector = self._detector_cache[model_id]
44
+
45
+ # 3. Check if the cached version is stale
46
+ if self._has_metadata_changed(cached_detector.metadata, db_model):
47
+ logging.info(f"Reloading detector for model {model_id} due to metadata changes.")
48
+ del self._detector_cache[model_id]
49
+ # Fall through to load the new version
50
+ else:
51
+ # Cache is fresh, return it
52
+ logging.debug(f"🧠 Detector for model {model_id} found in cache and is fresh.")
53
+ return cached_detector
54
+
55
+ # 4. If not cached or was stale, load it now
56
+ return self._load_and_cache_detector(model_id, db_model)
57
+
58
+ def _load_and_cache_detector(self, model_id: str, db_model: AIModelEntity) -> Optional[BaseDetector]:
59
+ """Creates a detector from a DB model entity and caches it."""
60
+ logging.info(f"🔄 Loading model {model_id} (version: {db_model.version}) from database to create detector...")
61
+
62
+ # Check model readiness before attempting to load
63
+ if not db_model.is_ready_for_use():
64
+ if db_model.is_downloading():
65
+ logging.warning(f"⏳ Model {model_id} is still downloading. Skipping detector load.")
66
+ elif db_model.has_download_failed():
67
+ logging.error(f"❌ Model {model_id} download failed: {db_model.download_error}")
68
+ else:
69
+ logging.warning(f"⚠️ Model {model_id} is not ready for use (status: {db_model.download_status})")
70
+ return None
71
+
72
+ detector_type = db_model.type.lower()
73
+ detector: Optional[BaseDetector] = None
74
+ try:
75
+ if detector_type == "yolo":
76
+ detector = YOLODetector(db_model)
77
+ elif detector_type == "rf_detr":
78
+ detector = RFDETRDetector(db_model)
79
+ else:
80
+ raise ValueError(f"Unsupported model type: {detector_type}")
81
+
82
+ if detector and detector.model is not None:
83
+ self._detector_cache[model_id] = detector
84
+ logging.info(f"✅ Detector for model {model_id} loaded and cached successfully.")
85
+ return detector
86
+ else:
87
+ logging.error(f"❌ Failed to load detector for model: {db_model.name}")
88
+ return None
89
+
90
+ except Exception as e:
91
+ logging.error(f"❌ Error creating detector for model {db_model.name}: {e}")
92
+ return None
93
+
94
+ def _has_metadata_changed(self, cached_model: AIModelEntity, db_model: AIModelEntity) -> bool:
95
+ """Check if critical model metadata has changed."""
96
+ if cached_model.version != db_model.version:
97
+ logging.info(
98
+ f"🔄 Model {db_model.id} version changed "
99
+ f"({cached_model.version} -> {db_model.version})."
100
+ )
101
+ return True
102
+
103
+ # Compare classes
104
+ cached_classes = set(cached_model.get_classes() or [])
105
+ db_classes = set(db_model.get_classes() or [])
106
+ if cached_classes != db_classes:
107
+ logging.info(f"🔄 Model {db_model.id} classes changed.")
108
+ return True
109
+
110
+ # Compare PPE class groups
111
+ cached_ppe_groups = cached_model.get_ppe_class_groups() or {}
112
+ db_ppe_groups = db_model.get_ppe_class_groups() or {}
113
+ if cached_ppe_groups != db_ppe_groups:
114
+ logging.info(f"🔄 Model {db_model.id} PPE groups changed.")
115
+ return True
116
+
117
+ # Compare main class
118
+ if cached_model.get_main_class() != db_model.get_main_class():
119
+ logging.info(f"🔄 Model {db_model.id} main class changed.")
120
+ return True
121
+
122
+ return False
123
+
124
+ def sync_cache(self, active_model_ids: Set[str]):
125
+ """
126
+ Removes detectors from the cache if their corresponding models are no longer in the database
127
+ or are not being used by any active pipeline.
128
+ """
129
+ cached_ids = set(self._detector_cache.keys())
130
+ stale_ids = cached_ids - active_model_ids
131
+
132
+ for model_id in stale_ids:
133
+ del self._detector_cache[model_id]
134
+ logging.info(f"🧹 Removed unused detector for model {model_id} from cache.")
135
+
136
+ def clear_cache(self):
137
+ """Clears the detector cache."""
138
+ logging.info("🧹 Clearing all detectors from cache.")
139
+ self._detector_cache.clear()
@@ -19,12 +19,12 @@ class PipelineManager:
19
19
  self._stop_lock = threading.Lock() # Lock for thread-safe pipeline stopping
20
20
  self.on_pipeline_stopped = on_pipeline_stopped
21
21
 
22
- def start_pipeline(self, pipeline, model):
22
+ def start_pipeline(self, pipeline, detector):
23
23
  """
24
24
  Start a pipeline processing.
25
25
  Args:
26
26
  pipeline: The pipeline object (contains id, worker_source_id, name, etc.)
27
- model: The AI model to use for processing.
27
+ detector: The detector instance to use for processing.
28
28
  """
29
29
  pipeline_id = pipeline.id
30
30
  worker_source_id = pipeline.worker_source_id
@@ -39,7 +39,12 @@ class PipelineManager:
39
39
 
40
40
  logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")
41
41
 
42
- processor = PipelineProcessor(pipeline_id, worker_source_id, model, False)
42
+ # Acquire the video stream (starts it if not already running)
43
+ if not self.video_manager.acquire_stream(worker_source_id, pipeline_id):
44
+ logging.error(f"❌ Failed to acquire stream {worker_source_id} for pipeline {pipeline_id}")
45
+ return
46
+
47
+ processor = PipelineProcessor(pipeline, detector, False)
43
48
  processor.frame_drawer.location_name = pipeline.location_name
44
49
  self.processors[pipeline_id] = processor # Store processor instance
45
50
 
@@ -79,6 +84,10 @@ class PipelineManager:
79
84
  self._stopping_pipelines.add(pipeline_id)
80
85
 
81
86
  try:
87
+ # Get worker_source_id before removing metadata
88
+ pipeline = self.pipeline_metadata.get(pipeline_id)
89
+ worker_source_id = pipeline.worker_source_id if pipeline else None
90
+
82
91
  # Stop AI processing
83
92
  processor = self.processors.pop(pipeline_id, None)
84
93
  if processor:
@@ -92,6 +101,10 @@ class PipelineManager:
92
101
  # Remove metadata
93
102
  self.pipeline_metadata.pop(pipeline_id, None)
94
103
 
104
+ # Release the video stream (stops it if no more pipelines use it)
105
+ if worker_source_id:
106
+ self.video_manager.release_stream(worker_source_id, pipeline_id)
107
+
95
108
  logging.info(f"✅ Pipeline {pipeline_id} stopped successfully.")
96
109
 
97
110
  except Exception as e:
@@ -12,18 +12,19 @@ from ..streams.VideoStreamManager import VideoStreamManager
12
12
  from ..ai.VideoDebugger import VideoDebugger
13
13
  from ..ai.FrameDrawer import FrameDrawer
14
14
  from ..tracker.TrackerManager import TrackerManager
15
- from ..detection.DetectionManager import DetectionManager
15
+ from ..detection.BaseDetector import BaseDetector
16
16
  from ..streams.RTMPStreamer import RTMPStreamer
17
17
 
18
18
 
19
19
  class PipelineProcessor:
20
20
  """Handles pipeline processing including preprocessing, AI model inference, tracking, and video stream processing."""
21
21
 
22
- def __init__(self, pipeline_id, worker_source_id, model, enable_visualization=True):
22
+ def __init__(self, pipeline, detector: BaseDetector, enable_visualization=True):
23
+ self._pipeline = pipeline
23
24
  self.running = True
24
25
  self.video_debugger = VideoDebugger(enable_visualization)
25
26
  self.tracker_manager = TrackerManager()
26
- self.detection_manager = DetectionManager(model)
27
+ self.detector = detector
27
28
  self.config_manager = PipelineConfigManager()
28
29
  self.preprocessor = PipelinePrepocessor()
29
30
  self.detection_processor = None
@@ -36,8 +37,8 @@ class PipelineProcessor:
36
37
  self.detection_thread = None
37
38
  self.frame_counter = 0
38
39
  self.frame_drawer = FrameDrawer()
39
- self.pipeline_id = pipeline_id
40
- self.worker_source_id = worker_source_id
40
+ self.pipeline_id = pipeline.id
41
+ self.worker_source_id = pipeline.worker_source_id
41
42
 
42
43
  self.rtmp_streamer = None
43
44
 
@@ -61,11 +62,16 @@ class PipelineProcessor:
61
62
  self.last_hevc_recovery = 0
62
63
  self.hevc_recovery_cooldown = 30.0 # 30 seconds between HEVC recovery attempts
63
64
 
64
- def load_model(self, model):
65
- logging.info(f"🔄 Loading new model for pipeline {self.pipeline_id}: {getattr(model, 'name', None) or 'Unknown'}")
66
- self.detection_manager.load_model(model)
65
+ def update_config(self, pipeline):
66
+ """Updates the pipeline configuration."""
67
+ self._pipeline = pipeline
68
+ self._update_config_internal()
69
+
70
+ def load_detector(self, detector: BaseDetector):
71
+ logging.info(f"🔄 Loading new detector for pipeline {self.pipeline_id}")
72
+ self.detector = detector
67
73
  self._update_detection_processor()
68
- logging.info(f"✅ Model updated for pipeline {self.pipeline_id}")
74
+ logging.info(f"✅ Detector updated for pipeline {self.pipeline_id}")
69
75
 
70
76
  def _get_detection_processor_code(self):
71
77
  for code in self.detection_processor_codes:
@@ -107,7 +113,7 @@ class PipelineProcessor:
107
113
  self.frame_drawer.update_config()
108
114
  self.tracker_manager.update_config([], [], [])
109
115
 
110
- def _update_config(self):
116
+ def _update_config_internal(self):
111
117
  self.config_manager.update(self.pipeline_id)
112
118
  self.preprocessor.update(self.config_manager)
113
119
  self.detection_interval = self._get_detection_interval()
@@ -117,7 +123,7 @@ class PipelineProcessor:
117
123
  self.consecutive_frame_failures = 0
118
124
  self.last_successful_frame_time = time.time()
119
125
 
120
- ai_model = self.detection_manager.model_metadata
126
+ ai_model = self.detector.metadata if self.detector else None
121
127
  if self.detection_processor:
122
128
  config = self.config_manager.get_feature_config(self.detection_processor.code)
123
129
  self.detection_processor.update(self.config_manager, ai_model)
@@ -133,7 +139,7 @@ class PipelineProcessor:
133
139
  worker_source_id = self.worker_source_id
134
140
  logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
135
141
 
136
- self._update_config()
142
+ self._update_config_internal()
137
143
  self.consecutive_frame_failures = 0
138
144
  self.last_successful_frame_time = time.time()
139
145
 
@@ -243,7 +249,7 @@ class PipelineProcessor:
243
249
  processed_frame = self.preprocessor.apply(frame)
244
250
 
245
251
  class_thresholds = {}
246
- ai_model = self.detection_manager.model_metadata
252
+ ai_model = self.detector.metadata if self.detector else None
247
253
 
248
254
  if self.detection_processor:
249
255
  if self.detection_processor.code == PPEDetectionProcessor.code:
@@ -253,7 +259,10 @@ class PipelineProcessor:
253
259
  if main_threshold and ai_model and ai_model.get_main_class():
254
260
  class_thresholds[ai_model.get_main_class()] = main_threshold
255
261
 
256
- detections = self.detection_manager.detect_objects(processed_frame, self.threshold, class_thresholds)
262
+ detections = []
263
+ if self.detector:
264
+ detections = self.detector.detect_objects(processed_frame, self.threshold, class_thresholds)
265
+
257
266
  detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
258
267
 
259
268
  if self.detection_processor:
@@ -277,9 +286,8 @@ class PipelineProcessor:
277
286
 
278
287
  # Update config periodically
279
288
  if (current_time - last_config_update_time) >= config_update_interval:
280
- self._update_config()
289
+ self._update_config_internal()
281
290
  last_config_update_time = current_time
282
- logging.info(f"🔄 Updated pipeline config for {pipeline_id}")
283
291
 
284
292
  # Keep only the latest frame if we fell behind
285
293
  try:
@@ -324,25 +332,24 @@ class PipelineProcessor:
324
332
  except Exception as e:
325
333
  logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
326
334
 
327
- def _wait_for_frame(self, video_manager, max_retries=10, sleep_time=3):
335
+ def _wait_for_frame(self, video_manager, max_wait_time=30.0):
328
336
  logging.info(f"⏳ Waiting for initial frame from {self.worker_source_id}...")
329
- for retry_count in range(max_retries):
337
+
338
+ is_ready = video_manager.wait_for_stream_ready(self.worker_source_id, timeout=max_wait_time)
339
+
340
+ if is_ready:
330
341
  frame = video_manager.get_frame(self.worker_source_id)
331
342
  if frame is not None:
332
343
  logging.info(f"✅ Initial frame received from {self.worker_source_id}")
333
344
  return frame
334
-
335
- if not video_manager.has_stream(self.worker_source_id):
336
- logging.error(f"❌ Stream {self.worker_source_id} not found in video manager")
337
- return None
338
-
339
- logging.warning(f"⚠️ Waiting for video stream {self.worker_source_id} (Attempt {retry_count + 1}/{max_retries})...")
340
- if retry_count >= 3:
345
+ else:
346
+ logging.error(f"❌ Stream {self.worker_source_id} reported ready, but the first frame could not be retrieved.")
341
347
  self._log_stream_diagnostics(video_manager, self.worker_source_id)
342
- time.sleep(sleep_time)
343
-
344
- logging.error(f"❌ Failed to get initial frame from {self.worker_source_id} after {max_retries} attempts")
345
- return None
348
+ return None
349
+ else:
350
+ logging.error(f"❌ Timed out after {max_wait_time}s waiting for first frame from {self.worker_source_id}.")
351
+ self._log_stream_diagnostics(video_manager, self.worker_source_id)
352
+ return None
346
353
 
347
354
  def _handle_frame_failure(self, video_manager, worker_source_id):
348
355
  """Handle frame retrieval failures with progressive backoff and recovery attempts."""
@@ -452,9 +459,10 @@ class PipelineProcessor:
452
459
  logging.error(f" Cannot get stream URL for {worker_source_id}")
453
460
  return False
454
461
 
455
- video_manager.remove_stream(worker_source_id)
462
+ # Use internal methods to restart the stream without affecting reference counting
463
+ video_manager._stop_stream(worker_source_id)
456
464
  time.sleep(1.0)
457
- video_manager.add_stream(worker_source_id, stream_url)
465
+ video_manager._start_stream(worker_source_id, stream_url)
458
466
  time.sleep(2.0)
459
467
 
460
468
  if not video_manager.has_stream(worker_source_id):
@@ -0,0 +1,183 @@
1
+ import json
2
+ import logging
3
+ import time
4
+ import threading
5
+ from typing import Dict, Set, Optional
6
+ from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
7
+ from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
8
+ from .PipelineManager import PipelineManager
9
+ from .ModelManager import ModelManager
10
+ from ..streams.VideoStreamManager import VideoStreamManager
11
+
12
+
13
class PipelineSyncThread(threading.Thread):
    """Daemon thread that keeps locally running pipelines in sync with the database.

    Every ``polling_interval`` seconds it compares the pipelines stored in the
    database with those currently running in the :class:`PipelineManager` and
    starts, stops, restarts, or reconfigures local pipelines accordingly.
    """

    def __init__(self, video_manager: VideoStreamManager, polling_interval=5, max_workers=4):
        super().__init__(daemon=True)  # Daemon: must not block interpreter shutdown
        self.video_manager = video_manager
        self.polling_interval = polling_interval
        self.pipeline_repo = WorkerSourcePipelineRepository()
        self.debug_repo = WorkerSourcePipelineDebugRepository()
        self.model_manager = ModelManager()
        self.running = True
        self.pipeline_manager = PipelineManager(video_manager, self.on_pipeline_stopped, max_workers)

    def _parse_json(self, value: str) -> Optional[dict]:
        """Parse *value* as JSON when it looks like a JSON object or array.

        Returns:
            The parsed ``dict``/``list``, or ``None`` when the value is empty,
            does not look like JSON, or fails to parse.
        """
        if not value:
            return None

        value = value.strip()  # Remove leading/trailing spaces
        if (value.startswith("{") and value.endswith("}")) or (value.startswith("[") and value.endswith("]")):
            try:
                return json.loads(value)  # Parse JSON object or list
            except json.JSONDecodeError:
                pass  # Not valid JSON after all; fall through to None
        return None

    def on_pipeline_stopped(self, pipeline_id: str) -> None:
        """Persist a pipeline's stopped state back to the database.

        A pipeline whose status was ``restart`` goes back to ``run`` (so the
        next sync cycle starts it again); anything else is marked ``stop``.
        """
        pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
        if pipeline is None:
            # The row may have been deleted while the pipeline was running.
            logging.warning(f"⚠️ Pipeline {pipeline_id} not found while marking it stopped.")
            return
        pipeline.pipeline_status_code = "run" if pipeline.pipeline_status_code == "restart" else "stop"
        self.pipeline_repo.session.commit()

    def run(self) -> None:
        """Main loop: continuously reconcile DB pipelines with local ones."""
        while self.running:
            try:
                db_pipelines = {p.id: p for p in self.pipeline_repo.get_all_pipelines()}

                # Get pipeline IDs for comparison
                local_pipeline_ids = set(self.pipeline_manager.get_active_pipelines())
                db_pipeline_ids = set(db_pipelines.keys())

                # BUG FIX: the flag used to be passed by value into
                # _add_new_pipelines, so its mutation there was lost and the
                # 'restart' -> 'run' transition was never committed.
                restarted_pipeline = self._add_new_pipelines(
                    db_pipeline_ids - local_pipeline_ids, db_pipelines
                )
                self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
                self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines)

                if restarted_pipeline:
                    self.pipeline_repo.session.commit()

                # Evict cached detectors whose models no longer back a running pipeline.
                active_model_ids = {p.ai_model_id for p in db_pipelines.values() if p.pipeline_status_code == 'run'}
                self.model_manager.sync_cache(active_model_ids)

            except Exception as e:
                logging.error(f"⚠️ Error syncing pipelines from database: {e}", exc_info=True)

            time.sleep(self.polling_interval)

    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> bool:
        """Start pipelines that exist in the DB but are not running locally.

        Returns:
            True when any pipeline's status was flipped from 'restart' to
            'run' — the caller must commit the session to persist that change.
        """
        restarted = False
        for pid in pipeline_ids:
            pipeline = db_pipelines[pid]

            if pipeline.pipeline_status_code == 'restart':
                pipeline.pipeline_status_code = 'run'
                restarted = True

            if pipeline.pipeline_status_code == 'run':
                detector = self.model_manager.get_detector(pipeline.ai_model_id)

                # A model is configured but could not be loaded: skip rather
                # than start a pipeline that cannot detect anything.
                if not detector and pipeline.ai_model_id:
                    logging.warning(f"⚠️ Could not load detector for pipeline {pid} ({pipeline.name}). Skipping.")
                    continue

                logging.info(f"🟢 Adding new pipeline: {pid} ({pipeline.name})")
                self.pipeline_manager.start_pipeline(pipeline, detector)
        return restarted

    def _remove_deleted_pipelines(self, pipeline_ids: Set[str]) -> None:
        """Stop pipelines that run locally but were removed from the DB."""
        for pid in pipeline_ids:
            logging.info(f"🔴 Removing deleted pipeline: {pid}")
            self.pipeline_manager.stop_pipeline(pid)

    def _update_existing_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> None:
        """Reconcile pipelines that exist both locally and in the DB."""
        debug_pipeline_ids = self.debug_repo.get_pipeline_ids_to_debug()

        for pid in pipeline_ids:
            db_pipeline = db_pipelines[pid]

            # Status changed to stop/restart in DB -> stop the local instance.
            if db_pipeline.pipeline_status_code in ['stop', 'restart']:
                if self.pipeline_manager.is_running(pid):
                    logging.info(f"⏹️ Stopping pipeline due to status change: {pid}")
                    self.pipeline_manager.stop_pipeline(pid)
                continue

            processor = self.pipeline_manager.processors.get(pid)
            if not processor:
                # Known locally but no processor — inconsistent state;
                # recover by restarting it if the DB says it should run.
                if db_pipeline.pipeline_status_code == 'run':
                    logging.warning(f"⚠️ Pipeline {pid} exists locally but has no processor. Restarting...")
                    detector = self.model_manager.get_detector(db_pipeline.ai_model_id)
                    self.pipeline_manager.start_pipeline(db_pipeline, detector)
                continue

            local_detector = processor.detector

            self.update_pipeline(pid, db_pipeline, local_detector)
            if pid in debug_pipeline_ids:
                processor.enable_debug()

    def update_pipeline(self, pid: str, db_pipeline: object, local_detector: object) -> None:
        """Restart or reconfigure one running pipeline when its DB row changed.

        Only called for pipelines whose DB status is 'run'. A change of model,
        source, or loaded detector forces a full restart; anything else is
        applied in place via ``update_config``.
        """
        processor = self.pipeline_manager.processors.get(pid)
        if not processor:
            return

        db_detector = self.model_manager.get_detector(db_pipeline.ai_model_id)

        requires_restart = any([
            db_pipeline.ai_model_id != processor._pipeline.ai_model_id,
            db_pipeline.worker_source_id != processor._pipeline.worker_source_id,
            local_detector != db_detector
        ])

        if requires_restart:
            logging.info(f"🔄 Restarting pipeline due to significant changes: {pid}")
            self.pipeline_manager.stop_pipeline(pid)
            self.pipeline_manager.start_pipeline(db_pipeline, db_detector)
        else:
            # Minor changes that don't require a restart
            processor.update_config(db_pipeline)

    def _has_pipeline_changed(self, local_pipeline, db_pipeline):
        """Return True when the config set differs or a restart is requested."""
        if not local_pipeline or db_pipeline.pipeline_status_code == "restart":
            return True

        def _comparable(configs):
            # Project config objects into order-independent comparable tuples.
            return sorted(
                (c.pipeline_config_id, c.is_enabled, c.value,
                 c.pipeline_config_name, c.pipeline_config_code)
                for c in configs
            )

        return (_comparable(local_pipeline.worker_source_pipeline_configs)
                != _comparable(db_pipeline.worker_source_pipeline_configs))

    def stop(self):
        """Stop the sync loop and shut down all streams and pipelines."""
        logging.info("🛑 Stopping PipelineSyncThread...")
        self.running = False
        self.video_manager.stop_all()
        self.pipeline_manager.shutdown()
@@ -28,4 +28,24 @@ class AIModelRepository:
28
28
  return models
29
29
  except SQLAlchemyError as e:
30
30
  logging.error(f"Error retrieving models: {e}")
31
- return []
31
+ return []
32
+
33
+ def get_model(self, model_id: str) -> AIModelEntity | None:
34
+ """
35
+ Retrieves a single AI model by its ID.
36
+
37
+ Args:
38
+ model_id: The ID of the model to retrieve.
39
+
40
+ Returns:
41
+ An AIModelEntity object or None if not found.
42
+ """
43
+ try:
44
+ self.session.expire_all()
45
+ model = self.session.query(AIModelEntity).filter_by(id=model_id).first()
46
+ if model:
47
+ self.session.expunge(model)
48
+ return model
49
+ except SQLAlchemyError as e:
50
+ logging.error(f"Error retrieving model {model_id}: {e}")
51
+ return None