nedo-vision-worker-core 0.4.0__tar.gz → 0.4.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114) hide show
  1. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/PKG-INFO +1 -1
  2. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/__init__.py +2 -2
  3. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/ModelManager.py +19 -44
  4. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/PipelineManager.py +14 -0
  5. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +5 -0
  6. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/PipelineProcessor.py +52 -21
  7. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/PipelineSyncThread.py +30 -7
  8. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/preprocessing/ImageRoi.py +43 -11
  9. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/preprocessing/Preprocessor.py +4 -1
  10. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +11 -3
  11. nedo_vision_worker_core-0.4.9/nedo_vision_worker_core/repositories/WorkerSourceRepository.py +56 -0
  12. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/streams/RTMPStreamer.py +65 -19
  13. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/streams/VideoStream.py +16 -0
  14. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core.egg-info/PKG-INFO +1 -1
  15. nedo_vision_worker_core-0.4.0/nedo_vision_worker_core/repositories/WorkerSourceRepository.py +0 -21
  16. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/MANIFEST.in +0 -0
  17. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/README.md +0 -0
  18. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/ai/FrameDrawer.py +0 -0
  19. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/ai/ImageDebugger.py +0 -0
  20. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/ai/VideoDebugger.py +0 -0
  21. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/ai/__init__.py +0 -0
  22. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/callbacks/DetectionCallbackManager.py +0 -0
  23. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/callbacks/DetectionCallbackTypes.py +0 -0
  24. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/callbacks/__init__.py +0 -0
  25. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/cli.py +0 -0
  26. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/config/ConfigurationManager.py +0 -0
  27. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/config/__init__.py +0 -0
  28. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/core_service.py +0 -0
  29. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/database/DatabaseManager.py +0 -0
  30. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/database/__init__.py +0 -0
  31. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/BaseDetector.py +0 -0
  32. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/RFDETRDetector.py +0 -0
  33. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/YOLODetector.py +0 -0
  34. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/__init__.py +0 -0
  35. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +0 -0
  36. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +0 -0
  37. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +0 -0
  38. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/detection/detection_processing/__init__.py +0 -0
  39. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/doctor.py +0 -0
  40. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
  41. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
  42. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
  43. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
  44. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
  45. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
  46. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
  47. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
  48. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
  49. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
  50. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/boots-green.png +0 -0
  51. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/boots-red.png +0 -0
  52. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/gloves-green.png +0 -0
  53. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/gloves-red.png +0 -0
  54. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/goggles-green.png +0 -0
  55. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/goggles-red.png +0 -0
  56. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/helmet-green.png +0 -0
  57. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/helmet-red.png +0 -0
  58. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/mask-red.png +0 -0
  59. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/vest-green.png +0 -0
  60. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/icons/vest-red.png +0 -0
  61. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/__init__.py +0 -0
  62. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/ai_model.py +0 -0
  63. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/auth.py +0 -0
  64. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/config.py +0 -0
  65. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/dataset_source.py +0 -0
  66. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/logs.py +0 -0
  67. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/ppe_detection.py +0 -0
  68. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/ppe_detection_label.py +0 -0
  69. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/restricted_area_violation.py +0 -0
  70. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/user.py +0 -0
  71. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/worker_source.py +0 -0
  72. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/worker_source_pipeline.py +0 -0
  73. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/worker_source_pipeline_config.py +0 -0
  74. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/worker_source_pipeline_debug.py +0 -0
  75. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/models/worker_source_pipeline_detection.py +0 -0
  76. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/PipelineConfigManager.py +0 -0
  77. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/pipeline/__init__.py +0 -0
  78. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/preprocessing/ImageResizer.py +0 -0
  79. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/preprocessing/__init__.py +0 -0
  80. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/AIModelRepository.py +0 -0
  81. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/BaseRepository.py +0 -0
  82. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/PPEDetectionRepository.py +0 -0
  83. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +0 -0
  84. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +0 -0
  85. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +0 -0
  86. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/repositories/__init__.py +0 -0
  87. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/services/SharedVideoStreamServer.py +0 -0
  88. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/services/VideoSharingDaemon.py +0 -0
  89. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/services/VideoSharingDaemonManager.py +0 -0
  90. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/streams/SharedVideoDeviceManager.py +0 -0
  91. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/streams/StreamSyncThread.py +0 -0
  92. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/streams/VideoStreamManager.py +0 -0
  93. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/streams/__init__.py +0 -0
  94. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/tracker/SFSORT.py +0 -0
  95. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/tracker/TrackerManager.py +0 -0
  96. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/tracker/__init__.py +0 -0
  97. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/BoundingBoxMetrics.py +0 -0
  98. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/DrawingUtils.py +0 -0
  99. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/ModelReadinessChecker.py +0 -0
  100. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/PersonAttributeMatcher.py +0 -0
  101. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +0 -0
  102. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/PipelinePreviewChecker.py +0 -0
  103. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/PlatformDetector.py +0 -0
  104. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/TablePrinter.py +0 -0
  105. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core/util/__init__.py +0 -0
  106. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core.egg-info/SOURCES.txt +0 -0
  107. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core.egg-info/dependency_links.txt +0 -0
  108. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core.egg-info/entry_points.txt +0 -0
  109. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core.egg-info/requires.txt +0 -0
  110. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/nedo_vision_worker_core.egg-info/top_level.txt +0 -0
  111. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/pyproject.toml +0 -0
  112. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/requirements.txt +0 -0
  113. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/setup.cfg +0 -0
  114. {nedo_vision_worker_core-0.4.0 → nedo_vision_worker_core-0.4.9}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nedo-vision-worker-core
3
- Version: 0.4.0
3
+ Version: 0.4.9
4
4
  Summary: Nedo Vision Worker Core Library for AI Vision Processing
5
5
  Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
6
6
  Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
@@ -7,10 +7,10 @@ A library for running AI vision processing and detection in the Nedo Vision plat
7
7
  from .core_service import CoreService
8
8
  from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
9
9
 
10
- __version__ = "0.4.0"
10
+ __version__ = "0.4.9"
11
11
  __all__ = [
12
12
  "CoreService",
13
- "DetectionType",
13
+ "DetectionType",
14
14
  "CallbackTrigger",
15
15
  "DetectionData",
16
16
  "IntervalMetadata",
@@ -12,19 +12,15 @@ class ModelManager:
12
12
  """Manages loading and caching of AI models to avoid redundant loads."""
13
13
 
14
14
  def __init__(self):
15
- self._detector_cache: Dict[str, BaseDetector] = {}
15
+ # Cache disabled to prevent shared state issues
16
+ # self._detector_cache: Dict[str, BaseDetector] = {}
16
17
  self._model_repo = AIModelRepository()
17
- logging.info("🤖 ModelManager initialized.")
18
+ logging.info("🤖 ModelManager initialized (Cache Disabled).")
18
19
 
19
20
  def get_detector(self, model_id: str) -> Optional[BaseDetector]:
20
21
  """
21
22
  Retrieves a detector by its model ID.
22
-
23
- This method implements a lazy-loading and cache-validation strategy:
24
- 1. It fetches the latest model metadata from the database.
25
- 2. If a detector is already cached, it validates its metadata against the DB version.
26
- 3. If the cached version is stale (e.g., version or classes changed), it's evicted.
27
- 4. If no detector is cached or the cache was stale, it loads the detector on-demand.
23
+ ALWAYS loads a new instance (Cache Disabled).
28
24
  """
29
25
  if not model_id:
30
26
  return None
@@ -32,32 +28,14 @@ class ModelManager:
32
28
  # 1. Fetch the current model state from the database
33
29
  db_model: AIModelEntity = self._model_repo.get_model(model_id)
34
30
  if not db_model:
35
- # If the model doesn't exist in DB, ensure it's not in cache either
36
- if model_id in self._detector_cache:
37
- logging.info(f"🧹 Removing detector for deleted model {model_id} from cache.")
38
- del self._detector_cache[model_id]
39
31
  return None
40
32
 
41
- # 2. Check if a detector is cached
42
- if model_id in self._detector_cache:
43
- cached_detector = self._detector_cache[model_id]
44
-
45
- # 3. Check if the cached version is stale
46
- if self._has_metadata_changed(cached_detector.metadata, db_model):
47
- logging.info(f"Reloading detector for model {model_id} due to metadata changes.")
48
- del self._detector_cache[model_id]
49
- # Fall through to load the new version
50
- else:
51
- # Cache is fresh, return it
52
- logging.debug(f"🧠 Detector for model {model_id} found in cache and is fresh.")
53
- return cached_detector
54
-
55
- # 4. If not cached or was stale, load it now
56
- return self._load_and_cache_detector(model_id, db_model)
33
+ # Always load new detector
34
+ return self._load_detector(model_id, db_model)
57
35
 
58
- def _load_and_cache_detector(self, model_id: str, db_model: AIModelEntity) -> Optional[BaseDetector]:
59
- """Creates a detector from a DB model entity and caches it."""
60
- logging.info(f"🔄 Loading model {model_id} (version: {db_model.version}) from database to create detector...")
36
+ def _load_detector(self, model_id: str, db_model: AIModelEntity) -> Optional[BaseDetector]:
37
+ """Creates a detector from a DB model entity."""
38
+ logging.info(f"🔄 Loading model {model_id} (version: {db_model.version}) from database...")
61
39
 
62
40
  # Check model readiness before attempting to load
63
41
  if not db_model.is_ready_for_use():
@@ -80,8 +58,7 @@ class ModelManager:
80
58
  raise ValueError(f"Unsupported model type: {detector_type}")
81
59
 
82
60
  if detector and detector.model is not None:
83
- self._detector_cache[model_id] = detector
84
- logging.info(f"✅ Detector for model {model_id} loaded and cached successfully.")
61
+ logging.info(f"✅ Detector for model {model_id} loaded successfully.")
85
62
  return detector
86
63
  else:
87
64
  logging.error(f"❌ Failed to load detector for model: {db_model.name}")
@@ -91,7 +68,11 @@ class ModelManager:
91
68
  logging.error(f"❌ Error creating detector for model {db_model.name}: {e}")
92
69
  return None
93
70
 
94
- def _has_metadata_changed(self, cached_model: AIModelEntity, db_model: AIModelEntity) -> bool:
71
+ def get_model_metadata(self, model_id: str) -> Optional[AIModelEntity]:
72
+ """Retrieves model metadata without loading the detector."""
73
+ return self._model_repo.get_model(model_id)
74
+
75
+ def has_metadata_changed(self, cached_model: AIModelEntity, db_model: AIModelEntity) -> bool:
95
76
  """Check if critical model metadata has changed."""
96
77
  if cached_model.version != db_model.version:
97
78
  logging.info(
@@ -123,14 +104,8 @@ class ModelManager:
123
104
 
124
105
  def sync_cache(self, active_model_ids: Set[str]):
125
106
  """Remove unused detectors from cache."""
126
- cached_ids = set(self._detector_cache.keys())
127
- stale_ids = cached_ids - active_model_ids
128
-
129
- for model_id in stale_ids:
130
- detector = self._detector_cache.pop(model_id, None)
131
- if detector:
132
- self._cleanup_detector(detector)
133
- logging.info(f"🧹 Removed unused detector for model {model_id} from cache.")
107
+ # Cache is disabled, so no sync needed.
108
+ pass
134
109
 
135
110
  def _cleanup_detector(self, detector: BaseDetector):
136
111
  """Free detector resources and GPU memory."""
@@ -170,5 +145,5 @@ class ModelManager:
170
145
 
171
146
  def clear_cache(self):
172
147
  """Clears the detector cache."""
173
- logging.info("🧹 Clearing all detectors from cache.")
174
- self._detector_cache.clear()
148
+ logging.info("🧹 Clearing all detectors from cache (No-op as cache is disabled).")
149
+ # self._detector_cache.clear()
@@ -31,6 +31,11 @@ class PipelineManager:
31
31
  self._stop_lock = threading.Lock()
32
32
  self.on_pipeline_stopped = on_pipeline_stopped
33
33
 
34
+ # Stagger pipeline startup to reduce CPU spikes
35
+ self._last_pipeline_start = 0
36
+ self._pipeline_start_delay = 1.0 # 1 second between pipeline starts
37
+ self._start_lock = threading.Lock()
38
+
34
39
  logging.info(f"🚀 PipelineManager initialized with {max_workers} worker threads")
35
40
 
36
41
  def start_pipeline(self, pipeline, detector):
@@ -46,6 +51,15 @@ class PipelineManager:
46
51
  logging.warning(f"⚠️ Pipeline {pipeline_id} is already running.")
47
52
  return
48
53
 
54
+ # Stagger pipeline starts to reduce CPU spikes
55
+ with self._start_lock:
56
+ time_since_last_start = time.time() - self._last_pipeline_start
57
+ if time_since_last_start < self._pipeline_start_delay:
58
+ delay = self._pipeline_start_delay - time_since_last_start
59
+ logging.info(f"⏳ Staggering pipeline {pipeline_id} start by {delay:.2f}s to reduce CPU spike")
60
+ time.sleep(delay)
61
+ self._last_pipeline_start = time.time()
62
+
49
63
  logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")
50
64
 
51
65
  # Acquire video stream
@@ -37,4 +37,9 @@ class PipelinePrepocessor:
37
37
  for det, bbox in zip(detections, bboxes):
38
38
  det["bbox"] = bbox
39
39
 
40
+ return detections
41
+
42
+ def filter_detections(self, detections: list, dimension: Tuple[int, int]) -> list:
43
+ for preprocessor in reversed(self.preprocessors):
44
+ detections = preprocessor.filter_detections(detections, dimension)
40
45
  return detections
@@ -47,6 +47,10 @@ class PipelineProcessor:
47
47
  self.last_preview_check_time = 0
48
48
  self.preview_check_interval = 10.0 # Check every 10 seconds (reduced from 5s to save CPU)
49
49
  self.pipeline_repo = WorkerSourcePipelineRepository()
50
+
51
+ # RTMP frame rate limiting to reduce CPU
52
+ self.last_rtmp_frame_time = 0
53
+ self.rtmp_frame_interval = 1.0 / 25.0 # 25 FPS for RTMP (matching render FPS)
50
54
 
51
55
  self.detection_processor_codes = [
52
56
  PPEDetectionProcessor.code,
@@ -71,6 +75,7 @@ class PipelineProcessor:
71
75
  self.base_detection_interval = 1.0 / 3.0
72
76
  self.detection_interval = self.base_detection_interval
73
77
  self.is_fps_user_configured = False
78
+ self.fps_resolution_modifier = 1.0
74
79
 
75
80
  def update_config(self, pipeline):
76
81
  """Updates the pipeline configuration."""
@@ -127,7 +132,13 @@ class PipelineProcessor:
127
132
  self.config_manager.update(self.pipeline_id)
128
133
  self.preprocessor.update(self.config_manager)
129
134
  self.base_detection_interval, self.is_fps_user_configured = self._get_detection_interval()
130
- self.detection_interval = self.base_detection_interval
135
+
136
+ if self.is_fps_user_configured:
137
+ self.detection_interval = self.base_detection_interval
138
+ self.fps_resolution_modifier = 1.0
139
+ else:
140
+ self.detection_interval = self.base_detection_interval * self.fps_resolution_modifier
141
+
131
142
  self._update_detection_processor()
132
143
 
133
144
  # Reset failure counters on config update
@@ -198,10 +209,15 @@ class PipelineProcessor:
198
209
  if loop_start - last_preview_check >= self.preview_check_interval:
199
210
  self._check_and_update_rtmp_streaming()
200
211
  last_preview_check = loop_start
201
-
202
- should_draw = self.rtmp_streaming_active or self.debug_flag
203
212
 
204
- if should_draw:
213
+ # For RTMP, also check if it's time to send a frame (rate limiting)
214
+ should_draw_for_rtmp = (
215
+ self.rtmp_streaming_active and
216
+ self.rtmp_streamer is not None and
217
+ (loop_start - self.last_rtmp_frame_time >= self.rtmp_frame_interval)
218
+ )
219
+
220
+ if should_draw_for_rtmp or self.debug_flag:
205
221
  try:
206
222
  frame_to_draw = frame.copy()
207
223
  self.frame_drawer.draw_polygons(frame_to_draw)
@@ -215,7 +231,7 @@ class PipelineProcessor:
215
231
  logging.error(f"❌ Draw failed, using raw frame: {e}")
216
232
  drawn_frame = frame
217
233
  else:
218
- drawn_frame = frame
234
+ drawn_frame = None # Don't waste CPU drawing if not needed
219
235
 
220
236
  if self.debug_flag:
221
237
  tracked_objects_render = self._process_frame(frame)
@@ -235,13 +251,16 @@ class PipelineProcessor:
235
251
  try:
236
252
  self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
237
253
  logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id} (preview requested)")
254
+ self.last_rtmp_frame_time = 0 # Reset frame time on new stream
238
255
  except Exception as e:
239
256
  logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
240
257
  self.rtmp_streamer = None
241
258
 
242
- if self.rtmp_streamer:
259
+ if self.rtmp_streamer and drawn_frame is not None:
260
+ # Frame already rate-limited by drawing logic above
243
261
  try:
244
262
  self.rtmp_streamer.push_frame(drawn_frame)
263
+ self.last_rtmp_frame_time = loop_start
245
264
  except Exception as e:
246
265
  logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
247
266
  if "initialization_failed" in str(e).lower():
@@ -259,9 +278,10 @@ class PipelineProcessor:
259
278
  finally:
260
279
  self.rtmp_streamer = None
261
280
 
262
- # Simplified queue feeding - avoid expensive try-except
263
- if not self.frame_queue.full():
264
- self.frame_queue.put_nowait(frame)
281
+ # Only feed frames to detection queue if detection processor is active
282
+ if self.detection_processor is not None:
283
+ if not self.frame_queue.full():
284
+ self.frame_queue.put_nowait(frame)
265
285
 
266
286
  loop_elapsed = time.time() - loop_start
267
287
  sleep_time = max(0.001, target_frame_time - loop_elapsed)
@@ -290,6 +310,7 @@ class PipelineProcessor:
290
310
  detections = self.detector.detect_objects(processed_frame, self.threshold, class_thresholds)
291
311
 
292
312
  detections = self.preprocessor.revert_detections_bboxes(detections, dimension)
313
+ detections = self.preprocessor.filter_detections(detections, dimension)
293
314
 
294
315
  if self.detection_processor:
295
316
  matched_results = self.detection_processor.process(detections, dimension)
@@ -307,7 +328,13 @@ class PipelineProcessor:
307
328
 
308
329
  while self.running:
309
330
  try:
310
- frame = self.frame_queue.get(block=True, timeout=1)
331
+ # Calculate how long to wait for next detection
332
+ current_time = time.time()
333
+ time_since_last_detection = current_time - last_detection_time
334
+ time_until_next_detection = max(0.1, self.detection_interval - time_since_last_detection)
335
+
336
+ # Wait for frame with timeout aligned to detection interval
337
+ frame = self.frame_queue.get(block=True, timeout=time_until_next_detection)
311
338
 
312
339
  # Check for poison pill (None = stop signal)
313
340
  if frame is None:
@@ -332,9 +359,14 @@ class PipelineProcessor:
332
359
  except queue.Empty:
333
360
  pass
334
361
 
335
- # Respect detection interval
362
+ # Respect detection interval - skip if too soon
336
363
  if (current_time - last_detection_time) < self.detection_interval:
364
+ # Sleep the remaining time instead of busy-waiting
365
+ remaining_time = self.detection_interval - (current_time - last_detection_time)
366
+ if remaining_time > 0.01:
367
+ time.sleep(remaining_time)
337
368
  continue
369
+
338
370
  last_detection_time = current_time
339
371
 
340
372
  if self.detection_processor is None or frame is None or frame.size == 0:
@@ -363,6 +395,7 @@ class PipelineProcessor:
363
395
  )
364
396
 
365
397
  except queue.Empty:
398
+ # Timeout occurred - this is normal, just continue to next iteration
366
399
  pass
367
400
  except Exception as e:
368
401
  logging.error(f"❌ Error in detection thread for pipeline {pipeline_id}: {e}", exc_info=True)
@@ -602,7 +635,7 @@ class PipelineProcessor:
602
635
 
603
636
  def _get_detection_interval(self):
604
637
  """Returns (interval, is_user_configured) tuple."""
605
- config = self.config_manager.get_feature_config("processing_speed")
638
+ config = self.config_manager.get_feature_config("processing_sped")
606
639
  fps = config.get("decimal", None)
607
640
 
608
641
  # Check if user explicitly configured FPS
@@ -616,7 +649,7 @@ class PipelineProcessor:
616
649
  def _auto_adjust_detection_interval_for_resolution(self, frame):
617
650
  """
618
651
  Auto-adjust detection interval based on frame resolution.
619
- Automatically applies when user hasn't configured processing_speed FPS.
652
+ Automatically applies when user hasn't configured processing_sped FPS.
620
653
  User-configured FPS always takes precedence.
621
654
  """
622
655
  if frame is None:
@@ -629,22 +662,20 @@ class PipelineProcessor:
629
662
  PIXELS_4K = 3840 * 2160 # ~8.3M pixels
630
663
  PIXELS_2K = 2560 * 1440 # ~3.7M pixels
631
664
 
632
- base_interval = self.base_detection_interval
633
-
634
665
  # Adjust based on resolution
635
666
  if total_pixels >= PIXELS_4K:
636
667
  # 4K: Reduce FPS by 50% (double the interval)
637
- adjusted_interval = base_interval * 2.0
638
- logging.debug(f"📊 4K stream detected ({width}x{height}): FPS reduced to {1.0/adjusted_interval:.1f}")
668
+ self.fps_resolution_modifier = 2.0
669
+ logging.debug(f"📊 4K stream detected ({width}x{height}): FPS reduced to {1.0/(self.base_detection_interval * 2.0):.1f}")
639
670
  elif total_pixels >= PIXELS_2K:
640
671
  # 2K: Reduce FPS by 25% (increase interval by 1.33x)
641
- adjusted_interval = base_interval * 1.33
642
- logging.debug(f"📊 2K stream detected ({width}x{height}): FPS reduced to {1.0/adjusted_interval:.1f}")
672
+ self.fps_resolution_modifier = 1.33
673
+ logging.debug(f"📊 2K stream detected ({width}x{height}): FPS reduced to {1.0/(self.base_detection_interval * 1.33):.1f}")
643
674
  else:
644
675
  # 1080p and below: Use full configured FPS
645
- adjusted_interval = base_interval
676
+ self.fps_resolution_modifier = 1.0
646
677
 
647
- return adjusted_interval
678
+ return self.base_detection_interval * self.fps_resolution_modifier
648
679
 
649
680
  def enable_debug(self):
650
681
  self.debug_flag = True
@@ -5,6 +5,7 @@ import threading
5
5
  from typing import Dict, Set, Optional
6
6
  from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
7
7
  from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
8
+ from ..repositories.WorkerSourceRepository import WorkerSourceRepository
8
9
  from .PipelineManager import PipelineManager
9
10
  from .ModelManager import ModelManager
10
11
  from ..streams.VideoStreamManager import VideoStreamManager
@@ -19,6 +20,7 @@ class PipelineSyncThread(threading.Thread):
19
20
  self.polling_interval = polling_interval
20
21
  self.pipeline_repo = WorkerSourcePipelineRepository()
21
22
  self.debug_repo = WorkerSourcePipelineDebugRepository()
23
+ self.source_repo = WorkerSourceRepository()
22
24
  self.model_manager = ModelManager()
23
25
  self.running = True
24
26
  self.pipeline_manager = PipelineManager(video_manager, self.on_pipeline_stopped, max_workers)
@@ -82,6 +84,11 @@ class PipelineSyncThread(threading.Thread):
82
84
  pipeline.pipeline_status_code = 'run'
83
85
 
84
86
  if pipeline.pipeline_status_code == 'run':
87
+ # Check if source is connected before starting pipeline
88
+ if not self.source_repo.is_source_connected(pipeline.worker_source_id):
89
+ logging.warning(f"⚠️ Skipping pipeline {pid} ({pipeline.name}): Source {pipeline.worker_source_id} is disconnected")
90
+ continue
91
+
85
92
  detector = self.model_manager.get_detector(pipeline.ai_model_id)
86
93
 
87
94
  if not detector and pipeline.ai_model_id:
@@ -135,17 +142,33 @@ class PipelineSyncThread(threading.Thread):
135
142
 
136
143
  # At this point, we know db_pipeline.pipeline_status_code == 'run' (checked in caller)
137
144
  # Check for significant changes that require a restart
138
- db_detector = self.model_manager.get_detector(db_pipeline.ai_model_id)
139
-
140
- requires_restart = any([
141
- db_pipeline.ai_model_id != processor._pipeline.ai_model_id,
142
- db_pipeline.worker_source_id != processor._pipeline.worker_source_id,
143
- local_detector != db_detector
144
- ])
145
+
146
+ requires_restart = False
147
+
148
+ if db_pipeline.ai_model_id != processor._pipeline.ai_model_id:
149
+ requires_restart = True
150
+ elif db_pipeline.worker_source_id != processor._pipeline.worker_source_id:
151
+ requires_restart = True
152
+ else:
153
+ # Check metadata changes without loading detector
154
+ db_model_metadata = self.model_manager.get_model_metadata(db_pipeline.ai_model_id)
155
+ if local_detector and db_model_metadata:
156
+ if self.model_manager.has_metadata_changed(local_detector.metadata, db_model_metadata):
157
+ requires_restart = True
158
+ elif (local_detector is None) != (db_model_metadata is None):
159
+ requires_restart = True
145
160
 
146
161
  if requires_restart:
162
+ # Check if source is connected before restarting
163
+ if not self.source_repo.is_source_connected(db_pipeline.worker_source_id):
164
+ logging.warning(f"⚠️ Cannot restart pipeline {pid}: Source {db_pipeline.worker_source_id} is disconnected")
165
+ return
166
+
147
167
  logging.info(f"🔄 Restarting pipeline due to significant changes: {pid}")
148
168
  self.pipeline_manager.stop_pipeline(pid)
169
+
170
+ # Load detector ONLY when restarting
171
+ db_detector = self.model_manager.get_detector(db_pipeline.ai_model_id)
149
172
  self.pipeline_manager.start_pipeline(db_pipeline, db_detector)
150
173
  else:
151
174
  # Update config for minor changes that don't require restart
@@ -11,7 +11,7 @@ class ImageRoi(Preprocessor):
11
11
  self.is_enabled = False
12
12
  self.normalized_points = []
13
13
 
14
- def _get_roi_points(self, config: PipelineConfigManager) -> float:
14
+ def _get_roi_points(self, config: PipelineConfigManager) -> list:
15
15
  if not self.is_enabled:
16
16
  return []
17
17
 
@@ -21,12 +21,22 @@ class ImageRoi(Preprocessor):
21
21
  self.is_enabled = config.is_feature_enabled(self.code)
22
22
  self.normalized_points = self._get_roi_points(config)
23
23
 
24
+ def _get_scaled_points(self, width: int, height: int) -> list:
25
+ points = []
26
+ for p in self.normalized_points:
27
+ x = int(p["x"] * width)
28
+ y = int(p["y"] * height)
29
+ x = max(0, min(x, width))
30
+ y = max(0, min(y, height))
31
+ points.append((x, y))
32
+ return points
33
+
24
34
  def apply(self, image: np.ndarray) -> np.ndarray:
25
35
  if not self.is_enabled or len(self.normalized_points) < 4:
26
36
  return image
27
37
 
28
38
  height, width = image.shape[:2]
29
- points = [(int(p["x"] * width), int(p["y"] * height)) for p in self.normalized_points]
39
+ points = self._get_scaled_points(width, height)
30
40
  x_coords = [p[0] for p in points]
31
41
  y_coords = [p[1] for p in points]
32
42
 
@@ -35,15 +45,15 @@ class ImageRoi(Preprocessor):
35
45
  roi_width = max(x_coords) - roi_x + 1
36
46
  roi_height = max(y_coords) - roi_y + 1
37
47
 
38
- cropped = image[roi_y:roi_y+roi_height, roi_x:roi_x+roi_width].copy()
39
- mask = np.zeros(cropped.shape[:2], dtype=np.uint8)
40
-
41
- offset_points = [(x - roi_x, y - roi_y) for x, y in points]
42
- points_np = np.array(offset_points, dtype=np.int32).reshape((-1, 1, 2))
48
+ if roi_width <= 0 or roi_height <= 0:
49
+ return image
43
50
 
44
- cv2.fillPoly(mask, [points_np], 255)
51
+ cropped = image[roi_y:roi_y+roi_height, roi_x:roi_x+roi_width].copy()
52
+
53
+ if cropped.size == 0:
54
+ return image
45
55
 
46
- return cv2.bitwise_and(cropped, cropped, mask=mask)
56
+ return cropped
47
57
 
48
58
 
49
59
  def revert_bboxes(self, bboxes: np.ndarray, dimension: Tuple[int, int]) -> np.ndarray:
@@ -51,11 +61,33 @@ class ImageRoi(Preprocessor):
51
61
  return bboxes
52
62
 
53
63
  height, width = dimension
54
- points = [(int(p["x"] * width), int(p["y"] * height)) for p in self.normalized_points]
64
+ points = self._get_scaled_points(width, height)
55
65
  x_coords = [p[0] for p in points]
56
66
  y_coords = [p[1] for p in points]
57
67
 
58
68
  roi_x = min(x_coords)
59
69
  roi_y = min(y_coords)
60
70
 
61
- return bboxes + np.array([roi_x, roi_y, roi_x, roi_y])
71
+ return bboxes + np.array([roi_x, roi_y, roi_x, roi_y])
72
+
73
+ def filter_detections(self, detections: list, dimension: Tuple[int, int]) -> list:
74
+ if not self.is_enabled or len(self.normalized_points) < 4:
75
+ return detections
76
+
77
+ height, width = dimension
78
+ points = self._get_scaled_points(width, height)
79
+ points_np = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
80
+
81
+ filtered_detections = []
82
+ for det in detections:
83
+ bbox = det["bbox"]
84
+ # bbox is [x1, y1, x2, y2]
85
+ center_x = (bbox[0] + bbox[2]) / 2
86
+ center_y = (bbox[1] + bbox[3]) / 2
87
+
88
+ # Measure distance. If >= 0, it's inside or on edge.
89
+ result = cv2.pointPolygonTest(points_np, (center_x, center_y), False)
90
+ if result >= 0:
91
+ filtered_detections.append(det)
92
+
93
+ return filtered_detections
@@ -13,4 +13,7 @@ class Preprocessor(ABC):
13
13
  pass
14
14
  @abstractmethod
15
15
  def revert_bboxes(self, bboxes: np.ndarray, dimension: Tuple[int, int]) -> np.ndarray:
16
- pass
16
+ pass
17
+
18
+ def filter_detections(self, detections: list, dimension: Tuple[int, int]) -> list:
19
+ return detections
@@ -18,12 +18,14 @@ class WorkerSourcePipelineDetectionRepository(BaseRepository):
18
18
  def save_detection(self, pipeline_id: int, frame, tracked_objects, frame_drawer: FrameDrawer):
19
19
  """
20
20
  Save detection data that need to be sent to database.
21
+ Only saves if there are violations detected.
21
22
  """
22
23
  now = datetime.now(timezone.utc)
23
24
  current_datetime = now.strftime("%Y%m%d_%H%M%S")
24
25
 
25
26
  frame_drawer.draw_polygons(frame)
26
27
  filtered_objects = []
28
+ has_violations = False
27
29
 
28
30
  for tracked_obj in tracked_objects:
29
31
  attributes = tracked_obj["attributes"]
@@ -34,9 +36,17 @@ class WorkerSourcePipelineDetectionRepository(BaseRepository):
34
36
  obj = tracked_obj.copy()
35
37
  obj["attributes"] = [attr for attr in attributes if attr.get("count", 0) >= 5]
36
38
 
39
+ # Check if any attribute is a violation
40
+ for attr in obj["attributes"]:
41
+ attr_label = attr.get("label", "")
42
+ if attr_label in frame_drawer.violation_labels:
43
+ has_violations = True
44
+ break
45
+
37
46
  filtered_objects.append(obj)
38
47
 
39
- if not filtered_objects:
48
+ # Only save and trigger webhook/MQTT if there are actual violations
49
+ if not filtered_objects or not has_violations:
40
50
  return
41
51
 
42
52
  drawn_frame = frame_drawer.draw_frame(frame.copy(), filtered_objects)
@@ -64,7 +74,5 @@ class WorkerSourcePipelineDetectionRepository(BaseRepository):
64
74
  )
65
75
  session.add(new_detection)
66
76
  session.flush()
67
- # Commit happens automatically via context manager
68
- print(f"✅ Inserted detection data for pipeline {pipeline_id}")
69
77
  except Exception as e:
70
78
  print(f"❌ Database error while saving detection: {e}")
@@ -0,0 +1,56 @@
1
+ from .BaseRepository import BaseRepository
2
+ from ..models.worker_source import WorkerSourceEntity
3
+
4
+
5
class WorkerSourceRepository(BaseRepository):
    """Read-only access to worker sources stored in the local config database."""

    def __init__(self):
        # Worker sources live in the local "config" database.
        super().__init__(db_name="config")

    def get_worker_sources(self):
        """
        Fetch all worker sources from the local database in a single query.

        Returns:
            list: Detached WorkerSourceEntity records (safe to use after the
            session closes).
        """
        with self._get_session() as session:
            # Drop any cached identity-map state so we read fresh rows.
            session.expire_all()
            sources = session.query(WorkerSourceEntity).all()
            # Detach each record so callers can use it outside the session.
            for source in sources:
                session.expunge(source)
            return sources

    def get_worker_source(self, source_id: str):
        """
        Fetch a single worker source by ID.

        Args:
            source_id (str): The worker source ID.

        Returns:
            WorkerSourceEntity | None: The detached entity, or None if no row
            matches the given ID.
        """
        with self._get_session() as session:
            session.expire_all()
            source = (
                session.query(WorkerSourceEntity)
                .filter(WorkerSourceEntity.id == source_id)
                .first()
            )
            if source is not None:
                session.expunge(source)
            return source

    def is_source_connected(self, source_id: str) -> bool:
        """
        Check whether a worker source exists and reports status "connected".

        Args:
            source_id (str): The worker source ID.

        Returns:
            bool: True only when the source exists and its status_code equals
            "connected"; a missing source, a None status, or any other status
            yields False.
        """
        source = self.get_worker_source(source_id)
        # `None == "connected"` is already False, so no separate guard on
        # status_code is needed (the original's
        # `== "connected" if source.status_code else False` was redundant).
        return source is not None and source.status_code == "connected"