nedo-vision-worker-core 0.3.5-py3-none-any.whl → 0.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of nedo-vision-worker-core might be problematic.

Files changed (21)
  1. nedo_vision_worker_core/__init__.py +1 -1
  2. nedo_vision_worker_core/database/DatabaseManager.py +17 -1
  3. nedo_vision_worker_core/models/worker_source_pipeline.py +2 -1
  4. nedo_vision_worker_core/pipeline/PipelineManager.py +50 -19
  5. nedo_vision_worker_core/pipeline/PipelineProcessor.py +70 -16
  6. nedo_vision_worker_core/pipeline/PipelineSyncThread.py +12 -12
  7. nedo_vision_worker_core/repositories/AIModelRepository.py +17 -17
  8. nedo_vision_worker_core/repositories/BaseRepository.py +44 -0
  9. nedo_vision_worker_core/repositories/PPEDetectionRepository.py +77 -79
  10. nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +37 -38
  11. nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +47 -46
  12. nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +14 -15
  13. nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +68 -36
  14. nedo_vision_worker_core/repositories/WorkerSourceRepository.py +9 -7
  15. nedo_vision_worker_core/streams/RTMPStreamer.py +283 -106
  16. nedo_vision_worker_core/util/PipelinePreviewChecker.py +50 -0
  17. {nedo_vision_worker_core-0.3.5.dist-info → nedo_vision_worker_core-0.3.7.dist-info}/METADATA +3 -2
  18. {nedo_vision_worker_core-0.3.5.dist-info → nedo_vision_worker_core-0.3.7.dist-info}/RECORD +21 -19
  19. {nedo_vision_worker_core-0.3.5.dist-info → nedo_vision_worker_core-0.3.7.dist-info}/WHEEL +0 -0
  20. {nedo_vision_worker_core-0.3.5.dist-info → nedo_vision_worker_core-0.3.7.dist-info}/entry_points.txt +0 -0
  21. {nedo_vision_worker_core-0.3.5.dist-info → nedo_vision_worker_core-0.3.7.dist-info}/top_level.txt +0 -0
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
 from .core_service import CoreService
 from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
 
-__version__ = "0.3.5"
+__version__ = "0.3.7"
 __all__ = [
     "CoreService",
     "DetectionType",
@@ -121,7 +121,23 @@ class DatabaseManager:
         # Initialize engines and session factories for each database
         for name, path in DB_PATHS.items():
             path.parent.mkdir(parents=True, exist_ok=True)  # Ensure directory exists
-            engine = create_engine(f"sqlite:///{path.as_posix()}")
+
+            # Configure connection pool for multi-threaded usage
+            # pool_size: Max connections to keep open
+            # max_overflow: Additional connections that can be created temporarily
+            # pool_pre_ping: Test connections before using (prevents stale connections)
+            # pool_recycle: Recycle connections after N seconds (prevents long-lived stale connections)
+            engine = create_engine(
+                f"sqlite:///{path.as_posix()}",
+                pool_size=20,          # Base pool size for persistent connections
+                max_overflow=30,       # Allow up to 30 additional temporary connections
+                pool_pre_ping=True,    # Verify connection health before use
+                pool_recycle=3600,     # Recycle connections after 1 hour
+                connect_args={
+                    "check_same_thread": False,  # Required for SQLite with multiple threads
+                    "timeout": 30.0              # Connection timeout
+                }
+            )
             ENGINES[name] = engine
             SESSION_FACTORIES[name] = scoped_session(sessionmaker(bind=engine))  # Use scoped sessions
             DatabaseManager.synchronize(name)
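
For reference, a minimal, self-contained sketch of the same pooled-engine pattern follows. The example.db path and worker() function are illustrative, not from the package; only the create_engine arguments mirror the diff. Note that pool_size and max_overflow are QueuePool parameters, so this assumes a SQLAlchemy version where file-based SQLite engines default to QueuePool (2.0+); with a NullPool they would raise a TypeError.

# Illustrative sketch of the pooled SQLite engine pattern above.
import threading
from sqlalchemy import create_engine, text
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine(
    "sqlite:///example.db",              # throwaway path for the example
    pool_size=20,
    max_overflow=30,
    pool_pre_ping=True,
    pool_recycle=3600,
    connect_args={"check_same_thread": False, "timeout": 30.0},
)
Session = scoped_session(sessionmaker(bind=engine))

def worker():
    session = Session()                  # thread-local session from the registry
    try:
        session.execute(text("SELECT 1"))
    finally:
        Session.remove()                 # close this thread's session

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()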
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, String
+from sqlalchemy import Column, String, DateTime
 from sqlalchemy.orm import relationship
 from ..database.DatabaseManager import Base
 
@@ -13,6 +13,7 @@ class WorkerSourcePipelineEntity(Base):
     ai_model_id = Column(String, nullable=True)
     pipeline_status_code = Column(String, nullable=False)
     location_name = Column(String, nullable=True)
+    last_preview_request_at = Column(DateTime, nullable=True)
 
     worker_source_pipeline_configs = relationship(
         "WorkerSourcePipelineConfigEntity",
@@ -1,6 +1,5 @@
 import logging
 import threading
-from concurrent.futures import ThreadPoolExecutor, Future
 from typing import Dict
 from .PipelineProcessor import PipelineProcessor
 from ..streams.VideoStreamManager import VideoStreamManager
@@ -9,8 +8,8 @@ class PipelineManager:
     """Manages AI pipeline execution and video stream processing."""
 
     def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=50):
-        self.executor = ThreadPoolExecutor(max_workers=max_workers)  # Thread pool for parallel execution
-        self.pipeline_threads = {}  # Stores Future objects {pipeline_id: Future}
+        self.max_workers = max_workers
+        self.pipeline_threads = {}  # Stores Thread objects {pipeline_id: Thread}
         self.pipeline_metadata = {}  # Stores actual pipeline data {pipeline_id: metadata}
         self.video_manager = video_manager  # Manages video streams
         self.processors: Dict[str, PipelineProcessor] = {}  # Stores PipelineProcessor instances per pipeline
@@ -48,14 +47,43 @@
         processor.frame_drawer.location_name = pipeline.location_name
         self.processors[pipeline_id] = processor  # Store processor instance
 
-        future = self.executor.submit(processor.process_pipeline, self.video_manager)
-        self.pipeline_threads[pipeline_id] = future
-        self.pipeline_metadata[pipeline_id] = pipeline
+        active_count = len([t for t in self.pipeline_threads.values() if t.is_alive()])
+        logging.info(f"📋 Starting pipeline {pipeline_id} thread (active threads: {active_count})")
+
+        try:
+            # Wrap the execution to catch any early errors
+            def _safe_process_pipeline():
+                try:
+                    logging.info(f"🏁 Pipeline {pipeline_id} thread execution beginning...")
+                    processor.process_pipeline(self.video_manager)
+                except Exception as e:
+                    logging.error(f"❌ Unhandled error in pipeline {pipeline_id} thread: {e}", exc_info=True)
+                finally:
+                    # Ensure cleanup callback is called
+                    self._handle_pipeline_completion(pipeline_id)
+
+            # Create and start thread directly
+            thread = threading.Thread(
+                target=_safe_process_pipeline,
+                name=f"pipeline-{pipeline_id[:8]}",
+                daemon=True
+            )
+
+            self.pipeline_threads[pipeline_id] = thread
+            self.pipeline_metadata[pipeline_id] = pipeline
+
+            logging.info(f"⚙️ Starting thread for pipeline {pipeline_id}")
+            thread.start()
+            logging.info(f"✅ Pipeline {pipeline_id} thread started successfully")
 
-        # Add callback to detect when a pipeline finishes
-        future.add_done_callback(lambda f: self._handle_pipeline_completion(pipeline_id, f))
+        except Exception as e:
+            logging.error(f"❌ Failed to start pipeline {pipeline_id} thread: {e}", exc_info=True)
+            # Clean up on failure
+            self.processors.pop(pipeline_id, None)
+            self.video_manager.release_stream(worker_source_id, pipeline_id)
+            raise
 
-    def _handle_pipeline_completion(self, pipeline_id: str, future: Future):
+    def _handle_pipeline_completion(self, pipeline_id: str):
         """
         Handles cleanup when a pipeline finishes processing.
         """
@@ -64,11 +92,7 @@
             return  # If it's already being stopped manually, don't trigger again
 
         try:
-            if future.cancelled():
-                logging.info(f"🚫 Pipeline {pipeline_id} was cancelled.")
-            elif future.exception():
-                logging.error(f"❌ Pipeline {pipeline_id} encountered an error: {future.exception()}", exc_info=True)
-
+            logging.info(f"🏁 Pipeline {pipeline_id} completed execution")
         except Exception as e:
            logging.error(f"⚠️ Error in handling pipeline {pipeline_id} completion: {e}")
 
@@ -93,10 +117,14 @@
         if processor:
             processor.stop()
 
-        # Cancel execution thread
-        future = self.pipeline_threads.pop(pipeline_id, None)
-        if future:
-            future.cancel()
+        # Stop execution thread (thread will terminate naturally)
+        thread = self.pipeline_threads.pop(pipeline_id, None)
+        if thread and thread.is_alive():
+            # Thread is daemon, will stop when processor.running becomes False
+            logging.debug(f"Waiting for pipeline {pipeline_id} thread to terminate...")
+            thread.join(timeout=5.0)
+            if thread.is_alive():
+                logging.warning(f"Pipeline {pipeline_id} thread did not terminate cleanly")
 
         # Remove metadata
         self.pipeline_metadata.pop(pipeline_id, None)
@@ -132,7 +160,8 @@
         Returns:
             bool: True if the pipeline is running, False otherwise.
         """
-        return pipeline_id in self.pipeline_threads and not self.pipeline_threads[pipeline_id].done()
+        thread = self.pipeline_threads.get(pipeline_id)
+        return thread is not None and thread.is_alive()
 
     def shutdown(self):
         """Shuts down the pipeline manager gracefully."""
@@ -142,5 +171,7 @@
         for pipeline_id in list(self.pipeline_threads.keys()):
             self.stop_pipeline(pipeline_id)
 
+        logging.info("✅ PipelineManager stopped.")
+
         self.executor.shutdown(wait=True)  # Wait for all threads to finish
         logging.info("✅ PipelineManager stopped.")
@@ -8,12 +8,14 @@ from .PipelineConfigManager import PipelineConfigManager
 from .PipelinePrepocessor import PipelinePrepocessor
 from ..repositories.WorkerSourcePipelineDebugRepository import WorkerSourcePipelineDebugRepository
 from ..repositories.WorkerSourcePipelineDetectionRepository import WorkerSourcePipelineDetectionRepository
+from ..repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository
 from ..streams.VideoStreamManager import VideoStreamManager
 from ..ai.VideoDebugger import VideoDebugger
 from ..ai.FrameDrawer import FrameDrawer
 from ..tracker.TrackerManager import TrackerManager
 from ..detection.BaseDetector import BaseDetector
 from ..streams.RTMPStreamer import RTMPStreamer
+from ..util.PipelinePreviewChecker import PipelinePreviewChecker
 
 
 class PipelineProcessor:
@@ -41,6 +43,10 @@
         self.worker_source_id = pipeline.worker_source_id
 
         self.rtmp_streamer = None
+        self.rtmp_streaming_active = False
+        self.last_preview_check_time = 0
+        self.preview_check_interval = 5.0  # Check every 5 seconds
+        self.pipeline_repo = WorkerSourcePipelineRepository()
 
         self.detection_processor_codes = [
             PPEDetectionProcessor.code,
@@ -137,6 +143,7 @@
     def process_pipeline(self, video_manager: VideoStreamManager):
         pipeline_id = self.pipeline_id
         worker_source_id = self.worker_source_id
+
         logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
 
         self._update_config_internal()
@@ -148,9 +155,6 @@
             logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
             return
 
-        # Start RTMP (internal pacer thread will begin on first push_frame())
-        self.rtmp_streamer = RTMPStreamer(pipeline_id)
-
         # Start detection thread
         self.detection_thread = threading.Thread(
             target=self._detection_worker,
@@ -204,20 +208,43 @@
                     logging.warning(f"Debug save failed: {e}")
                 self.debug_flag = False
 
-            # ---- RTMP push (latest-only; pacing handled inside RTMPStreamer) ----
-            if self.rtmp_streamer is None or not self.rtmp_streamer.is_active():
-                # (re)create; it will learn WxH on first push
-                self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
+            # Check if RTMP streaming should be active based on preview requests
+            current_time = time.time()
+            if current_time - self.last_preview_check_time >= self.preview_check_interval:
+                self._check_and_update_rtmp_streaming()
+                self.last_preview_check_time = current_time
 
-            try:
-                self.rtmp_streamer.push_frame(drawn_frame)
-            except Exception as e:
-                logging.error(f"❌ RTMP push error: {e}")
-                try:
-                    self.rtmp_streamer.stop_stream()
-                except Exception:
-                    pass
-                self.rtmp_streamer = None
+            # Push frame to RTMP stream if preview is active
+            if self.rtmp_streaming_active:
+                if self.rtmp_streamer is None:
+                    try:
+                        self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
+                        logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id} (preview requested)")
+                    except Exception as e:
+                        logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
+                        self.rtmp_streamer = None
+
+                if self.rtmp_streamer:
+                    try:
+                        self.rtmp_streamer.push_frame(drawn_frame)
+                    except Exception as e:
+                        logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
+                        if "initialization_failed" in str(e).lower():
+                            try:
+                                self.rtmp_streamer.stop_stream()
+                            except Exception:
+                                pass
+                            self.rtmp_streamer = None
+            else:
+                # Stop RTMP streaming if preview is no longer active
+                if self.rtmp_streamer is not None:
+                    try:
+                        logging.info(f"🛑 Stopping RTMP streamer for pipeline {pipeline_id} (preview expired)")
+                        self.rtmp_streamer.stop_stream()
+                    except Exception as e:
+                        logging.error(f"❌ Error stopping RTMP streamer: {e}")
+                    finally:
+                        self.rtmp_streamer = None
 
             # feed detection worker with latest-only behavior
             if self.detection_thread and self.detection_thread.is_alive():
@@ -533,6 +560,33 @@
         self.consecutive_frame_failures = 0
         self.last_successful_frame_time = time.time()
 
+    def _check_and_update_rtmp_streaming(self):
+        """
+        Check if RTMP streaming should be active based on preview requests.
+        Updates the rtmp_streaming_active flag.
+        """
+        try:
+            # Get fresh pipeline data from database
+            pipeline = self.pipeline_repo.get_worker_source_pipeline(self.pipeline_id)
+
+            if not pipeline:
+                logging.warning(f"⚠️ Pipeline {self.pipeline_id} not found in database")
+                self.rtmp_streaming_active = False
+                return
+
+            # Check if preview is active using the utility
+            should_stream = PipelinePreviewChecker.should_stream_rtmp(
+                pipeline.last_preview_request_at,
+                preview_window_seconds=300  # 5 minutes
+            )
+
+            self.rtmp_streaming_active = should_stream
+
+        except Exception as e:
+            logging.error(f"❌ Error checking preview status for pipeline {self.pipeline_id}: {e}")
+            # On error, disable streaming to be safe
+            self.rtmp_streaming_active = False
+
     def reset_frame_failure_counters(self):
         logging.info(f"🔄 Resetting frame failure counters for pipeline {self.pipeline_id}")
         self.consecutive_frame_failures = 0
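
The new util/PipelinePreviewChecker.py (+50 lines) appears in the file table but its body is not shown in this diff. From the call site above, a hypothetical sketch of the contract it must satisfy (the real implementation may differ):

# Hypothetical sketch of the PipelinePreviewChecker contract implied by the
# call site above; the actual util/PipelinePreviewChecker.py is not shown here.
from datetime import datetime, timedelta
from typing import Optional

class PipelinePreviewChecker:
    @staticmethod
    def should_stream_rtmp(last_preview_request_at: Optional[datetime],
                           preview_window_seconds: int = 300) -> bool:
        """Return True while the last preview request falls within the window."""
        if last_preview_request_at is None:
            return False
        age = datetime.utcnow() - last_preview_request_at
        return age <= timedelta(seconds=preview_window_seconds)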
@@ -38,9 +38,13 @@ class PipelineSyncThread(threading.Thread):
 
     def on_pipeline_stopped(self, pipeline_id: str) -> None:
         """Set the pipeline as stopped in the database."""
-        pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
-        pipeline.pipeline_status_code = "run" if pipeline.pipeline_status_code == "restart" else "stop"
-        self.pipeline_repo.session.commit()
+        try:
+            pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
+            if pipeline:
+                new_status = "run" if pipeline.pipeline_status_code == "restart" else "stop"
+                self.pipeline_repo.update_pipeline_status(pipeline_id, new_status)
+        except Exception as e:
+            logging.error(f"Failed to update pipeline status for {pipeline_id}: {e}")
 
     def run(self) -> None:
         """Continuously updates pipelines based on database changes."""
@@ -52,16 +56,11 @@
             local_pipeline_ids = set(self.pipeline_manager.get_active_pipelines())
             db_pipeline_ids = set(db_pipelines.keys())
 
-            restarted_pipeline = False
-
             # Process pipeline changes
-            self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines, restarted_pipeline)
+            self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines)
             self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
             self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines)
 
-            if restarted_pipeline:
-                self.pipeline_repo.session.commit()
-
             # Sync the cache to remove unused detectors
             active_model_ids = {p.ai_model_id for p in db_pipelines.values() if p.pipeline_status_code == 'run'}
             self.model_manager.sync_cache(active_model_ids)
@@ -71,15 +70,16 @@
 
             time.sleep(self.polling_interval)
 
-    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object],
-                           restarted_pipeline: bool) -> None:
+    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> None:
         """Add new pipelines that exist in DB but not locally."""
         for pid in pipeline_ids:
             pipeline = db_pipelines[pid]
 
             if pipeline.pipeline_status_code == 'restart':
+                # Update status in database
+                self.pipeline_repo.update_pipeline_status(pid, 'run')
+                # Update local object too for consistency
                 pipeline.pipeline_status_code = 'run'
-                restarted_pipeline = True
 
             if pipeline.pipeline_status_code == 'run':
                 detector = self.model_manager.get_detector(pipeline.ai_model_id)
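
update_pipeline_status() is called in both hunks above but is defined in WorkerSourcePipelineRepository, whose hunk is not shown here. A hypothetical sketch of what such a method could look like, assuming the BaseRepository session pattern introduced later in this diff:

# Hypothetical sketch; the actual WorkerSourcePipelineRepository code is not shown.
from ..models.worker_source_pipeline import WorkerSourcePipelineEntity

class WorkerSourcePipelineRepository(BaseRepository):
    def update_pipeline_status(self, pipeline_id: str, status_code: str) -> None:
        with self._get_session() as session:
            pipeline = (session.query(WorkerSourcePipelineEntity)
                        .filter_by(id=pipeline_id)
                        .first())
            if pipeline:
                pipeline.pipeline_status_code = status_code
            # _get_session() commits on success and rolls back on error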
@@ -1,15 +1,13 @@
 import logging
-from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError
-from ..database.DatabaseManager import DatabaseManager
+from .BaseRepository import BaseRepository
 from ..models.ai_model import AIModelEntity
 
-class AIModelRepository:
+class AIModelRepository(BaseRepository):
     """Handles storage of AI Models into SQLite using SQLAlchemy."""
 
     def __init__(self):
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("default")
+        super().__init__(db_name="default")
 
     def get_models(self) -> list:
         """
@@ -19,13 +17,14 @@
             list: A list of AIModelEntity objects.
         """
         try:
-            self.session.expire_all()
-            models = self.session.query(AIModelEntity).all()
-
-            for model in models:
-                self.session.expunge(model)
-
-            return models
+            with self._get_session() as session:
+                session.expire_all()
+                models = session.query(AIModelEntity).all()
+
+                for model in models:
+                    session.expunge(model)
+
+                return models
         except SQLAlchemyError as e:
             logging.error(f"Error retrieving models: {e}")
             return []
@@ -41,11 +40,12 @@
             An AIModelEntity object or None if not found.
         """
         try:
-            self.session.expire_all()
-            model = self.session.query(AIModelEntity).filter_by(id=model_id).first()
-            if model:
-                self.session.expunge(model)
-            return model
+            with self._get_session() as session:
+                session.expire_all()
+                model = session.query(AIModelEntity).filter_by(id=model_id).first()
+                if model:
+                    session.expunge(model)
+                return model
         except SQLAlchemyError as e:
             logging.error(f"Error retrieving model {model_id}: {e}")
             return None
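
The expunge() calls matter more under the new session-per-operation pattern: sessionmaker defaults to expire_on_commit=True, so any instance still attached when _get_session() commits is expired, and reading it after the session closes raises DetachedInstanceError. A minimal illustration (a configured Session factory and a populated AIModelEntity table are assumed):

# Why expunge() is called before the session goes away (illustrative only).
session = Session()
model = session.query(AIModelEntity).first()
if model is not None:
    session.expunge(model)  # detach now, while attributes are still loaded
session.commit()            # expire_on_commit no longer touches the detached row
session.close()
if model is not None:
    print(model.id)         # safe; an attached instance would have been expired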
@@ -0,0 +1,44 @@
+from contextlib import contextmanager
+from ..database.DatabaseManager import DatabaseManager
+
+
+class BaseRepository:
+    """
+    Base repository class that provides thread-safe database session management.
+
+    All repositories should inherit from this class to ensure proper connection pooling
+    and to prevent connection leaks in multi-threaded environments.
+    """
+
+    def __init__(self, db_name: str = "default"):
+        """
+        Initialize the base repository.
+
+        Args:
+            db_name: Name of the database to connect to ('default', 'config', or 'logging')
+        """
+        self.db_manager = DatabaseManager()
+        self.db_name = db_name
+
+    @contextmanager
+    def _get_session(self):
+        """
+        Context manager for database sessions.
+
+        Ensures sessions are properly opened and closed, preventing connection leaks.
+        Each operation gets a fresh session that is automatically closed when done.
+
+        Usage:
+            with self._get_session() as session:
+                results = session.query(Model).all()
+                return results
+        """
+        session = self.db_manager.get_session(self.db_name)
+        try:
+            yield session
+            session.commit()  # Commit any pending changes
+        except Exception:
+            session.rollback()  # Rollback on error
+            raise
+        finally:
+            session.close()  # Always close the session
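
Every repository in this release is migrated onto this base class (AIModelRepository above, PPEDetectionRepository below, and the others in the file table). A minimal sketch of the subclass pattern, with ExampleRepository and ExampleEntity as hypothetical stand-ins:

# Hypothetical subclass showing the intended _get_session() usage.
import logging
from sqlalchemy.exc import SQLAlchemyError

class ExampleRepository(BaseRepository):
    def __init__(self):
        super().__init__(db_name="default")

    def get_all(self) -> list:
        try:
            with self._get_session() as session:
                rows = session.query(ExampleEntity).all()
                for row in rows:
                    session.expunge(row)  # detach so rows outlive the session
                return rows               # exiting the block commits and closes
        except SQLAlchemyError as e:
            logging.error(f"Error retrieving rows: {e}")
            return []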
@@ -4,21 +4,19 @@ import datetime
 import uuid
 import logging
 from pathlib import Path
-from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError
-from ..database.DatabaseManager import DatabaseManager
+from .BaseRepository import BaseRepository
 from ..models.ppe_detection import PPEDetectionEntity
 from ..models.ppe_detection_label import PPEDetectionLabelEntity
 from ..util.DrawingUtils import DrawingUtils
+from ..database.DatabaseManager import DatabaseManager
 
-class PPEDetectionRepository:
+class PPEDetectionRepository(BaseRepository):
     """Handles storage of PPE detections into SQLite using SQLAlchemy."""
 
     def __init__(self):
-
+        super().__init__(db_name="default")
         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "ppe_detections"
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("default")
         os.makedirs(self.storage_dir, exist_ok=True)
 
     def save_ppe_detection(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
@@ -64,86 +62,86 @@
             cv2.imwrite(cropped_image_path, cropped_image)
 
         try:
-            new_detection = PPEDetectionEntity(
-                id=str(uuid.uuid4()),
-                worker_id=pipeline_id,
-                worker_source_id=worker_source_id,
-                person_id=person_id,
-                image_path=full_image_path,
-                image_tile_path=cropped_image_path,
-                b_box_x1=bbox[0],
-                b_box_y1=bbox[1],
-                b_box_x2=bbox[2],
-                b_box_y2=bbox[3],
-                detection_count=tracked_obj.get("detections", 0)
-            )
-            self.session.add(new_detection)
-            self.session.flush()
-
-            for attr in filtered_attributes:
-                label = attr["label"]
-                valid_attributes.append(label)
-
-                if attr and "bbox" in attr:
-                    attr_bbox = attr["bbox"]
-                    # Assuming attr_bbox is in [x, y, width, height] format.
-                    attr_b_box_x1 = attr_bbox[0]
-                    attr_b_box_y1 = attr_bbox[1]
-                    attr_b_box_x2 = attr_bbox[2]
-                    attr_b_box_y2 = attr_bbox[3]
-                else:
-                    # Fallback to default values if the attribute bbox is not available.
-                    attr_b_box_x1 = 0.0
-                    attr_b_box_y1 = 0.0
-                    attr_b_box_x2 = 0.0
-                    attr_b_box_y2 = 0.0
-
-                # Retrieve confidence score; default to 1.0 if not available.
-                if attr:
-                    confidence_score = attr.get("confidence", 1.0)
-                else:
-                    confidence_score = 1.0
-
-                new_label = PPEDetectionLabelEntity(
+            with self._get_session() as session:
+                new_detection = PPEDetectionEntity(
                     id=str(uuid.uuid4()),
-                    detection_id=new_detection.id,
-                    code=label,
-                    confidence_score=confidence_score,
-                    detection_count=attr.get("count", 0),
-                    b_box_x1=attr_b_box_x1,
-                    b_box_y1=attr_b_box_y1,
-                    b_box_x2=attr_b_box_x2,
-                    b_box_y2=attr_b_box_y2
-                )
-                self.session.add(new_label)
-
-            self.session.commit()
-            logging.info(f"✅ Inserted detection for Person {person_id}, Attributes: {valid_attributes}")
-
-            # Trigger detection callback with unified data structure
-            try:
-                from ..core_service import CoreService
-                from ..detection.detection_processing.PPEDetectionProcessor import PPEDetectionProcessor
-
-                # Create unified detection data using the processor's factory method
-                unified_data = PPEDetectionProcessor.create_detection_data(
-                    pipeline_id=pipeline_id,
+                    worker_id=pipeline_id,
                     worker_source_id=worker_source_id,
                     person_id=person_id,
-                    detection_id=new_detection.id,
-                    tracked_obj=tracked_obj,
                     image_path=full_image_path,
                     image_tile_path=cropped_image_path,
-                    frame_id=frame_id
+                    b_box_x1=bbox[0],
+                    b_box_y1=bbox[1],
+                    b_box_x2=bbox[2],
+                    b_box_y2=bbox[3],
+                    detection_count=tracked_obj.get("detections", 0)
                 )
-
-                # Trigger callbacks
-                CoreService.trigger_detection(unified_data)
-
-            except Exception as e:
-                logging.warning(f"⚠️ Failed to trigger PPE detection callback: {e}")
+                session.add(new_detection)
+                session.flush()
+
+                for attr in filtered_attributes:
+                    label = attr["label"]
+                    valid_attributes.append(label)
+
+                    if attr and "bbox" in attr:
+                        attr_bbox = attr["bbox"]
+                        # Assuming attr_bbox is in [x, y, width, height] format.
+                        attr_b_box_x1 = attr_bbox[0]
+                        attr_b_box_y1 = attr_bbox[1]
+                        attr_b_box_x2 = attr_bbox[2]
+                        attr_b_box_y2 = attr_bbox[3]
+                    else:
+                        # Fallback to default values if the attribute bbox is not available.
+                        attr_b_box_x1 = 0.0
+                        attr_b_box_y1 = 0.0
+                        attr_b_box_x2 = 0.0
+                        attr_b_box_y2 = 0.0
+
+                    # Retrieve confidence score; default to 1.0 if not available.
+                    if attr:
+                        confidence_score = attr.get("confidence", 1.0)
+                    else:
+                        confidence_score = 1.0
+
+                    new_label = PPEDetectionLabelEntity(
+                        id=str(uuid.uuid4()),
+                        detection_id=new_detection.id,
+                        code=label,
+                        confidence_score=confidence_score,
+                        detection_count=attr.get("count", 0),
+                        b_box_x1=attr_b_box_x1,
+                        b_box_y1=attr_b_box_y1,
+                        b_box_x2=attr_b_box_x2,
+                        b_box_y2=attr_b_box_y2
+                    )
+                    session.add(new_label)
+
+                # Commit happens automatically via context manager
+                logging.info(f"✅ Inserted detection for Person {person_id}, Attributes: {valid_attributes}")
+
+                # Trigger detection callback with unified data structure
+                try:
+                    from ..core_service import CoreService
+                    from ..detection.detection_processing.PPEDetectionProcessor import PPEDetectionProcessor
+
+                    # Create unified detection data using the processor's factory method
+                    unified_data = PPEDetectionProcessor.create_detection_data(
+                        pipeline_id=pipeline_id,
+                        worker_source_id=worker_source_id,
+                        person_id=person_id,
+                        detection_id=new_detection.id,
+                        tracked_obj=tracked_obj,
+                        image_path=full_image_path,
+                        image_tile_path=cropped_image_path,
+                        frame_id=frame_id
+                    )
+
+                    # Trigger callbacks
+                    CoreService.trigger_detection(unified_data)
+
+                except Exception as e:
+                    logging.warning(f"⚠️ Failed to trigger PPE detection callback: {e}")
 
         except SQLAlchemyError as e:
-            self.session.rollback()
             logging.error(f"❌ Database error while saving detection: {e}")