nedo-vision-worker-core 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nedo-vision-worker-core might be problematic.
- nedo_vision_worker_core/__init__.py +1 -1
- nedo_vision_worker_core/database/DatabaseManager.py +17 -1
- nedo_vision_worker_core/pipeline/PipelineManager.py +63 -19
- nedo_vision_worker_core/pipeline/PipelineProcessor.py +23 -17
- nedo_vision_worker_core/pipeline/PipelineSyncThread.py +29 -32
- nedo_vision_worker_core/repositories/AIModelRepository.py +17 -17
- nedo_vision_worker_core/repositories/BaseRepository.py +44 -0
- nedo_vision_worker_core/repositories/PPEDetectionRepository.py +77 -79
- nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +37 -38
- nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +47 -46
- nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +14 -15
- nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +68 -36
- nedo_vision_worker_core/repositories/WorkerSourceRepository.py +9 -7
- nedo_vision_worker_core/streams/RTMPStreamer.py +283 -106
- nedo_vision_worker_core/streams/StreamSyncThread.py +51 -24
- nedo_vision_worker_core/streams/VideoStreamManager.py +76 -20
- {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/METADATA +3 -2
- {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/RECORD +21 -20
- {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/WHEEL +0 -0
- {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/entry_points.txt +0 -0
- {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/top_level.txt +0 -0
nedo_vision_worker_core/__init__.py
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
 from .core_service import CoreService
 from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata

-__version__ = "0.3.4"
+__version__ = "0.3.6"
 __all__ = [
     "CoreService",
     "DetectionType",
nedo_vision_worker_core/database/DatabaseManager.py
@@ -121,7 +121,23 @@ class DatabaseManager:
         # Initialize engines and session factories for each database
         for name, path in DB_PATHS.items():
             path.parent.mkdir(parents=True, exist_ok=True)  # Ensure directory exists
-
+
+            # Configure connection pool for multi-threaded usage
+            # pool_size: Max connections to keep open
+            # max_overflow: Additional connections that can be created temporarily
+            # pool_pre_ping: Test connections before using (prevents stale connections)
+            # pool_recycle: Recycle connections after N seconds (prevents long-lived stale connections)
+            engine = create_engine(
+                f"sqlite:///{path.as_posix()}",
+                pool_size=20,        # Base pool size for persistent connections
+                max_overflow=30,     # Allow up to 30 additional temporary connections
+                pool_pre_ping=True,  # Verify connection health before use
+                pool_recycle=3600,   # Recycle connections after 1 hour
+                connect_args={
+                    "check_same_thread": False,  # Required for SQLite with multiple threads
+                    "timeout": 30.0              # Connection timeout
+                }
+            )
             ENGINES[name] = engine
             SESSION_FACTORIES[name] = scoped_session(sessionmaker(bind=engine))  # Use scoped sessions
             DatabaseManager.synchronize(name)
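These pool settings only pay off when every worker thread draws its own session from the scoped_session factory registered above. The following is a minimal standalone sketch of that pattern, assuming SQLAlchemy 2.x (where file-based SQLite defaults to a QueuePool that accepts these arguments); the Item model and worker() function are hypothetical, not part of this package.

# Illustrative sketch only: a pooled SQLite engine shared by several threads,
# mirroring the engine options added in the diff above.
import threading
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker, scoped_session, declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine(
    "sqlite:///example.db",
    pool_size=20,
    max_overflow=30,
    pool_pre_ping=True,
    pool_recycle=3600,
    connect_args={"check_same_thread": False, "timeout": 30.0},
)
Base.metadata.create_all(engine)
Session = scoped_session(sessionmaker(bind=engine))

def worker(n: int) -> None:
    session = Session()  # scoped_session hands each thread its own session
    try:
        session.add(Item(name=f"item-{n}"))
        session.commit()
    finally:
        Session.remove()  # close the session and return its connection to the pool

threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()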
nedo_vision_worker_core/pipeline/PipelineManager.py
@@ -1,6 +1,5 @@
 import logging
 import threading
-from concurrent.futures import ThreadPoolExecutor, Future
 from typing import Dict
 from .PipelineProcessor import PipelineProcessor
 from ..streams.VideoStreamManager import VideoStreamManager
@@ -9,8 +8,8 @@ class PipelineManager:
     """Manages AI pipeline execution and video stream processing."""

     def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=50):
-        self.
-        self.pipeline_threads = {}  # Stores
+        self.max_workers = max_workers
+        self.pipeline_threads = {}  # Stores Thread objects {pipeline_id: Thread}
         self.pipeline_metadata = {}  # Stores actual pipeline data {pipeline_id: metadata}
         self.video_manager = video_manager  # Manages video streams
         self.processors: Dict[str, PipelineProcessor] = {}  # Stores PipelineProcessor instances per pipeline
@@ -39,18 +38,52 @@ class PipelineManager:

         logging.info(f"🚀 Starting Pipeline processing for pipeline: {pipeline_id} | Source: {worker_source_id} ({pipeline.name})")

+        # Acquire the video stream (starts it if not already running)
+        if not self.video_manager.acquire_stream(worker_source_id, pipeline_id):
+            logging.error(f"❌ Failed to acquire stream {worker_source_id} for pipeline {pipeline_id}")
+            return
+
         processor = PipelineProcessor(pipeline, detector, False)
         processor.frame_drawer.location_name = pipeline.location_name
         self.processors[pipeline_id] = processor  # Store processor instance

-
-
-
+        active_count = len([t for t in self.pipeline_threads.values() if t.is_alive()])
+        logging.info(f"📋 Starting pipeline {pipeline_id} thread (active threads: {active_count})")
+
+        try:
+            # Wrap the execution to catch any early errors
+            def _safe_process_pipeline():
+                try:
+                    logging.info(f"🏁 Pipeline {pipeline_id} thread execution beginning...")
+                    processor.process_pipeline(self.video_manager)
+                except Exception as e:
+                    logging.error(f"❌ Unhandled error in pipeline {pipeline_id} thread: {e}", exc_info=True)
+                finally:
+                    # Ensure cleanup callback is called
+                    self._handle_pipeline_completion(pipeline_id)
+
+            # Create and start thread directly
+            thread = threading.Thread(
+                target=_safe_process_pipeline,
+                name=f"pipeline-{pipeline_id[:8]}",
+                daemon=True
+            )
+
+            self.pipeline_threads[pipeline_id] = thread
+            self.pipeline_metadata[pipeline_id] = pipeline
+
+            logging.info(f"⚙️ Starting thread for pipeline {pipeline_id}")
+            thread.start()
+            logging.info(f"✅ Pipeline {pipeline_id} thread started successfully")

-
-
+        except Exception as e:
+            logging.error(f"❌ Failed to start pipeline {pipeline_id} thread: {e}", exc_info=True)
+            # Clean up on failure
+            self.processors.pop(pipeline_id, None)
+            self.video_manager.release_stream(worker_source_id, pipeline_id)
+            raise

-    def _handle_pipeline_completion(self, pipeline_id: str
+    def _handle_pipeline_completion(self, pipeline_id: str):
         """
         Handles cleanup when a pipeline finishes processing.
         """
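The hunk above replaces executor futures with one daemon thread per pipeline. Below is a minimal, self-contained sketch of that lifecycle pattern (start, cooperative stop via a flag, bounded join); DummyProcessor and the helper functions are hypothetical stand-ins, not the package's classes.

# Illustrative sketch of the per-pipeline daemon-thread pattern; names are hypothetical.
import logging
import threading
import time

class DummyProcessor:
    """Stands in for PipelineProcessor: loops until stop() is called."""
    def __init__(self) -> None:
        self.running = True

    def process(self) -> None:
        while self.running:
            time.sleep(0.1)  # placeholder for per-frame work

    def stop(self) -> None:
        self.running = False

def start_pipeline(pipeline_id: str, threads: dict, processors: dict) -> None:
    processor = DummyProcessor()
    processors[pipeline_id] = processor

    def _safe_run() -> None:
        try:
            processor.process()
        except Exception:
            logging.exception("Unhandled error in pipeline %s", pipeline_id)

    thread = threading.Thread(target=_safe_run, name=f"pipeline-{pipeline_id[:8]}", daemon=True)
    threads[pipeline_id] = thread
    thread.start()

def stop_pipeline(pipeline_id: str, threads: dict, processors: dict) -> None:
    processor = processors.pop(pipeline_id, None)
    if processor:
        processor.stop()              # cooperative stop: the thread exits its loop
    thread = threads.pop(pipeline_id, None)
    if thread and thread.is_alive():
        thread.join(timeout=5.0)      # bounded wait, mirroring the diff

threads, processors = {}, {}
start_pipeline("abcdef1234", threads, processors)
stop_pipeline("abcdef1234", threads, processors)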
@@ -59,11 +92,7 @@
             return  # If it's already being stopped manually, don't trigger again

         try:
-
-                logging.info(f"🚫 Pipeline {pipeline_id} was cancelled.")
-            elif future.exception():
-                logging.error(f"❌ Pipeline {pipeline_id} encountered an error: {future.exception()}", exc_info=True)
-
+            logging.info(f"🏁 Pipeline {pipeline_id} completed execution")
         except Exception as e:
             logging.error(f"⚠️ Error in handling pipeline {pipeline_id} completion: {e}")

@@ -79,19 +108,31 @@
         self._stopping_pipelines.add(pipeline_id)

         try:
+            # Get worker_source_id before removing metadata
+            pipeline = self.pipeline_metadata.get(pipeline_id)
+            worker_source_id = pipeline.worker_source_id if pipeline else None
+
             # Stop AI processing
             processor = self.processors.pop(pipeline_id, None)
             if processor:
                 processor.stop()

-            #
-
-            if
-
+            # Stop execution thread (thread will terminate naturally)
+            thread = self.pipeline_threads.pop(pipeline_id, None)
+            if thread and thread.is_alive():
+                # Thread is daemon, will stop when processor.running becomes False
+                logging.debug(f"Waiting for pipeline {pipeline_id} thread to terminate...")
+                thread.join(timeout=5.0)
+                if thread.is_alive():
+                    logging.warning(f"Pipeline {pipeline_id} thread did not terminate cleanly")

             # Remove metadata
             self.pipeline_metadata.pop(pipeline_id, None)

+            # Release the video stream (stops it if no more pipelines use it)
+            if worker_source_id:
+                self.video_manager.release_stream(worker_source_id, pipeline_id)
+
             logging.info(f"✅ Pipeline {pipeline_id} stopped successfully.")

         except Exception as e:
@@ -119,7 +160,8 @@ class PipelineManager:
         Returns:
             bool: True if the pipeline is running, False otherwise.
         """
-
+        thread = self.pipeline_threads.get(pipeline_id)
+        return thread is not None and thread.is_alive()

     def shutdown(self):
         """Shuts down the pipeline manager gracefully."""
@@ -129,5 +171,7 @@ class PipelineManager:
         for pipeline_id in list(self.pipeline_threads.keys()):
             self.stop_pipeline(pipeline_id)

+        logging.info("✅ PipelineManager stopped.")
+
         self.executor.shutdown(wait=True)  # Wait for all threads to finish
         logging.info("✅ PipelineManager stopped.")
nedo_vision_worker_core/pipeline/PipelineProcessor.py
@@ -137,6 +137,7 @@ class PipelineProcessor:
     def process_pipeline(self, video_manager: VideoStreamManager):
         pipeline_id = self.pipeline_id
         worker_source_id = self.worker_source_id
+
         logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")

         self._update_config_internal()
@@ -148,9 +149,6 @@
             logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
             return

-        # Start RTMP (internal pacer thread will begin on first push_frame())
-        self.rtmp_streamer = RTMPStreamer(pipeline_id)
-
         # Start detection thread
         self.detection_thread = threading.Thread(
             target=self._detection_worker,
@@ -204,20 +202,27 @@
                     logging.warning(f"Debug save failed: {e}")
                 self.debug_flag = False

-            #
-
-
-
+            # Push frame to RTMP stream
+            # RTMPStreamer handles its own restarts internally
+            if self.rtmp_streamer is None:
+                try:
+                    self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
+                    logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id}")
+                except Exception as e:
+                    logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
+                    self.rtmp_streamer = None

-
-                self.rtmp_streamer.push_frame(drawn_frame)
-            except Exception as e:
-                logging.error(f"❌ RTMP push error: {e}")
+            if self.rtmp_streamer:
                 try:
-                    self.rtmp_streamer.
-                except Exception:
-
-
+                    self.rtmp_streamer.push_frame(drawn_frame)
+                except Exception as e:
+                    logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
+                    if "initialization_failed" in str(e).lower():
+                        try:
+                            self.rtmp_streamer.stop_stream()
+                        except Exception:
+                            pass
+                        self.rtmp_streamer = None

             # feed detection worker with latest-only behavior
             if self.detection_thread and self.detection_thread.is_alive():
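The change above creates the RTMP streamer lazily on first use and drops it after an initialization failure so the next frame can rebuild it. A simplified standalone sketch of that lazy-init-and-reset pattern follows; FakeStreamer stands in for RTMPStreamer and the stop_stream() cleanup is omitted.

# Illustrative lazy-init/reset sketch; FakeStreamer is a stand-in, not the package's RTMPStreamer.
import logging

class FakeStreamer:
    def push_frame(self, frame) -> None:
        if frame is None:
            raise RuntimeError("initialization_failed")

class FramePusher:
    def __init__(self) -> None:
        self.streamer = None

    def push(self, frame) -> None:
        if self.streamer is None:
            try:
                self.streamer = FakeStreamer()   # create on first use
            except Exception as e:
                logging.error("Streamer init failed: %s", e)
                return
        try:
            self.streamer.push_frame(frame)
        except Exception as e:
            logging.error("Push failed: %s", e)
            if "initialization_failed" in str(e).lower():
                self.streamer = None             # drop it; recreated on the next frame

pusher = FramePusher()
pusher.push("frame-bytes")   # succeeds
pusher.push(None)            # fails, streamer is reset
pusher.push("frame-bytes")   # streamer recreated, succeeds again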
@@ -459,9 +464,10 @@
             logging.error(f" Cannot get stream URL for {worker_source_id}")
             return False

-
+        # Use internal methods to restart the stream without affecting reference counting
+        video_manager._stop_stream(worker_source_id)
         time.sleep(1.0)
-        video_manager.
+        video_manager._start_stream(worker_source_id, stream_url)
         time.sleep(2.0)

         if not video_manager.has_stream(worker_source_id):
nedo_vision_worker_core/pipeline/PipelineSyncThread.py
@@ -38,9 +38,13 @@ class PipelineSyncThread(threading.Thread):

     def on_pipeline_stopped(self, pipeline_id: str) -> None:
         """Set the pipeline as stopped in the database."""
-
-
-
+        try:
+            pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
+            if pipeline:
+                new_status = "run" if pipeline.pipeline_status_code == "restart" else "stop"
+                self.pipeline_repo.update_pipeline_status(pipeline_id, new_status)
+        except Exception as e:
+            logging.error(f"Failed to update pipeline status for {pipeline_id}: {e}")

     def run(self) -> None:
         """Continuously updates pipelines based on database changes."""
@@ -52,16 +56,11 @@
                 local_pipeline_ids = set(self.pipeline_manager.get_active_pipelines())
                 db_pipeline_ids = set(db_pipelines.keys())

-                restarted_pipeline = False
-
                 # Process pipeline changes
-                self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines
+                self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines)
                 self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
                 self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines)

-                if restarted_pipeline:
-                    self.pipeline_repo.session.commit()
-
                 # Sync the cache to remove unused detectors
                 active_model_ids = {p.ai_model_id for p in db_pipelines.values() if p.pipeline_status_code == 'run'}
                 self.model_manager.sync_cache(active_model_ids)
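The sync loop above reconciles database state with local state through three set operations. A tiny standalone sketch of that step follows, with plain dicts and print calls standing in for the repository and PipelineManager.

# Illustrative reconciliation sketch; the real code dispatches to PipelineManager instead of printing.
db_pipelines = {"a": "run", "b": "run", "c": "stop"}   # id -> status as read from the database
local_pipeline_ids = {"b", "d"}                        # pipelines currently running locally

db_pipeline_ids = set(db_pipelines)

for pid in db_pipeline_ids - local_pipeline_ids:       # in DB but not running locally
    if db_pipelines[pid] == "run":
        print(f"start {pid}")

for pid in local_pipeline_ids - db_pipeline_ids:       # running locally but deleted from DB
    print(f"remove {pid}")

for pid in db_pipeline_ids & local_pipeline_ids:       # present on both sides: apply updates
    if db_pipelines[pid] in ("stop", "restart"):
        print(f"stop {pid}")
    else:
        print(f"update {pid}")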
@@ -71,15 +70,16 @@

             time.sleep(self.polling_interval)

-    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]
-                           restarted_pipeline: bool) -> None:
+    def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> None:
         """Add new pipelines that exist in DB but not locally."""
         for pid in pipeline_ids:
             pipeline = db_pipelines[pid]

             if pipeline.pipeline_status_code == 'restart':
+                # Update status in database
+                self.pipeline_repo.update_pipeline_status(pid, 'run')
+                # Update local object too for consistency
                 pipeline.pipeline_status_code = 'run'
-                restarted_pipeline = True

             if pipeline.pipeline_status_code == 'run':
                 detector = self.model_manager.get_detector(pipeline.ai_model_id)
@@ -103,8 +103,22 @@

         for pid in pipeline_ids:
             db_pipeline = db_pipelines[pid]
+
+            # Check if pipeline should be stopped (status changed to stop/restart in DB)
+            if db_pipeline.pipeline_status_code in ['stop', 'restart']:
+                if self.pipeline_manager.is_running(pid):
+                    logging.info(f"⏹️ Stopping pipeline due to status change: {pid}")
+                    self.pipeline_manager.stop_pipeline(pid)
+                continue
+
             processor = self.pipeline_manager.processors.get(pid)
             if not processor:
+                # Pipeline exists in both sets but processor doesn't exist - shouldn't happen
+                # but if it does, try to start it if status is 'run'
+                if db_pipeline.pipeline_status_code == 'run':
+                    logging.warning(f"⚠️ Pipeline {pid} exists locally but has no processor. Restarting...")
+                    detector = self.model_manager.get_detector(db_pipeline.ai_model_id)
+                    self.pipeline_manager.start_pipeline(db_pipeline, detector)
                 continue

             local_detector = processor.detector
@@ -114,29 +128,12 @@
                 processor.enable_debug()

     def update_pipeline(self, pid: str, db_pipeline: object, local_detector: object) -> None:
-        """Updates a single pipeline if necessary."""
+        """Updates a single pipeline if necessary (only called for running pipelines)."""
         processor = self.pipeline_manager.processors.get(pid)
         if not processor:
             return

-        #
-        if db_pipeline.pipeline_status_code != processor._pipeline.pipeline_status_code:
-            if db_pipeline.pipeline_status_code == 'run':
-                logging.info(f"▶️ Resuming pipeline: {pid}")
-                self.pipeline_manager.start_pipeline(db_pipeline, self.model_manager.get_detector(db_pipeline.ai_model_id))
-            elif db_pipeline.pipeline_status_code in ['stop', 'restart']:
-                logging.info(f"⏹️ Stopping pipeline: {pid}")
-                self.pipeline_manager.stop_pipeline(pid)
-                if db_pipeline.pipeline_status_code == 'restart':
-                    # This will be picked up by the 'add_new_pipelines' logic in the next cycle
-                    return
-            else:
-                processor.update_config(db_pipeline)  # Update config for non-running pipelines
-                return
-        elif db_pipeline.pipeline_status_code != 'run':
-            processor.update_config(db_pipeline)
-            return
-
+        # At this point, we know db_pipeline.pipeline_status_code == 'run' (checked in caller)
         # Check for significant changes that require a restart
         db_detector = self.model_manager.get_detector(db_pipeline.ai_model_id)

@@ -146,7 +143,7 @@
             local_detector != db_detector
         ])

-        if requires_restart
+        if requires_restart:
             logging.info(f"🔄 Restarting pipeline due to significant changes: {pid}")
             self.pipeline_manager.stop_pipeline(pid)
             self.pipeline_manager.start_pipeline(db_pipeline, db_detector)
nedo_vision_worker_core/repositories/AIModelRepository.py
@@ -1,15 +1,13 @@
 import logging
-from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError
-from
+from .BaseRepository import BaseRepository
 from ..models.ai_model import AIModelEntity

-class AIModelRepository:
+class AIModelRepository(BaseRepository):
     """Handles storage of AI Models into SQLite using SQLAlchemy."""

     def __init__(self):
-
-        self.session: Session = self.db_manager.get_session("default")
+        super().__init__(db_name="default")

     def get_models(self) -> list:
         """
@@ -19,13 +17,14 @@ class AIModelRepository:
             list: A list of AIModelEntity objects.
         """
         try:
-            self.
-
-
-
-
-
-
+            with self._get_session() as session:
+                session.expire_all()
+                models = session.query(AIModelEntity).all()
+
+                for model in models:
+                    session.expunge(model)
+
+                return models
         except SQLAlchemyError as e:
             logging.error(f"Error retrieving models: {e}")
             return []
@@ -41,11 +40,12 @@ class AIModelRepository:
             An AIModelEntity object or None if not found.
         """
         try:
-            self.
-
-
-
-
+            with self._get_session() as session:
+                session.expire_all()
+                model = session.query(AIModelEntity).filter_by(id=model_id).first()
+                if model:
+                    session.expunge(model)
+                return model
         except SQLAlchemyError as e:
             logging.error(f"Error retrieving model {model_id}: {e}")
             return None
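In the two read paths above, expire_all() forces fresh reads and expunge() detaches the results so they stay usable after the short-lived session closes (the session helper commits on exit, which would otherwise expire them). A minimal standalone sketch of why detaching matters, assuming SQLAlchemy 1.4+ and a hypothetical Thing entity:

# Illustrative sketch: reading a detached object after its session has committed and closed.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker, declarative_base

Base = declarative_base()

class Thing(Base):
    __tablename__ = "things"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")   # in-memory DB, single-threaded demo
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

with Session() as session:
    session.add(Thing(id=1, name="helmet-detector"))
    session.commit()

with Session() as session:
    thing = session.query(Thing).first()
    session.expunge(thing)   # detach before commit/close so the loaded values are kept
    session.commit()

# Still readable here; without expunge(), expire_on_commit would have expired the
# instance and attribute access after close could raise DetachedInstanceError.
print(thing.name)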
nedo_vision_worker_core/repositories/BaseRepository.py (new file)
@@ -0,0 +1,44 @@
+from contextlib import contextmanager
+from ..database.DatabaseManager import DatabaseManager
+
+
+class BaseRepository:
+    """
+    Base repository class that provides thread-safe database session management.
+
+    All repositories should inherit from this class to ensure proper connection pooling
+    and to prevent connection leaks in multi-threaded environments.
+    """
+
+    def __init__(self, db_name: str = "default"):
+        """
+        Initialize the base repository.
+
+        Args:
+            db_name: Name of the database to connect to ('default', 'config', or 'logging')
+        """
+        self.db_manager = DatabaseManager()
+        self.db_name = db_name
+
+    @contextmanager
+    def _get_session(self):
+        """
+        Context manager for database sessions.
+
+        Ensures sessions are properly opened and closed, preventing connection leaks.
+        Each operation gets a fresh session that is automatically closed when done.
+
+        Usage:
+            with self._get_session() as session:
+                results = session.query(Model).all()
+                return results
+        """
+        session = self.db_manager.get_session(self.db_name)
+        try:
+            yield session
+            session.commit()  # Commit any pending changes
+        except Exception:
+            session.rollback()  # Rollback on error
+            raise
+        finally:
+            session.close()  # Always close the session
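The Usage docstring above shows the intended call pattern. As a sketch of how a repository might be written against this base class, here is a hypothetical WidgetRepository; WidgetEntity and its module are illustrative names, not part of the package.

# Hypothetical repository built on the BaseRepository pattern shown above.
import logging
from sqlalchemy.exc import SQLAlchemyError

from .BaseRepository import BaseRepository
from ..models.widget import WidgetEntity   # hypothetical entity/module


class WidgetRepository(BaseRepository):
    """Illustrative only: one short-lived session per operation."""

    def __init__(self):
        super().__init__(db_name="default")

    def get_widgets(self) -> list:
        try:
            with self._get_session() as session:
                widgets = session.query(WidgetEntity).all()
                for widget in widgets:
                    session.expunge(widget)   # detach so callers can use them after close
                return widgets
        except SQLAlchemyError as e:
            logging.error(f"Error retrieving widgets: {e}")
            return []

    def add_widget(self, widget: WidgetEntity) -> bool:
        try:
            with self._get_session() as session:
                session.add(widget)           # commit/rollback handled by the context manager
                return True
        except SQLAlchemyError as e:
            logging.error(f"Error saving widget: {e}")
            return False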
nedo_vision_worker_core/repositories/PPEDetectionRepository.py
@@ -4,21 +4,19 @@ import datetime
 import uuid
 import logging
 from pathlib import Path
-from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError
-from
+from .BaseRepository import BaseRepository
 from ..models.ppe_detection import PPEDetectionEntity
 from ..models.ppe_detection_label import PPEDetectionLabelEntity
 from ..util.DrawingUtils import DrawingUtils
+from ..database.DatabaseManager import DatabaseManager

-class PPEDetectionRepository:
+class PPEDetectionRepository(BaseRepository):
     """Handles storage of PPE detections into SQLite using SQLAlchemy."""

     def __init__(self):
-
+        super().__init__(db_name="default")
         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "ppe_detections"
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("default")
         os.makedirs(self.storage_dir, exist_ok=True)

     def save_ppe_detection(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
@@ -64,86 +62,86 @@ class PPEDetectionRepository:
             cv2.imwrite(cropped_image_path, cropped_image)

             try:
-
-
-                    worker_id=pipeline_id,
-                    worker_source_id=worker_source_id,
-                    person_id=person_id,
-                    image_path=full_image_path,
-                    image_tile_path=cropped_image_path,
-                    b_box_x1=bbox[0],
-                    b_box_y1=bbox[1],
-                    b_box_x2=bbox[2],
-                    b_box_y2=bbox[3],
-                    detection_count=tracked_obj.get("detections", 0)
-                )
-                self.session.add(new_detection)
-                self.session.flush()
-
-                for attr in filtered_attributes:
-                    label = attr["label"]
-                    valid_attributes.append(label)
-
-                    if attr and "bbox" in attr:
-                        attr_bbox = attr["bbox"]
-                        # Assuming attr_bbox is in [x, y, width, height] format.
-                        attr_b_box_x1 = attr_bbox[0]
-                        attr_b_box_y1 = attr_bbox[1]
-                        attr_b_box_x2 = attr_bbox[2]
-                        attr_b_box_y2 = attr_bbox[3]
-                    else:
-                        # Fallback to default values if the attribute bbox is not available.
-                        attr_b_box_x1 = 0.0
-                        attr_b_box_y1 = 0.0
-                        attr_b_box_x2 = 0.0
-                        attr_b_box_y2 = 0.0
-
-                    # Retrieve confidence score; default to 1.0 if not available.
-                    if attr:
-                        confidence_score = attr.get("confidence", 1.0)
-                    else:
-                        confidence_score = 1.0
-
-                    new_label = PPEDetectionLabelEntity(
+                with self._get_session() as session:
+                    new_detection = PPEDetectionEntity(
                         id=str(uuid.uuid4()),
-
-                        code=label,
-                        confidence_score=confidence_score,
-                        detection_count=attr.get("count", 0),
-                        b_box_x1=attr_b_box_x1,
-                        b_box_y1=attr_b_box_y1,
-                        b_box_x2=attr_b_box_x2,
-                        b_box_y2=attr_b_box_y2
-                    )
-                    self.session.add(new_label)
-
-                    self.session.commit()
-                    logging.info(f"✅ Inserted detection for Person {person_id}, Attributes: {valid_attributes}")
-
-                    # Trigger detection callback with unified data structure
-                    try:
-                        from ..core_service import CoreService
-                        from ..detection.detection_processing.PPEDetectionProcessor import PPEDetectionProcessor
-
-                        # Create unified detection data using the processor's factory method
-                        unified_data = PPEDetectionProcessor.create_detection_data(
-                            pipeline_id=pipeline_id,
+                        worker_id=pipeline_id,
                         worker_source_id=worker_source_id,
                         person_id=person_id,
-                        detection_id=new_detection.id,
-                        tracked_obj=tracked_obj,
                         image_path=full_image_path,
                         image_tile_path=cropped_image_path,
-
+                        b_box_x1=bbox[0],
+                        b_box_y1=bbox[1],
+                        b_box_x2=bbox[2],
+                        b_box_y2=bbox[3],
+                        detection_count=tracked_obj.get("detections", 0)
                     )
-
-
-
-
-
-
+                    session.add(new_detection)
+                    session.flush()
+
+                    for attr in filtered_attributes:
+                        label = attr["label"]
+                        valid_attributes.append(label)
+
+                        if attr and "bbox" in attr:
+                            attr_bbox = attr["bbox"]
+                            # Assuming attr_bbox is in [x, y, width, height] format.
+                            attr_b_box_x1 = attr_bbox[0]
+                            attr_b_box_y1 = attr_bbox[1]
+                            attr_b_box_x2 = attr_bbox[2]
+                            attr_b_box_y2 = attr_bbox[3]
+                        else:
+                            # Fallback to default values if the attribute bbox is not available.
+                            attr_b_box_x1 = 0.0
+                            attr_b_box_y1 = 0.0
+                            attr_b_box_x2 = 0.0
+                            attr_b_box_y2 = 0.0
+
+                        # Retrieve confidence score; default to 1.0 if not available.
+                        if attr:
+                            confidence_score = attr.get("confidence", 1.0)
+                        else:
+                            confidence_score = 1.0
+
+                        new_label = PPEDetectionLabelEntity(
+                            id=str(uuid.uuid4()),
+                            detection_id=new_detection.id,
+                            code=label,
+                            confidence_score=confidence_score,
+                            detection_count=attr.get("count", 0),
+                            b_box_x1=attr_b_box_x1,
+                            b_box_y1=attr_b_box_y1,
+                            b_box_x2=attr_b_box_x2,
+                            b_box_y2=attr_b_box_y2
+                        )
+                        session.add(new_label)
+
+                    # Commit happens automatically via context manager
+                    logging.info(f"✅ Inserted detection for Person {person_id}, Attributes: {valid_attributes}")
+
+                    # Trigger detection callback with unified data structure
+                    try:
+                        from ..core_service import CoreService
+                        from ..detection.detection_processing.PPEDetectionProcessor import PPEDetectionProcessor
+
+                        # Create unified detection data using the processor's factory method
+                        unified_data = PPEDetectionProcessor.create_detection_data(
+                            pipeline_id=pipeline_id,
+                            worker_source_id=worker_source_id,
+                            person_id=person_id,
+                            detection_id=new_detection.id,
+                            tracked_obj=tracked_obj,
+                            image_path=full_image_path,
+                            image_tile_path=cropped_image_path,
+                            frame_id=frame_id
+                        )
+
+                        # Trigger callbacks
+                        CoreService.trigger_detection(unified_data)
+
+                    except Exception as e:
+                        logging.warning(f"⚠️ Failed to trigger PPE detection callback: {e}")

             except SQLAlchemyError as e:
-                self.session.rollback()
                 logging.error(f"❌ Database error while saving detection: {e}")
