nedo-vision-worker-core 0.3.5__tar.gz → 0.3.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nedo-vision-worker-core might be problematic. Click here for more details.
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/PKG-INFO +3 -2
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/__init__.py +1 -1
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/database/DatabaseManager.py +17 -1
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/PipelineManager.py +50 -19
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/PipelineProcessor.py +20 -15
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/PipelineSyncThread.py +12 -12
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/repositories/AIModelRepository.py +17 -17
- nedo_vision_worker_core-0.3.6/nedo_vision_worker_core/repositories/BaseRepository.py +44 -0
- nedo_vision_worker_core-0.3.6/nedo_vision_worker_core/repositories/PPEDetectionRepository.py +147 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +37 -38
- nedo_vision_worker_core-0.3.6/nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +82 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +14 -15
- nedo_vision_worker_core-0.3.6/nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +111 -0
- nedo_vision_worker_core-0.3.6/nedo_vision_worker_core/repositories/WorkerSourceRepository.py +21 -0
- nedo_vision_worker_core-0.3.6/nedo_vision_worker_core/streams/RTMPStreamer.py +406 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core.egg-info/PKG-INFO +3 -2
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core.egg-info/SOURCES.txt +1 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core.egg-info/requires.txt +3 -1
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/pyproject.toml +4 -1
- nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/repositories/PPEDetectionRepository.py +0 -149
- nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +0 -81
- nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +0 -79
- nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/repositories/WorkerSourceRepository.py +0 -19
- nedo_vision_worker_core-0.3.5/nedo_vision_worker_core/streams/RTMPStreamer.py +0 -229
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/MANIFEST.in +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/README.md +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/ai/FrameDrawer.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/ai/ImageDebugger.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/ai/VideoDebugger.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/ai/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/callbacks/DetectionCallbackManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/callbacks/DetectionCallbackTypes.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/callbacks/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/cli.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/config/ConfigurationManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/config/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/core_service.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/database/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/BaseDetector.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/RFDETRDetector.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/YOLODetector.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/detection/detection_processing/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/doctor.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/boots-green.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/boots-red.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/gloves-green.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/gloves-red.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/goggles-green.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/goggles-red.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/helmet-green.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/helmet-red.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/mask-red.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/vest-green.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/icons/vest-red.png +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/ai_model.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/auth.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/config.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/dataset_source.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/logs.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/ppe_detection.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/ppe_detection_label.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/restricted_area_violation.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/user.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/worker_source.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/worker_source_pipeline.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/worker_source_pipeline_config.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/worker_source_pipeline_debug.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/models/worker_source_pipeline_detection.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/ModelManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/PipelineConfigManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/pipeline/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/preprocessing/ImageResizer.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/preprocessing/ImageRoi.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/preprocessing/Preprocessor.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/preprocessing/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/repositories/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/services/SharedVideoStreamServer.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/services/VideoSharingDaemon.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/services/VideoSharingDaemonManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/streams/SharedVideoDeviceManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/streams/StreamSyncThread.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/streams/VideoStream.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/streams/VideoStreamManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/streams/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/tracker/SFSORT.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/tracker/TrackerManager.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/tracker/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/BoundingBoxMetrics.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/DrawingUtils.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/ModelReadinessChecker.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/PersonAttributeMatcher.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/PlatformDetector.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/TablePrinter.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/util/__init__.py +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core.egg-info/dependency_links.txt +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core.egg-info/entry_points.txt +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core.egg-info/top_level.txt +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/requirements.txt +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/setup.cfg +0 -0
- {nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/setup.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: nedo-vision-worker-core
|
|
3
|
-
Version: 0.3.
|
|
3
|
+
Version: 0.3.6
|
|
4
4
|
Summary: Nedo Vision Worker Core Library for AI Vision Processing
|
|
5
5
|
Author-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
|
|
6
6
|
Maintainer-email: Willy Achmat Fauzi <willy.achmat@gmail.com>
|
|
@@ -31,7 +31,6 @@ Requires-Python: >=3.8
|
|
|
31
31
|
Description-Content-Type: text/markdown
|
|
32
32
|
Requires-Dist: alembic>=1.8.0
|
|
33
33
|
Requires-Dist: numpy>=1.21.0
|
|
34
|
-
Requires-Dist: opencv-python>=4.6.0
|
|
35
34
|
Requires-Dist: pillow>=8.0.0
|
|
36
35
|
Requires-Dist: psutil>=5.9.0
|
|
37
36
|
Requires-Dist: scipy>=1.9.0
|
|
@@ -41,6 +40,8 @@ Requires-Dist: torch>=1.9.0
|
|
|
41
40
|
Requires-Dist: torchvision>=0.10.0
|
|
42
41
|
Requires-Dist: ultralytics>=8.0.0
|
|
43
42
|
Requires-Dist: rfdetr<2.0.0,>=1.2.0
|
|
43
|
+
Provides-Extra: opencv
|
|
44
|
+
Requires-Dist: opencv-python>=4.6.0; extra == "opencv"
|
|
44
45
|
Provides-Extra: dev
|
|
45
46
|
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
46
47
|
Requires-Dist: black>=22.0.0; extra == "dev"
|
{nedo_vision_worker_core-0.3.5 → nedo_vision_worker_core-0.3.6}/nedo_vision_worker_core/__init__.py
RENAMED
|
@@ -7,7 +7,7 @@ A library for running AI vision processing and detection in the Nedo Vision plat
|
|
|
7
7
|
from .core_service import CoreService
|
|
8
8
|
from .callbacks import DetectionType, CallbackTrigger, DetectionData, IntervalMetadata
|
|
9
9
|
|
|
10
|
-
__version__ = "0.3.
|
|
10
|
+
__version__ = "0.3.6"
|
|
11
11
|
__all__ = [
|
|
12
12
|
"CoreService",
|
|
13
13
|
"DetectionType",
|
|
@@ -121,7 +121,23 @@ class DatabaseManager:
|
|
|
121
121
|
# Initialize engines and session factories for each database
|
|
122
122
|
for name, path in DB_PATHS.items():
|
|
123
123
|
path.parent.mkdir(parents=True, exist_ok=True) # Ensure directory exists
|
|
124
|
-
|
|
124
|
+
|
|
125
|
+
# Configure connection pool for multi-threaded usage
|
|
126
|
+
# pool_size: Max connections to keep open
|
|
127
|
+
# max_overflow: Additional connections that can be created temporarily
|
|
128
|
+
# pool_pre_ping: Test connections before using (prevents stale connections)
|
|
129
|
+
# pool_recycle: Recycle connections after N seconds (prevents long-lived stale connections)
|
|
130
|
+
engine = create_engine(
|
|
131
|
+
f"sqlite:///{path.as_posix()}",
|
|
132
|
+
pool_size=20, # Base pool size for persistent connections
|
|
133
|
+
max_overflow=30, # Allow up to 30 additional temporary connections
|
|
134
|
+
pool_pre_ping=True, # Verify connection health before use
|
|
135
|
+
pool_recycle=3600, # Recycle connections after 1 hour
|
|
136
|
+
connect_args={
|
|
137
|
+
"check_same_thread": False, # Required for SQLite with multiple threads
|
|
138
|
+
"timeout": 30.0 # Connection timeout
|
|
139
|
+
}
|
|
140
|
+
)
|
|
125
141
|
ENGINES[name] = engine
|
|
126
142
|
SESSION_FACTORIES[name] = scoped_session(sessionmaker(bind=engine)) # Use scoped sessions
|
|
127
143
|
DatabaseManager.synchronize(name)
|
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
import logging
|
|
2
2
|
import threading
|
|
3
|
-
from concurrent.futures import ThreadPoolExecutor, Future
|
|
4
3
|
from typing import Dict
|
|
5
4
|
from .PipelineProcessor import PipelineProcessor
|
|
6
5
|
from ..streams.VideoStreamManager import VideoStreamManager
|
|
@@ -9,8 +8,8 @@ class PipelineManager:
|
|
|
9
8
|
"""Manages AI pipeline execution and video stream processing."""
|
|
10
9
|
|
|
11
10
|
def __init__(self, video_manager: VideoStreamManager, on_pipeline_stopped, max_workers=50):
|
|
12
|
-
self.
|
|
13
|
-
self.pipeline_threads = {} # Stores
|
|
11
|
+
self.max_workers = max_workers
|
|
12
|
+
self.pipeline_threads = {} # Stores Thread objects {pipeline_id: Thread}
|
|
14
13
|
self.pipeline_metadata = {} # Stores actual pipeline data {pipeline_id: metadata}
|
|
15
14
|
self.video_manager = video_manager # Manages video streams
|
|
16
15
|
self.processors: Dict[str, PipelineProcessor] = {} # Stores PipelineProcessor instances per pipeline
|
|
@@ -48,14 +47,43 @@ class PipelineManager:
|
|
|
48
47
|
processor.frame_drawer.location_name = pipeline.location_name
|
|
49
48
|
self.processors[pipeline_id] = processor # Store processor instance
|
|
50
49
|
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
50
|
+
active_count = len([t for t in self.pipeline_threads.values() if t.is_alive()])
|
|
51
|
+
logging.info(f"📋 Starting pipeline {pipeline_id} thread (active threads: {active_count})")
|
|
52
|
+
|
|
53
|
+
try:
|
|
54
|
+
# Wrap the execution to catch any early errors
|
|
55
|
+
def _safe_process_pipeline():
|
|
56
|
+
try:
|
|
57
|
+
logging.info(f"🏁 Pipeline {pipeline_id} thread execution beginning...")
|
|
58
|
+
processor.process_pipeline(self.video_manager)
|
|
59
|
+
except Exception as e:
|
|
60
|
+
logging.error(f"❌ Unhandled error in pipeline {pipeline_id} thread: {e}", exc_info=True)
|
|
61
|
+
finally:
|
|
62
|
+
# Ensure cleanup callback is called
|
|
63
|
+
self._handle_pipeline_completion(pipeline_id)
|
|
64
|
+
|
|
65
|
+
# Create and start thread directly
|
|
66
|
+
thread = threading.Thread(
|
|
67
|
+
target=_safe_process_pipeline,
|
|
68
|
+
name=f"pipeline-{pipeline_id[:8]}",
|
|
69
|
+
daemon=True
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
self.pipeline_threads[pipeline_id] = thread
|
|
73
|
+
self.pipeline_metadata[pipeline_id] = pipeline
|
|
74
|
+
|
|
75
|
+
logging.info(f"⚙️ Starting thread for pipeline {pipeline_id}")
|
|
76
|
+
thread.start()
|
|
77
|
+
logging.info(f"✅ Pipeline {pipeline_id} thread started successfully")
|
|
54
78
|
|
|
55
|
-
|
|
56
|
-
|
|
79
|
+
except Exception as e:
|
|
80
|
+
logging.error(f"❌ Failed to start pipeline {pipeline_id} thread: {e}", exc_info=True)
|
|
81
|
+
# Clean up on failure
|
|
82
|
+
self.processors.pop(pipeline_id, None)
|
|
83
|
+
self.video_manager.release_stream(worker_source_id, pipeline_id)
|
|
84
|
+
raise
|
|
57
85
|
|
|
58
|
-
def _handle_pipeline_completion(self, pipeline_id: str
|
|
86
|
+
def _handle_pipeline_completion(self, pipeline_id: str):
|
|
59
87
|
"""
|
|
60
88
|
Handles cleanup when a pipeline finishes processing.
|
|
61
89
|
"""
|
|
@@ -64,11 +92,7 @@ class PipelineManager:
|
|
|
64
92
|
return # If it's already being stopped manually, don't trigger again
|
|
65
93
|
|
|
66
94
|
try:
|
|
67
|
-
|
|
68
|
-
logging.info(f"🚫 Pipeline {pipeline_id} was cancelled.")
|
|
69
|
-
elif future.exception():
|
|
70
|
-
logging.error(f"❌ Pipeline {pipeline_id} encountered an error: {future.exception()}", exc_info=True)
|
|
71
|
-
|
|
95
|
+
logging.info(f"🏁 Pipeline {pipeline_id} completed execution")
|
|
72
96
|
except Exception as e:
|
|
73
97
|
logging.error(f"⚠️ Error in handling pipeline {pipeline_id} completion: {e}")
|
|
74
98
|
|
|
@@ -93,10 +117,14 @@ class PipelineManager:
|
|
|
93
117
|
if processor:
|
|
94
118
|
processor.stop()
|
|
95
119
|
|
|
96
|
-
#
|
|
97
|
-
|
|
98
|
-
if
|
|
99
|
-
|
|
120
|
+
# Stop execution thread (thread will terminate naturally)
|
|
121
|
+
thread = self.pipeline_threads.pop(pipeline_id, None)
|
|
122
|
+
if thread and thread.is_alive():
|
|
123
|
+
# Thread is daemon, will stop when processor.running becomes False
|
|
124
|
+
logging.debug(f"Waiting for pipeline {pipeline_id} thread to terminate...")
|
|
125
|
+
thread.join(timeout=5.0)
|
|
126
|
+
if thread.is_alive():
|
|
127
|
+
logging.warning(f"Pipeline {pipeline_id} thread did not terminate cleanly")
|
|
100
128
|
|
|
101
129
|
# Remove metadata
|
|
102
130
|
self.pipeline_metadata.pop(pipeline_id, None)
|
|
@@ -132,7 +160,8 @@ class PipelineManager:
|
|
|
132
160
|
Returns:
|
|
133
161
|
bool: True if the pipeline is running, False otherwise.
|
|
134
162
|
"""
|
|
135
|
-
|
|
163
|
+
thread = self.pipeline_threads.get(pipeline_id)
|
|
164
|
+
return thread is not None and thread.is_alive()
|
|
136
165
|
|
|
137
166
|
def shutdown(self):
|
|
138
167
|
"""Shuts down the pipeline manager gracefully."""
|
|
@@ -142,5 +171,7 @@ class PipelineManager:
|
|
|
142
171
|
for pipeline_id in list(self.pipeline_threads.keys()):
|
|
143
172
|
self.stop_pipeline(pipeline_id)
|
|
144
173
|
|
|
174
|
+
logging.info("✅ PipelineManager stopped.")
|
|
175
|
+
|
|
145
176
|
self.executor.shutdown(wait=True) # Wait for all threads to finish
|
|
146
177
|
logging.info("✅ PipelineManager stopped.")
|
|
@@ -137,6 +137,7 @@ class PipelineProcessor:
|
|
|
137
137
|
def process_pipeline(self, video_manager: VideoStreamManager):
|
|
138
138
|
pipeline_id = self.pipeline_id
|
|
139
139
|
worker_source_id = self.worker_source_id
|
|
140
|
+
|
|
140
141
|
logging.info(f"🎯 Running pipeline processing for pipeline {pipeline_id} | Source: {worker_source_id}")
|
|
141
142
|
|
|
142
143
|
self._update_config_internal()
|
|
@@ -148,9 +149,6 @@ class PipelineProcessor:
|
|
|
148
149
|
logging.error(f"❌ Pipeline {pipeline_id} | Source {worker_source_id}: No initial frame available. Exiting...")
|
|
149
150
|
return
|
|
150
151
|
|
|
151
|
-
# Start RTMP (internal pacer thread will begin on first push_frame())
|
|
152
|
-
self.rtmp_streamer = RTMPStreamer(pipeline_id)
|
|
153
|
-
|
|
154
152
|
# Start detection thread
|
|
155
153
|
self.detection_thread = threading.Thread(
|
|
156
154
|
target=self._detection_worker,
|
|
@@ -204,20 +202,27 @@ class PipelineProcessor:
|
|
|
204
202
|
logging.warning(f"Debug save failed: {e}")
|
|
205
203
|
self.debug_flag = False
|
|
206
204
|
|
|
207
|
-
#
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
205
|
+
# Push frame to RTMP stream
|
|
206
|
+
# RTMPStreamer handles its own restarts internally
|
|
207
|
+
if self.rtmp_streamer is None:
|
|
208
|
+
try:
|
|
209
|
+
self.rtmp_streamer = RTMPStreamer(self.pipeline_id)
|
|
210
|
+
logging.info(f"🎬 RTMP streamer initialized for pipeline {pipeline_id}")
|
|
211
|
+
except Exception as e:
|
|
212
|
+
logging.error(f"❌ Failed to initialize RTMP streamer for pipeline {pipeline_id}: {e}")
|
|
213
|
+
self.rtmp_streamer = None
|
|
211
214
|
|
|
212
|
-
|
|
213
|
-
self.rtmp_streamer.push_frame(drawn_frame)
|
|
214
|
-
except Exception as e:
|
|
215
|
-
logging.error(f"❌ RTMP push error: {e}")
|
|
215
|
+
if self.rtmp_streamer:
|
|
216
216
|
try:
|
|
217
|
-
self.rtmp_streamer.
|
|
218
|
-
except Exception:
|
|
219
|
-
|
|
220
|
-
|
|
217
|
+
self.rtmp_streamer.push_frame(drawn_frame)
|
|
218
|
+
except Exception as e:
|
|
219
|
+
logging.error(f"❌ RTMP push error for pipeline {pipeline_id}: {e}")
|
|
220
|
+
if "initialization_failed" in str(e).lower():
|
|
221
|
+
try:
|
|
222
|
+
self.rtmp_streamer.stop_stream()
|
|
223
|
+
except Exception:
|
|
224
|
+
pass
|
|
225
|
+
self.rtmp_streamer = None
|
|
221
226
|
|
|
222
227
|
# feed detection worker with latest-only behavior
|
|
223
228
|
if self.detection_thread and self.detection_thread.is_alive():
|
|
@@ -38,9 +38,13 @@ class PipelineSyncThread(threading.Thread):
|
|
|
38
38
|
|
|
39
39
|
def on_pipeline_stopped(self, pipeline_id: str) -> None:
|
|
40
40
|
"""Set the pipeline as stopped in the database."""
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
41
|
+
try:
|
|
42
|
+
pipeline = self.pipeline_repo.get_worker_source_pipeline(pipeline_id)
|
|
43
|
+
if pipeline:
|
|
44
|
+
new_status = "run" if pipeline.pipeline_status_code == "restart" else "stop"
|
|
45
|
+
self.pipeline_repo.update_pipeline_status(pipeline_id, new_status)
|
|
46
|
+
except Exception as e:
|
|
47
|
+
logging.error(f"Failed to update pipeline status for {pipeline_id}: {e}")
|
|
44
48
|
|
|
45
49
|
def run(self) -> None:
|
|
46
50
|
"""Continuously updates pipelines based on database changes."""
|
|
@@ -52,16 +56,11 @@ class PipelineSyncThread(threading.Thread):
|
|
|
52
56
|
local_pipeline_ids = set(self.pipeline_manager.get_active_pipelines())
|
|
53
57
|
db_pipeline_ids = set(db_pipelines.keys())
|
|
54
58
|
|
|
55
|
-
restarted_pipeline = False
|
|
56
|
-
|
|
57
59
|
# Process pipeline changes
|
|
58
|
-
self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines
|
|
60
|
+
self._add_new_pipelines(db_pipeline_ids - local_pipeline_ids, db_pipelines)
|
|
59
61
|
self._remove_deleted_pipelines(local_pipeline_ids - db_pipeline_ids)
|
|
60
62
|
self._update_existing_pipelines(db_pipeline_ids & local_pipeline_ids, db_pipelines)
|
|
61
63
|
|
|
62
|
-
if restarted_pipeline:
|
|
63
|
-
self.pipeline_repo.session.commit()
|
|
64
|
-
|
|
65
64
|
# Sync the cache to remove unused detectors
|
|
66
65
|
active_model_ids = {p.ai_model_id for p in db_pipelines.values() if p.pipeline_status_code == 'run'}
|
|
67
66
|
self.model_manager.sync_cache(active_model_ids)
|
|
@@ -71,15 +70,16 @@ class PipelineSyncThread(threading.Thread):
|
|
|
71
70
|
|
|
72
71
|
time.sleep(self.polling_interval)
|
|
73
72
|
|
|
74
|
-
def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]
|
|
75
|
-
restarted_pipeline: bool) -> None:
|
|
73
|
+
def _add_new_pipelines(self, pipeline_ids: Set[str], db_pipelines: Dict[str, object]) -> None:
|
|
76
74
|
"""Add new pipelines that exist in DB but not locally."""
|
|
77
75
|
for pid in pipeline_ids:
|
|
78
76
|
pipeline = db_pipelines[pid]
|
|
79
77
|
|
|
80
78
|
if pipeline.pipeline_status_code == 'restart':
|
|
79
|
+
# Update status in database
|
|
80
|
+
self.pipeline_repo.update_pipeline_status(pid, 'run')
|
|
81
|
+
# Update local object too for consistency
|
|
81
82
|
pipeline.pipeline_status_code = 'run'
|
|
82
|
-
restarted_pipeline = True
|
|
83
83
|
|
|
84
84
|
if pipeline.pipeline_status_code == 'run':
|
|
85
85
|
detector = self.model_manager.get_detector(pipeline.ai_model_id)
|
|
@@ -1,15 +1,13 @@
|
|
|
1
1
|
import logging
|
|
2
|
-
from sqlalchemy.orm import Session
|
|
3
2
|
from sqlalchemy.exc import SQLAlchemyError
|
|
4
|
-
from
|
|
3
|
+
from .BaseRepository import BaseRepository
|
|
5
4
|
from ..models.ai_model import AIModelEntity
|
|
6
5
|
|
|
7
|
-
class AIModelRepository:
|
|
6
|
+
class AIModelRepository(BaseRepository):
|
|
8
7
|
"""Handles storage of AI Models into SQLite using SQLAlchemy."""
|
|
9
8
|
|
|
10
9
|
def __init__(self):
|
|
11
|
-
|
|
12
|
-
self.session: Session = self.db_manager.get_session("default")
|
|
10
|
+
super().__init__(db_name="default")
|
|
13
11
|
|
|
14
12
|
def get_models(self) -> list:
|
|
15
13
|
"""
|
|
@@ -19,13 +17,14 @@ class AIModelRepository:
|
|
|
19
17
|
list: A list of AIModelEntity objects.
|
|
20
18
|
"""
|
|
21
19
|
try:
|
|
22
|
-
self.
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
20
|
+
with self._get_session() as session:
|
|
21
|
+
session.expire_all()
|
|
22
|
+
models = session.query(AIModelEntity).all()
|
|
23
|
+
|
|
24
|
+
for model in models:
|
|
25
|
+
session.expunge(model)
|
|
26
|
+
|
|
27
|
+
return models
|
|
29
28
|
except SQLAlchemyError as e:
|
|
30
29
|
logging.error(f"Error retrieving models: {e}")
|
|
31
30
|
return []
|
|
@@ -41,11 +40,12 @@ class AIModelRepository:
|
|
|
41
40
|
An AIModelEntity object or None if not found.
|
|
42
41
|
"""
|
|
43
42
|
try:
|
|
44
|
-
self.
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
43
|
+
with self._get_session() as session:
|
|
44
|
+
session.expire_all()
|
|
45
|
+
model = session.query(AIModelEntity).filter_by(id=model_id).first()
|
|
46
|
+
if model:
|
|
47
|
+
session.expunge(model)
|
|
48
|
+
return model
|
|
49
49
|
except SQLAlchemyError as e:
|
|
50
50
|
logging.error(f"Error retrieving model {model_id}: {e}")
|
|
51
51
|
return None
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
from contextlib import contextmanager
|
|
2
|
+
from ..database.DatabaseManager import DatabaseManager
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class BaseRepository:
|
|
6
|
+
"""
|
|
7
|
+
Base repository class that provides thread-safe database session management.
|
|
8
|
+
|
|
9
|
+
All repositories should inherit from this class to ensure proper connection pooling
|
|
10
|
+
and to prevent connection leaks in multi-threaded environments.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
def __init__(self, db_name: str = "default"):
|
|
14
|
+
"""
|
|
15
|
+
Initialize the base repository.
|
|
16
|
+
|
|
17
|
+
Args:
|
|
18
|
+
db_name: Name of the database to connect to ('default', 'config', or 'logging')
|
|
19
|
+
"""
|
|
20
|
+
self.db_manager = DatabaseManager()
|
|
21
|
+
self.db_name = db_name
|
|
22
|
+
|
|
23
|
+
@contextmanager
|
|
24
|
+
def _get_session(self):
|
|
25
|
+
"""
|
|
26
|
+
Context manager for database sessions.
|
|
27
|
+
|
|
28
|
+
Ensures sessions are properly opened and closed, preventing connection leaks.
|
|
29
|
+
Each operation gets a fresh session that is automatically closed when done.
|
|
30
|
+
|
|
31
|
+
Usage:
|
|
32
|
+
with self._get_session() as session:
|
|
33
|
+
results = session.query(Model).all()
|
|
34
|
+
return results
|
|
35
|
+
"""
|
|
36
|
+
session = self.db_manager.get_session(self.db_name)
|
|
37
|
+
try:
|
|
38
|
+
yield session
|
|
39
|
+
session.commit() # Commit any pending changes
|
|
40
|
+
except Exception:
|
|
41
|
+
session.rollback() # Rollback on error
|
|
42
|
+
raise
|
|
43
|
+
finally:
|
|
44
|
+
session.close() # Always close the session
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import cv2
|
|
3
|
+
import datetime
|
|
4
|
+
import uuid
|
|
5
|
+
import logging
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from sqlalchemy.exc import SQLAlchemyError
|
|
8
|
+
from .BaseRepository import BaseRepository
|
|
9
|
+
from ..models.ppe_detection import PPEDetectionEntity
|
|
10
|
+
from ..models.ppe_detection_label import PPEDetectionLabelEntity
|
|
11
|
+
from ..util.DrawingUtils import DrawingUtils
|
|
12
|
+
from ..database.DatabaseManager import DatabaseManager
|
|
13
|
+
|
|
14
|
+
class PPEDetectionRepository(BaseRepository):
    """Handles storage of PPE detections into SQLite using SQLAlchemy.

    Each qualifying tracked person produces one PPEDetectionEntity row, one
    PPEDetectionLabelEntity row per qualifying attribute, two JPEG snapshots
    on disk (full frame and cropped tile), and a best-effort detection
    callback via CoreService.
    """

    def __init__(self):
        # Uses the shared "default" database through the thread-safe BaseRepository.
        super().__init__(db_name="default")
        # Snapshot images are written under the configured files directory.
        self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "ppe_detections"
        os.makedirs(self.storage_dir, exist_ok=True)

    def save_ppe_detection(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
        """
        Persist PPE detections for the tracked objects of one frame.

        A tracked person is saved only when at least one of its attributes has
        a detection count of exactly 5; all attributes whose count is >= 5 are
        then stored as labels for that detection.

        NOTE(review): the gate below uses ``== 5`` — it looks like a fire-once
        trigger (save when the count first reaches the threshold, not on every
        later frame). Confirm this intent with the tracking code.

        Args:
            pipeline_id (str): Unique ID of the video pipeline (stored as worker_id).
            worker_source_id (str): Source of the video stream.
            frame_id (int): Frame number (only forwarded to the detection callback).
            tracked_objects (list): Detected persons; each dict carries "uuid",
                "bbox", "attributes" (dicts with "label"/"count" and optionally
                "bbox"/"confidence") and optionally "detections".
            frame (numpy.ndarray): Image frame used for saving snapshots.
            frame_drawer: Object whose draw_frame(frame, objects) renders overlays.
        """
        # UTC timestamp embedded in the snapshot filenames.
        current_datetime = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d_%H%M%S")  # Timestamp

        for tracked_obj in tracked_objects:
            person_id = tracked_obj["uuid"]
            attributes = tracked_obj["attributes"]
            valid_attributes = []  # label codes actually persisted (used for logging)

            # Fire-once gate: skip unless some attribute count just reached 5.
            if not any(attr.get("count", 0) == 5 for attr in attributes):
                continue  # Skip this detection

            # Attributes considered stable enough (count >= 5) to store and draw.
            filtered_attributes = [attr for attr in attributes if attr.get("count", 0) >= 5]

            # Shallow copy so the original tracked_obj keeps its full attribute list.
            draw_obj = tracked_obj.copy()
            draw_obj["attributes"] = filtered_attributes

            drawn_frame = frame_drawer.draw_frame(frame.copy(), [draw_obj])

            # Save full frame image (with overlays).
            full_image_filename = f"{pipeline_id}_{person_id}_{current_datetime}.jpg"
            full_image_path = os.path.join(self.storage_dir, full_image_filename)
            cv2.imwrite(full_image_path, drawn_frame)

            # Save cropped image with buffer around the person's bounding box.
            bbox = tracked_obj["bbox"]
            cropped_image, obj = DrawingUtils.crop_with_bounding_box(frame, tracked_obj)
            cropped_image = frame_drawer.draw_frame(cropped_image, [obj])

            cropped_image_filename = f"{pipeline_id}_{person_id}_{current_datetime}_cropped.jpg"
            cropped_image_path = os.path.join(self.storage_dir, cropped_image_filename)
            cv2.imwrite(cropped_image_path, cropped_image)

            try:
                with self._get_session() as session:
                    new_detection = PPEDetectionEntity(
                        id=str(uuid.uuid4()),
                        worker_id=pipeline_id,  # pipeline id doubles as worker id here
                        worker_source_id=worker_source_id,
                        person_id=person_id,
                        image_path=full_image_path,
                        image_tile_path=cropped_image_path,
                        b_box_x1=bbox[0],
                        b_box_y1=bbox[1],
                        b_box_x2=bbox[2],
                        b_box_y2=bbox[3],
                        detection_count=tracked_obj.get("detections", 0)
                    )
                    session.add(new_detection)
                    # Flush so new_detection.id is available for the label rows below.
                    session.flush()

                    for attr in filtered_attributes:
                        label = attr["label"]
                        valid_attributes.append(label)

                        if attr and "bbox" in attr:
                            attr_bbox = attr["bbox"]
                            # NOTE(review): stored into x1/y1/x2/y2 columns, but the
                            # original comment claimed [x, y, width, height] order —
                            # confirm the upstream bbox convention.
                            attr_b_box_x1 = attr_bbox[0]
                            attr_b_box_y1 = attr_bbox[1]
                            attr_b_box_x2 = attr_bbox[2]
                            attr_b_box_y2 = attr_bbox[3]
                        else:
                            # Fallback to default values if the attribute bbox is not available.
                            attr_b_box_x1 = 0.0
                            attr_b_box_y1 = 0.0
                            attr_b_box_x2 = 0.0
                            attr_b_box_y2 = 0.0

                        # Retrieve confidence score; default to 1.0 if not available.
                        if attr:
                            confidence_score = attr.get("confidence", 1.0)
                        else:
                            confidence_score = 1.0

                        new_label = PPEDetectionLabelEntity(
                            id=str(uuid.uuid4()),
                            detection_id=new_detection.id,
                            code=label,
                            confidence_score=confidence_score,
                            detection_count=attr.get("count", 0),
                            b_box_x1=attr_b_box_x1,
                            b_box_y1=attr_b_box_y1,
                            b_box_x2=attr_b_box_x2,
                            b_box_y2=attr_b_box_y2
                        )
                        session.add(new_label)

                # Commit happens automatically via the BaseRepository context manager.
                logging.info(f"✅ Inserted detection for Person {person_id}, Attributes: {valid_attributes}")

                # Trigger detection callback with unified data structure.
                # Imports are local — presumably to avoid a circular import at
                # module load time; confirm before hoisting them.
                try:
                    from ..core_service import CoreService
                    from ..detection.detection_processing.PPEDetectionProcessor import PPEDetectionProcessor

                    # Create unified detection data using the processor's factory method
                    unified_data = PPEDetectionProcessor.create_detection_data(
                        pipeline_id=pipeline_id,
                        worker_source_id=worker_source_id,
                        person_id=person_id,
                        detection_id=new_detection.id,
                        tracked_obj=tracked_obj,
                        image_path=full_image_path,
                        image_tile_path=cropped_image_path,
                        frame_id=frame_id
                    )

                    # Trigger callbacks; a failing callback must not undo the saved detection.
                    CoreService.trigger_detection(unified_data)

                except Exception as e:
                    logging.warning(f"⚠️ Failed to trigger PPE detection callback: {e}")

            except SQLAlchemyError as e:
                logging.error(f"❌ Database error while saving detection: {e}")
|
|
147
|
+
|
|
@@ -5,18 +5,17 @@ import cv2
|
|
|
5
5
|
import datetime
|
|
6
6
|
import logging
|
|
7
7
|
from pathlib import Path
|
|
8
|
-
from sqlalchemy.orm import Session
|
|
9
8
|
from sqlalchemy.exc import SQLAlchemyError
|
|
9
|
+
from .BaseRepository import BaseRepository
|
|
10
10
|
from ..models.restricted_area_violation import RestrictedAreaViolationEntity
|
|
11
11
|
from ..database.DatabaseManager import DatabaseManager
|
|
12
12
|
from ..util.DrawingUtils import DrawingUtils
|
|
13
13
|
|
|
14
|
-
class RestrictedAreaRepository:
|
|
14
|
+
class RestrictedAreaRepository(BaseRepository):
|
|
15
15
|
def __init__(self):
|
|
16
|
+
super().__init__(db_name="default")
|
|
16
17
|
self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "restricted_violations"
|
|
17
18
|
os.makedirs(self.storage_dir, exist_ok=True)
|
|
18
|
-
self.db_manager = DatabaseManager()
|
|
19
|
-
self.session: Session = self.db_manager.get_session("default")
|
|
20
19
|
|
|
21
20
|
def save_area_violation(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
|
|
22
21
|
"""
|
|
@@ -50,45 +49,45 @@ class RestrictedAreaRepository:
|
|
|
50
49
|
cv2.imwrite(cropped_image_path, cropped_image)
|
|
51
50
|
|
|
52
51
|
try:
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
person_id=person_id,
|
|
56
|
-
image_path=full_image_path,
|
|
57
|
-
image_tile_path=cropped_image_path,
|
|
58
|
-
confidence_score=tracked_obj.get("confidence", 1),
|
|
59
|
-
b_box_x1=bbox[0],
|
|
60
|
-
b_box_y1=bbox[1],
|
|
61
|
-
b_box_x2=bbox[2],
|
|
62
|
-
b_box_y2=bbox[3],
|
|
63
|
-
)
|
|
64
|
-
self.session.add(new_detection)
|
|
65
|
-
self.session.flush()
|
|
66
|
-
self.session.commit()
|
|
67
|
-
logging.info(f"✅ Inserted restricted area violation for Person {person_id}")
|
|
68
|
-
|
|
69
|
-
# Trigger detection callback
|
|
70
|
-
try:
|
|
71
|
-
from ..core_service import CoreService
|
|
72
|
-
from ..detection.detection_processing.HumanDetectionProcessor import HumanDetectionProcessor
|
|
73
|
-
|
|
74
|
-
# Create unified detection data using the processor's factory method
|
|
75
|
-
unified_data = HumanDetectionProcessor.create_detection_data(
|
|
76
|
-
pipeline_id=pipeline_id,
|
|
52
|
+
with self._get_session() as session:
|
|
53
|
+
new_detection = RestrictedAreaViolationEntity(
|
|
77
54
|
worker_source_id=worker_source_id,
|
|
78
55
|
person_id=person_id,
|
|
79
|
-
detection_id=new_detection.id if hasattr(new_detection, 'id') else f"area_{person_id}_{current_datetime}",
|
|
80
|
-
tracked_obj=tracked_obj,
|
|
81
56
|
image_path=full_image_path,
|
|
82
57
|
image_tile_path=cropped_image_path,
|
|
83
|
-
|
|
58
|
+
confidence_score=tracked_obj.get("confidence", 1),
|
|
59
|
+
b_box_x1=bbox[0],
|
|
60
|
+
b_box_y1=bbox[1],
|
|
61
|
+
b_box_x2=bbox[2],
|
|
62
|
+
b_box_y2=bbox[3],
|
|
84
63
|
)
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
64
|
+
session.add(new_detection)
|
|
65
|
+
session.flush()
|
|
66
|
+
# Commit happens automatically via context manager
|
|
67
|
+
logging.info(f"✅ Inserted restricted area violation for Person {person_id}")
|
|
68
|
+
|
|
69
|
+
# Trigger detection callback
|
|
70
|
+
try:
|
|
71
|
+
from ..core_service import CoreService
|
|
72
|
+
from ..detection.detection_processing.HumanDetectionProcessor import HumanDetectionProcessor
|
|
73
|
+
|
|
74
|
+
# Create unified detection data using the processor's factory method
|
|
75
|
+
unified_data = HumanDetectionProcessor.create_detection_data(
|
|
76
|
+
pipeline_id=pipeline_id,
|
|
77
|
+
worker_source_id=worker_source_id,
|
|
78
|
+
person_id=person_id,
|
|
79
|
+
detection_id=new_detection.id if hasattr(new_detection, 'id') else f"area_{person_id}_{current_datetime}",
|
|
80
|
+
tracked_obj=tracked_obj,
|
|
81
|
+
image_path=full_image_path,
|
|
82
|
+
image_tile_path=cropped_image_path,
|
|
83
|
+
frame_id=frame_id
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
# Trigger callbacks
|
|
87
|
+
CoreService.trigger_detection(unified_data)
|
|
88
|
+
|
|
89
|
+
except Exception as e:
|
|
90
|
+
logging.warning(f"⚠️ Failed to trigger area violation callback: {e}")
|
|
91
91
|
|
|
92
92
|
except SQLAlchemyError as e:
|
|
93
|
-
self.session.rollback()
|
|
94
93
|
logging.error(f"❌ Database error while saving detection: {e}")
|