nedo-vision-worker-core 0.2.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nedo-vision-worker-core might be problematic.

Files changed (95)
  1. nedo_vision_worker_core/__init__.py +23 -0
  2. nedo_vision_worker_core/ai/FrameDrawer.py +144 -0
  3. nedo_vision_worker_core/ai/ImageDebugger.py +126 -0
  4. nedo_vision_worker_core/ai/VideoDebugger.py +69 -0
  5. nedo_vision_worker_core/ai/__init__.py +1 -0
  6. nedo_vision_worker_core/cli.py +197 -0
  7. nedo_vision_worker_core/config/ConfigurationManager.py +173 -0
  8. nedo_vision_worker_core/config/__init__.py +1 -0
  9. nedo_vision_worker_core/core_service.py +237 -0
  10. nedo_vision_worker_core/database/DatabaseManager.py +236 -0
  11. nedo_vision_worker_core/database/__init__.py +1 -0
  12. nedo_vision_worker_core/detection/BaseDetector.py +22 -0
  13. nedo_vision_worker_core/detection/DetectionManager.py +83 -0
  14. nedo_vision_worker_core/detection/RFDETRDetector.py +62 -0
  15. nedo_vision_worker_core/detection/YOLODetector.py +57 -0
  16. nedo_vision_worker_core/detection/__init__.py +1 -0
  17. nedo_vision_worker_core/detection/detection_processing/DetectionProcessor.py +29 -0
  18. nedo_vision_worker_core/detection/detection_processing/HumanDetectionProcessor.py +47 -0
  19. nedo_vision_worker_core/detection/detection_processing/PPEDetectionProcessor.py +44 -0
  20. nedo_vision_worker_core/detection/detection_processing/__init__.py +1 -0
  21. nedo_vision_worker_core/doctor.py +342 -0
  22. nedo_vision_worker_core/drawing_assets/blue/inner_corner.png +0 -0
  23. nedo_vision_worker_core/drawing_assets/blue/inner_frame.png +0 -0
  24. nedo_vision_worker_core/drawing_assets/blue/line.png +0 -0
  25. nedo_vision_worker_core/drawing_assets/blue/top_left.png +0 -0
  26. nedo_vision_worker_core/drawing_assets/blue/top_right.png +0 -0
  27. nedo_vision_worker_core/drawing_assets/red/inner_corner.png +0 -0
  28. nedo_vision_worker_core/drawing_assets/red/inner_frame.png +0 -0
  29. nedo_vision_worker_core/drawing_assets/red/line.png +0 -0
  30. nedo_vision_worker_core/drawing_assets/red/top_left.png +0 -0
  31. nedo_vision_worker_core/drawing_assets/red/top_right.png +0 -0
  32. nedo_vision_worker_core/icons/boots-green.png +0 -0
  33. nedo_vision_worker_core/icons/boots-red.png +0 -0
  34. nedo_vision_worker_core/icons/gloves-green.png +0 -0
  35. nedo_vision_worker_core/icons/gloves-red.png +0 -0
  36. nedo_vision_worker_core/icons/goggles-green.png +0 -0
  37. nedo_vision_worker_core/icons/goggles-red.png +0 -0
  38. nedo_vision_worker_core/icons/helmet-green.png +0 -0
  39. nedo_vision_worker_core/icons/helmet-red.png +0 -0
  40. nedo_vision_worker_core/icons/mask-red.png +0 -0
  41. nedo_vision_worker_core/icons/vest-green.png +0 -0
  42. nedo_vision_worker_core/icons/vest-red.png +0 -0
  43. nedo_vision_worker_core/models/__init__.py +20 -0
  44. nedo_vision_worker_core/models/ai_model.py +41 -0
  45. nedo_vision_worker_core/models/auth.py +14 -0
  46. nedo_vision_worker_core/models/config.py +9 -0
  47. nedo_vision_worker_core/models/dataset_source.py +30 -0
  48. nedo_vision_worker_core/models/logs.py +9 -0
  49. nedo_vision_worker_core/models/ppe_detection.py +39 -0
  50. nedo_vision_worker_core/models/ppe_detection_label.py +20 -0
  51. nedo_vision_worker_core/models/restricted_area_violation.py +20 -0
  52. nedo_vision_worker_core/models/user.py +10 -0
  53. nedo_vision_worker_core/models/worker_source.py +19 -0
  54. nedo_vision_worker_core/models/worker_source_pipeline.py +21 -0
  55. nedo_vision_worker_core/models/worker_source_pipeline_config.py +24 -0
  56. nedo_vision_worker_core/models/worker_source_pipeline_debug.py +15 -0
  57. nedo_vision_worker_core/models/worker_source_pipeline_detection.py +14 -0
  58. nedo_vision_worker_core/pipeline/PipelineConfigManager.py +32 -0
  59. nedo_vision_worker_core/pipeline/PipelineManager.py +133 -0
  60. nedo_vision_worker_core/pipeline/PipelinePrepocessor.py +40 -0
  61. nedo_vision_worker_core/pipeline/PipelineProcessor.py +338 -0
  62. nedo_vision_worker_core/pipeline/PipelineSyncThread.py +202 -0
  63. nedo_vision_worker_core/pipeline/__init__.py +1 -0
  64. nedo_vision_worker_core/preprocessing/ImageResizer.py +42 -0
  65. nedo_vision_worker_core/preprocessing/ImageRoi.py +61 -0
  66. nedo_vision_worker_core/preprocessing/Preprocessor.py +16 -0
  67. nedo_vision_worker_core/preprocessing/__init__.py +1 -0
  68. nedo_vision_worker_core/repositories/AIModelRepository.py +31 -0
  69. nedo_vision_worker_core/repositories/PPEDetectionRepository.py +146 -0
  70. nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +90 -0
  71. nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +81 -0
  72. nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +71 -0
  73. nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +79 -0
  74. nedo_vision_worker_core/repositories/WorkerSourceRepository.py +19 -0
  75. nedo_vision_worker_core/repositories/__init__.py +1 -0
  76. nedo_vision_worker_core/streams/RTMPStreamer.py +146 -0
  77. nedo_vision_worker_core/streams/StreamSyncThread.py +66 -0
  78. nedo_vision_worker_core/streams/VideoStream.py +324 -0
  79. nedo_vision_worker_core/streams/VideoStreamManager.py +121 -0
  80. nedo_vision_worker_core/streams/__init__.py +1 -0
  81. nedo_vision_worker_core/tracker/SFSORT.py +325 -0
  82. nedo_vision_worker_core/tracker/TrackerManager.py +163 -0
  83. nedo_vision_worker_core/tracker/__init__.py +1 -0
  84. nedo_vision_worker_core/util/BoundingBoxMetrics.py +53 -0
  85. nedo_vision_worker_core/util/DrawingUtils.py +354 -0
  86. nedo_vision_worker_core/util/ModelReadinessChecker.py +188 -0
  87. nedo_vision_worker_core/util/PersonAttributeMatcher.py +70 -0
  88. nedo_vision_worker_core/util/PersonRestrictedAreaMatcher.py +45 -0
  89. nedo_vision_worker_core/util/TablePrinter.py +28 -0
  90. nedo_vision_worker_core/util/__init__.py +1 -0
  91. nedo_vision_worker_core-0.2.0.dist-info/METADATA +347 -0
  92. nedo_vision_worker_core-0.2.0.dist-info/RECORD +95 -0
  93. nedo_vision_worker_core-0.2.0.dist-info/WHEEL +5 -0
  94. nedo_vision_worker_core-0.2.0.dist-info/entry_points.txt +2 -0
  95. nedo_vision_worker_core-0.2.0.dist-info/top_level.txt +1 -0
nedo_vision_worker_core/repositories/AIModelRepository.py
@@ -0,0 +1,31 @@
+ import logging
+ from sqlalchemy.orm import Session
+ from sqlalchemy.exc import SQLAlchemyError
+ from ..database.DatabaseManager import DatabaseManager
+ from ..models.ai_model import AIModelEntity
+
+ class AIModelRepository:
+     """Handles storage of AI Models into SQLite using SQLAlchemy."""
+
+     def __init__(self):
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("default")
+
+     def get_models(self) -> list:
+         """
+         Retrieves all AI models from the database.
+
+         Returns:
+             list: A list of AIModelEntity objects.
+         """
+         try:
+             self.session.expire_all()
+             models = self.session.query(AIModelEntity).all()
+
+             for model in models:
+                 self.session.expunge(model)
+
+             return models
+         except SQLAlchemyError as e:
+             logging.error(f"Error retrieving models: {e}")
+             return []
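A minimal usage sketch of the repository above (hypothetical caller; assumes the DatabaseManager singleton has already been configured by the core service). Because get_models() expunges each entity, the returned objects stay usable after the session moves on:

import logging
from nedo_vision_worker_core.repositories.AIModelRepository import AIModelRepository

repo = AIModelRepository()
models = repo.get_models()  # returns [] on SQLAlchemy errors instead of raising

for model in models:
    # The entities are detached (expunged), so already-loaded columns remain
    # accessible; the exact column names are defined in models/ai_model.py.
    logging.info("Known AI model: %s", model)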
nedo_vision_worker_core/repositories/PPEDetectionRepository.py
@@ -0,0 +1,146 @@
+ import os
+ import cv2
+ import datetime
+ import uuid
+ import logging
+ from pathlib import Path
+ from sqlalchemy.orm import Session
+ from sqlalchemy.exc import SQLAlchemyError
+ from ..database.DatabaseManager import DatabaseManager
+ from ..models.ppe_detection import PPEDetectionEntity
+ from ..models.ppe_detection_label import PPEDetectionLabelEntity
+ from ..util.DrawingUtils import DrawingUtils
+
+ class PPEDetectionRepository:
+     """Handles storage of PPE detections into SQLite using SQLAlchemy."""
+
+     def __init__(self):
+
+         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "ppe_detections"
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("default")
+         os.makedirs(self.storage_dir, exist_ok=True)
+
+     def save_ppe_detection(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
+         """
+         Inserts new detections only if at least one attribute's detection count is >= 5.
+
+         Args:
+             pipeline_id (str): Unique ID of the video pipeline.
+             worker_source_id (str): Source of the video stream.
+             frame_id (int): Frame number.
+             tracked_objects (list): List of detected persons and their attribute counts.
+             frame (numpy.ndarray): Image frame for saving snapshots.
+         """
+         current_datetime = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d_%H%M%S")  # Timestamp
+
+         for tracked_obj in tracked_objects:
+             person_id = tracked_obj["uuid"]
+             attributes = tracked_obj["attributes"]
+             valid_attributes = []
+
+             if not any(attr.get("count", 0) == 5 for attr in attributes):
+                 continue  # Skip this detection
+
+             filtered_attributes = [attr for attr in attributes if attr.get("count", 0) >= 5]
+
+             draw_obj = tracked_obj.copy()
+             draw_obj["attributes"] = filtered_attributes
+
+             drawn_frame = frame_drawer.draw_frame(frame.copy(), [draw_obj])
+
+             # Save full frame image
+             full_image_filename = f"{pipeline_id}_{person_id}_{current_datetime}.jpg"
+             full_image_path = os.path.join(self.storage_dir, full_image_filename)
+             cv2.imwrite(full_image_path, drawn_frame)
+
+             # Save cropped image with buffer
+             bbox = tracked_obj["bbox"]
+             cropped_image, obj = DrawingUtils.crop_with_bounding_box(frame, tracked_obj)
+             cropped_image = frame_drawer.draw_frame(cropped_image, [obj])
+
+             cropped_image_filename = f"{pipeline_id}_{person_id}_{current_datetime}_cropped.jpg"
+             cropped_image_path = os.path.join(self.storage_dir, cropped_image_filename)
+             cv2.imwrite(cropped_image_path, cropped_image)
+
+             try:
+                 new_detection = PPEDetectionEntity(
+                     id=str(uuid.uuid4()),
+                     worker_id=pipeline_id,
+                     worker_source_id=worker_source_id,
+                     person_id=person_id,
+                     image_path=full_image_path,
+                     image_tile_path=cropped_image_path,
+                     b_box_x1=bbox[0],
+                     b_box_y1=bbox[1],
+                     b_box_x2=bbox[2],
+                     b_box_y2=bbox[3],
+                     detection_count=tracked_obj.get("detections", 0)
+                 )
+                 self.session.add(new_detection)
+                 self.session.flush()
+
+                 for attr in filtered_attributes:
+                     label = attr["label"]
+                     valid_attributes.append(label)
+
+                     if attr and "bbox" in attr:
+                         attr_bbox = attr["bbox"]
+                         # Assuming attr_bbox is in [x, y, width, height] format.
+                         attr_b_box_x1 = attr_bbox[0]
+                         attr_b_box_y1 = attr_bbox[1]
+                         attr_b_box_x2 = attr_bbox[2]
+                         attr_b_box_y2 = attr_bbox[3]
+                     else:
+                         # Fallback to default values if the attribute bbox is not available.
+                         attr_b_box_x1 = 0.0
+                         attr_b_box_y1 = 0.0
+                         attr_b_box_x2 = 0.0
+                         attr_b_box_y2 = 0.0
+
+                     # Retrieve confidence score; default to 1.0 if not available.
+                     if attr:
+                         confidence_score = attr.get("confidence", 1.0)
+                     else:
+                         confidence_score = 1.0
+
+                     new_label = PPEDetectionLabelEntity(
+                         id=str(uuid.uuid4()),
+                         detection_id=new_detection.id,
+                         code=label,
+                         confidence_score=confidence_score,
+                         detection_count=attr.get("count", 0),
+                         b_box_x1=attr_b_box_x1,
+                         b_box_y1=attr_b_box_y1,
+                         b_box_x2=attr_b_box_x2,
+                         b_box_y2=attr_b_box_y2
+                     )
+                     self.session.add(new_label)
+
+                 self.session.commit()
+                 logging.info(f"✅ Inserted detection for Person {person_id}, Attributes: {valid_attributes}")
+
+                 # Trigger detection callback
+                 try:
+                     from ..core_service import CoreService
+                     detection_data = {
+                         'type': 'ppe_detection',
+                         'pipeline_id': pipeline_id,
+                         'worker_source_id': worker_source_id,
+                         'person_id': person_id,
+                         'detection_id': new_detection.id,
+                         'attributes': valid_attributes,
+                         'confidence_score': tracked_obj.get("confidence", 1.0),
+                         'bbox': bbox,
+                         'image_path': full_image_path,
+                         'image_tile_path': cropped_image_path,
+                         'timestamp': current_datetime
+                     }
+                     CoreService.trigger_detection_callback('ppe_detection', detection_data)
+                 except Exception as e:
+                     logging.warning(f"⚠️ Failed to trigger PPE detection callback: {e}")
+
+             except SQLAlchemyError as e:
+                 self.session.rollback()
+                 logging.error(f"❌ Database error while saving detection: {e}")
+
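For reference, save_ppe_detection reads a handful of keys from each tracked object (uuid, bbox, detections, confidence, and attributes with label/count/confidence/bbox). A sketch of that structure with hypothetical values, for a person whose helmet count has just reached 5 (the gate that triggers a new insert):

import numpy as np

# Field names are taken from the code above; the concrete values are hypothetical.
tracked_objects = [{
    "uuid": "person-123",
    "bbox": [100, 80, 340, 560],        # stored as b_box_x1/y1/x2/y2
    "detections": 12,
    "confidence": 0.91,
    "attributes": [
        {"label": "helmet", "count": 5, "confidence": 0.88, "bbox": [140, 80, 220, 150]},
        {"label": "vest", "count": 2, "confidence": 0.40},  # below 5, filtered out
    ],
}]

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in BGR frame

# The pipeline normally supplies a FrameDrawer instance; with one in hand the call is:
# PPEDetectionRepository().save_ppe_detection("pipeline-1", "source-1", 42,
#                                             tracked_objects, frame, frame_drawer)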
nedo_vision_worker_core/repositories/RestrictedAreaRepository.py
@@ -0,0 +1,90 @@
+ # repositories/restricted_area_repository.py
+
+ import os
+ import cv2
+ import datetime
+ import logging
+ from pathlib import Path
+ from sqlalchemy.orm import Session
+ from sqlalchemy.exc import SQLAlchemyError
+ from ..models.restricted_area_violation import RestrictedAreaViolationEntity
+ from ..database.DatabaseManager import DatabaseManager
+ from ..util.DrawingUtils import DrawingUtils
+
+ class RestrictedAreaRepository:
+     def __init__(self):
+         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "restricted_violations"
+         os.makedirs(self.storage_dir, exist_ok=True)
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("default")
+
+     def save_area_violation(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
+         """
+         Save restricted area violation event.
+         """
+         current_datetime = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d_%H%M%S")  # Timestamp
+
+         frame_drawer.draw_polygons(frame)
+
+         for tracked_obj in tracked_objects:
+             person_id = tracked_obj["uuid"]
+             attributes = tracked_obj["attributes"]
+
+             if not any((attr.get("label") == "in_restricted_area" and attr.get("count", 0) == 5) for attr in attributes):
+                 continue
+
+             drawn_frame = frame_drawer.draw_frame(frame.copy(), [tracked_obj.copy()])
+
+             # Save full frame image
+             full_image_filename = f"{pipeline_id}_{person_id}_{current_datetime}.jpg"
+             full_image_path = os.path.join(self.storage_dir, full_image_filename)
+             cv2.imwrite(full_image_path, drawn_frame)
+
+             # Save cropped image with buffer
+             bbox = tracked_obj["bbox"]
+             cropped_image, obj = DrawingUtils.crop_with_bounding_box(frame, tracked_obj)
+             cropped_image = frame_drawer.draw_frame(cropped_image, [obj])
+
+             cropped_image_filename = f"{pipeline_id}_{person_id}_{current_datetime}_cropped.jpg"
+             cropped_image_path = os.path.join(self.storage_dir, cropped_image_filename)
+             cv2.imwrite(cropped_image_path, cropped_image)
+
+             try:
+                 new_detection = RestrictedAreaViolationEntity(
+                     worker_source_id=worker_source_id,
+                     person_id=person_id,
+                     image_path=full_image_path,
+                     image_tile_path=cropped_image_path,
+                     confidence_score=tracked_obj.get("confidence", 1),
+                     b_box_x1=bbox[0],
+                     b_box_y1=bbox[1],
+                     b_box_x2=bbox[2],
+                     b_box_y2=bbox[3],
+                 )
+                 self.session.add(new_detection)
+                 self.session.flush()
+                 self.session.commit()
+                 logging.info(f"✅ Inserted restricted area violation for Person {person_id}")
+
+                 # Trigger detection callback
+                 try:
+                     from ..core_service import CoreService
+                     detection_data = {
+                         'type': 'area_violation',
+                         'pipeline_id': pipeline_id,
+                         'worker_source_id': worker_source_id,
+                         'person_id': person_id,
+                         'detection_id': new_detection.id if hasattr(new_detection, 'id') else None,
+                         'confidence_score': tracked_obj.get("confidence", 1.0),
+                         'bbox': bbox,
+                         'image_path': full_image_path,
+                         'image_tile_path': cropped_image_path,
+                         'timestamp': current_datetime
+                     }
+                     CoreService.trigger_detection_callback('area_violation', detection_data)
+                 except Exception as e:
+                     logging.warning(f"⚠️ Failed to trigger area violation callback: {e}")
+
+             except SQLAlchemyError as e:
+                 self.session.rollback()
+                 logging.error(f"❌ Database error while saving detection: {e}")
nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py
@@ -0,0 +1,81 @@
+ from datetime import datetime, timedelta, timezone
+ import json
+ import os
+ import cv2
+ import numpy as np
+ from sqlalchemy.orm import Session
+ from ..database.DatabaseManager import DatabaseManager
+ from ..models.worker_source_pipeline_debug import WorkerSourcePipelineDebugEntity
+
+
+ class WorkerSourcePipelineDebugRepository:
+     def __init__(self):
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("default")
+         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "debug_image"
+         os.makedirs(self.storage_dir, exist_ok=True)
+
+     def get_pipeline_ids_to_debug(self):
+         """
+         Retrieve all distinct worker_source_pipeline_id values that need debugging.
+
+         :return: A list of pipeline IDs (str) with null data.
+         """
+         now = datetime.now(timezone.utc)
+         cutoff_time = now - timedelta(minutes=1)
+
+         self.session.query(WorkerSourcePipelineDebugEntity)\
+             .filter(
+                 WorkerSourcePipelineDebugEntity.data == None,
+                 WorkerSourcePipelineDebugEntity.created_at < cutoff_time
+             ).delete(synchronize_session=False)
+
+         self.session.commit()
+
+         results = self.session.query(
+             WorkerSourcePipelineDebugEntity.worker_source_pipeline_id
+         ).filter(
+             WorkerSourcePipelineDebugEntity.data == None
+         ).distinct().all()
+
+         return [row[0] for row in results]
+
+     def update_debug_entries_by_pipeline_id(self, pipeline_id: int, image, data: str):
+         """
+         Update all debug entries for a given pipeline ID with new data.
+
+         :param pipeline_id: The ID of the pipeline for which to update debug entries.
+         :param data: The new tracked-object data to store on the entries.
+         :return: The number of updated entries.
+         """
+         now = datetime.now(timezone.utc)
+         current_datetime = now.strftime("%Y%m%d_%H%M%S")
+
+         stringified_data = json.dumps(
+             {
+                 "timestamp": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
+                 "tracked_objects": data,
+             },
+             default=lambda o: (
+                 float(o) if isinstance(o, np.floating) else
+                 int(o) if isinstance(o, np.integer) else
+                 list(o) if isinstance(o, (np.ndarray, tuple)) else
+                 str(o)
+             )
+         )
+
+         full_image_filename = f"{pipeline_id}_{current_datetime}.jpg"
+         full_image_path = os.path.join(self.storage_dir, full_image_filename)
+         cv2.imwrite(full_image_path, image)
+
+         updated_entries = self.session.query(WorkerSourcePipelineDebugEntity)\
+             .filter_by(worker_source_pipeline_id=pipeline_id)\
+             .update(
+                 {
+                     "image_path": full_image_path,
+                     "data": stringified_data
+                 },
+                 synchronize_session="fetch"
+             )
+         self.session.commit()
+         return updated_entries
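The default= lambda above is what lets tracker output containing NumPy scalars and arrays be serialized to JSON; a standalone sketch of the same technique:

import json
import numpy as np

payload = {"score": np.float32(0.87), "track_id": np.int64(7), "bbox": np.array([1, 2, 3, 4])}

# Same fallback chain as the repository: numpy floats -> float, ints -> int,
# arrays/tuples -> list, anything else -> str. json.dumps calls the default
# recursively, so the int64 elements inside the converted bbox list are handled too.
text = json.dumps(payload, default=lambda o: (
    float(o) if isinstance(o, np.floating) else
    int(o) if isinstance(o, np.integer) else
    list(o) if isinstance(o, (np.ndarray, tuple)) else
    str(o)
))
print(text)  # e.g. {"score": 0.8700000047683716, "track_id": 7, "bbox": [1, 2, 3, 4]}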
nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py
@@ -0,0 +1,71 @@
+ from datetime import datetime, timezone
+ import json
+ import os
+ import cv2
+ import numpy as np
+ from sqlalchemy.orm import Session
+ from ..ai.FrameDrawer import FrameDrawer
+ from ..database.DatabaseManager import DatabaseManager
+ from ..models.worker_source_pipeline_detection import WorkerSourcePipelineDetectionEntity
+
+
+ class WorkerSourcePipelineDetectionRepository:
+     def __init__(self):
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("default")
+         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "detection_image"
+         os.makedirs(self.storage_dir, exist_ok=True)
+
+     def save_detection(self, pipeline_id: int, frame, tracked_objects, frame_drawer: FrameDrawer):
+         """
+         Save detection data that needs to be sent to the database.
+         """
+         now = datetime.now(timezone.utc)
+         current_datetime = now.strftime("%Y%m%d_%H%M%S")
+
+         frame_drawer.draw_polygons(frame)
+         filtered_objects = []
+
+         for tracked_obj in tracked_objects:
+             attributes = tracked_obj["attributes"]
+
+             if not any(attr.get("count", 0) == 5 for attr in attributes):
+                 continue
+
+             obj = tracked_obj.copy()
+             obj["attributes"] = [attr for attr in attributes if attr.get("count", 0) >= 5]
+
+             filtered_objects.append(obj)
+
+         if not filtered_objects:
+             return
+
+         drawn_frame = frame_drawer.draw_frame(frame.copy(), filtered_objects)
+
+         full_image_filename = f"{pipeline_id}_{current_datetime}.jpg"
+         full_image_path = os.path.join(self.storage_dir, full_image_filename)
+         cv2.imwrite(full_image_path, drawn_frame)
+
+         stringified_data = json.dumps(filtered_objects,
+             default=lambda o: (
+                 float(o) if isinstance(o, np.floating) else
+                 int(o) if isinstance(o, np.integer) else
+                 list(o) if isinstance(o, (np.ndarray, tuple)) else
+                 str(o)
+             )
+         )
+
+         try:
+             new_detection = WorkerSourcePipelineDetectionEntity(
+                 worker_source_pipeline_id=pipeline_id,
+                 image_path=full_image_path,
+                 data=stringified_data,
+                 created_at=datetime.utcnow()
+             )
+             self.session.add(new_detection)
+             self.session.flush()
+             self.session.commit()
+             print(f"✅ Inserted detection data for pipeline {pipeline_id}")
+         except Exception as e:
+             self.session.rollback()
+             print(f"❌ Database error while saving detection: {e}")
nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py
@@ -0,0 +1,79 @@
+ import json
+ from sqlalchemy.orm import Session
+ from sqlalchemy.exc import SQLAlchemyError
+ from ..database.DatabaseManager import DatabaseManager
+ from ..models.worker_source_pipeline import WorkerSourcePipelineEntity
+ from ..models.worker_source_pipeline_config import WorkerSourcePipelineConfigEntity
+
+
+ class WorkerSourcePipelineRepository:
+     def __init__(self):
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("config")
+
+     def get_all_pipelines(self):
+         """
+         Fetch all worker source pipelines from the local database in a single query.
+
+         Returns:
+             list: A list of WorkerSourcePipelineEntity records.
+         """
+         self.session.expire_all()
+         return self.session.query(WorkerSourcePipelineEntity).all()
+
+     def get_pipeline_configs_by_pipeline_id(self, pipeline_id):
+         """
+         Retrieves all pipeline configurations for a given pipeline ID and returns them as a dictionary.
+
+         The dictionary format:
+         {
+             "config_code_1": { "id": "xxx", "is_enabled": true, "value": "some_value", "name": "Config Name" },
+             "config_code_2": { "id": "yyy", "is_enabled": false, "value": "another_value", "name": "Another Config Name" }
+         }
+
+         Args:
+             pipeline_id (str): The unique identifier of the pipeline.
+
+         Returns:
+             dict: A dictionary mapping pipeline_config_code to its configuration details.
+         """
+         try:
+             pipeline_configs = (
+                 self.session.query(WorkerSourcePipelineConfigEntity)
+                 .filter(WorkerSourcePipelineConfigEntity.worker_source_pipeline_id == pipeline_id)
+                 .all()
+             )
+
+             def parse_value(value):
+                 """Attempts to parse the value as JSON if applicable."""
+                 if not value:
+                     return value  # Keep None or empty string as is
+
+                 value = value.strip()  # Remove leading/trailing spaces
+                 if (value.startswith("{") and value.endswith("}")) or (value.startswith("[") and value.endswith("]")):
+                     try:
+                         return json.loads(value)  # Parse JSON object or list
+                     except json.JSONDecodeError:
+                         pass  # Keep as string if parsing fails
+                 return value  # Return original value if not JSON
+
+             # Convert result into a dictionary with pipeline_config_code as key
+             config_dict = {
+                 config.pipeline_config_code: {
+                     "id": config.id,
+                     "is_enabled": config.is_enabled,  # Keep original boolean value
+                     "value": parse_value(config.value),  # Parse JSON if applicable
+                     "name": config.pipeline_config_name
+                 }
+                 for config in pipeline_configs
+             }
+
+             return config_dict
+
+         except SQLAlchemyError as e:
+             print(f"Database error while retrieving pipeline configs: {e}")
+             return {}
+
+     def get_worker_source_pipeline(self, pipeline_id):
+         self.session.expire_all()
+         return self.session.query(WorkerSourcePipelineEntity).filter_by(id=pipeline_id).first()
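With parse_value in place, a config row whose value column holds JSON text comes back as a parsed structure, while plain strings pass through unchanged. A hypothetical call and result (the actual pipeline_config_code keys are defined by the backend, not shown in this diff):

from nedo_vision_worker_core.repositories.WorkerSourcePipelineRepository import WorkerSourcePipelineRepository

configs = WorkerSourcePipelineRepository().get_pipeline_configs_by_pipeline_id("pipeline-1")

# Hypothetical result:
# {
#     "restricted_area": {"id": "xxx", "is_enabled": True,
#                         "value": [[0, 0], [640, 0], [640, 480]],  # JSON text parsed into a list
#                         "name": "Restricted Area"},
#     "frame_skip": {"id": "yyy", "is_enabled": False,
#                    "value": "2",                                  # plain string, returned unchanged
#                    "name": "Frame Skip"},
# }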
nedo_vision_worker_core/repositories/WorkerSourceRepository.py
@@ -0,0 +1,19 @@
+ from sqlalchemy.orm import Session
+ from ..database.DatabaseManager import DatabaseManager
+ from ..models.worker_source import WorkerSourceEntity
+
+
+ class WorkerSourceRepository:
+     def __init__(self):
+         self.db_manager = DatabaseManager()
+         self.session: Session = self.db_manager.get_session("config")
+
+     def get_worker_sources(self):
+         """
+         Fetch all worker sources from the local database in a single query.
+
+         Returns:
+             list: A list of WorkerSourceEntity records.
+         """
+         self.session.expire_all()
+         return self.session.query(WorkerSourceEntity).all()
nedo_vision_worker_core/streams/RTMPStreamer.py
@@ -0,0 +1,146 @@
+ import subprocess
+ import logging
+ import cv2
+ import numpy as np
+ import os
+
+ class RTMPStreamer:
+     """Handles streaming video frames to an RTMP server using FFmpeg."""
+
+     def __init__(self, pipeline_id, fps=25, bitrate="1500k"):
+         """
+         Initializes the RTMP streaming process.
+
+         :param pipeline_id: Unique identifier for the stream (used as the stream key).
+         :param fps: Frames per second.
+         :param bitrate: Bitrate for video encoding.
+         """
+         self.rtmp_server = os.environ.get("RTMP_SERVER", "rtmp://localhost:1935/live")
+         self.rtmp_url = f"{self.rtmp_server}/{pipeline_id}"  # RTMP URL with dynamic stream key
+         self.fps = fps
+         self.bitrate = bitrate
+         self.width = None
+         self.height = None
+         self.ffmpeg_process = None
+         self.started = False  # Ensure FFmpeg starts only once
+         self.active = False  # Add status flag
+
+     def _calculate_resolution(self, frame):
+         """Determines resolution with max width 1024 while maintaining aspect ratio."""
+         original_height, original_width = frame.shape[:2]
+         if original_width > 1024:
+             scale_factor = 1024 / original_width
+             new_width = 1024
+             new_height = int(original_height * scale_factor)
+         else:
+             new_width, new_height = original_width, original_height
+
+         logging.info(f"📏 Adjusted resolution: {new_width}x{new_height} (Original: {original_width}x{original_height})")
+         return new_width, new_height
+
+     def is_active(self):
+         """Check if the RTMP streamer is active and ready to send frames."""
+         return self.active and self.ffmpeg_process and self.ffmpeg_process.poll() is None
+
+     def _start_ffmpeg_stream(self):
+         """Starts an FFmpeg process to stream frames to the RTMP server silently."""
+         ffmpeg_command = [
+             "ffmpeg",
+             "-y",
+             "-loglevel", "panic",  # 🔇 Suppress all output except fatal errors
+             "-nostats",  # 🔇 Hide encoding progress updates
+             "-hide_banner",  # 🔇 Hide FFmpeg banner information
+             "-f", "rawvideo",
+             "-pixel_format", "bgr24",
+             "-video_size", f"{self.width}x{self.height}",
+             "-framerate", str(self.fps),
+             "-i", "-",
+             "-c:v", "libx264",
+             "-preset", "ultrafast",
+             "-tune", "zerolatency",
+             "-b:v", self.bitrate,
+             # ❌ Disable Audio (Avoid unnecessary encoding overhead)
+             "-an",
+             "-maxrate", "2000k",
+             "-bufsize", "4000k",
+             "-f", "flv",
+             self.rtmp_url,
+         ]
+
+         try:
+             with open(os.devnull, "w") as devnull:
+                 self.ffmpeg_process = subprocess.Popen(
+                     ffmpeg_command,
+                     stdin=subprocess.PIPE,
+                     stdout=devnull,
+                     stderr=devnull
+                 )
+             logging.info(f"📡 RTMP streaming started: {self.rtmp_url} ({self.width}x{self.height})")
+             self.started = True
+             self.active = True
+         except Exception as e:
+             logging.error(f"❌ Failed to start FFmpeg: {e}")
+             self.ffmpeg_process = None
+             self.active = False
+
+     def send_frame(self, frame):
+         """Sends a video frame to the RTMP stream with dynamic resolution."""
+         if frame is None or not isinstance(frame, np.ndarray):
+             logging.error("❌ Invalid frame received")
+             return
+
+         try:
+             # Validate frame before processing
+             if frame.size == 0 or not frame.data:
+                 logging.error("❌ Empty frame detected")
+                 return
+
+             # Set resolution on the first frame
+             if not self.started:
+                 self.width, self.height = self._calculate_resolution(frame)
+                 self._start_ffmpeg_stream()
+
+             if self.is_active():
+                 # Create a copy of the frame to prevent reference issues
+                 frame_copy = frame.copy()
+
+                 # Resize only if necessary
+                 if frame_copy.shape[1] > 1024:
+                     frame_copy = cv2.resize(frame_copy, (self.width, self.height),
+                                             interpolation=cv2.INTER_AREA)
+
+                 # Additional frame validation
+                 if frame_copy.size == 0 or not frame_copy.data:
+                     logging.error("❌ Frame became invalid after processing")
+                     return
+
+                 if self.ffmpeg_process and self.ffmpeg_process.stdin:
+                     self.ffmpeg_process.stdin.write(frame_copy.tobytes())
+                     self.ffmpeg_process.stdin.flush()  # Ensure data is written
+
+         except BrokenPipeError:
+             logging.error("❌ RTMP connection broken")
+             self.stop_stream()
+         except Exception as e:
+             logging.error(f"❌ Failed to send frame to RTMP: {e}")
+             self.stop_stream()
+
+     def stop_stream(self):
+         """Stops the FFmpeg streaming process."""
+         self.active = False
+         if self.ffmpeg_process:
+             try:
+                 if self.ffmpeg_process.stdin:
+                     self.ffmpeg_process.stdin.close()
+                 self.ffmpeg_process.terminate()
+                 self.ffmpeg_process.wait(timeout=5)
+             except Exception as e:
+                 logging.error(f"❌ Error stopping RTMP stream: {e}")
+                 # Force kill if normal termination fails
+                 try:
+                     self.ffmpeg_process.kill()
+                 except Exception:
+                     pass
+             finally:
+                 self.ffmpeg_process = None
+                 logging.info("✅ RTMP streaming process stopped.")