nedo-vision-worker-core 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of nedo-vision-worker-core might be problematic.

Files changed (21):
  1. nedo_vision_worker_core/__init__.py +1 -1
  2. nedo_vision_worker_core/database/DatabaseManager.py +17 -1
  3. nedo_vision_worker_core/pipeline/PipelineManager.py +63 -19
  4. nedo_vision_worker_core/pipeline/PipelineProcessor.py +23 -17
  5. nedo_vision_worker_core/pipeline/PipelineSyncThread.py +29 -32
  6. nedo_vision_worker_core/repositories/AIModelRepository.py +17 -17
  7. nedo_vision_worker_core/repositories/BaseRepository.py +44 -0
  8. nedo_vision_worker_core/repositories/PPEDetectionRepository.py +77 -79
  9. nedo_vision_worker_core/repositories/RestrictedAreaRepository.py +37 -38
  10. nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py +47 -46
  11. nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py +14 -15
  12. nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py +68 -36
  13. nedo_vision_worker_core/repositories/WorkerSourceRepository.py +9 -7
  14. nedo_vision_worker_core/streams/RTMPStreamer.py +283 -106
  15. nedo_vision_worker_core/streams/StreamSyncThread.py +51 -24
  16. nedo_vision_worker_core/streams/VideoStreamManager.py +76 -20
  17. {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/METADATA +3 -2
  18. {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/RECORD +21 -20
  19. {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/WHEEL +0 -0
  20. {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/entry_points.txt +0 -0
  21. {nedo_vision_worker_core-0.3.4.dist-info → nedo_vision_worker_core-0.3.6.dist-info}/top_level.txt +0 -0
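
The common thread in the expanded hunks below (the five repository files) is that each repository stops holding a long-lived `self.session` and instead inherits a `_get_session()` context manager from the new `repositories/BaseRepository.py` (+44 -0), whose body is not expanded in this diff. The following is only a plausible sketch of that base class, inferred from the removed code (`DatabaseManager().get_session(...)`), the dropped per-repository `rollback()` calls, and the recurring "Commit happens automatically via context manager" comments; everything beyond those observed names is an assumption:

# Hypothetical reconstruction of BaseRepository; the real +44-line file
# is not shown in this diff. Inferred behavior: commit on success,
# roll back on error, always close.
from contextlib import contextmanager

from ..database.DatabaseManager import DatabaseManager


class BaseRepository:
    def __init__(self, db_name: str = "default"):
        self.db_name = db_name
        self.db_manager = DatabaseManager()

    @contextmanager
    def _get_session(self):
        session = self.db_manager.get_session(self.db_name)  # same call the old code used
        try:
            yield session
            session.commit()    # "Commit happens automatically via context manager"
        except Exception:
            session.rollback()  # replaces the explicit rollback() calls removed below
            raise
        finally:
            session.close()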
nedo_vision_worker_core/repositories/RestrictedAreaRepository.py

@@ -5,18 +5,17 @@ import cv2
 import datetime
 import logging
 from pathlib import Path
-from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError
+from .BaseRepository import BaseRepository
 from ..models.restricted_area_violation import RestrictedAreaViolationEntity
 from ..database.DatabaseManager import DatabaseManager
 from ..util.DrawingUtils import DrawingUtils
 
-class RestrictedAreaRepository:
+class RestrictedAreaRepository(BaseRepository):
     def __init__(self):
+        super().__init__(db_name="default")
         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "restricted_violations"
         os.makedirs(self.storage_dir, exist_ok=True)
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("default")
 
     def save_area_violation(self, pipeline_id, worker_source_id, frame_id, tracked_objects, frame, frame_drawer):
         """
@@ -50,45 +49,45 @@ class RestrictedAreaRepository:
         cv2.imwrite(cropped_image_path, cropped_image)
 
         try:
-            new_detection = RestrictedAreaViolationEntity(
-                worker_source_id=worker_source_id,
-                person_id=person_id,
-                image_path=full_image_path,
-                image_tile_path=cropped_image_path,
-                confidence_score=tracked_obj.get("confidence", 1),
-                b_box_x1=bbox[0],
-                b_box_y1=bbox[1],
-                b_box_x2=bbox[2],
-                b_box_y2=bbox[3],
-            )
-            self.session.add(new_detection)
-            self.session.flush()
-            self.session.commit()
-            logging.info(f"✅ Inserted restricted area violation for Person {person_id}")
-
-            # Trigger detection callback
-            try:
-                from ..core_service import CoreService
-                from ..detection.detection_processing.HumanDetectionProcessor import HumanDetectionProcessor
-
-                # Create unified detection data using the processor's factory method
-                unified_data = HumanDetectionProcessor.create_detection_data(
-                    pipeline_id=pipeline_id,
+            with self._get_session() as session:
+                new_detection = RestrictedAreaViolationEntity(
                     worker_source_id=worker_source_id,
                     person_id=person_id,
-                    detection_id=new_detection.id if hasattr(new_detection, 'id') else f"area_{person_id}_{current_datetime}",
-                    tracked_obj=tracked_obj,
                     image_path=full_image_path,
                     image_tile_path=cropped_image_path,
-                    frame_id=frame_id
+                    confidence_score=tracked_obj.get("confidence", 1),
+                    b_box_x1=bbox[0],
+                    b_box_y1=bbox[1],
+                    b_box_x2=bbox[2],
+                    b_box_y2=bbox[3],
                 )
-
-                # Trigger callbacks
-                CoreService.trigger_detection(unified_data)
-
-            except Exception as e:
-                logging.warning(f"⚠️ Failed to trigger area violation callback: {e}")
+                session.add(new_detection)
+                session.flush()
+                # Commit happens automatically via context manager
+                logging.info(f"✅ Inserted restricted area violation for Person {person_id}")
+
+                # Trigger detection callback
+                try:
+                    from ..core_service import CoreService
+                    from ..detection.detection_processing.HumanDetectionProcessor import HumanDetectionProcessor
+
+                    # Create unified detection data using the processor's factory method
+                    unified_data = HumanDetectionProcessor.create_detection_data(
+                        pipeline_id=pipeline_id,
+                        worker_source_id=worker_source_id,
+                        person_id=person_id,
+                        detection_id=new_detection.id if hasattr(new_detection, 'id') else f"area_{person_id}_{current_datetime}",
+                        tracked_obj=tracked_obj,
+                        image_path=full_image_path,
+                        image_tile_path=cropped_image_path,
+                        frame_id=frame_id
+                    )
+
+                    # Trigger callbacks
+                    CoreService.trigger_detection(unified_data)
+
+                except Exception as e:
+                    logging.warning(f"⚠️ Failed to trigger area violation callback: {e}")
 
         except SQLAlchemyError as e:
-            self.session.rollback()
             logging.error(f"❌ Database error while saving detection: {e}")
nedo_vision_worker_core/repositories/WorkerSourcePipelineDebugRepository.py

@@ -3,15 +3,14 @@ import json
 import os
 import cv2
 import numpy as np
-from sqlalchemy.orm import Session
+from .BaseRepository import BaseRepository
 from ..database.DatabaseManager import DatabaseManager
 from ..models.worker_source_pipeline_debug import WorkerSourcePipelineDebugEntity
 
 
-class WorkerSourcePipelineDebugRepository:
+class WorkerSourcePipelineDebugRepository(BaseRepository):
     def __init__(self):
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("default")
+        super().__init__(db_name="default")
         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "debug_image"
         os.makedirs(self.storage_dir, exist_ok=True)
 
@@ -21,24 +20,25 @@ class WorkerSourcePipelineDebugRepository:
 
         :return: A list of pipeline IDs (str) with null data.
         """
-        now = datetime.now(timezone.utc)
-        cutoff_time = now - timedelta(minutes=1)
+        with self._get_session() as session:
+            now = datetime.now(timezone.utc)
+            cutoff_time = now - timedelta(minutes=1)
 
-        self.session.query(WorkerSourcePipelineDebugEntity)\
-            .filter(
-                WorkerSourcePipelineDebugEntity.data == None,
-                WorkerSourcePipelineDebugEntity.created_at < cutoff_time
-            ).delete(synchronize_session=False)
+            session.query(WorkerSourcePipelineDebugEntity)\
+                .filter(
+                    WorkerSourcePipelineDebugEntity.data == None,
+                    WorkerSourcePipelineDebugEntity.created_at < cutoff_time
+                ).delete(synchronize_session=False)
 
-        self.session.commit()
+            session.commit()
 
-        results = self.session.query(
-            WorkerSourcePipelineDebugEntity.worker_source_pipeline_id
-        ).filter(
-            WorkerSourcePipelineDebugEntity.data == None
-        ).distinct().all()
+            results = session.query(
+                WorkerSourcePipelineDebugEntity.worker_source_pipeline_id
+            ).filter(
+                WorkerSourcePipelineDebugEntity.data == None
+            ).distinct().all()
 
-        return [row[0] for row in results]
+            return [row[0] for row in results]
 
     def update_debug_entries_by_pipeline_id(self, pipeline_id: int, image, data: str):
         """
@@ -48,34 +48,35 @@ class WorkerSourcePipelineDebugRepository:
         :param new_data: The new data to update the entries with.
         :return: The number of updated entries.
         """
-        now = datetime.now(timezone.utc)
-        current_datetime = now.strftime("%Y%m%d_%H%M%S")
+        with self._get_session() as session:
+            now = datetime.now(timezone.utc)
+            current_datetime = now.strftime("%Y%m%d_%H%M%S")
 
-        stringified_data = json.dumps(
-            {
-                "timestamp": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
-                "tracked_objects": data,
-            },
-            default=lambda o: (
-                float(o) if isinstance(o, np.floating) else
-                int(o) if isinstance(o, np.integer) else
-                list(o) if isinstance(o, (np.ndarray, tuple)) else
-                str(o)
-            )
-        )
-
-        full_image_filename = f"{pipeline_id}_{current_datetime}.jpg"
-        full_image_path = os.path.join(self.storage_dir, full_image_filename)
-        cv2.imwrite(full_image_path, image)
-
-        updated_entries = self.session.query(WorkerSourcePipelineDebugEntity)\
-            .filter_by(worker_source_pipeline_id=pipeline_id)\
-            .update(
+            stringified_data = json.dumps(
                 {
-                    "image_path": full_image_path,
-                    "data": stringified_data
+                    "timestamp": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
+                    "tracked_objects": data,
                 },
-                synchronize_session="fetch"
-            )
-        self.session.commit()
-        return updated_entries
+                default=lambda o: (
+                    float(o) if isinstance(o, np.floating) else
+                    int(o) if isinstance(o, np.integer) else
+                    list(o) if isinstance(o, (np.ndarray, tuple)) else
+                    str(o)
+                )
+            )
+
+            full_image_filename = f"{pipeline_id}_{current_datetime}.jpg"
+            full_image_path = os.path.join(self.storage_dir, full_image_filename)
+            cv2.imwrite(full_image_path, image)
+
+            updated_entries = session.query(WorkerSourcePipelineDebugEntity)\
+                .filter_by(worker_source_pipeline_id=pipeline_id)\
+                .update(
+                    {
+                        "image_path": full_image_path,
+                        "data": stringified_data
+                    },
+                    synchronize_session="fetch"
+                )
+            # Commit happens automatically via context manager
+            return updated_entries
nedo_vision_worker_core/repositories/WorkerSourcePipelineDetectionRepository.py

@@ -3,16 +3,15 @@ import json
 import os
 import cv2
 import numpy as np
-from sqlalchemy.orm import Session
+from .BaseRepository import BaseRepository
 from ..ai.FrameDrawer import FrameDrawer
 from ..database.DatabaseManager import DatabaseManager
 from ..models.worker_source_pipeline_detection import WorkerSourcePipelineDetectionEntity
 
 
-class WorkerSourcePipelineDetectionRepository:
+class WorkerSourcePipelineDetectionRepository(BaseRepository):
     def __init__(self):
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("default")
+        super().__init__(db_name="default")
         self.storage_dir = DatabaseManager.STORAGE_PATHS["files"] / "detection_image"
         os.makedirs(self.storage_dir, exist_ok=True)
 
@@ -56,16 +55,16 @@ class WorkerSourcePipelineDetectionRepository:
         )
 
         try:
-            new_detection = WorkerSourcePipelineDetectionEntity(
-                worker_source_pipeline_id=pipeline_id,
-                image_path=full_image_path,
-                data=stringified_data,
-                created_at=datetime.utcnow()
-            )
-            self.session.add(new_detection)
-            self.session.flush()
-            self.session.commit()
-            print(f"✅ Inserted detection data for pipeline {pipeline_id}")
+            with self._get_session() as session:
+                new_detection = WorkerSourcePipelineDetectionEntity(
+                    worker_source_pipeline_id=pipeline_id,
+                    image_path=full_image_path,
+                    data=stringified_data,
+                    created_at=datetime.utcnow()
+                )
+                session.add(new_detection)
+                session.flush()
+                # Commit happens automatically via context manager
+                print(f"✅ Inserted detection data for pipeline {pipeline_id}")
         except Exception as e:
-            self.session.rollback()
             print(f"❌ Database error while saving detection: {e}")
nedo_vision_worker_core/repositories/WorkerSourcePipelineRepository.py

@@ -1,15 +1,13 @@
 import json
-from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError
-from ..database.DatabaseManager import DatabaseManager
+from .BaseRepository import BaseRepository
 from ..models.worker_source_pipeline import WorkerSourcePipelineEntity
 from ..models.worker_source_pipeline_config import WorkerSourcePipelineConfigEntity
 
 
-class WorkerSourcePipelineRepository:
+class WorkerSourcePipelineRepository(BaseRepository):
     def __init__(self):
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("config")
+        super().__init__(db_name="config")
 
     def get_all_pipelines(self):
         """
@@ -18,8 +16,14 @@ class WorkerSourcePipelineRepository:
         Returns:
             list: A list of WorkerSourcePipelineEntity records.
         """
-        self.session.expire_all()
-        return self.session.query(WorkerSourcePipelineEntity).all()
+        with self._get_session() as session:
+            session.expire_all()
+            # Query and detach from session before returning
+            pipelines = session.query(WorkerSourcePipelineEntity).all()
+            # Expunge objects so they can be used outside session
+            for pipeline in pipelines:
+                session.expunge(pipeline)
+            return pipelines
 
     def get_pipeline_configs_by_pipeline_id(self, pipeline_id):
         """
@@ -38,42 +42,70 @@ class WorkerSourcePipelineRepository:
             dict: A dictionary mapping pipeline_config_code to its configuration details.
         """
         try:
-            pipeline_configs = (
-                self.session.query(WorkerSourcePipelineConfigEntity)
-                .filter(WorkerSourcePipelineConfigEntity.worker_source_pipeline_id == pipeline_id)
-                .all()
-            )
+            with self._get_session() as session:
+                pipeline_configs = (
+                    session.query(WorkerSourcePipelineConfigEntity)
+                    .filter(WorkerSourcePipelineConfigEntity.worker_source_pipeline_id == pipeline_id)
+                    .all()
+                )
 
-            def parse_value(value):
-                """Attempts to parse the value as JSON if applicable."""
-                if not value:
-                    return value  # Keep None or empty string as is
-
-                value = value.strip()  # Remove leading/trailing spaces
-                if (value.startswith("{") and value.endswith("}")) or (value.startswith("[") and value.endswith("]")):
-                    try:
-                        return json.loads(value)  # Parse JSON object or list
-                    except json.JSONDecodeError:
-                        pass  # Keep as string if parsing fails
-                return value  # Return original value if not JSON
+                def parse_value(value):
+                    """Attempts to parse the value as JSON if applicable."""
+                    if not value:
+                        return value  # Keep None or empty string as is
+
+                    value = value.strip()  # Remove leading/trailing spaces
+                    if (value.startswith("{") and value.endswith("}")) or (value.startswith("[") and value.endswith("]")):
+                        try:
+                            return json.loads(value)  # Parse JSON object or list
+                        except json.JSONDecodeError:
+                            pass  # Keep as string if parsing fails
+                    return value  # Return original value if not JSON
 
-            # Convert result into a dictionary with pipeline_config_code as key
-            config_dict = {
-                config.pipeline_config_code: {
-                    "id": config.id,
-                    "is_enabled": config.is_enabled,  # Keep original boolean value
-                    "value": parse_value(config.value),  # Parse JSON if applicable
-                    "name": config.pipeline_config_name
+                # Convert result into a dictionary with pipeline_config_code as key
+                config_dict = {
+                    config.pipeline_config_code: {
+                        "id": config.id,
+                        "is_enabled": config.is_enabled,  # Keep original boolean value
+                        "value": parse_value(config.value),  # Parse JSON if applicable
+                        "name": config.pipeline_config_name
+                    }
+                    for config in pipeline_configs
                 }
-                for config in pipeline_configs
-            }
 
-            return config_dict
+                return config_dict
 
         except SQLAlchemyError as e:
             print(f"Database error while retrieving pipeline configs: {e}")
             return {}
 
     def get_worker_source_pipeline(self, pipeline_id):
-        self.session.expire_all()
-        return self.session.query(WorkerSourcePipelineEntity).filter_by(id=pipeline_id).first()
+        with self._get_session() as session:
+            session.expire_all()
+            pipeline = session.query(WorkerSourcePipelineEntity).filter_by(id=pipeline_id).first()
+            if pipeline:
+                session.expunge(pipeline)  # Detach from session
+            return pipeline
+
+    def update_pipeline_status(self, pipeline_id: str, status_code: str) -> bool:
+        """
+        Update the status of a pipeline.
+
+        Args:
+            pipeline_id: The ID of the pipeline to update
+            status_code: The new status code ('run', 'stop', 'restart')
+
+        Returns:
+            bool: True if update was successful, False otherwise
+        """
+        try:
+            with self._get_session() as session:
+                pipeline = session.query(WorkerSourcePipelineEntity).filter_by(id=pipeline_id).first()
+                if pipeline:
+                    pipeline.pipeline_status_code = status_code
+                    # Commit happens automatically via context manager
+                    return True
+                return False
+        except SQLAlchemyError as e:
+            print(f"Database error while updating pipeline status: {e}")
+            return False
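
The new `update_pipeline_status` gives callers (plausibly `PipelineManager`, whose diff is +63 -19 but not expanded here) a one-call way to flip a pipeline between states. A hypothetical call site, with made-up IDs:

repo = WorkerSourcePipelineRepository()
if repo.update_pipeline_status(pipeline_id="7", status_code="restart"):
    print("pipeline 7 flagged for restart")
else:
    print("pipeline 7 not found, or a database error occurred")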
nedo_vision_worker_core/repositories/WorkerSourceRepository.py

@@ -1,12 +1,10 @@
-from sqlalchemy.orm import Session
-from ..database.DatabaseManager import DatabaseManager
+from .BaseRepository import BaseRepository
 from ..models.worker_source import WorkerSourceEntity
 
 
-class WorkerSourceRepository:
+class WorkerSourceRepository(BaseRepository):
     def __init__(self):
-        self.db_manager = DatabaseManager()
-        self.session: Session = self.db_manager.get_session("config")
+        super().__init__(db_name="config")
 
     def get_worker_sources(self):
         """
@@ -15,5 +13,9 @@ class WorkerSourceRepository:
         Returns:
             list: A list of WorkerSourceEntity records.
         """
-        self.session.expire_all()
-        return self.session.query(WorkerSourceEntity).all()
+        with self._get_session() as session:
+            session.expire_all()
+            sources = session.query(WorkerSourceEntity).all()
+            for source in sources:
+                session.expunge(source)
+            return sources
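
The expunge-before-return pattern used in `get_all_pipelines`, `get_worker_source_pipeline`, and `get_worker_sources` is what lets these entities outlive the now short-lived sessions: column values that were already loaded stay readable, while accessing an unloaded or expired attribute on a detached object raises `DetachedInstanceError`. A self-contained demonstration of the safe path (generic model, not from this package):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Source(Base):  # illustrative stand-in for WorkerSourceEntity
    __tablename__ = "source"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

with Session() as session:
    session.add(Source(name="cam-1"))
    session.commit()

with Session() as session:
    src = session.query(Source).first()
    session.expunge(src)    # detach: already-loaded columns remain usable

print(src.id, src.name)     # safe to read after the session is gone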