msight-vision 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. msight_vision-0.1.0/PKG-INFO +28 -0
  2. msight_vision-0.1.0/README.md +12 -0
  3. msight_vision-0.1.0/cli/__init__.py +0 -0
  4. msight_vision-0.1.0/cli/launch_2d_viewer.py +10 -0
  5. msight_vision-0.1.0/cli/launch_custom_fuser.py +23 -0
  6. msight_vision-0.1.0/cli/launch_finite_difference_state_estimator.py +22 -0
  7. msight_vision-0.1.0/cli/launch_road_user_list_viewer.py +23 -0
  8. msight_vision-0.1.0/cli/launch_sort_tracker.py +24 -0
  9. msight_vision-0.1.0/cli/launch_yolo_onestage_detection.py +22 -0
  10. msight_vision-0.1.0/msight_vision/__init__.py +8 -0
  11. msight_vision-0.1.0/msight_vision/base.py +99 -0
  12. msight_vision-0.1.0/msight_vision/detector_yolo.py +87 -0
  13. msight_vision-0.1.0/msight_vision/fuser.py +325 -0
  14. msight_vision-0.1.0/msight_vision/localizer.py +32 -0
  15. msight_vision-0.1.0/msight_vision/msight_core/__init__.py +6 -0
  16. msight_vision-0.1.0/msight_vision/msight_core/detection.py +103 -0
  17. msight_vision-0.1.0/msight_vision/msight_core/fusion.py +64 -0
  18. msight_vision-0.1.0/msight_vision/msight_core/state_estimation.py +38 -0
  19. msight_vision-0.1.0/msight_vision/msight_core/tracking.py +31 -0
  20. msight_vision-0.1.0/msight_vision/msight_core/viewer.py +55 -0
  21. msight_vision-0.1.0/msight_vision/msight_core/warper.py +98 -0
  22. msight_vision-0.1.0/msight_vision/state_estimator.py +121 -0
  23. msight_vision-0.1.0/msight_vision/tracker.py +525 -0
  24. msight_vision-0.1.0/msight_vision/utils/__init__.py +3 -0
  25. msight_vision-0.1.0/msight_vision/utils/data.py +80 -0
  26. msight_vision-0.1.0/msight_vision/utils/typing.py +18 -0
  27. msight_vision-0.1.0/msight_vision/utils/vis.py +17 -0
  28. msight_vision-0.1.0/msight_vision/warper.py +89 -0
  29. msight_vision-0.1.0/msight_vision.egg-info/PKG-INFO +28 -0
  30. msight_vision-0.1.0/msight_vision.egg-info/SOURCES.txt +34 -0
  31. msight_vision-0.1.0/msight_vision.egg-info/dependency_links.txt +1 -0
  32. msight_vision-0.1.0/msight_vision.egg-info/entry_points.txt +7 -0
  33. msight_vision-0.1.0/msight_vision.egg-info/requires.txt +6 -0
  34. msight_vision-0.1.0/msight_vision.egg-info/top_level.txt +2 -0
  35. msight_vision-0.1.0/pyproject.toml +38 -0
  36. msight_vision-0.1.0/setup.cfg +4 -0
@@ -0,0 +1,28 @@
1
+ Metadata-Version: 2.4
2
+ Name: msight_vision
3
+ Version: 0.1.0
4
+ Summary: 2D detection module for the MSight roadside perception system
5
+ Author-email: Rusheng Zhang <rushengz@umich.edu>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/michigan-traffic-lab/MSight_Vision
8
+ Requires-Python: >=3.8
9
+ Description-Content-Type: text/markdown
10
+ Requires-Dist: numpy
11
+ Requires-Dist: opencv-python
12
+ Requires-Dist: matplotlib
13
+ Requires-Dist: geopy
14
+ Requires-Dist: filterpy
15
+ Requires-Dist: ultralytics
16
+
17
+ # MSight 2D Detection Library
18
+
19
+ # Installation
20
+ ## 1. Install prerequisites
21
+ This library is based on PyTorch; please follow [the PyTorch official website](https://pytorch.org/) to install the correct PyTorch version that is compatible with your machine's CUDA configuration.
22
+
23
+ ## 2. MSight dependency
24
+ ### MSight Base
25
+ This library depends on [msight_base](https://github.com/michigan-traffic-lab/MSight_base); please follow its installation guide to install it.
26
+
27
+ ## MSight Edge (Optional)
28
+ If you want to use the packaged nodes of this library, you need to install [msight_edge](https://github.com/michigan-traffic-lab/MSight_Edge2); follow its instructions to install it.
@@ -0,0 +1,12 @@
1
+ # MSight 2D Detection Library
2
+
3
+ # Installation
4
+ ## 1. Install prerequisites
5
+ This library is based on PyTorch; please follow [the PyTorch official website](https://pytorch.org/) to install the correct PyTorch version that is compatible with your machine's CUDA configuration.
6
+
7
+ ## 2. MSight dependency
8
+ ### MSight Base
9
+ This library depends on [msight_base](https://github.com/michigan-traffic-lab/MSight_base); please follow its installation guide to install it.
10
+
11
+ ## MSight Edge (Optional)
12
+ If you want to use the packaged nodes of this library, you need to install [msight_edge](https://github.com/michigan-traffic-lab/MSight_Edge2); follow its instructions to install it.
File without changes
@@ -0,0 +1,10 @@
1
from msight_vision.msight_core import DetectionResults2DViewerNode
from msight_core.utils import get_node_config_from_args, get_default_arg_parser


def main():
    """Parse CLI arguments and run the Detection Results 2D Viewer node."""
    parser = get_default_arg_parser(description="Launch Detection Results 2D Viewer Node", node_class=DetectionResults2DViewerNode)
    args = parser.parse_args()
    detection_node = DetectionResults2DViewerNode(
        configs=get_node_config_from_args(args)
    )
    # Blocks until the node is shut down.
    detection_node.spin()


# Every sibling launcher script guards its entry point; without this the
# viewer could not be launched by running the module directly.
if __name__ == "__main__":
    main()
@@ -0,0 +1,23 @@
1
from msight_vision.msight_core import FuserNode
from msight_core.utils import get_node_config_from_args, get_default_arg_parser
import time


def main():
    """Parse CLI arguments and launch the Fuser node."""
    parser = get_default_arg_parser(description="Launch Fuser Node", node_class=FuserNode)
    parser.add_argument("--fusion-config", "-fc", type=str, required=True, help="Path to the configuration file")
    parser.add_argument("--wait", "-w", type=int, default=0, help="Wait time before starting the node (in seconds)")
    args = parser.parse_args()

    # Optional startup delay, e.g. to let upstream nodes come online first.
    if args.wait > 0:
        print(f"Waiting for {args.wait} seconds before starting the node...")
        time.sleep(args.wait)

    fuser_node = FuserNode(get_node_config_from_args(args), args.fusion_config)
    # Blocks until the node is shut down.
    fuser_node.spin()


if __name__ == "__main__":
    main()
@@ -0,0 +1,22 @@
1
from msight_vision.msight_core import FiniteDifferenceStateEstimatorNode
from msight_core.utils import get_node_config_from_args, get_default_arg_parser
import time


def main():
    """Parse CLI arguments and launch the finite-difference state estimator node."""
    parser = get_default_arg_parser(description="Launch Finite Difference State Estimator Node", node_class=FiniteDifferenceStateEstimatorNode)
    parser.add_argument("--estimator-configs", "-ec", type=str, required=True, help="Path to the configuration file")
    parser.add_argument("--wait", "-w", type=int, default=0, help="Wait time before starting the node (in seconds)")
    args = parser.parse_args()

    # Optional startup delay, e.g. to let upstream nodes come online first.
    if args.wait > 0:
        print(f"Waiting for {args.wait} seconds before starting the node...")
        time.sleep(args.wait)

    estimator_node = FiniteDifferenceStateEstimatorNode(
        get_node_config_from_args(args),
        args.estimator_configs,
    )
    # Blocks until the node is shut down.
    estimator_node.spin()


if __name__ == "__main__":
    main()
@@ -0,0 +1,23 @@
1
from msight_vision.msight_core import RoadUserListViewerNode
from msight_core.utils import get_node_config_from_args, get_default_arg_parser


def main():
    """Parse CLI arguments and launch the Road User List Viewer node."""
    parser = get_default_arg_parser(description="Launch Road User List Viewer Node", node_class=RoadUserListViewerNode)
    parser.add_argument("--basemap", type=str, required=True, help="Path to the basemap image")
    parser.add_argument("--show-trajectory", action='store_true', help="Flag to show trajectory")
    parser.add_argument("--show-heading", action='store_true', help="Flag to show heading")
    args = parser.parse_args()

    viewer_node = RoadUserListViewerNode(
        get_node_config_from_args(args),
        args.basemap,
        args.show_trajectory,
        args.show_heading,
    )
    # Blocks until the node is shut down.
    viewer_node.spin()


if __name__ == "__main__":
    main()
@@ -0,0 +1,24 @@
1
from msight_vision.msight_core import SortTrackerNode
from msight_core.utils import get_node_config_from_args, get_default_arg_parser
import time


def main():
    """Parse CLI arguments and launch the SORT tracker node."""
    parser = get_default_arg_parser(description="Launch SORT Tracker Node", node_class=SortTrackerNode)
    parser.add_argument("--tracking-configs", "-tc", type=str, required=True,
                        help="Path to the configuration file")
    parser.add_argument("--wait", "-w", type=int, default=0, help="Wait time before starting the node (in seconds)")
    args = parser.parse_args()

    # Optional startup delay, e.g. to let upstream nodes come online first.
    if args.wait > 0:
        print(f"Waiting for {args.wait} seconds before starting the node...")
        time.sleep(args.wait)

    tracker_node = SortTrackerNode(get_node_config_from_args(args), args.tracking_configs)
    # Blocks until the node is shut down.
    tracker_node.spin()


if __name__ == "__main__":
    main()
@@ -0,0 +1,22 @@
1
from msight_vision.msight_core import YoloOneStageDetectionNode
from msight_core.utils import get_node_config_from_args, get_default_arg_parser
import time


def main():
    """Parse CLI arguments and launch the YOLO one-stage detection node."""
    parser = get_default_arg_parser(description="Launch YOLO One-Stage Detection Node", node_class=YoloOneStageDetectionNode)
    parser.add_argument("--det-configs", "-dc", type=str, required=True, help="Path to the configuration file")
    parser.add_argument("--wait", "-w", type=int, default=0, help="Wait time before starting the node (in seconds)")
    args = parser.parse_args()

    # Optional startup delay, e.g. to let upstream nodes come online first.
    if args.wait > 0:
        print(f"Waiting for {args.wait} seconds before starting the node...")
        time.sleep(args.wait)

    detector_node = YoloOneStageDetectionNode(get_node_config_from_args(args), args.det_configs)
    # Blocks until the node is shut down.
    detector_node.spin()


if __name__ == "__main__":
    main()
@@ -0,0 +1,8 @@
1
+ from importlib.metadata import version
2
+ from .detector_yolo import YoloDetector, Yolo26Detector
3
+ from .localizer import HashLocalizer
4
+ from .tracker import SortTracker
5
+ from .warper import ClassicWarper, ClassicWarperWithExternalUpdate
6
+ from .fuser import FuserBase
7
+ from .state_estimator import FiniteDifferenceStateEstimator
8
+ __version__ = version("msight_vision")
@@ -0,0 +1,99 @@
1
+ from msight_base import DetectionResultBase, DetectedObjectBase, RoadUserPoint
2
+ import numpy as np
3
+ from typing import List, Dict
4
+
5
class DetectedObject2D(DetectedObjectBase):
    """Detected object for 2D images."""

    def __init__(self, box: list, class_id: int, score: float, pixel_bottom_center: List[float], obj_id: str = None, lat: float = None, lon: float = None, x: float = None, y: float = None):
        """
        Initialize the detected object.
        :param box: bounding box coordinates (x1, y1, x2, y2)
        :param class_id: class ID of the detected object
        :param score: confidence score of the detection
        :param pixel_bottom_center: pixel coordinates used as the object's reference point
        :param obj_id: unique ID of the detected object (optional)
        :param lat: latitude of the detected object (optional)
        :param lon: longitude of the detected object (optional)
        :param x: x coordinate in the coordinate system of interest (e.g. UTM) of the detected object (optional)
        :param y: y coordinate in the coordinate system of interest (e.g. UTM) of the detected object (optional)
        """
        super().__init__()
        self.box = box
        self.class_id = class_id
        self.score = score
        self.pixel_bottom_center = pixel_bottom_center
        self.obj_id = obj_id
        self.lat = lat
        self.lon = lon
        self.x = x
        self.y = y

    def to_dict(self):
        """
        Convert the detected object to a dictionary.
        :return: dictionary representation of the detected object
        """
        return {
            "box": self.box,
            "class_id": self.class_id,
            "score": self.score,
            "obj_id": self.obj_id,
            "lat": self.lat,
            "lon": self.lon,
            "x": self.x,
            "y": self.y,
            "pixel_bottom_center": self.pixel_bottom_center,
        }

    @staticmethod
    def from_dict(data: dict):
        """
        Create a DetectedObject2D instance from a dictionary.
        :param data: dictionary representation of the detected object
        :return: DetectedObject2D instance
        """
        # Use plain .get() so legitimate falsy values survive the round trip.
        # The previous `data.get(...) or None` pattern silently converted
        # coordinates of 0.0 (valid lat/lon/x/y) into None.
        return DetectedObject2D(
            box=data["box"],
            class_id=data["class_id"],
            score=data["score"],
            obj_id=data.get("obj_id"),
            lat=data.get("lat"),
            lon=data.get("lon"),
            x=data.get("x"),
            y=data.get("y"),
            pixel_bottom_center=data.get("pixel_bottom_center"),
        )

    def __repr__(self):
        return f"DetectedObject2D(box={self.box}, class_id={self.class_id}, score={self.score}, obj_id={self.obj_id}, lat={self.lat}, lon={self.lon}, x={self.x}, y={self.y})"
69
+
70
class DetectionResult2D(DetectionResultBase):
    """Detection result for 2D images: the set of objects detected in one frame."""

    def __init__(self, object_list: List[DetectedObject2D], timestamp: int, sensor_type: str):
        """
        Initialize the detection result.
        :param object_list: list of objects detected in the frame
        :param timestamp: timestamp of the source image
        :param sensor_type: identifier of the sensor that produced the image
        """
        # Thin wrapper: all storage/behavior lives in DetectionResultBase.
        super().__init__(object_list, timestamp, sensor_type)
79
+
80
class ImageDetector2DBase:
    """Detector base class: a detector finds objects in a 2D image."""

    def detect(self, image: np.ndarray) -> DetectionResult2D:
        """
        Detect objects in the given image.
        :param image: input image
        :return: detection result holding the detected objects
        """
        raise NotImplementedError("detect method not implemented")
88
+
89
class TrackerBase:
    """Tracker base class: a tracker associates detections across frames."""

    def __init__(self):
        pass

    def track(self, detection_list) -> Dict[str, RoadUserPoint]:
        """
        Track the detected objects across frames.

        Renamed the parameter from `list` (which shadowed the builtin and
        matched neither the docstring nor any caller-visible contract) to
        `detection_list`.
        :param detection_list: detections for the current frame
        :return: mapping from track ID to the tracked road user point
        """
        raise NotImplementedError("track method not implemented")
@@ -0,0 +1,87 @@
1
+ from numpy import ndarray
2
+ from msight_vision.base import DetectionResult2D, DetectedObject2D
3
+ from .base import ImageDetector2DBase
4
+ from ultralytics import YOLO
5
+ from pathlib import Path
6
+
7
class YoloDetector(ImageDetector2DBase):
    """Ultralytics-YOLO-based detector for 2D images."""

    def __init__(self, model_path: Path, device: str = "cpu", confthre: float = 0.25, nmsthre: float = 0.45, fp16: bool = False, class_agnostic_nms: bool = False):
        """
        Initialize the YOLO detector.
        :param model_path: path to the YOLO model weights
        :param device: device to run the model on (e.g., 'cpu', 'cuda')
        :param confthre: confidence threshold passed to the model
        :param nmsthre: IoU threshold passed to the model's NMS
        :param fp16: whether to run inference in half precision
        :param class_agnostic_nms: whether NMS ignores class labels
        """
        super().__init__()
        self.model = YOLO(str(model_path))
        self.device = device
        self.confthre = confthre
        self.nmsthre = nmsthre
        self.fp16 = fp16
        self.class_agnostic_nms = class_agnostic_nms

    def convert_yolo_result_to_detection_result(self, yolo_output_results, timestamp, sensor_type):
        """
        Convert raw YOLO output into a DetectionResult2D.
        :param yolo_output_results: output of the YOLO model call
        :param timestamp: timestamp of the image
        :param sensor_type: type of the sensor
        :return: DetectionResult2D instance
        """
        boxes = yolo_output_results[0].boxes
        xyxy = boxes.xyxy.cpu().numpy()
        scores = boxes.conf.cpu().numpy()
        classes = boxes.cls.cpu().numpy()

        detected_objects = []
        for box, cls, conf in zip(xyxy, classes, scores):
            x1 = float(box[0])
            y1 = float(box[1])
            x2 = float(box[2])
            y2 = float(box[3])
            # NOTE(review): despite the field name, this is the geometric box
            # center ((x1+x2)/2, (y1+y2)/2), not the bottom-edge center —
            # confirm downstream localization expects this convention.
            detected_objects.append(
                DetectedObject2D(
                    box=[x1, y1, x2, y2],
                    class_id=int(cls),
                    score=float(conf),
                    pixel_bottom_center=[(x1 + x2) / 2, (y1 + y2) / 2],
                )
            )

        return DetectionResult2D(detected_objects, timestamp, sensor_type)

    def detect(self, image: ndarray, timestamp, sensor_type) -> DetectionResult2D:
        """
        Run the model on an image and return the converted detection result.
        :param image: input image
        :param timestamp: timestamp of the image
        :param sensor_type: type of the sensor
        :return: DetectionResult2D instance
        """
        yolo_output_results = self.model(image, device=self.device, conf=self.confthre, iou=self.nmsthre, half=self.fp16, verbose=False, agnostic_nms=self.class_agnostic_nms)
        return self.convert_yolo_result_to_detection_result(yolo_output_results, timestamp, sensor_type)
72
+
73
class Yolo26Detector(YoloDetector):
    """YOLO detector variant that forwards the `end2end` inference flag."""

    def __init__(self, model_path: Path, device: str = "cpu", confthre: float = 0.25, nmsthre: float = 0.45, fp16: bool = False, class_agnostic_nms: bool = False, end2end: bool = False):
        """
        Initialize the detector; all parameters except `end2end` are
        forwarded unchanged to YoloDetector.
        :param end2end: whether to request end-to-end inference from the model
        """
        super().__init__(model_path, device, confthre, nmsthre, fp16, class_agnostic_nms)
        self.end2end = end2end

    def detect(self, image: ndarray, timestamp, sensor_type) -> DetectionResult2D:
        """
        Run the model (with the extra `end2end` flag) and convert its output.
        :param image: input image
        :param timestamp: timestamp of the image
        :param sensor_type: type of the sensor
        :return: DetectionResult2D instance
        """
        yolo_output_results = self.model(image, device=self.device, conf=self.confthre, iou=self.nmsthre, half=self.fp16, verbose=False, agnostic_nms=self.class_agnostic_nms, end2end=self.end2end)
        return self.convert_yolo_result_to_detection_result(yolo_output_results, timestamp, sensor_type)