ultralytics 8.3.87__py3-none-any.whl → 8.3.89__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. tests/test_solutions.py +34 -45
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +46 -39
  4. ultralytics/data/augment.py +2 -2
  5. ultralytics/data/base.py +7 -9
  6. ultralytics/data/converter.py +30 -29
  7. ultralytics/data/utils.py +20 -28
  8. ultralytics/engine/model.py +2 -2
  9. ultralytics/engine/tuner.py +11 -21
  10. ultralytics/hub/__init__.py +13 -17
  11. ultralytics/models/fastsam/model.py +4 -7
  12. ultralytics/models/nas/model.py +8 -14
  13. ultralytics/models/nas/predict.py +7 -9
  14. ultralytics/models/nas/val.py +7 -9
  15. ultralytics/models/rtdetr/predict.py +6 -9
  16. ultralytics/models/rtdetr/train.py +5 -8
  17. ultralytics/models/rtdetr/val.py +5 -8
  18. ultralytics/models/yolo/classify/predict.py +6 -9
  19. ultralytics/models/yolo/classify/train.py +5 -8
  20. ultralytics/models/yolo/classify/val.py +5 -8
  21. ultralytics/models/yolo/detect/predict.py +6 -9
  22. ultralytics/models/yolo/detect/train.py +5 -8
  23. ultralytics/models/yolo/detect/val.py +5 -8
  24. ultralytics/models/yolo/obb/predict.py +6 -9
  25. ultralytics/models/yolo/obb/train.py +5 -8
  26. ultralytics/models/yolo/obb/val.py +10 -15
  27. ultralytics/models/yolo/pose/predict.py +6 -9
  28. ultralytics/models/yolo/pose/train.py +5 -8
  29. ultralytics/models/yolo/pose/val.py +12 -17
  30. ultralytics/models/yolo/segment/predict.py +6 -9
  31. ultralytics/models/yolo/segment/train.py +5 -8
  32. ultralytics/models/yolo/segment/val.py +10 -15
  33. ultralytics/models/yolo/world/train.py +5 -8
  34. ultralytics/models/yolo/world/train_world.py +21 -25
  35. ultralytics/nn/modules/__init__.py +9 -12
  36. ultralytics/nn/tasks.py +7 -12
  37. ultralytics/solutions/__init__.py +14 -6
  38. ultralytics/solutions/ai_gym.py +39 -28
  39. ultralytics/solutions/analytics.py +22 -18
  40. ultralytics/solutions/distance_calculation.py +25 -25
  41. ultralytics/solutions/heatmap.py +40 -38
  42. ultralytics/solutions/instance_segmentation.py +69 -0
  43. ultralytics/solutions/object_blurrer.py +89 -0
  44. ultralytics/solutions/object_counter.py +35 -33
  45. ultralytics/solutions/object_cropper.py +84 -0
  46. ultralytics/solutions/parking_management.py +21 -9
  47. ultralytics/solutions/queue_management.py +20 -39
  48. ultralytics/solutions/region_counter.py +54 -51
  49. ultralytics/solutions/security_alarm.py +40 -30
  50. ultralytics/solutions/solutions.py +594 -16
  51. ultralytics/solutions/speed_estimation.py +34 -31
  52. ultralytics/solutions/streamlit_inference.py +34 -28
  53. ultralytics/solutions/trackzone.py +29 -18
  54. ultralytics/solutions/vision_eye.py +69 -0
  55. ultralytics/trackers/utils/kalman_filter.py +23 -23
  56. ultralytics/utils/__init__.py +5 -8
  57. ultralytics/utils/checks.py +25 -35
  58. ultralytics/utils/downloads.py +25 -48
  59. ultralytics/utils/instance.py +9 -11
  60. ultralytics/utils/ops.py +5 -9
  61. ultralytics/utils/plotting.py +8 -428
  62. ultralytics/utils/torch_utils.py +23 -33
  63. ultralytics/utils/tuner.py +5 -9
  64. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/METADATA +2 -2
  65. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/RECORD +69 -65
  66. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/LICENSE +0 -0
  67. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/WHEEL +0 -0
  68. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/entry_points.txt +0 -0
  69. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/top_level.txt +0 -0
ultralytics/solutions/instance_segmentation.py (new file)
@@ -0,0 +1,69 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+from ultralytics.utils.plotting import colors
+
+
+class InstanceSegmentation(BaseSolution):
+    """
+    A class to manage instance segmentation in images or video streams.
+
+    This class extends the BaseSolution class and provides functionality for performing instance segmentation, including
+    drawing segmented masks with bounding boxes and labels.
+
+    Attributes:
+        model (str): The segmentation model to use for inference.
+
+    Methods:
+        process: Processes the input image to perform instance segmentation and annotate results.
+
+    Examples:
+        >>> segmenter = InstanceSegmentation()
+        >>> frame = cv2.imread("frame.jpg")
+        >>> results = segmenter.segment(frame)
+        >>> print(f"Total segmented instances: {results['total_tracks']}")
+    """
+
+    def __init__(self, **kwargs):
+        """
+        Initializes the InstanceSegmentation class for detecting and annotating segmented instances.
+
+        Args:
+            **kwargs (Any): Keyword arguments passed to the BaseSolution parent class.
+                model (str): Model name or path, defaults to "yolo11n-seg.pt".
+        """
+        kwargs["model"] = kwargs.get("model", "yolo11n-seg.pt")
+        super().__init__(**kwargs)
+
+    def process(self, im0):
+        """
+        Performs instance segmentation on the input image and annotates the results.
+
+        Args:
+            im0 (numpy.ndarray): The input image for segmentation.
+
+        Returns:
+            (SolutionResults): Object containing the annotated image and total number of tracked instances.
+
+        Examples:
+            >>> segmenter = InstanceSegmentation()
+            >>> frame = cv2.imread("image.jpg")
+            >>> summary = segmenter.segment(frame)
+            >>> print(summary)
+        """
+        self.extract_tracks(im0)  # Extract tracks (bounding boxes, classes, and masks)
+        annotator = SolutionAnnotator(im0, self.line_width)
+
+        # Iterate over detected classes, track IDs, and segmentation masks
+        if self.masks is None:
+            self.LOGGER.warning("⚠️ No masks detected! Ensure you're using a supported Ultralytics segmentation model.")
+        else:
+            for cls, t_id, mask in zip(self.clss, self.track_ids, self.masks):
+                # Annotate the image with segmentation mask, mask color, and label
+                annotator.segmentation_mask(mask=mask, mask_color=colors(t_id, True), label=self.names[cls])
+
+        plot_im = annotator.result()
+        self.display_output(plot_im)  # Display the annotated output using the base class function
+
+        # Return SolutionResults
+        return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids))
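Usage note: a minimal sketch of driving the new class, assuming InstanceSegmentation is re-exported from ultralytics.solutions (this release also grows ultralytics/solutions/__init__.py) and using a placeholder video path. The docstring examples above call segmenter.segment(), but the only method the class defines is process():

    import cv2
    from ultralytics.solutions import InstanceSegmentation  # assumed re-export; see solutions/__init__.py changes above

    segmenter = InstanceSegmentation(model="yolo11n-seg.pt", show=False)  # documented default model

    cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder path
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break
        results = segmenter.process(frame)  # process() is the defined entry point
        print(f"Tracked instances this frame: {results.total_tracks}")
    cap.release()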
ultralytics/solutions/object_blurrer.py (new file)
@@ -0,0 +1,89 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+
+import cv2
+
+from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+from ultralytics.utils import LOGGER
+from ultralytics.utils.plotting import colors
+
+
+class ObjectBlurrer(BaseSolution):
+    """
+    A class to manage the blurring of detected objects in a real-time video stream.
+
+    This class extends the BaseSolution class and provides functionality for blurring objects based on detected bounding
+    boxes. The blurred areas are updated directly in the input image, allowing for privacy preservation or other effects.
+
+    Attributes:
+        blur_ratio (int): The intensity of the blur effect applied to detected objects (higher values create more blur).
+        iou (float): Intersection over Union threshold for object detection.
+        conf (float): Confidence threshold for object detection.
+
+    Methods:
+        process: Applies a blurring effect to detected objects in the input image.
+        extract_tracks: Extracts tracking information from detected objects.
+        display_output: Displays the processed output image.
+
+    Examples:
+        >>> blurrer = ObjectBlurrer()
+        >>> frame = cv2.imread("frame.jpg")
+        >>> processed_results = blurrer.process(frame)
+        >>> print(f"Total blurred objects: {processed_results.total_tracks}")
+    """
+
+    def __init__(self, **kwargs):
+        """
+        Initializes the ObjectBlurrer class for applying a blur effect to objects detected in video streams or images.
+
+        Args:
+            **kwargs (Any): Keyword arguments passed to the parent class and for configuration.
+                blur_ratio (float): Intensity of the blur effect (0.1-1.0, default=0.5).
+        """
+        super().__init__(**kwargs)
+        blur_ratio = kwargs.get("blur_ratio", 0.5)
+        if blur_ratio < 0.1:
+            LOGGER.warning("⚠️ blur ratio cannot be less than 0.1, updating it to default value 0.5")
+            blur_ratio = 0.5
+        self.blur_ratio = int(blur_ratio * 100)
+
+    def process(self, im0):
+        """
+        Applies a blurring effect to detected objects in the input image.
+
+        This method extracts tracking information, applies blur to regions corresponding to detected objects,
+        and annotates the image with bounding boxes.
+
+        Args:
+            im0 (numpy.ndarray): The input image containing detected objects.
+
+        Returns:
+            (SolutionResults): Object containing the processed image and number of tracked objects.
+                - plot_im (numpy.ndarray): The annotated output image with blurred objects.
+                - total_tracks (int): The total number of tracked objects in the frame.
+
+        Examples:
+            >>> blurrer = ObjectBlurrer()
+            >>> frame = cv2.imread("image.jpg")
+            >>> results = blurrer.process(frame)
+            >>> print(f"Blurred {results.total_tracks} objects")
+        """
+        self.extract_tracks(im0)  # Extract tracks
+        annotator = SolutionAnnotator(im0, self.line_width)
+
+        # Iterate over bounding boxes and classes
+        for box, cls in zip(self.boxes, self.clss):
+            # Crop and blur the detected object
+            blur_obj = cv2.blur(
+                im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])],
+                (self.blur_ratio, self.blur_ratio),
+            )
+            # Update the blurred area in the original image
+            im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] = blur_obj
+            annotator.box_label(box, label=self.names[cls], color=colors(cls, True))  # Annotate bounding box
+
+        plot_im = annotator.result()
+        self.display_output(plot_im)  # Display the output using the base class function
+
+        # Return a SolutionResults
+        return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids))
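Usage note: a minimal sketch under the same assumptions (class re-exported from ultralytics.solutions, placeholder image path, illustrative model name). Per the __init__ above, blur_ratio=0.5 becomes a 50x50 cv2.blur kernel:

    import cv2
    from ultralytics.solutions import ObjectBlurrer  # assumed re-export

    blurrer = ObjectBlurrer(model="yolo11n.pt", blur_ratio=0.5, show=False)  # 0.5 -> (50, 50) blur kernel

    frame = cv2.imread("frame.jpg")  # placeholder path
    results = blurrer.process(frame)
    print(f"Blurred {results.total_tracks} objects")  # results.plot_im holds the annotated frame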
ultralytics/solutions/object_counter.py
@@ -1,7 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-from ultralytics.solutions.solutions import BaseSolution
-from ultralytics.utils.plotting import Annotator, colors
+from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+from ultralytics.utils.plotting import colors
 
 
 class ObjectCounter(BaseSolution):
@@ -24,12 +24,12 @@ class ObjectCounter(BaseSolution):
         count_objects: Counts objects within a polygonal or linear region.
         store_classwise_counts: Initializes class-wise counts if not already present.
         display_counts: Displays object counts on the frame.
-        count: Processes input data (frames or object tracks) and updates counts.
+        process: Processes input data (frames or object tracks) and updates counts.
 
     Examples:
         >>> counter = ObjectCounter()
         >>> frame = cv2.imread("frame.jpg")
-        >>> processed_frame = counter.count(frame)
+        >>> results = counter.process(frame)
         >>> print(f"Inward count: {counter.in_count}, Outward count: {counter.out_count}")
     """
 
@@ -41,7 +41,7 @@
         self.out_count = 0  # Counter for objects moving outward
         self.counted_ids = []  # List of IDs of objects that have been counted
         self.classwise_counts = {}  # Dictionary for counts, categorized by object class
-        self.region_initialized = False  # Bool variable for region initialization
+        self.region_initialized = False  # Flag indicating whether the region has been initialized
 
         self.show_in = self.CFG["show_in"]
         self.show_out = self.CFG["show_out"]
@@ -51,7 +51,7 @@
         Counts objects within a polygonal or linear region based on their tracks.
 
         Args:
-            current_centroid (Tuple[float, float]): Current centroid values in the current frame.
+            current_centroid (Tuple[float, float]): Current centroid coordinates (x, y) in the current frame.
             track_id (int): Unique identifier for the tracked object.
             prev_position (Tuple[float, float]): Last frame position coordinates (x, y) of the track.
             cls (int): Class index for classwise count updates.
@@ -60,10 +60,10 @@
             >>> counter = ObjectCounter()
             >>> track_line = {1: [100, 200], 2: [110, 210], 3: [120, 220]}
             >>> box = [130, 230, 150, 250]
-            >>> track_id = 1
-            >>> prev_position = (120, 220)
-            >>> cls = 0
-            >>> counter.count_objects(current_centroid, track_id, prev_position, cls)
+            >>> track_id_num = 1
+            >>> previous_position = (120, 220)
+            >>> class_to_count = 0  # In COCO model, class 0 = person
+            >>> counter.count_objects((140, 240), track_id_num, previous_position, class_to_count)
         """
         if prev_position is None or track_id in self.counted_ids:
             return
@@ -101,10 +101,10 @@
                 and current_centroid[0] > prev_position[0]
                 or region_width >= region_height
                 and current_centroid[1] > prev_position[1]
-            ):  # Moving right
+            ):  # Moving right or downward
                 self.in_count += 1
                 self.classwise_counts[self.names[cls]]["IN"] += 1
-            else:  # Moving left
+            else:  # Moving left or upward
                 self.out_count += 1
                 self.classwise_counts[self.names[cls]]["OUT"] += 1
             self.counted_ids.append(track_id)
@@ -116,9 +116,6 @@
         Args:
             cls (int): Class index for classwise count updates.
 
-        This method ensures that the 'classwise_counts' dictionary contains an entry for the specified class,
-        initializing 'IN' and 'OUT' counts to zero if the class is not already present.
-
         Examples:
             >>> counter = ObjectCounter()
             >>> counter.store_classwise_counts(0)  # Initialize counts for class index 0
@@ -128,12 +125,12 @@
         if self.names[cls] not in self.classwise_counts:
             self.classwise_counts[self.names[cls]] = {"IN": 0, "OUT": 0}
 
-    def display_counts(self, im0):
+    def display_counts(self, plot_im):
         """
         Displays object counts on the input image or frame.
 
         Args:
-            im0 (numpy.ndarray): The input image or frame to display counts on.
+            plot_im (numpy.ndarray): The image or frame to display counts on.
 
         Examples:
             >>> counter = ObjectCounter()
@@ -146,11 +143,10 @@
             for key, value in self.classwise_counts.items()
             if value["IN"] != 0 or value["OUT"] != 0
         }
-
        if labels_dict:
-            self.annotator.display_analytics(im0, labels_dict, (104, 31, 17), (255, 255, 255), 10)
+            self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255), 10)
 
-    def count(self, im0):
+    def process(self, im0):
         """
         Processes input data (frames or object tracks) and updates object counts.
 
@@ -161,19 +157,21 @@
         im0 (numpy.ndarray): The input image or frame to be processed.
 
         Returns:
-            (numpy.ndarray): The processed image with annotations and count information.
+            (SolutionResults): Contains processed image `im0`, 'in_count' (int, count of objects entering the region),
+                'out_count' (int, count of objects exiting the region), 'classwise_count' (Dict, per-class object count),
+                and 'total_tracks' (int, total number of tracked objects).
 
         Examples:
             >>> counter = ObjectCounter()
             >>> frame = cv2.imread("path/to/image.jpg")
-            >>> processed_frame = counter.count(frame)
+            >>> results = counter.process(frame)
         """
         if not self.region_initialized:
             self.initialize_region()
             self.region_initialized = True
 
-        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
         self.extract_tracks(im0)  # Extract tracks
+        self.annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator
 
         self.annotator.draw_region(
             reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2
@@ -184,20 +182,24 @@
             # Draw bounding box and counting region
             self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
             self.store_tracking_history(track_id, box)  # Store track history
-            self.store_classwise_counts(cls)  # store classwise counts in dict
+            self.store_classwise_counts(cls)  # Store classwise counts in dict
 
-            # Draw tracks of objects
-            self.annotator.draw_centroid_and_tracks(
-                self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
-            )
             current_centroid = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)
-            # store previous position of track for object counting
+            # Store previous position of track for object counting
             prev_position = None
             if len(self.track_history[track_id]) > 1:
                 prev_position = self.track_history[track_id][-2]
             self.count_objects(current_centroid, track_id, prev_position, cls)  # Perform object counting
 
-        self.display_counts(im0)  # Display the counts on the frame
-        self.display_output(im0)  # display output with base class function
-
-        return im0  # return output image for more usage
+        plot_im = self.annotator.result()
+        self.display_counts(plot_im)  # Display the counts on the frame
+        self.display_output(plot_im)  # Display output with base class function
+
+        # Return SolutionResults
+        return SolutionResults(
+            plot_im=plot_im,
+            in_count=self.in_count,
+            out_count=self.out_count,
+            classwise_count=self.classwise_counts,
+            total_tracks=len(self.track_ids),
+        )
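Migration note: the rename of count() to process() and the switch from returning a bare numpy.ndarray to a SolutionResults object are breaking for 8.3.87 callers. A sketch of the before/after call sites, with region points and model name as illustrative values only:

    import cv2
    from ultralytics.solutions import ObjectCounter

    counter = ObjectCounter(region=[(20, 400), (1080, 400)], model="yolo11n.pt", show=False)  # illustrative values
    frame = cv2.imread("frame.jpg")  # placeholder path

    # 8.3.87: processed_frame = counter.count(frame)  # returned the annotated numpy.ndarray
    # 8.3.89:
    results = counter.process(frame)
    annotated = results.plot_im  # annotated image, previously the raw return value
    print(results.in_count, results.out_count, results.classwise_count, results.total_tracks)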
ultralytics/solutions/object_cropper.py (new file)
@@ -0,0 +1,84 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import os
+from pathlib import Path
+
+from ultralytics.solutions.solutions import BaseSolution, SolutionResults
+from ultralytics.utils.plotting import save_one_box
+
+
+class ObjectCropper(BaseSolution):
+    """
+    A class to manage the cropping of detected objects in a real-time video stream or images.
+
+    This class extends the BaseSolution class and provides functionality for cropping objects based on detected bounding
+    boxes. The cropped images are saved to a specified directory for further analysis or usage.
+
+    Attributes:
+        crop_dir (str): Directory where cropped object images are stored.
+        crop_idx (int): Counter for the total number of cropped objects.
+        iou (float): IoU (Intersection over Union) threshold for non-maximum suppression.
+        conf (float): Confidence threshold for filtering detections.
+
+    Methods:
+        process: Crops detected objects from the input image and saves them to the output directory.
+
+    Examples:
+        >>> cropper = ObjectCropper()
+        >>> frame = cv2.imread("frame.jpg")
+        >>> processed_results = cropper.process(frame)
+        >>> print(f"Total cropped objects: {cropper.crop_idx}")
+    """
+
+    def __init__(self, **kwargs):
+        """
+        Initializes the ObjectCropper class for cropping objects from detected bounding boxes.
+
+        Args:
+            **kwargs (Any): Keyword arguments passed to the parent class and used for configuration.
+                crop_dir (str): Path to the directory for saving cropped object images.
+        """
+        super().__init__(**kwargs)
+
+        self.crop_dir = kwargs.get("crop_dir", "cropped-detections")  # Directory for storing cropped detections
+        if not os.path.exists(self.crop_dir):
+            os.mkdir(self.crop_dir)  # Create directory if it does not exist
+        if self.CFG["show"]:
+            self.LOGGER.info(
+                f"⚠️ show=True disabled for crop solution, results will be saved in the directory named: {self.crop_dir}"
+            )
+        self.crop_idx = 0  # Initialize counter for total cropped objects
+        self.iou = self.CFG["iou"]
+        self.conf = self.CFG["conf"] if self.CFG["conf"] is not None else 0.25
+
+    def process(self, im0):
+        """
+        Crops detected objects from the input image and saves them as separate images.
+
+        Args:
+            im0 (numpy.ndarray): The input image containing detected objects.
+
+        Returns:
+            (SolutionResults): A SolutionResults object containing the total number of cropped objects and processed image.
+
+        Examples:
+            >>> cropper = ObjectCropper()
+            >>> frame = cv2.imread("image.jpg")
+            >>> results = cropper.process(frame)
+            >>> print(f"Total cropped objects: {results.total_crop_objects}")
+        """
+        results = self.model.predict(
+            im0, classes=self.classes, conf=self.conf, iou=self.iou, device=self.CFG["device"]
+        )[0]
+
+        for box in results.boxes:
+            self.crop_idx += 1
+            save_one_box(
+                box.xyxy,
+                im0,
+                file=Path(self.crop_dir) / f"crop_{self.crop_idx}.jpg",
+                BGR=True,
+            )
+
+        # Return SolutionResults
+        return SolutionResults(plot_im=im0, total_crop_objects=self.crop_idx)
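Usage note: unlike the other solutions in this release, process() here runs self.model.predict() directly (no tracking) and writes each crop to disk via save_one_box. A sketch, again assuming the re-export and placeholder paths:

    import cv2
    from ultralytics.solutions import ObjectCropper  # assumed re-export

    cropper = ObjectCropper(model="yolo11n.pt", crop_dir="cropped-detections")  # default crop directory
    frame = cv2.imread("frame.jpg")  # placeholder path
    results = cropper.process(frame)
    print(f"Total cropped objects: {results.total_crop_objects}")  # crops saved as crop_<n>.jpg in crop_dir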
ultralytics/solutions/parking_management.py
@@ -5,10 +5,9 @@ import json
 import cv2
 import numpy as np
 
-from ultralytics.solutions.solutions import BaseSolution
+from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
 from ultralytics.utils import LOGGER
 from ultralytics.utils.checks import check_imshow
-from ultralytics.utils.plotting import Annotator
 
 
 class ParkingPtsSelection:
@@ -189,7 +188,7 @@ class ParkingManagement(BaseSolution):
         dc (Tuple[int, int, int]): RGB color tuple for centroid visualization of detected objects.
 
     Methods:
-        process_data: Processes model data for parking lot management and visualization.
+        process: Processes the input image for parking lot management and visualization.
 
     Examples:
         >>> from ultralytics.solutions import ParkingManagement
@@ -216,9 +215,9 @@
         self.occ = (0, 255, 0)  # occupied region color
         self.dc = (255, 0, 189)  # centroid color for each box
 
-    def process_data(self, im0):
+    def process(self, im0):
         """
-        Processes the model data for parking lot management.
+        Processes the input image for parking lot management and visualization.
 
         This function analyzes the input image, extracts tracks, and determines the occupancy status of parking
         regions defined in the JSON file. It annotates the image with occupied and available parking spots,
@@ -227,14 +226,18 @@
         Args:
             im0 (np.ndarray): The input inference image.
 
+        Returns:
+            (SolutionResults): Contains processed image `plot_im`, 'filled_slots' (number of occupied parking slots),
+                'available_slots' (number of available parking slots), and 'total_tracks' (total number of tracked objects).
+
         Examples:
             >>> parking_manager = ParkingManagement(json_file="parking_regions.json")
             >>> image = cv2.imread("parking_lot.jpg")
-            >>> parking_manager.process_data(image)
+            >>> results = parking_manager.process(image)
         """
         self.extract_tracks(im0)  # extract tracks from im0
         es, fs = len(self.json), 0  # empty slots, filled slots
-        annotator = Annotator(im0, self.line_width)  # init annotator
+        annotator = SolutionAnnotator(im0, self.line_width)  # init annotator
 
         for region in self.json:
             # Convert points to a NumPy array with the correct dtype and reshape properly
@@ -257,5 +260,14 @@
         self.pr_info["Occupancy"], self.pr_info["Available"] = fs, es
 
         annotator.display_analytics(im0, self.pr_info, (104, 31, 17), (255, 255, 255), 10)
-        self.display_output(im0)  # display output with base class function
-        return im0  # return output image for more usage
+
+        plot_im = annotator.result()
+        self.display_output(plot_im)  # display output with base class function
+
+        # Return SolutionResults
+        return SolutionResults(
+            plot_im=plot_im,
+            filled_slots=self.pr_info["Occupancy"],
+            available_slots=self.pr_info["Available"],
+            total_tracks=len(self.track_ids),
+        )
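Migration note: process_data() is now process(), and the return value changes from the annotated numpy.ndarray to a SolutionResults carrying the occupancy figures. A hedged before/after sketch with illustrative file paths:

    import cv2
    from ultralytics.solutions import ParkingManagement

    manager = ParkingManagement(model="yolo11n.pt", json_file="parking_regions.json")  # illustrative paths
    image = cv2.imread("parking_lot.jpg")

    # 8.3.87: annotated = manager.process_data(image)  # returned numpy.ndarray
    # 8.3.89:
    results = manager.process(image)
    print(f"Occupied: {results.filled_slots}, Available: {results.available_slots}")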
ultralytics/solutions/queue_management.py
@@ -1,7 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-from ultralytics.solutions.solutions import BaseSolution
-from ultralytics.utils.plotting import Annotator, colors
+from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+from ultralytics.utils.plotting import colors
 
 
 class QueueManager(BaseSolution):
@@ -15,13 +15,12 @@ class QueueManager(BaseSolution):
         counts (int): The current count of objects in the queue.
         rect_color (Tuple[int, int, int]): RGB color tuple for drawing the queue region rectangle.
         region_length (int): The number of points defining the queue region.
-        annotator (Annotator): An instance of the Annotator class for drawing on frames.
         track_line (List[Tuple[int, int]]): List of track line coordinates.
         track_history (Dict[int, List[Tuple[int, int]]]): Dictionary storing tracking history for each object.
 
     Methods:
         initialize_region: Initializes the queue region.
-        process_queue: Processes a single frame for queue management.
+        process: Processes a single frame for queue management.
         extract_tracks: Extracts object tracks from the current frame.
         store_tracking_history: Stores the tracking history for an object.
         display_output: Displays the processed output.
@@ -33,18 +32,18 @@
         >>> success, im0 = cap.read()
         >>> if not success:
         >>>     break
-        >>> out = queue.process_queue(im0)
+        >>> results = queue_manager.process(im0)
     """
 
     def __init__(self, **kwargs):
         """Initializes the QueueManager with parameters for tracking and counting objects in a video stream."""
         super().__init__(**kwargs)
         self.initialize_region()
-        self.counts = 0  # Queue counts Information
-        self.rect_color = (255, 255, 255)  # Rectangle color
+        self.counts = 0  # Queue counts information
+        self.rect_color = (255, 255, 255)  # Rectangle color for visualization
         self.region_length = len(self.region)  # Store region length for further usage
 
-    def process_queue(self, im0):
+    def process(self, im0):
         """
         Processes the queue management for a single frame of video.
 
@@ -52,48 +51,28 @@
         im0 (numpy.ndarray): Input image for processing, typically a frame from a video stream.
 
         Returns:
-            (numpy.ndarray): Processed image with annotations, bounding boxes, and queue counts.
-
-        This method performs the following steps:
-        1. Resets the queue count for the current frame.
-        2. Initializes an Annotator object for drawing on the image.
-        3. Extracts tracks from the image.
-        4. Draws the counting region on the image.
-        5. For each detected object:
-           - Draws bounding boxes and labels.
-           - Stores tracking history.
-           - Draws centroids and tracks.
-           - Checks if the object is inside the counting region and updates the count.
-        6. Displays the queue count on the image.
-        7. Displays the processed output.
+            (SolutionResults): Contains processed image `im0`, 'queue_count' (int, number of objects in the queue) and
+                'total_tracks' (int, total number of tracked objects).
 
         Examples:
             >>> queue_manager = QueueManager()
            >>> frame = cv2.imread("frame.jpg")
-            >>> processed_frame = queue_manager.process_queue(frame)
+            >>> results = queue_manager.process(frame)
         """
         self.counts = 0  # Reset counts every frame
-        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
-        self.extract_tracks(im0)  # Extract tracks
-
-        self.annotator.draw_region(
-            reg_pts=self.region, color=self.rect_color, thickness=self.line_width * 2
-        )  # Draw region
+        self.extract_tracks(im0)  # Extract tracks from the current frame
+        annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator
+        annotator.draw_region(reg_pts=self.region, color=self.rect_color, thickness=self.line_width * 2)  # Draw region
 
         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
             # Draw bounding box and counting region
-            self.annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+            annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
             self.store_tracking_history(track_id, box)  # Store track history
 
-            # Draw tracks of objects
-            self.annotator.draw_centroid_and_tracks(
-                self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
-            )
-
             # Cache frequently accessed attributes
             track_history = self.track_history.get(track_id, [])
 
-            # store previous position of track and check if the object is inside the counting region
+            # Store previous position of track and check if the object is inside the counting region
             prev_position = None
             if len(track_history) > 1:
                 prev_position = track_history[-2]
@@ -101,12 +80,14 @@
                 self.counts += 1
 
         # Display queue counts
-        self.annotator.queue_counts_display(
+        annotator.queue_counts_display(
             f"Queue Counts : {str(self.counts)}",
             points=self.region,
             region_color=self.rect_color,
             txt_color=(104, 31, 17),
         )
-        self.display_output(im0)  # display output with base class function
+        plot_im = annotator.result()
+        self.display_output(plot_im)  # Display output with base class function
 
-        return im0  # return output image for more usage
+        # Return a SolutionResults object with processed data
+        return SolutionResults(plot_im=plot_im, queue_count=self.counts, total_tracks=len(self.track_ids))
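Migration note: process_queue() is now process(), again returning SolutionResults instead of the frame. A sketch with an illustrative four-point queue region and placeholder video path (the exact region format is an assumption from the BaseSolution region parameter):

    import cv2
    from ultralytics.solutions import QueueManager

    queue_manager = QueueManager(region=[(20, 400), (1080, 400), (1080, 360), (20, 360)], model="yolo11n.pt")
    cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder path
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
        # 8.3.87: out = queue_manager.process_queue(im0)  # returned numpy.ndarray
        results = queue_manager.process(im0)  # 8.3.89: returns SolutionResults
        print(f"Objects in queue: {results.queue_count}")
    cap.release()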