ultralytics 8.3.15__py3-none-any.whl → 8.3.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,237 +5,232 @@ import json
  import cv2
  import numpy as np

- from ultralytics.utils.checks import check_imshow, check_requirements
+ from ultralytics.solutions.solutions import LOGGER, BaseSolution, check_requirements
  from ultralytics.utils.plotting import Annotator


  class ParkingPtsSelection:
-     """Class for selecting and managing parking zone points on images using a Tkinter-based UI."""
+     """
+     A class for selecting and managing parking zone points on images using a Tkinter-based UI.
+
+     This class provides functionality to upload an image, select points to define parking zones, and save the
+     selected points to a JSON file. It uses Tkinter for the graphical user interface.
+
+     Attributes:
+         tk (module): The Tkinter module for GUI operations.
+         filedialog (module): Tkinter's filedialog module for file selection operations.
+         messagebox (module): Tkinter's messagebox module for displaying message boxes.
+         master (tk.Tk): The main Tkinter window.
+         canvas (tk.Canvas): The canvas widget for displaying the image and drawing bounding boxes.
+         image (PIL.Image.Image): The uploaded image.
+         canvas_image (ImageTk.PhotoImage): The image displayed on the canvas.
+         rg_data (List[List[Tuple[int, int]]]): List of bounding boxes, each defined by 4 points.
+         current_box (List[Tuple[int, int]]): Temporary storage for the points of the current bounding box.
+         imgw (int): Original width of the uploaded image.
+         imgh (int): Original height of the uploaded image.
+         canvas_max_width (int): Maximum width of the canvas.
+         canvas_max_height (int): Maximum height of the canvas.
+
+     Methods:
+         setup_ui: Sets up the Tkinter UI components.
+         initialize_properties: Initializes the necessary properties.
+         upload_image: Uploads an image, resizes it to fit the canvas, and displays it.
+         on_canvas_click: Handles mouse clicks to add points for bounding boxes.
+         draw_box: Draws a bounding box on the canvas.
+         remove_last_bounding_box: Removes the last bounding box and redraws the canvas.
+         redraw_canvas: Redraws the canvas with the image and all bounding boxes.
+         save_to_json: Saves the bounding boxes to a JSON file.
+
+     Examples:
+         >>> parking_selector = ParkingPtsSelection()
+         >>> # Use the GUI to upload an image, select parking zones, and save the data
+     """

      def __init__(self):
-         """Initializes the UI for selecting parking zone points in a tkinter window."""
+         """Initializes the ParkingPtsSelection class, setting up UI and properties for parking zone point selection."""
          check_requirements("tkinter")
+         import tkinter as tk
+         from tkinter import filedialog, messagebox

-         import tkinter as tk  # scope for multi-environment compatibility
+         self.tk, self.filedialog, self.messagebox = tk, filedialog, messagebox
+         self.setup_ui()
+         self.initialize_properties()
+         self.master.mainloop()

-         self.tk = tk
-         self.master = tk.Tk()
+     def setup_ui(self):
+         """Sets up the Tkinter UI components for the parking zone points selection interface."""
+         self.master = self.tk.Tk()
          self.master.title("Ultralytics Parking Zones Points Selector")
-
-         # Disable window resizing
          self.master.resizable(False, False)

-         # Setup canvas for image display
+         # Canvas for image display
          self.canvas = self.tk.Canvas(self.master, bg="white")
+         self.canvas.pack(side=self.tk.BOTTOM)

-         # Setup buttons
+         # Button frame with buttons
          button_frame = self.tk.Frame(self.master)
          button_frame.pack(side=self.tk.TOP)

-         self.tk.Button(button_frame, text="Upload Image", command=self.upload_image).grid(row=0, column=0)
-         self.tk.Button(button_frame, text="Remove Last BBox", command=self.remove_last_bounding_box).grid(
-             row=0, column=1
-         )
-         self.tk.Button(button_frame, text="Save", command=self.save_to_json).grid(row=0, column=2)
-
-         # Initialize properties
-         self.image_path = None
-         self.image = None
-         self.canvas_image = None
-         self.rg_data = []  # region coordinates
-         self.current_box = []
-         self.imgw = 0  # image width
-         self.imgh = 0  # image height
+         for text, cmd in [
+             ("Upload Image", self.upload_image),
+             ("Remove Last BBox", self.remove_last_bounding_box),
+             ("Save", self.save_to_json),
+         ]:
+             self.tk.Button(button_frame, text=text, command=cmd).pack(side=self.tk.LEFT)

-         # Constants
-         self.canvas_max_width = 1280
-         self.canvas_max_height = 720
-
-         self.master.mainloop()
+     def initialize_properties(self):
+         """Initialize properties for image, canvas, bounding boxes, and dimensions."""
+         self.image = self.canvas_image = None
+         self.rg_data, self.current_box = [], []
+         self.imgw = self.imgh = 0
+         self.canvas_max_width, self.canvas_max_height = 1280, 720

      def upload_image(self):
-         """Upload an image and resize it to fit canvas."""
-         from tkinter import filedialog
-
+         """Uploads and displays an image on the canvas, resizing it to fit within specified dimensions."""
          from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package

-         self.image_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")])
-         if not self.image_path:
+         self.image = Image.open(self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")]))
+         if not self.image:
              return

-         self.image = Image.open(self.image_path)
          self.imgw, self.imgh = self.image.size
-
-         # Calculate the aspect ratio and resize image
          aspect_ratio = self.imgw / self.imgh
-         if aspect_ratio > 1:
-             # Landscape orientation
-             canvas_width = min(self.canvas_max_width, self.imgw)
-             canvas_height = int(canvas_width / aspect_ratio)
-         else:
-             # Portrait orientation
-             canvas_height = min(self.canvas_max_height, self.imgh)
-             canvas_width = int(canvas_height * aspect_ratio)
-
-         # Check if canvas is already initialized
-         if self.canvas:
-             self.canvas.destroy()  # Destroy previous canvas
-
-         self.canvas = self.tk.Canvas(self.master, bg="white", width=canvas_width, height=canvas_height)
-         resized_image = self.image.resize((canvas_width, canvas_height), Image.LANCZOS)
-         self.canvas_image = ImageTk.PhotoImage(resized_image)
-         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+         canvas_width = (
+             min(self.canvas_max_width, self.imgw) if aspect_ratio > 1 else int(self.canvas_max_height * aspect_ratio)
+         )
+         canvas_height = (
+             min(self.canvas_max_height, self.imgh) if aspect_ratio <= 1 else int(canvas_width / aspect_ratio)
+         )

-         self.canvas.pack(side=self.tk.BOTTOM)
+         self.canvas.config(width=canvas_width, height=canvas_height)
+         self.canvas_image = ImageTk.PhotoImage(self.image.resize((canvas_width, canvas_height), Image.LANCZOS))
+         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
          self.canvas.bind("<Button-1>", self.on_canvas_click)

-         # Reset bounding boxes and current box
-         self.rg_data = []
-         self.current_box = []
+         self.rg_data.clear(), self.current_box.clear()

      def on_canvas_click(self, event):
-         """Handle mouse clicks on canvas to create points for bounding boxes."""
+         """Handles mouse clicks to add points for bounding boxes on the canvas."""
          self.current_box.append((event.x, event.y))
          self.canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
-
          if len(self.current_box) == 4:
-             self.rg_data.append(self.current_box)
-             [
-                 self.canvas.create_line(self.current_box[i], self.current_box[(i + 1) % 4], fill="blue", width=2)
-                 for i in range(4)
-             ]
-             self.current_box = []
+             self.rg_data.append(self.current_box.copy())
+             self.draw_box(self.current_box)
+             self.current_box.clear()

-     def remove_last_bounding_box(self):
-         """Remove the last drawn bounding box from canvas."""
-         from tkinter import messagebox  # scope for multi-environment compatibility
+     def draw_box(self, box):
+         """Draws a bounding box on the canvas using the provided coordinates."""
+         for i in range(4):
+             self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2)

-         if self.rg_data:
-             self.rg_data.pop()  # Remove the last bounding box
-             self.canvas.delete("all")  # Clear the canvas
-             self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)  # Redraw the image
+     def remove_last_bounding_box(self):
+         """Removes the last bounding box from the list and redraws the canvas."""
+         if not self.rg_data:
+             self.messagebox.showwarning("Warning", "No bounding boxes to remove.")
+             return
+         self.rg_data.pop()
+         self.redraw_canvas()

-             # Redraw all bounding boxes
-             for box in self.rg_data:
-                 [self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2) for i in range(4)]
-             messagebox.showinfo("Success", "Last bounding box removed.")
-         else:
-             messagebox.showwarning("Warning", "No bounding boxes to remove.")
+     def redraw_canvas(self):
+         """Redraws the canvas with the image and all bounding boxes."""
+         self.canvas.delete("all")
+         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+         for box in self.rg_data:
+             self.draw_box(box)

      def save_to_json(self):
-         """Saves rescaled bounding boxes to 'bounding_boxes.json' based on image-to-canvas size ratio."""
-         from tkinter import messagebox  # scope for multi-environment compatibility
-
-         rg_data = []  # regions data
-         for box in self.rg_data:
-             rs_box = [
-                 (
-                     int(x * self.imgw / self.canvas.winfo_width()),  # width scaling
-                     int(y * self.imgh / self.canvas.winfo_height()),  # height scaling
-                 )
-                 for x, y in box
-             ]
-             rg_data.append({"points": rs_box})
+         """Saves the selected parking zone points to a JSON file with scaled coordinates."""
+         scale_w, scale_h = self.imgw / self.canvas.winfo_width(), self.imgh / self.canvas.winfo_height()
+         data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in self.rg_data]
          with open("bounding_boxes.json", "w") as f:
-             json.dump(rg_data, f, indent=4)
+             json.dump(data, f, indent=4)
+         self.messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")

-         messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")

+ class ParkingManagement(BaseSolution):
+     """
+     Manages parking occupancy and availability using YOLO model for real-time monitoring and visualization.

- class ParkingManagement:
-     """Manages parking occupancy and availability using YOLO model for real-time monitoring and visualization."""
+     This class extends BaseSolution to provide functionality for parking lot management, including detection of
+     occupied spaces, visualization of parking regions, and display of occupancy statistics.

-     def __init__(
-         self,
-         model,  # Ultralytics YOLO model file path
-         json_file,  # Parking management annotation file created from Parking Annotator
-         occupied_region_color=(0, 0, 255),  # occupied region color
-         available_region_color=(0, 255, 0),  # available region color
-     ):
-         """
-         Initializes the parking management system with a YOLO model and visualization settings.
+     Attributes:
+         json_file (str): Path to the JSON file containing parking region details.
+         json (List[Dict]): Loaded JSON data containing parking region information.
+         pr_info (Dict[str, int]): Dictionary storing parking information (Occupancy and Available spaces).
+         arc (Tuple[int, int, int]): RGB color tuple for available region visualization.
+         occ (Tuple[int, int, int]): RGB color tuple for occupied region visualization.
+         dc (Tuple[int, int, int]): RGB color tuple for centroid visualization of detected objects.

-         Args:
-             model (str): Path to the YOLO model.
-             json_file (str): file that have all parking slot points data
-             occupied_region_color (tuple): RGB color tuple for occupied regions.
-             available_region_color (tuple): RGB color tuple for available regions.
-         """
-         # Model initialization
-         from ultralytics import YOLO
+     Methods:
+         process_data: Processes model data for parking lot management and visualization.

-         self.model = YOLO(model)
+     Examples:
+         >>> from ultralytics.solutions import ParkingManagement
+         >>> parking_manager = ParkingManagement(model="yolov8n.pt", json_file="parking_regions.json")
+         >>> results = parking_manager(source="parking_lot_video.mp4")
+         >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
+         >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
+     """

-         # Load JSON data
-         with open(json_file) as f:
-             self.json_data = json.load(f)
+     def __init__(self, **kwargs):
+         """Initializes the parking management system with a YOLO model and visualization settings."""
+         super().__init__(**kwargs)

-         self.pr_info = {"Occupancy": 0, "Available": 0}  # dictionary for parking information
+         self.json_file = self.CFG["json_file"]  # Load JSON data
+         if self.json_file is None:
+             LOGGER.warning("❌ json_file argument missing. Parking region details required.")
+             raise ValueError("❌ Json file path can not be empty")

-         self.occ = occupied_region_color
-         self.arc = available_region_color
+         with open(self.json_file) as f:
+             self.json = json.load(f)

-         self.env_check = check_imshow(warn=True)  # check if environment supports imshow
+         self.pr_info = {"Occupancy": 0, "Available": 0}  # dictionary for parking information

-     def process_data(self, im0):
-         """
-         Process the model data for parking lot management.
+         self.arc = (0, 0, 255)  # available region color
+         self.occ = (0, 255, 0)  # occupied region color
+         self.dc = (255, 0, 189)  # centroid color for each box

-         Args:
-             im0 (ndarray): inference image
+     def process_data(self, im0):
          """
-         results = self.model.track(im0, persist=True, show=False)  # object tracking
+         Processes the model data for parking lot management.

-         es, fs = len(self.json_data), 0  # empty slots, filled slots
-         annotator = Annotator(im0)  # init annotator
+         This function analyzes the input image, extracts tracks, and determines the occupancy status of parking
+         regions defined in the JSON file. It annotates the image with occupied and available parking spots,
+         and updates the parking information.

-         # extract tracks data
-         if results[0].boxes.id is None:
-             self.display_frames(im0)
-             return im0
+         Args:
+             im0 (np.ndarray): The input inference image.

-         boxes = results[0].boxes.xyxy.cpu().tolist()
-         clss = results[0].boxes.cls.cpu().tolist()
+         Examples:
+             >>> parking_manager = ParkingManagement(json_file="parking_regions.json")
+             >>> image = cv2.imread("parking_lot.jpg")
+             >>> parking_manager.process_data(image)
+         """
+         self.extract_tracks(im0)  # extract tracks from im0
+         es, fs = len(self.json), 0  # empty slots, filled slots
+         annotator = Annotator(im0, self.line_width)  # init annotator

-         for region in self.json_data:
+         for region in self.json:
              # Convert points to a NumPy array with the correct dtype and reshape properly
              pts_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
              rg_occupied = False  # occupied region initialization
-             for box, cls in zip(boxes, clss):
-                 xc = int((box[0] + box[2]) / 2)
-                 yc = int((box[1] + box[3]) / 2)
-                 annotator.display_objects_labels(
-                     im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
-                 )
+             for box, cls in zip(self.boxes, self.clss):
+                 xc, yc = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
                  dist = cv2.pointPolygonTest(pts_array, (xc, yc), False)
                  if dist >= 0:
+                     # cv2.circle(im0, (xc, yc), radius=self.line_width * 4, color=self.dc, thickness=-1)
+                     annotator.display_objects_labels(
+                         im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
+                     )
                      rg_occupied = True
                      break
-             if rg_occupied:
-                 fs += 1
-                 es -= 1
-
+             fs, es = (fs + 1, es - 1) if rg_occupied else (fs, es)
              # Plotting regions
-             color = self.occ if rg_occupied else self.arc
-             cv2.polylines(im0, [pts_array], isClosed=True, color=color, thickness=2)
+             cv2.polylines(im0, [pts_array], isClosed=True, color=self.occ if rg_occupied else self.arc, thickness=2)

-         self.pr_info["Occupancy"] = fs
-         self.pr_info["Available"] = es
+         self.pr_info["Occupancy"], self.pr_info["Available"] = fs, es

          annotator.display_analytics(im0, self.pr_info, (104, 31, 17), (255, 255, 255), 10)
-
-         self.display_frames(im0)
-         return im0
-
-     def display_frames(self, im0):
-         """
-         Display frame.
-
-         Args:
-             im0 (ndarray): inference image
-         """
-         if self.env_check:
-             cv2.imshow("Ultralytics Parking Manager", im0)
-             # Break Window
-             if cv2.waitKey(1) & 0xFF == ord("q"):
-                 return
+         self.display_output(im0)  # display output with base class function
+         return im0  # return output image for more usage
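
For orientation only (not part of the package diff): a minimal sketch of how the reworked ParkingManagement class above might be driven once parking regions have been annotated with ParkingPtsSelection. The weights file, JSON path, and video path are placeholders.

# Illustrative driver for the new solutions-style ParkingManagement API shown above.
# "parking_regions.json" (produced by ParkingPtsSelection) and "parking_lot.mp4" are placeholder paths.
import cv2

from ultralytics.solutions import ParkingManagement

manager = ParkingManagement(model="yolov8n.pt", json_file="parking_regions.json")
cap = cv2.VideoCapture("parking_lot.mp4")
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = manager.process_data(frame)  # annotates occupied/available regions on the frame
    print(manager.pr_info)  # e.g. {"Occupancy": 3, "Available": 9}
cap.release()
cv2.destroyAllWindows()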
@@ -1,16 +1,40 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- from shapely.geometry import Point
-
- from ultralytics.solutions.solutions import BaseSolution  # Import a parent class
+ from ultralytics.solutions.solutions import BaseSolution
  from ultralytics.utils.plotting import Annotator, colors


  class QueueManager(BaseSolution):
-     """A class to manage the queue in a real-time video stream based on object tracks."""
+     """
+     Manages queue counting in real-time video streams based on object tracks.
+
+     This class extends BaseSolution to provide functionality for tracking and counting objects within a specified
+     region in video frames.
+
+     Attributes:
+         counts (int): The current count of objects in the queue.
+         rect_color (Tuple[int, int, int]): RGB color tuple for drawing the queue region rectangle.
+         region_length (int): The number of points defining the queue region.
+         annotator (Annotator): An instance of the Annotator class for drawing on frames.
+         track_line (List[Tuple[int, int]]): List of track line coordinates.
+         track_history (Dict[int, List[Tuple[int, int]]]): Dictionary storing tracking history for each object.
+
+     Methods:
+         initialize_region: Initializes the queue region.
+         process_queue: Processes a single frame for queue management.
+         extract_tracks: Extracts object tracks from the current frame.
+         store_tracking_history: Stores the tracking history for an object.
+         display_output: Displays the processed output.
+
+     Examples:
+         >>> queue_manager = QueueManager(source="video.mp4", region=[100, 100, 200, 200, 300, 300])
+         >>> for frame in video_stream:
+         ...     processed_frame = queue_manager.process_queue(frame)
+         ...     cv2.imshow("Queue Management", processed_frame)
+     """

      def __init__(self, **kwargs):
-         """Initializes the QueueManager with specified parameters for tracking and counting objects."""
+         """Initializes the QueueManager with parameters for tracking and counting objects in a video stream."""
          super().__init__(**kwargs)
          self.initialize_region()
          self.counts = 0  # Queue counts Information
@@ -19,12 +43,31 @@ class QueueManager(BaseSolution):

      def process_queue(self, im0):
          """
-         Main function to start the queue management process.
+         Processes the queue management for a single frame of video.

          Args:
-             im0 (ndarray): The input image that will be used for processing
-         Returns
-             im0 (ndarray): The processed image for more usage
+             im0 (numpy.ndarray): Input image for processing, typically a frame from a video stream.
+
+         Returns:
+             (numpy.ndarray): Processed image with annotations, bounding boxes, and queue counts.
+
+         This method performs the following steps:
+         1. Resets the queue count for the current frame.
+         2. Initializes an Annotator object for drawing on the image.
+         3. Extracts tracks from the image.
+         4. Draws the counting region on the image.
+         5. For each detected object:
+            - Draws bounding boxes and labels.
+            - Stores tracking history.
+            - Draws centroids and tracks.
+            - Checks if the object is inside the counting region and updates the count.
+         6. Displays the queue count on the image.
+         7. Displays the processed output.
+
+         Examples:
+             >>> queue_manager = QueueManager()
+             >>> frame = cv2.imread("frame.jpg")
+             >>> processed_frame = queue_manager.process_queue(frame)
          """
          self.counts = 0  # Reset counts every frame
          self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
@@ -48,8 +91,10 @@ class QueueManager(BaseSolution):
              track_history = self.track_history.get(track_id, [])

              # store previous position of track and check if the object is inside the counting region
-             prev_position = track_history[-2] if len(track_history) > 1 else None
-             if self.region_length >= 3 and prev_position and self.r_s.contains(Point(self.track_line[-1])):
+             prev_position = None
+             if len(track_history) > 1:
+                 prev_position = track_history[-2]
+             if self.region_length >= 3 and prev_position and self.r_s.contains(self.Point(self.track_line[-1])):
                  self.counts += 1

          # Display queue counts
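
For orientation only (not part of the package diff): a minimal per-frame loop for the updated QueueManager above. The video path and the region points are placeholder values, not values taken from the package.

# Illustrative loop for the QueueManager changes shown above; paths and region points are placeholders.
import cv2

from ultralytics.solutions import QueueManager

queue = QueueManager(model="yolov8n.pt", region=[(20, 400), (1080, 400), (1080, 360), (20, 360)])
cap = cv2.VideoCapture("queue.mp4")
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = queue.process_queue(frame)  # draws the region, tracks objects, updates queue.counts
    print(queue.counts)  # objects counted inside the region for this frame
cap.release()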
@@ -9,21 +9,51 @@ from ultralytics import YOLO
  from ultralytics.utils import LOGGER, yaml_load
  from ultralytics.utils.checks import check_imshow, check_requirements

- check_requirements("shapely>=2.0.0")
- from shapely.geometry import LineString, Polygon
-
  DEFAULT_SOL_CFG_PATH = Path(__file__).resolve().parents[1] / "cfg/solutions/default.yaml"


  class BaseSolution:
-     """A class to manage all the Ultralytics Solutions: https://docs.ultralytics.com/solutions/."""
+     """
+     A base class for managing Ultralytics Solutions.
+
+     This class provides core functionality for various Ultralytics Solutions, including model loading, object tracking,
+     and region initialization.
+
+     Attributes:
+         LineString (shapely.geometry.LineString): Class for creating line string geometries.
+         Polygon (shapely.geometry.Polygon): Class for creating polygon geometries.
+         Point (shapely.geometry.Point): Class for creating point geometries.
+         CFG (Dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
+         region (List[Tuple[int, int]]): List of coordinate tuples defining a region of interest.
+         line_width (int): Width of lines used in visualizations.
+         model (ultralytics.YOLO): Loaded YOLO model instance.
+         names (Dict[int, str]): Dictionary mapping class indices to class names.
+         env_check (bool): Flag indicating whether the environment supports image display.
+         track_history (collections.defaultdict): Dictionary to store tracking history for each object.
+
+     Methods:
+         extract_tracks: Apply object tracking and extract tracks from an input image.
+         store_tracking_history: Store object tracking history for a given track ID and bounding box.
+         initialize_region: Initialize the counting region and line segment based on configuration.
+         display_output: Display the results of processing, including showing frames or saving results.
+
+     Examples:
+         >>> solution = BaseSolution(model="yolov8n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+         >>> solution.initialize_region()
+         >>> image = cv2.imread("image.jpg")
+         >>> solution.extract_tracks(image)
+         >>> solution.display_output(image)
+     """

      def __init__(self, **kwargs):
-         """
-         Base initializer for all solutions.
+         """Initializes the BaseSolution class with configuration settings and YOLO model for Ultralytics solutions."""
+         check_requirements("shapely>=2.0.0")
+         from shapely.geometry import LineString, Point, Polygon
+
+         self.LineString = LineString
+         self.Polygon = Polygon
+         self.Point = Point

-         Child classes should call this with necessary parameters.
-         """
          # Load config and update with args
          self.CFG = yaml_load(DEFAULT_SOL_CFG_PATH)
          self.CFG.update(kwargs)
@@ -42,10 +72,15 @@ class BaseSolution:

      def extract_tracks(self, im0):
          """
-         Apply object tracking and extract tracks.
+         Applies object tracking and extracts tracks from an input image or frame.

          Args:
-             im0 (ndarray): The input image or frame
+             im0 (ndarray): The input image or frame.
+
+         Examples:
+             >>> solution = BaseSolution()
+             >>> frame = cv2.imread("path/to/image.jpg")
+             >>> solution.extract_tracks(frame)
          """
          self.tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"])

@@ -62,11 +97,18 @@

      def store_tracking_history(self, track_id, box):
          """
-         Store object tracking history.
+         Stores the tracking history of an object.
+
+         This method updates the tracking history for a given object by appending the center point of its
+         bounding box to the track line. It maintains a maximum of 30 points in the tracking history.

          Args:
-             track_id (int): The track ID of the object
-             box (list): Bounding box coordinates of the object
+             track_id (int): The unique identifier for the tracked object.
+             box (List[float]): The bounding box coordinates of the object in the format [x1, y1, x2, y2].
+
+         Examples:
+             >>> solution = BaseSolution()
+             >>> solution.store_tracking_history(1, [100, 200, 300, 400])
          """
          # Store tracking history
          self.track_line = self.track_history[track_id]
@@ -75,19 +117,32 @@
              self.track_line.pop(0)

      def initialize_region(self):
-         """Initialize the counting region and line segment based on config."""
-         self.region = [(20, 400), (1080, 404), (1080, 360), (20, 360)] if self.region is None else self.region
-         self.r_s = Polygon(self.region) if len(self.region) >= 3 else LineString(self.region)  # region segment
-         self.l_s = LineString(
-             [(self.region[0][0], self.region[0][1]), (self.region[1][0], self.region[1][1])]
-         )  # line segment
+         """Initialize the counting region and line segment based on configuration settings."""
+         if self.region is None:
+             self.region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+         self.r_s = (
+             self.Polygon(self.region) if len(self.region) >= 3 else self.LineString(self.region)
+         )  # region or line

      def display_output(self, im0):
          """
          Display the results of the processing, which could involve showing frames, printing counts, or saving results.

+         This method is responsible for visualizing the output of the object detection and tracking process. It displays
+         the processed frame with annotations, and allows for user interaction to close the display.
+
          Args:
-             im0 (ndarray): The input image or frame
+             im0 (numpy.ndarray): The input image or frame that has been processed and annotated.
+
+         Examples:
+             >>> solution = BaseSolution()
+             >>> frame = cv2.imread("path/to/image.jpg")
+             >>> solution.display_output(frame)
+
+         Notes:
+             - This method will only display output if the 'show' configuration is set to True and the environment
+               supports image display.
+             - The display can be closed by pressing the 'q' key.
          """
          if self.CFG.get("show") and self.env_check:
              cv2.imshow("Ultralytics Solutions", im0)
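
For orientation only (not part of the package diff): a hypothetical subclass sketched to illustrate the BaseSolution contract after this change. The shapely geometry classes now live on the instance (self.Point, self.LineString, self.Polygon), and helpers such as extract_tracks, initialize_region, and display_output come from the base class; RegionCounter and its count method are invented names for illustration.

# Hypothetical example, assuming the BaseSolution behavior shown in the diff above.
import cv2
import numpy as np

from ultralytics.solutions.solutions import BaseSolution


class RegionCounter(BaseSolution):
    """Counts tracked objects whose box centers fall inside the configured region."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.initialize_region()  # builds self.r_s from the region config or the default rectangle

    def count(self, im0):
        self.extract_tracks(im0)  # populates self.boxes / self.clss from the tracker
        inside = 0
        for box in self.boxes:
            center = self.Point(((box[0] + box[2]) / 2, (box[1] + box[3]) / 2))
            inside += int(self.r_s.contains(center))  # shapely geometry stored by BaseSolution.__init__
        pts = np.array(self.region, dtype=np.int32).reshape((-1, 1, 2))
        cv2.polylines(im0, [pts], isClosed=True, color=(255, 0, 189), thickness=2)
        self.display_output(im0)  # honors the "show" config and environment check
        return inside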