ultralytics 8.1.28__py3-none-any.whl → 8.3.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (247)
  1. tests/__init__.py +22 -0
  2. tests/conftest.py +83 -0
  3. tests/test_cli.py +122 -0
  4. tests/test_cuda.py +155 -0
  5. tests/test_engine.py +131 -0
  6. tests/test_exports.py +216 -0
  7. tests/test_integrations.py +150 -0
  8. tests/test_python.py +615 -0
  9. tests/test_solutions.py +94 -0
  10. ultralytics/__init__.py +11 -8
  11. ultralytics/cfg/__init__.py +569 -131
  12. ultralytics/cfg/datasets/Argoverse.yaml +2 -1
  13. ultralytics/cfg/datasets/DOTAv1.5.yaml +3 -2
  14. ultralytics/cfg/datasets/DOTAv1.yaml +3 -2
  15. ultralytics/cfg/datasets/GlobalWheat2020.yaml +3 -2
  16. ultralytics/cfg/datasets/ImageNet.yaml +2 -1
  17. ultralytics/cfg/datasets/Objects365.yaml +5 -4
  18. ultralytics/cfg/datasets/SKU-110K.yaml +2 -1
  19. ultralytics/cfg/datasets/VOC.yaml +3 -2
  20. ultralytics/cfg/datasets/VisDrone.yaml +6 -5
  21. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  22. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  23. ultralytics/cfg/datasets/carparts-seg.yaml +3 -2
  24. ultralytics/cfg/datasets/coco-pose.yaml +7 -6
  25. ultralytics/cfg/datasets/coco.yaml +3 -2
  26. ultralytics/cfg/datasets/coco128-seg.yaml +4 -3
  27. ultralytics/cfg/datasets/coco128.yaml +4 -3
  28. ultralytics/cfg/datasets/coco8-pose.yaml +3 -2
  29. ultralytics/cfg/datasets/coco8-seg.yaml +3 -2
  30. ultralytics/cfg/datasets/coco8.yaml +3 -2
  31. ultralytics/cfg/datasets/crack-seg.yaml +3 -2
  32. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  33. ultralytics/cfg/datasets/dota8.yaml +3 -2
  34. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  35. ultralytics/cfg/datasets/lvis.yaml +1236 -0
  36. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  37. ultralytics/cfg/datasets/open-images-v7.yaml +2 -1
  38. ultralytics/cfg/datasets/package-seg.yaml +5 -4
  39. ultralytics/cfg/datasets/signature.yaml +21 -0
  40. ultralytics/cfg/datasets/tiger-pose.yaml +3 -2
  41. ultralytics/cfg/datasets/xView.yaml +2 -1
  42. ultralytics/cfg/default.yaml +14 -11
  43. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +24 -0
  44. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  45. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  46. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  47. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  48. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  49. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +5 -2
  50. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +5 -2
  51. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +5 -2
  52. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +5 -2
  53. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  54. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  55. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  56. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  57. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  58. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  59. ultralytics/cfg/models/v3/yolov3-spp.yaml +5 -2
  60. ultralytics/cfg/models/v3/yolov3-tiny.yaml +5 -2
  61. ultralytics/cfg/models/v3/yolov3.yaml +5 -2
  62. ultralytics/cfg/models/v5/yolov5-p6.yaml +5 -2
  63. ultralytics/cfg/models/v5/yolov5.yaml +5 -2
  64. ultralytics/cfg/models/v6/yolov6.yaml +5 -2
  65. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +5 -2
  66. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +5 -2
  67. ultralytics/cfg/models/v8/yolov8-cls.yaml +5 -2
  68. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +6 -2
  69. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +6 -2
  70. ultralytics/cfg/models/v8/yolov8-ghost.yaml +5 -2
  71. ultralytics/cfg/models/v8/yolov8-obb.yaml +5 -2
  72. ultralytics/cfg/models/v8/yolov8-p2.yaml +5 -2
  73. ultralytics/cfg/models/v8/yolov8-p6.yaml +10 -7
  74. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +5 -2
  75. ultralytics/cfg/models/v8/yolov8-pose.yaml +5 -2
  76. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +5 -2
  77. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +5 -2
  78. ultralytics/cfg/models/v8/yolov8-seg.yaml +5 -2
  79. ultralytics/cfg/models/v8/yolov8-world.yaml +5 -2
  80. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +5 -2
  81. ultralytics/cfg/models/v8/yolov8.yaml +5 -2
  82. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  83. ultralytics/cfg/models/v9/yolov9c.yaml +30 -25
  84. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  85. ultralytics/cfg/models/v9/yolov9e.yaml +46 -42
  86. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  87. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  88. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  89. ultralytics/cfg/solutions/default.yaml +24 -0
  90. ultralytics/cfg/trackers/botsort.yaml +8 -5
  91. ultralytics/cfg/trackers/bytetrack.yaml +8 -5
  92. ultralytics/data/__init__.py +14 -3
  93. ultralytics/data/annotator.py +37 -15
  94. ultralytics/data/augment.py +1783 -289
  95. ultralytics/data/base.py +62 -27
  96. ultralytics/data/build.py +36 -8
  97. ultralytics/data/converter.py +196 -36
  98. ultralytics/data/dataset.py +233 -94
  99. ultralytics/data/loaders.py +199 -96
  100. ultralytics/data/split_dota.py +39 -29
  101. ultralytics/data/utils.py +110 -40
  102. ultralytics/engine/__init__.py +1 -1
  103. ultralytics/engine/exporter.py +569 -242
  104. ultralytics/engine/model.py +604 -252
  105. ultralytics/engine/predictor.py +22 -11
  106. ultralytics/engine/results.py +1228 -218
  107. ultralytics/engine/trainer.py +190 -129
  108. ultralytics/engine/tuner.py +18 -18
  109. ultralytics/engine/validator.py +18 -15
  110. ultralytics/hub/__init__.py +31 -13
  111. ultralytics/hub/auth.py +11 -7
  112. ultralytics/hub/google/__init__.py +159 -0
  113. ultralytics/hub/session.py +128 -94
  114. ultralytics/hub/utils.py +20 -21
  115. ultralytics/models/__init__.py +4 -2
  116. ultralytics/models/fastsam/__init__.py +2 -3
  117. ultralytics/models/fastsam/model.py +26 -4
  118. ultralytics/models/fastsam/predict.py +127 -63
  119. ultralytics/models/fastsam/utils.py +1 -44
  120. ultralytics/models/fastsam/val.py +1 -1
  121. ultralytics/models/nas/__init__.py +1 -1
  122. ultralytics/models/nas/model.py +21 -10
  123. ultralytics/models/nas/predict.py +3 -6
  124. ultralytics/models/nas/val.py +4 -4
  125. ultralytics/models/rtdetr/__init__.py +1 -1
  126. ultralytics/models/rtdetr/model.py +1 -1
  127. ultralytics/models/rtdetr/predict.py +6 -8
  128. ultralytics/models/rtdetr/train.py +6 -2
  129. ultralytics/models/rtdetr/val.py +3 -3
  130. ultralytics/models/sam/__init__.py +3 -3
  131. ultralytics/models/sam/amg.py +29 -23
  132. ultralytics/models/sam/build.py +211 -13
  133. ultralytics/models/sam/model.py +91 -30
  134. ultralytics/models/sam/modules/__init__.py +1 -1
  135. ultralytics/models/sam/modules/blocks.py +1129 -0
  136. ultralytics/models/sam/modules/decoders.py +381 -53
  137. ultralytics/models/sam/modules/encoders.py +515 -324
  138. ultralytics/models/sam/modules/memory_attention.py +237 -0
  139. ultralytics/models/sam/modules/sam.py +969 -21
  140. ultralytics/models/sam/modules/tiny_encoder.py +425 -154
  141. ultralytics/models/sam/modules/transformer.py +159 -60
  142. ultralytics/models/sam/modules/utils.py +293 -0
  143. ultralytics/models/sam/predict.py +1263 -132
  144. ultralytics/models/utils/__init__.py +1 -1
  145. ultralytics/models/utils/loss.py +36 -24
  146. ultralytics/models/utils/ops.py +3 -7
  147. ultralytics/models/yolo/__init__.py +3 -3
  148. ultralytics/models/yolo/classify/__init__.py +1 -1
  149. ultralytics/models/yolo/classify/predict.py +7 -8
  150. ultralytics/models/yolo/classify/train.py +17 -22
  151. ultralytics/models/yolo/classify/val.py +8 -4
  152. ultralytics/models/yolo/detect/__init__.py +1 -1
  153. ultralytics/models/yolo/detect/predict.py +3 -5
  154. ultralytics/models/yolo/detect/train.py +11 -4
  155. ultralytics/models/yolo/detect/val.py +90 -52
  156. ultralytics/models/yolo/model.py +14 -9
  157. ultralytics/models/yolo/obb/__init__.py +1 -1
  158. ultralytics/models/yolo/obb/predict.py +2 -2
  159. ultralytics/models/yolo/obb/train.py +5 -3
  160. ultralytics/models/yolo/obb/val.py +41 -23
  161. ultralytics/models/yolo/pose/__init__.py +1 -1
  162. ultralytics/models/yolo/pose/predict.py +3 -5
  163. ultralytics/models/yolo/pose/train.py +2 -2
  164. ultralytics/models/yolo/pose/val.py +51 -17
  165. ultralytics/models/yolo/segment/__init__.py +1 -1
  166. ultralytics/models/yolo/segment/predict.py +3 -5
  167. ultralytics/models/yolo/segment/train.py +2 -2
  168. ultralytics/models/yolo/segment/val.py +60 -19
  169. ultralytics/models/yolo/world/__init__.py +5 -0
  170. ultralytics/models/yolo/world/train.py +92 -0
  171. ultralytics/models/yolo/world/train_world.py +109 -0
  172. ultralytics/nn/__init__.py +1 -1
  173. ultralytics/nn/autobackend.py +228 -93
  174. ultralytics/nn/modules/__init__.py +39 -14
  175. ultralytics/nn/modules/activation.py +21 -0
  176. ultralytics/nn/modules/block.py +527 -67
  177. ultralytics/nn/modules/conv.py +24 -7
  178. ultralytics/nn/modules/head.py +177 -34
  179. ultralytics/nn/modules/transformer.py +6 -5
  180. ultralytics/nn/modules/utils.py +1 -2
  181. ultralytics/nn/tasks.py +225 -77
  182. ultralytics/solutions/__init__.py +30 -1
  183. ultralytics/solutions/ai_gym.py +96 -143
  184. ultralytics/solutions/analytics.py +247 -0
  185. ultralytics/solutions/distance_calculation.py +78 -135
  186. ultralytics/solutions/heatmap.py +93 -247
  187. ultralytics/solutions/object_counter.py +184 -259
  188. ultralytics/solutions/parking_management.py +246 -0
  189. ultralytics/solutions/queue_management.py +112 -0
  190. ultralytics/solutions/region_counter.py +116 -0
  191. ultralytics/solutions/security_alarm.py +144 -0
  192. ultralytics/solutions/solutions.py +178 -0
  193. ultralytics/solutions/speed_estimation.py +86 -174
  194. ultralytics/solutions/streamlit_inference.py +190 -0
  195. ultralytics/solutions/trackzone.py +68 -0
  196. ultralytics/trackers/__init__.py +1 -1
  197. ultralytics/trackers/basetrack.py +32 -13
  198. ultralytics/trackers/bot_sort.py +61 -28
  199. ultralytics/trackers/byte_tracker.py +83 -51
  200. ultralytics/trackers/track.py +21 -6
  201. ultralytics/trackers/utils/__init__.py +1 -1
  202. ultralytics/trackers/utils/gmc.py +62 -48
  203. ultralytics/trackers/utils/kalman_filter.py +166 -35
  204. ultralytics/trackers/utils/matching.py +40 -21
  205. ultralytics/utils/__init__.py +511 -239
  206. ultralytics/utils/autobatch.py +40 -22
  207. ultralytics/utils/benchmarks.py +266 -85
  208. ultralytics/utils/callbacks/__init__.py +1 -1
  209. ultralytics/utils/callbacks/base.py +1 -3
  210. ultralytics/utils/callbacks/clearml.py +7 -6
  211. ultralytics/utils/callbacks/comet.py +39 -17
  212. ultralytics/utils/callbacks/dvc.py +1 -1
  213. ultralytics/utils/callbacks/hub.py +16 -16
  214. ultralytics/utils/callbacks/mlflow.py +28 -24
  215. ultralytics/utils/callbacks/neptune.py +6 -2
  216. ultralytics/utils/callbacks/raytune.py +3 -4
  217. ultralytics/utils/callbacks/tensorboard.py +18 -18
  218. ultralytics/utils/callbacks/wb.py +27 -20
  219. ultralytics/utils/checks.py +160 -100
  220. ultralytics/utils/dist.py +2 -1
  221. ultralytics/utils/downloads.py +44 -37
  222. ultralytics/utils/errors.py +1 -1
  223. ultralytics/utils/files.py +72 -38
  224. ultralytics/utils/instance.py +41 -19
  225. ultralytics/utils/loss.py +84 -56
  226. ultralytics/utils/metrics.py +61 -56
  227. ultralytics/utils/ops.py +94 -89
  228. ultralytics/utils/patches.py +30 -14
  229. ultralytics/utils/plotting.py +600 -269
  230. ultralytics/utils/tal.py +67 -26
  231. ultralytics/utils/torch_utils.py +302 -102
  232. ultralytics/utils/triton.py +2 -1
  233. ultralytics/utils/tuner.py +21 -12
  234. ultralytics-8.3.62.dist-info/METADATA +370 -0
  235. ultralytics-8.3.62.dist-info/RECORD +241 -0
  236. {ultralytics-8.1.28.dist-info → ultralytics-8.3.62.dist-info}/WHEEL +1 -1
  237. ultralytics/data/explorer/__init__.py +0 -5
  238. ultralytics/data/explorer/explorer.py +0 -472
  239. ultralytics/data/explorer/gui/__init__.py +0 -1
  240. ultralytics/data/explorer/gui/dash.py +0 -268
  241. ultralytics/data/explorer/utils.py +0 -166
  242. ultralytics/models/fastsam/prompt.py +0 -357
  243. ultralytics-8.1.28.dist-info/METADATA +0 -373
  244. ultralytics-8.1.28.dist-info/RECORD +0 -197
  245. {ultralytics-8.1.28.dist-info → ultralytics-8.3.62.dist-info}/LICENSE +0 -0
  246. {ultralytics-8.1.28.dist-info → ultralytics-8.3.62.dist-info}/entry_points.txt +0 -0
  247. {ultralytics-8.1.28.dist-info → ultralytics-8.3.62.dist-info}/top_level.txt +0 -0
ultralytics/solutions/parking_management.py
@@ -0,0 +1,246 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import json
+
+ import cv2
+ import numpy as np
+
+ from ultralytics.solutions.solutions import BaseSolution
+ from ultralytics.utils import LOGGER
+ from ultralytics.utils.checks import check_requirements
+ from ultralytics.utils.plotting import Annotator
+
+
+ class ParkingPtsSelection:
+     """
+     A class for selecting and managing parking zone points on images using a Tkinter-based UI.
+
+     This class provides functionality to upload an image, select points to define parking zones, and save the
+     selected points to a JSON file. It uses Tkinter for the graphical user interface.
+
+     Attributes:
+         tk (module): The Tkinter module for GUI operations.
+         filedialog (module): Tkinter's filedialog module for file selection operations.
+         messagebox (module): Tkinter's messagebox module for displaying message boxes.
+         master (tk.Tk): The main Tkinter window.
+         canvas (tk.Canvas): The canvas widget for displaying the image and drawing bounding boxes.
+         image (PIL.Image.Image): The uploaded image.
+         canvas_image (ImageTk.PhotoImage): The image displayed on the canvas.
+         rg_data (List[List[Tuple[int, int]]]): List of bounding boxes, each defined by 4 points.
+         current_box (List[Tuple[int, int]]): Temporary storage for the points of the current bounding box.
+         imgw (int): Original width of the uploaded image.
+         imgh (int): Original height of the uploaded image.
+         canvas_max_width (int): Maximum width of the canvas.
+         canvas_max_height (int): Maximum height of the canvas.
+
+     Methods:
+         initialize_properties: Initializes the necessary properties.
+         upload_image: Uploads an image, resizes it to fit the canvas, and displays it.
+         on_canvas_click: Handles mouse clicks to add points for bounding boxes.
+         draw_box: Draws a bounding box on the canvas.
+         remove_last_bounding_box: Removes the last bounding box and redraws the canvas.
+         redraw_canvas: Redraws the canvas with the image and all bounding boxes.
+         save_to_json: Saves the bounding boxes to a JSON file.
+
+     Examples:
+         >>> parking_selector = ParkingPtsSelection()
+         >>> # Use the GUI to upload an image, select parking zones, and save the data
+     """
+
+     def __init__(self):
+         """Initializes the ParkingPtsSelection class, setting up UI and properties for parking zone point selection."""
+         check_requirements("tkinter")
+         import tkinter as tk
+         from tkinter import filedialog, messagebox
+
+         self.tk, self.filedialog, self.messagebox = tk, filedialog, messagebox
+         self.master = self.tk.Tk()  # Reference to the main application window or parent widget
+         self.master.title("Ultralytics Parking Zones Points Selector")
+         self.master.resizable(False, False)
+
+         self.canvas = self.tk.Canvas(self.master, bg="white")  # Canvas widget for displaying images or graphics
+         self.canvas.pack(side=self.tk.BOTTOM)
+
+         self.image = None  # Variable to store the loaded image
+         self.canvas_image = None  # Reference to the image displayed on the canvas
+         self.canvas_max_width = None  # Maximum allowed width for the canvas
+         self.canvas_max_height = None  # Maximum allowed height for the canvas
+         self.rg_data = None  # Data related to region or annotation management
+         self.current_box = None  # Stores the currently selected or active bounding box
+         self.imgh = None  # Height of the current image
+         self.imgw = None  # Width of the current image
+
+         # Button frame with buttons
+         button_frame = self.tk.Frame(self.master)
+         button_frame.pack(side=self.tk.TOP)
+
+         for text, cmd in [
+             ("Upload Image", self.upload_image),
+             ("Remove Last BBox", self.remove_last_bounding_box),
+             ("Save", self.save_to_json),
+         ]:
+             self.tk.Button(button_frame, text=text, command=cmd).pack(side=self.tk.LEFT)
+
+         self.initialize_properties()
+         self.master.mainloop()
+
+     def initialize_properties(self):
+         """Initialize properties for image, canvas, bounding boxes, and dimensions."""
+         self.image = self.canvas_image = None
+         self.rg_data, self.current_box = [], []
+         self.imgw = self.imgh = 0
+         self.canvas_max_width, self.canvas_max_height = 1280, 720
+
+     def upload_image(self):
+         """Uploads and displays an image on the canvas, resizing it to fit within specified dimensions."""
+         from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package
+
+         self.image = Image.open(self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")]))
+         if not self.image:
+             return
+
+         self.imgw, self.imgh = self.image.size
+         aspect_ratio = self.imgw / self.imgh
+         canvas_width = (
+             min(self.canvas_max_width, self.imgw) if aspect_ratio > 1 else int(self.canvas_max_height * aspect_ratio)
+         )
+         canvas_height = (
+             min(self.canvas_max_height, self.imgh) if aspect_ratio <= 1 else int(canvas_width / aspect_ratio)
+         )
+
+         self.canvas.config(width=canvas_width, height=canvas_height)
+         self.canvas_image = ImageTk.PhotoImage(self.image.resize((canvas_width, canvas_height)))
+         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+         self.canvas.bind("<Button-1>", self.on_canvas_click)
+
+         self.rg_data.clear(), self.current_box.clear()
+
+     def on_canvas_click(self, event):
+         """Handles mouse clicks to add points for bounding boxes on the canvas."""
+         self.current_box.append((event.x, event.y))
+         self.canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
+         if len(self.current_box) == 4:
+             self.rg_data.append(self.current_box.copy())
+             self.draw_box(self.current_box)
+             self.current_box.clear()
+
+     def draw_box(self, box):
+         """Draws a bounding box on the canvas using the provided coordinates."""
+         for i in range(4):
+             self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2)
+
+     def remove_last_bounding_box(self):
+         """Removes the last bounding box from the list and redraws the canvas."""
+         if not self.rg_data:
+             self.messagebox.showwarning("Warning", "No bounding boxes to remove.")
+             return
+         self.rg_data.pop()
+         self.redraw_canvas()
+
+     def redraw_canvas(self):
+         """Redraws the canvas with the image and all bounding boxes."""
+         self.canvas.delete("all")
+         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+         for box in self.rg_data:
+             self.draw_box(box)
+
+     def save_to_json(self):
+         """Saves the selected parking zone points to a JSON file with scaled coordinates."""
+         scale_w, scale_h = self.imgw / self.canvas.winfo_width(), self.imgh / self.canvas.winfo_height()
+         data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in self.rg_data]
+
+         from io import StringIO  # Function level import, as it's only required to store coordinates, not every frame
+
+         write_buffer = StringIO()
+         json.dump(data, write_buffer, indent=4)
+         with open("bounding_boxes.json", "w", encoding="utf-8") as f:
+             f.write(write_buffer.getvalue())
+         self.messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
+
+
+ class ParkingManagement(BaseSolution):
+     """
+     Manages parking occupancy and availability using YOLO model for real-time monitoring and visualization.
+
+     This class extends BaseSolution to provide functionality for parking lot management, including detection of
+     occupied spaces, visualization of parking regions, and display of occupancy statistics.
+
+     Attributes:
+         json_file (str): Path to the JSON file containing parking region details.
+         json (List[Dict]): Loaded JSON data containing parking region information.
+         pr_info (Dict[str, int]): Dictionary storing parking information (Occupancy and Available spaces).
+         arc (Tuple[int, int, int]): RGB color tuple for available region visualization.
+         occ (Tuple[int, int, int]): RGB color tuple for occupied region visualization.
+         dc (Tuple[int, int, int]): RGB color tuple for centroid visualization of detected objects.
+
+     Methods:
+         process_data: Processes model data for parking lot management and visualization.
+
+     Examples:
+         >>> from ultralytics.solutions import ParkingManagement
+         >>> parking_manager = ParkingManagement(model="yolov8n.pt", json_file="parking_regions.json")
+         >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
+         >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
+     """
+
+     def __init__(self, **kwargs):
+         """Initializes the parking management system with a YOLO model and visualization settings."""
+         super().__init__(**kwargs)
+
+         self.json_file = self.CFG["json_file"]  # Load JSON data
+         if self.json_file is None:
+             LOGGER.warning("❌ json_file argument missing. Parking region details required.")
+             raise ValueError("❌ Json file path can not be empty")
+
+         with open(self.json_file) as f:
+             self.json = json.load(f)
+
+         self.pr_info = {"Occupancy": 0, "Available": 0}  # dictionary for parking information
+
+         self.arc = (0, 0, 255)  # available region color
+         self.occ = (0, 255, 0)  # occupied region color
+         self.dc = (255, 0, 189)  # centroid color for each box
+
+     def process_data(self, im0):
+         """
+         Processes the model data for parking lot management.
+
+         This function analyzes the input image, extracts tracks, and determines the occupancy status of parking
+         regions defined in the JSON file. It annotates the image with occupied and available parking spots,
+         and updates the parking information.
+
+         Args:
+             im0 (np.ndarray): The input inference image.
+
+         Examples:
+             >>> parking_manager = ParkingManagement(json_file="parking_regions.json")
+             >>> image = cv2.imread("parking_lot.jpg")
+             >>> parking_manager.process_data(image)
+         """
+         self.extract_tracks(im0)  # extract tracks from im0
+         es, fs = len(self.json), 0  # empty slots, filled slots
+         annotator = Annotator(im0, self.line_width)  # init annotator
+
+         for region in self.json:
+             # Convert points to a NumPy array with the correct dtype and reshape properly
+             pts_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
+             rg_occupied = False  # occupied region initialization
+             for box, cls in zip(self.boxes, self.clss):
+                 xc, yc = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
+                 dist = cv2.pointPolygonTest(pts_array, (xc, yc), False)
+                 if dist >= 0:
+                     # cv2.circle(im0, (xc, yc), radius=self.line_width * 4, color=self.dc, thickness=-1)
+                     annotator.display_objects_labels(
+                         im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
+                     )
+                     rg_occupied = True
+                     break
+             fs, es = (fs + 1, es - 1) if rg_occupied else (fs, es)
+             # Plotting regions
+             cv2.polylines(im0, [pts_array], isClosed=True, color=self.occ if rg_occupied else self.arc, thickness=2)
+
+         self.pr_info["Occupancy"], self.pr_info["Available"] = fs, es
+
+         annotator.display_analytics(im0, self.pr_info, (104, 31, 17), (255, 255, 255), 10)
+         self.display_output(im0)  # display output with base class function
+         return im0  # return output image for more usage
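The two classes in this new file are meant to be used together: ParkingPtsSelection writes bounding_boxes.json from the Tkinter UI, and ParkingManagement reads that file and annotates each frame. A minimal usage sketch under assumed names, with the weights taken from the docstring example above and the video path purely illustrative:

    import cv2

    from ultralytics.solutions.parking_management import ParkingManagement, ParkingPtsSelection

    # Step 1 (one-off): click 4-point parking zones in the Tkinter UI; save_to_json()
    # writes them to bounding_boxes.json in the working directory.
    # ParkingPtsSelection()

    # Step 2: run per-frame occupancy monitoring on a video.
    parking = ParkingManagement(model="yolov8n.pt", json_file="bounding_boxes.json")
    cap = cv2.VideoCapture("parking.mp4")  # illustrative input path
    while cap.isOpened():
        ok, im0 = cap.read()
        if not ok:
            break
        im0 = parking.process_data(im0)  # draws regions/labels and updates parking.pr_info
    print(parking.pr_info)  # {"Occupancy": ..., "Available": ...}
    cap.release()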
ultralytics/solutions/queue_management.py
@@ -0,0 +1,112 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from ultralytics.solutions.solutions import BaseSolution
+ from ultralytics.utils.plotting import Annotator, colors
+
+
+ class QueueManager(BaseSolution):
+     """
+     Manages queue counting in real-time video streams based on object tracks.
+
+     This class extends BaseSolution to provide functionality for tracking and counting objects within a specified
+     region in video frames.
+
+     Attributes:
+         counts (int): The current count of objects in the queue.
+         rect_color (Tuple[int, int, int]): RGB color tuple for drawing the queue region rectangle.
+         region_length (int): The number of points defining the queue region.
+         annotator (Annotator): An instance of the Annotator class for drawing on frames.
+         track_line (List[Tuple[int, int]]): List of track line coordinates.
+         track_history (Dict[int, List[Tuple[int, int]]]): Dictionary storing tracking history for each object.
+
+     Methods:
+         initialize_region: Initializes the queue region.
+         process_queue: Processes a single frame for queue management.
+         extract_tracks: Extracts object tracks from the current frame.
+         store_tracking_history: Stores the tracking history for an object.
+         display_output: Displays the processed output.
+
+     Examples:
+         >>> cap = cv2.VideoCapture("Path/to/video/file.mp4")
+         >>> queue_manager = QueueManager(region=[100, 100, 200, 200, 300, 300])
+         >>> while cap.isOpened():
+         >>>     success, im0 = cap.read()
+         >>>     if not success:
+         >>>         break
+         >>>     out = queue_manager.process_queue(im0)
+     """
+
+     def __init__(self, **kwargs):
+         """Initializes the QueueManager with parameters for tracking and counting objects in a video stream."""
+         super().__init__(**kwargs)
+         self.initialize_region()
+         self.counts = 0  # Queue counts information
+         self.rect_color = (255, 255, 255)  # Rectangle color
+         self.region_length = len(self.region)  # Store region length for further usage
+
+     def process_queue(self, im0):
+         """
+         Processes the queue management for a single frame of video.
+
+         Args:
+             im0 (numpy.ndarray): Input image for processing, typically a frame from a video stream.
+
+         Returns:
+             (numpy.ndarray): Processed image with annotations, bounding boxes, and queue counts.
+
+         This method performs the following steps:
+         1. Resets the queue count for the current frame.
+         2. Initializes an Annotator object for drawing on the image.
+         3. Extracts tracks from the image.
+         4. Draws the counting region on the image.
+         5. For each detected object:
+            - Draws bounding boxes and labels.
+            - Stores tracking history.
+            - Draws centroids and tracks.
+            - Checks if the object is inside the counting region and updates the count.
+         6. Displays the queue count on the image.
+         7. Displays the processed output.
+
+         Examples:
+             >>> queue_manager = QueueManager()
+             >>> frame = cv2.imread("frame.jpg")
+             >>> processed_frame = queue_manager.process_queue(frame)
+         """
+         self.counts = 0  # Reset counts every frame
+         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
+         self.extract_tracks(im0)  # Extract tracks
+
+         self.annotator.draw_region(
+             reg_pts=self.region, color=self.rect_color, thickness=self.line_width * 2
+         )  # Draw region
+
+         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+             # Draw bounding box and counting region
+             self.annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+             self.store_tracking_history(track_id, box)  # Store track history
+
+             # Draw tracks of objects
+             self.annotator.draw_centroid_and_tracks(
+                 self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
+             )
+
+             # Cache frequently accessed attributes
+             track_history = self.track_history.get(track_id, [])
+
+             # Store previous position of track and check if the object is inside the counting region
+             prev_position = None
+             if len(track_history) > 1:
+                 prev_position = track_history[-2]
+             if self.region_length >= 3 and prev_position and self.r_s.contains(self.Point(self.track_line[-1])):
+                 self.counts += 1
+
+         # Display queue counts
+         self.annotator.queue_counts_display(
+             f"Queue Counts : {str(self.counts)}",
+             points=self.region,
+             region_color=self.rect_color,
+             txt_color=(104, 31, 17),
+         )
+         self.display_output(im0)  # display output with base class function
+
+         return im0  # return output image for more usage
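For completeness, a runnable variant of the loop from the class docstring above, with the capture released at the end. The video path is illustrative, and the region is written as a list of (x, y) corner points, which is an assumption consistent with how RegionCounter in the next file validates region points:

    import cv2

    from ultralytics.solutions.queue_management import QueueManager

    queue_manager = QueueManager(region=[(20, 400), (1080, 400), (1080, 360), (20, 360)])  # assumed queue polygon
    cap = cv2.VideoCapture("Path/to/video/file.mp4")  # illustrative path
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
        out = queue_manager.process_queue(im0)  # annotated frame; current count in queue_manager.counts
    cap.release()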
ultralytics/solutions/region_counter.py
@@ -0,0 +1,116 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from ultralytics.solutions.solutions import BaseSolution
+ from ultralytics.utils import LOGGER
+ from ultralytics.utils.plotting import Annotator, colors
+
+
+ class RegionCounter(BaseSolution):
+     """
+     A class designed for real-time counting of objects within user-defined regions in a video stream.
+
+     This class inherits from `BaseSolution` and offers functionalities to define polygonal regions in a video
+     frame, track objects, and count those objects that pass through each defined region. This makes it useful
+     for applications that require counting in specified areas, such as monitoring zones or segmented sections.
+
+     Attributes:
+         region_template (dict): A template for creating new counting regions with default attributes including
+             the name, polygon coordinates, and display colors.
+         counting_regions (list): A list storing all defined regions, where each entry is based on `region_template`
+             and includes specific region settings like name, coordinates, and color.
+
+     Methods:
+         add_region: Adds a new counting region with specified attributes, such as the region's name, polygon points,
+             region color, and text color.
+         count: Processes video frames to count objects in each region, drawing regions and displaying counts
+             on the frame. Handles object detection, region definition, and containment checks.
+     """
+
+     def __init__(self, **kwargs):
+         """Initializes the RegionCounter class for real-time counting in different regions of the video streams."""
+         super().__init__(**kwargs)
+         self.region_template = {
+             "name": "Default Region",
+             "polygon": None,
+             "counts": 0,
+             "dragging": False,
+             "region_color": (255, 255, 255),
+             "text_color": (0, 0, 0),
+         }
+         self.counting_regions = []
+
+     def add_region(self, name, polygon_points, region_color, text_color):
+         """
+         Adds a new region to the counting list based on the provided template with specific attributes.
+
+         Args:
+             name (str): Name assigned to the new region.
+             polygon_points (list[tuple]): List of (x, y) coordinates defining the region's polygon.
+             region_color (tuple): BGR color for region visualization.
+             text_color (tuple): BGR color for the text within the region.
+         """
+         region = self.region_template.copy()
+         region.update(
+             {
+                 "name": name,
+                 "polygon": self.Polygon(polygon_points),
+                 "region_color": region_color,
+                 "text_color": text_color,
+             }
+         )
+         self.counting_regions.append(region)
+
+     def count(self, im0):
+         """
+         Processes the input frame to detect and count objects within each defined region.
+
+         Args:
+             im0 (numpy.ndarray): Input image frame where objects and regions are annotated.
+
+         Returns:
+             im0 (numpy.ndarray): Processed image frame with annotated counting information.
+         """
+         self.annotator = Annotator(im0, line_width=self.line_width)
+         self.extract_tracks(im0)
+
+         # Region initialization and conversion
+         if self.region is None:
+             self.initialize_region()
+             regions = {"Region#01": self.region}
+         else:
+             regions = self.region if isinstance(self.region, dict) else {"Region#01": self.region}
+
+         # Draw regions and process counts for each defined area
+         for idx, (region_name, reg_pts) in enumerate(regions.items(), start=1):
+             if not isinstance(reg_pts, list) or not all(isinstance(pt, tuple) for pt in reg_pts):
+                 LOGGER.warning(f"Invalid region points for {region_name}: {reg_pts}")
+                 continue  # Skip invalid entries
+             color = colors(idx, True)
+             self.annotator.draw_region(reg_pts=reg_pts, color=color, thickness=self.line_width * 2)
+             self.add_region(region_name, reg_pts, color, self.annotator.get_txt_color())
+
+         # Prepare regions for containment check
+         for region in self.counting_regions:
+             region["prepared_polygon"] = self.prep(region["polygon"])
+
+         # Process bounding boxes and count objects within each region
+         for box, cls in zip(self.boxes, self.clss):
+             self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
+             bbox_center = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)
+
+             for region in self.counting_regions:
+                 if region["prepared_polygon"].contains(self.Point(bbox_center)):
+                     region["counts"] += 1
+
+         # Display counts in each region
+         for region in self.counting_regions:
+             self.annotator.text_label(
+                 region["polygon"].bounds,
+                 label=str(region["counts"]),
+                 color=region["region_color"],
+                 txt_color=region["text_color"],
+             )
+             region["counts"] = 0  # Reset count for next frame
+
+         self.display_output(im0)
+         return im0
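As the isinstance check in count() shows, the region can be either a single point list or a dict mapping region names to point lists. A short sketch with two named regions; the weights, video path, coordinates, and passing region= as a keyword through BaseSolution (mirroring the QueueManager docstring) are assumptions:

    import cv2

    from ultralytics.solutions.region_counter import RegionCounter

    regions = {
        "Entrance": [(50, 60), (250, 60), (250, 360), (50, 360)],
        "Checkout": [(400, 60), (700, 60), (700, 360), (400, 360)],
    }
    counter = RegionCounter(model="yolov8n.pt", region=regions)
    cap = cv2.VideoCapture("store.mp4")  # illustrative path
    while cap.isOpened():
        ok, im0 = cap.read()
        if not ok:
            break
        im0 = counter.count(im0)  # draws each region and its per-frame object count
    cap.release()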
ultralytics/solutions/security_alarm.py
@@ -0,0 +1,144 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from ultralytics.solutions.solutions import BaseSolution
+ from ultralytics.utils import LOGGER
+ from ultralytics.utils.plotting import Annotator, colors
+
+
+ class SecurityAlarm(BaseSolution):
+     """
+     A class to manage security alarm functionalities for real-time monitoring.
+
+     This class extends the BaseSolution class and provides features to monitor
+     objects in a frame, send email notifications when specific thresholds are
+     exceeded for total detections, and annotate the output frame for visualization.
+
+     Attributes:
+         email_sent (bool): Flag to track if an email has already been sent for the current event.
+         records (int): Threshold for the number of detected objects to trigger an alert.
+
+     Methods:
+         authenticate: Sets up email server authentication for sending alerts.
+         send_email: Sends an email notification with details and an image attachment.
+         monitor: Monitors the frame, processes detections, and triggers alerts if thresholds are crossed.
+
+     Examples:
+         >>> security = SecurityAlarm()
+         >>> security.authenticate("abc@gmail.com", "1111222233334444", "xyz@gmail.com")
+         >>> frame = cv2.imread("frame.jpg")
+         >>> processed_frame = security.monitor(frame)
+     """
+
+     def __init__(self, **kwargs):
+         """Initializes the SecurityAlarm class with parameters for real-time object monitoring."""
+         super().__init__(**kwargs)
+         self.email_sent = False
+         self.records = self.CFG["records"]
+         self.server = None
+         self.to_email = ""
+         self.from_email = ""
+
+     def authenticate(self, from_email, password, to_email):
+         """
+         Authenticates the email server for sending alert notifications.
+
+         Args:
+             from_email (str): Sender's email address.
+             password (str): Password for the sender's email account.
+             to_email (str): Recipient's email address.
+
+         This method initializes a secure connection with the SMTP server
+         and logs in using the provided credentials.
+
+         Examples:
+             >>> alarm = SecurityAlarm()
+             >>> alarm.authenticate("sender@example.com", "password123", "recipient@example.com")
+         """
+         import smtplib
+
+         self.server = smtplib.SMTP("smtp.gmail.com: 587")
+         self.server.starttls()
+         self.server.login(from_email, password)
+         self.to_email = to_email
+         self.from_email = from_email
+
+     def send_email(self, im0, records=5):
+         """
+         Sends an email notification with an image attachment indicating the number of objects detected.
+
+         Args:
+             im0 (numpy.ndarray): The input image or frame to be attached to the email.
+             records (int): The number of detected objects to be included in the email message.
+
+         This method encodes the input image, composes the email message with
+         details about the detection, and sends it to the specified recipient.
+
+         Examples:
+             >>> alarm = SecurityAlarm()
+             >>> frame = cv2.imread("path/to/image.jpg")
+             >>> alarm.send_email(frame, records=10)
+         """
+         from email.mime.image import MIMEImage
+         from email.mime.multipart import MIMEMultipart
+         from email.mime.text import MIMEText
+
+         import cv2
+
+         img_bytes = cv2.imencode(".jpg", im0)[1].tobytes()  # Encode the image as JPEG
+
+         # Create the email
+         message = MIMEMultipart()
+         message["From"] = self.from_email
+         message["To"] = self.to_email
+         message["Subject"] = "Security Alert"
+
+         # Add the text message body
+         message_body = f"Ultralytics ALERT!!! {records} objects have been detected!!"
+         message.attach(MIMEText(message_body))
+
+         # Attach the image
+         image_attachment = MIMEImage(img_bytes, name="ultralytics.jpg")
+         message.attach(image_attachment)
+
+         # Send the email
+         try:
+             self.server.send_message(message)
+             LOGGER.info("✅ Email sent successfully!")
+         except Exception as e:
+             print(f"❌ Failed to send email: {e}")
+
+     def monitor(self, im0):
+         """
+         Monitors the frame, processes object detections, and triggers alerts if thresholds are exceeded.
+
+         Args:
+             im0 (numpy.ndarray): The input image or frame to be processed and annotated.
+
+         This method processes the input frame, extracts detections, annotates the frame
+         with bounding boxes, and sends an email notification if the number of detected objects
+         surpasses the specified threshold and an alert has not already been sent.
+
+         Returns:
+             (numpy.ndarray): The processed frame with annotations.
+
+         Examples:
+             >>> alarm = SecurityAlarm()
+             >>> frame = cv2.imread("path/to/image.jpg")
+             >>> processed_frame = alarm.monitor(frame)
+         """
+         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
+         self.extract_tracks(im0)  # Extract tracks
+
+         # Iterate over bounding boxes, track ids and classes index
+         for box, cls in zip(self.boxes, self.clss):
+             # Draw bounding box
+             self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
+
+         total_det = len(self.clss)
+         if total_det > self.records and not self.email_sent:  # Only send email if not sent before
+             self.send_email(im0, total_det)
+             self.email_sent = True
+
+         self.display_output(im0)  # display output with base class function
+
+         return im0  # return output image for more usage
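A short monitoring-loop sketch around the docstring example above. The credentials and video path are placeholders, and passing records=5 as a keyword is an assumption inferred from self.CFG["records"] in __init__ (the threshold presumably also has a default in cfg/solutions/default.yaml, added earlier in this diff):

    import cv2

    from ultralytics.solutions.security_alarm import SecurityAlarm

    alarm = SecurityAlarm(model="yolov8n.pt", records=5)  # alert once more than 5 objects are detected
    alarm.authenticate("sender@gmail.com", "app-password", "recipient@example.com")  # Gmail SMTP, per authenticate()
    cap = cv2.VideoCapture("entrance.mp4")  # illustrative path
    while cap.isOpened():
        ok, im0 = cap.read()
        if not ok:
            break
        im0 = alarm.monitor(im0)  # annotates detections; emails the frame once the threshold is crossed
    cap.release()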