dgenerate_ultralytics_headless-8.3.134-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
  2. dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
  3. dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +22 -0
  8. tests/conftest.py +83 -0
  9. tests/test_cli.py +138 -0
  10. tests/test_cuda.py +215 -0
  11. tests/test_engine.py +131 -0
  12. tests/test_exports.py +236 -0
  13. tests/test_integrations.py +154 -0
  14. tests/test_python.py +694 -0
  15. tests/test_solutions.py +187 -0
  16. ultralytics/__init__.py +30 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1023 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +77 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +443 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/VOC.yaml +106 -0
  29. ultralytics/cfg/datasets/VisDrone.yaml +77 -0
  30. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  31. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  32. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  33. ultralytics/cfg/datasets/coco-pose.yaml +42 -0
  34. ultralytics/cfg/datasets/coco.yaml +118 -0
  35. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  36. ultralytics/cfg/datasets/coco128.yaml +101 -0
  37. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  38. ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
  39. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  40. ultralytics/cfg/datasets/coco8.yaml +101 -0
  41. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  42. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  43. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  44. ultralytics/cfg/datasets/dota8.yaml +35 -0
  45. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  46. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  47. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  48. ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
  49. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  50. ultralytics/cfg/datasets/signature.yaml +21 -0
  51. ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
  52. ultralytics/cfg/datasets/xView.yaml +155 -0
  53. ultralytics/cfg/default.yaml +127 -0
  54. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  55. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  56. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  57. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  58. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  59. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  60. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  61. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  62. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  63. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  64. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  65. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  68. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  69. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  70. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  71. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  74. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  75. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  77. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  78. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  79. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  80. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  81. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  82. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  83. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
  84. ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
  85. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  86. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  87. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  88. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  89. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  90. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  91. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  92. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  93. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  94. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  95. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  96. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  97. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  99. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  100. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  102. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  103. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  105. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  106. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  109. ultralytics/cfg/trackers/botsort.yaml +22 -0
  110. ultralytics/cfg/trackers/bytetrack.yaml +14 -0
  111. ultralytics/data/__init__.py +26 -0
  112. ultralytics/data/annotator.py +66 -0
  113. ultralytics/data/augment.py +2945 -0
  114. ultralytics/data/base.py +438 -0
  115. ultralytics/data/build.py +258 -0
  116. ultralytics/data/converter.py +754 -0
  117. ultralytics/data/dataset.py +834 -0
  118. ultralytics/data/loaders.py +676 -0
  119. ultralytics/data/scripts/download_weights.sh +18 -0
  120. ultralytics/data/scripts/get_coco.sh +61 -0
  121. ultralytics/data/scripts/get_coco128.sh +18 -0
  122. ultralytics/data/scripts/get_imagenet.sh +52 -0
  123. ultralytics/data/split.py +125 -0
  124. ultralytics/data/split_dota.py +325 -0
  125. ultralytics/data/utils.py +777 -0
  126. ultralytics/engine/__init__.py +1 -0
  127. ultralytics/engine/exporter.py +1519 -0
  128. ultralytics/engine/model.py +1156 -0
  129. ultralytics/engine/predictor.py +502 -0
  130. ultralytics/engine/results.py +1840 -0
  131. ultralytics/engine/trainer.py +853 -0
  132. ultralytics/engine/tuner.py +243 -0
  133. ultralytics/engine/validator.py +377 -0
  134. ultralytics/hub/__init__.py +168 -0
  135. ultralytics/hub/auth.py +137 -0
  136. ultralytics/hub/google/__init__.py +176 -0
  137. ultralytics/hub/session.py +446 -0
  138. ultralytics/hub/utils.py +248 -0
  139. ultralytics/models/__init__.py +9 -0
  140. ultralytics/models/fastsam/__init__.py +7 -0
  141. ultralytics/models/fastsam/model.py +61 -0
  142. ultralytics/models/fastsam/predict.py +181 -0
  143. ultralytics/models/fastsam/utils.py +24 -0
  144. ultralytics/models/fastsam/val.py +40 -0
  145. ultralytics/models/nas/__init__.py +7 -0
  146. ultralytics/models/nas/model.py +102 -0
  147. ultralytics/models/nas/predict.py +58 -0
  148. ultralytics/models/nas/val.py +39 -0
  149. ultralytics/models/rtdetr/__init__.py +7 -0
  150. ultralytics/models/rtdetr/model.py +63 -0
  151. ultralytics/models/rtdetr/predict.py +84 -0
  152. ultralytics/models/rtdetr/train.py +85 -0
  153. ultralytics/models/rtdetr/val.py +191 -0
  154. ultralytics/models/sam/__init__.py +6 -0
  155. ultralytics/models/sam/amg.py +260 -0
  156. ultralytics/models/sam/build.py +358 -0
  157. ultralytics/models/sam/model.py +170 -0
  158. ultralytics/models/sam/modules/__init__.py +1 -0
  159. ultralytics/models/sam/modules/blocks.py +1129 -0
  160. ultralytics/models/sam/modules/decoders.py +515 -0
  161. ultralytics/models/sam/modules/encoders.py +854 -0
  162. ultralytics/models/sam/modules/memory_attention.py +299 -0
  163. ultralytics/models/sam/modules/sam.py +1006 -0
  164. ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
  165. ultralytics/models/sam/modules/transformer.py +351 -0
  166. ultralytics/models/sam/modules/utils.py +394 -0
  167. ultralytics/models/sam/predict.py +1605 -0
  168. ultralytics/models/utils/__init__.py +1 -0
  169. ultralytics/models/utils/loss.py +455 -0
  170. ultralytics/models/utils/ops.py +268 -0
  171. ultralytics/models/yolo/__init__.py +7 -0
  172. ultralytics/models/yolo/classify/__init__.py +7 -0
  173. ultralytics/models/yolo/classify/predict.py +88 -0
  174. ultralytics/models/yolo/classify/train.py +233 -0
  175. ultralytics/models/yolo/classify/val.py +215 -0
  176. ultralytics/models/yolo/detect/__init__.py +7 -0
  177. ultralytics/models/yolo/detect/predict.py +124 -0
  178. ultralytics/models/yolo/detect/train.py +217 -0
  179. ultralytics/models/yolo/detect/val.py +451 -0
  180. ultralytics/models/yolo/model.py +354 -0
  181. ultralytics/models/yolo/obb/__init__.py +7 -0
  182. ultralytics/models/yolo/obb/predict.py +66 -0
  183. ultralytics/models/yolo/obb/train.py +81 -0
  184. ultralytics/models/yolo/obb/val.py +283 -0
  185. ultralytics/models/yolo/pose/__init__.py +7 -0
  186. ultralytics/models/yolo/pose/predict.py +79 -0
  187. ultralytics/models/yolo/pose/train.py +154 -0
  188. ultralytics/models/yolo/pose/val.py +394 -0
  189. ultralytics/models/yolo/segment/__init__.py +7 -0
  190. ultralytics/models/yolo/segment/predict.py +113 -0
  191. ultralytics/models/yolo/segment/train.py +123 -0
  192. ultralytics/models/yolo/segment/val.py +428 -0
  193. ultralytics/models/yolo/world/__init__.py +5 -0
  194. ultralytics/models/yolo/world/train.py +119 -0
  195. ultralytics/models/yolo/world/train_world.py +176 -0
  196. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  197. ultralytics/models/yolo/yoloe/predict.py +169 -0
  198. ultralytics/models/yolo/yoloe/train.py +298 -0
  199. ultralytics/models/yolo/yoloe/train_seg.py +124 -0
  200. ultralytics/models/yolo/yoloe/val.py +191 -0
  201. ultralytics/nn/__init__.py +29 -0
  202. ultralytics/nn/autobackend.py +842 -0
  203. ultralytics/nn/modules/__init__.py +182 -0
  204. ultralytics/nn/modules/activation.py +53 -0
  205. ultralytics/nn/modules/block.py +1966 -0
  206. ultralytics/nn/modules/conv.py +712 -0
  207. ultralytics/nn/modules/head.py +880 -0
  208. ultralytics/nn/modules/transformer.py +713 -0
  209. ultralytics/nn/modules/utils.py +164 -0
  210. ultralytics/nn/tasks.py +1627 -0
  211. ultralytics/nn/text_model.py +351 -0
  212. ultralytics/solutions/__init__.py +41 -0
  213. ultralytics/solutions/ai_gym.py +116 -0
  214. ultralytics/solutions/analytics.py +252 -0
  215. ultralytics/solutions/config.py +106 -0
  216. ultralytics/solutions/distance_calculation.py +124 -0
  217. ultralytics/solutions/heatmap.py +127 -0
  218. ultralytics/solutions/instance_segmentation.py +84 -0
  219. ultralytics/solutions/object_blurrer.py +90 -0
  220. ultralytics/solutions/object_counter.py +195 -0
  221. ultralytics/solutions/object_cropper.py +84 -0
  222. ultralytics/solutions/parking_management.py +273 -0
  223. ultralytics/solutions/queue_management.py +93 -0
  224. ultralytics/solutions/region_counter.py +120 -0
  225. ultralytics/solutions/security_alarm.py +154 -0
  226. ultralytics/solutions/similarity_search.py +172 -0
  227. ultralytics/solutions/solutions.py +724 -0
  228. ultralytics/solutions/speed_estimation.py +110 -0
  229. ultralytics/solutions/streamlit_inference.py +196 -0
  230. ultralytics/solutions/templates/similarity-search.html +160 -0
  231. ultralytics/solutions/trackzone.py +88 -0
  232. ultralytics/solutions/vision_eye.py +68 -0
  233. ultralytics/trackers/__init__.py +7 -0
  234. ultralytics/trackers/basetrack.py +124 -0
  235. ultralytics/trackers/bot_sort.py +260 -0
  236. ultralytics/trackers/byte_tracker.py +480 -0
  237. ultralytics/trackers/track.py +125 -0
  238. ultralytics/trackers/utils/__init__.py +1 -0
  239. ultralytics/trackers/utils/gmc.py +376 -0
  240. ultralytics/trackers/utils/kalman_filter.py +493 -0
  241. ultralytics/trackers/utils/matching.py +157 -0
  242. ultralytics/utils/__init__.py +1435 -0
  243. ultralytics/utils/autobatch.py +106 -0
  244. ultralytics/utils/autodevice.py +174 -0
  245. ultralytics/utils/benchmarks.py +695 -0
  246. ultralytics/utils/callbacks/__init__.py +5 -0
  247. ultralytics/utils/callbacks/base.py +234 -0
  248. ultralytics/utils/callbacks/clearml.py +153 -0
  249. ultralytics/utils/callbacks/comet.py +552 -0
  250. ultralytics/utils/callbacks/dvc.py +205 -0
  251. ultralytics/utils/callbacks/hub.py +108 -0
  252. ultralytics/utils/callbacks/mlflow.py +138 -0
  253. ultralytics/utils/callbacks/neptune.py +140 -0
  254. ultralytics/utils/callbacks/raytune.py +43 -0
  255. ultralytics/utils/callbacks/tensorboard.py +132 -0
  256. ultralytics/utils/callbacks/wb.py +185 -0
  257. ultralytics/utils/checks.py +897 -0
  258. ultralytics/utils/dist.py +119 -0
  259. ultralytics/utils/downloads.py +499 -0
  260. ultralytics/utils/errors.py +43 -0
  261. ultralytics/utils/export.py +219 -0
  262. ultralytics/utils/files.py +221 -0
  263. ultralytics/utils/instance.py +499 -0
  264. ultralytics/utils/loss.py +813 -0
  265. ultralytics/utils/metrics.py +1356 -0
  266. ultralytics/utils/ops.py +885 -0
  267. ultralytics/utils/patches.py +143 -0
  268. ultralytics/utils/plotting.py +1011 -0
  269. ultralytics/utils/tal.py +416 -0
  270. ultralytics/utils/torch_utils.py +990 -0
  271. ultralytics/utils/triton.py +116 -0
  272. ultralytics/utils/tuner.py +159 -0
ultralytics/solutions/parking_management.py
@@ -0,0 +1,273 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import json
+
+ import cv2
+ import numpy as np
+
+ from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+ from ultralytics.utils import LOGGER
+ from ultralytics.utils.checks import check_imshow
+
+
+ class ParkingPtsSelection:
+     """
+     A class for selecting and managing parking zone points on images using a Tkinter-based UI.
+
+     This class provides functionality to upload an image, select points to define parking zones, and save the
+     selected points to a JSON file. It uses Tkinter for the graphical user interface.
+
+     Attributes:
+         tk (module): The Tkinter module for GUI operations.
+         filedialog (module): Tkinter's filedialog module for file selection operations.
+         messagebox (module): Tkinter's messagebox module for displaying message boxes.
+         master (tk.Tk): The main Tkinter window.
+         canvas (tk.Canvas): The canvas widget for displaying the image and drawing bounding boxes.
+         image (PIL.Image.Image): The uploaded image.
+         canvas_image (ImageTk.PhotoImage): The image displayed on the canvas.
+         rg_data (List[List[Tuple[int, int]]]): List of bounding boxes, each defined by 4 points.
+         current_box (List[Tuple[int, int]]): Temporary storage for the points of the current bounding box.
+         imgw (int): Original width of the uploaded image.
+         imgh (int): Original height of the uploaded image.
+         canvas_max_width (int): Maximum width of the canvas.
+         canvas_max_height (int): Maximum height of the canvas.
+
+     Methods:
+         initialize_properties: Initializes the necessary properties.
+         upload_image: Uploads an image, resizes it to fit the canvas, and displays it.
+         on_canvas_click: Handles mouse clicks to add points for bounding boxes.
+         draw_box: Draws a bounding box on the canvas.
+         remove_last_bounding_box: Removes the last bounding box and redraws the canvas.
+         redraw_canvas: Redraws the canvas with the image and all bounding boxes.
+         save_to_json: Saves the bounding boxes to a JSON file.
+
+     Examples:
+         >>> parking_selector = ParkingPtsSelection()
+         >>> # Use the GUI to upload an image, select parking zones, and save the data
+     """
+
+     def __init__(self):
+         """Initialize the ParkingPtsSelection class, setting up UI and properties for parking zone point selection."""
+         try:  # check if tkinter installed
+             import tkinter as tk
+             from tkinter import filedialog, messagebox
+         except ImportError:  # Display error with recommendations
+             import platform
+
+             install_cmd = {
+                 "Linux": "sudo apt install python3-tk (Debian/Ubuntu) | sudo dnf install python3-tkinter (Fedora) | "
+                 "sudo pacman -S tk (Arch)",
+                 "Windows": "reinstall Python and enable the checkbox `tcl/tk and IDLE` on **Optional Features** during installation",
+                 "Darwin": "reinstall Python from https://www.python.org/downloads/macos/ or `brew install python-tk`",
+             }.get(platform.system(), "Unknown OS. Check your Python installation.")
+
+             LOGGER.warning(f" Tkinter is not configured or supported. Potential fix: {install_cmd}")
+             return
+
+         if not check_imshow(warn=True):
+             return
+
+         self.tk, self.filedialog, self.messagebox = tk, filedialog, messagebox
+         self.master = self.tk.Tk()  # Reference to the main application window or parent widget
+         self.master.title("Ultralytics Parking Zones Points Selector")
+         self.master.resizable(False, False)
+
+         self.canvas = self.tk.Canvas(self.master, bg="white")  # Canvas widget for displaying images or graphics
+         self.canvas.pack(side=self.tk.BOTTOM)
+
+         self.image = None  # Variable to store the loaded image
+         self.canvas_image = None  # Reference to the image displayed on the canvas
+         self.canvas_max_width = None  # Maximum allowed width for the canvas
+         self.canvas_max_height = None  # Maximum allowed height for the canvas
+         self.rg_data = None  # Data related to region or annotation management
+         self.current_box = None  # Stores the currently selected or active bounding box
+         self.imgh = None  # Height of the current image
+         self.imgw = None  # Width of the current image
+
+         # Button frame with buttons
+         button_frame = self.tk.Frame(self.master)
+         button_frame.pack(side=self.tk.TOP)
+
+         for text, cmd in [
+             ("Upload Image", self.upload_image),
+             ("Remove Last BBox", self.remove_last_bounding_box),
+             ("Save", self.save_to_json),
+         ]:
+             self.tk.Button(button_frame, text=text, command=cmd).pack(side=self.tk.LEFT)
+
+         self.initialize_properties()
+         self.master.mainloop()
+
+     def initialize_properties(self):
+         """Initialize properties for image, canvas, bounding boxes, and dimensions."""
+         self.image = self.canvas_image = None
+         self.rg_data, self.current_box = [], []
+         self.imgw = self.imgh = 0
+         self.canvas_max_width, self.canvas_max_height = 1280, 720
+
+     def upload_image(self):
+         """Upload and display an image on the canvas, resizing it to fit within specified dimensions."""
+         from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package
+
+         self.image = Image.open(self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")]))
+         if not self.image:
+             return
+
+         self.imgw, self.imgh = self.image.size
+         aspect_ratio = self.imgw / self.imgh
+         canvas_width = (
+             min(self.canvas_max_width, self.imgw) if aspect_ratio > 1 else int(self.canvas_max_height * aspect_ratio)
+         )
+         canvas_height = (
+             min(self.canvas_max_height, self.imgh) if aspect_ratio <= 1 else int(canvas_width / aspect_ratio)
+         )
+
+         self.canvas.config(width=canvas_width, height=canvas_height)
+         self.canvas_image = ImageTk.PhotoImage(self.image.resize((canvas_width, canvas_height)))
+         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+         self.canvas.bind("<Button-1>", self.on_canvas_click)
+
+         self.rg_data.clear(), self.current_box.clear()
+
+     def on_canvas_click(self, event):
+         """Handle mouse clicks to add points for bounding boxes on the canvas."""
+         self.current_box.append((event.x, event.y))
+         self.canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
+         if len(self.current_box) == 4:
+             self.rg_data.append(self.current_box.copy())
+             self.draw_box(self.current_box)
+             self.current_box.clear()
+
+     def draw_box(self, box):
+         """Draw a bounding box on the canvas using the provided coordinates."""
+         for i in range(4):
+             self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2)
+
+     def remove_last_bounding_box(self):
+         """Remove the last bounding box from the list and redraw the canvas."""
+         if not self.rg_data:
+             self.messagebox.showwarning("Warning", "No bounding boxes to remove.")
+             return
+         self.rg_data.pop()
+         self.redraw_canvas()
+
+     def redraw_canvas(self):
+         """Redraw the canvas with the image and all bounding boxes."""
+         self.canvas.delete("all")
+         self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+         for box in self.rg_data:
+             self.draw_box(box)
+
+     def save_to_json(self):
+         """Save the selected parking zone points to a JSON file with scaled coordinates."""
+         scale_w, scale_h = self.imgw / self.canvas.winfo_width(), self.imgh / self.canvas.winfo_height()
+         data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in self.rg_data]
+
+         from io import StringIO  # Function level import, as it's only required to store coordinates, not every frame
+
+         write_buffer = StringIO()
+         json.dump(data, write_buffer, indent=4)
+         with open("bounding_boxes.json", "w", encoding="utf-8") as f:
+             f.write(write_buffer.getvalue())
+         self.messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
+
+
+ class ParkingManagement(BaseSolution):
+     """
+     Manages parking occupancy and availability using YOLO model for real-time monitoring and visualization.
+
+     This class extends BaseSolution to provide functionality for parking lot management, including detection of
+     occupied spaces, visualization of parking regions, and display of occupancy statistics.
+
+     Attributes:
+         json_file (str): Path to the JSON file containing parking region details.
+         json (List[Dict]): Loaded JSON data containing parking region information.
+         pr_info (Dict[str, int]): Dictionary storing parking information (Occupancy and Available spaces).
+         arc (Tuple[int, int, int]): RGB color tuple for available region visualization.
+         occ (Tuple[int, int, int]): RGB color tuple for occupied region visualization.
+         dc (Tuple[int, int, int]): RGB color tuple for centroid visualization of detected objects.
+
+     Methods:
+         process: Processes the input image for parking lot management and visualization.
+
+     Examples:
+         >>> from ultralytics.solutions import ParkingManagement
+         >>> parking_manager = ParkingManagement(model="yolo11n.pt", json_file="parking_regions.json")
+         >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
+         >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
+     """
+
+     def __init__(self, **kwargs):
+         """Initialize the parking management system with a YOLO model and visualization settings."""
+         super().__init__(**kwargs)
+
+         self.json_file = self.CFG["json_file"]  # Load parking regions JSON data
+         if self.json_file is None:
+             LOGGER.warning("json_file argument missing. Parking region details required.")
+             raise ValueError("❌ Json file path can not be empty")
+
+         with open(self.json_file) as f:
+             self.json = json.load(f)
+
+         self.pr_info = {"Occupancy": 0, "Available": 0}  # dictionary for parking information
+
+         self.arc = (0, 0, 255)  # available region color
+         self.occ = (0, 255, 0)  # occupied region color
+         self.dc = (255, 0, 189)  # centroid color for each box
+
+     def process(self, im0):
+         """
+         Process the input image for parking lot management and visualization.
+
+         This function analyzes the input image, extracts tracks, and determines the occupancy status of parking
+         regions defined in the JSON file. It annotates the image with occupied and available parking spots,
+         and updates the parking information.
+
+         Args:
+             im0 (np.ndarray): The input inference image.
+
+         Returns:
+             (SolutionResults): Contains processed image `plot_im`, 'filled_slots' (number of occupied parking slots),
+                 'available_slots' (number of available parking slots), and 'total_tracks' (total number of tracked objects).
+
+         Examples:
+             >>> parking_manager = ParkingManagement(json_file="parking_regions.json")
+             >>> image = cv2.imread("parking_lot.jpg")
+             >>> results = parking_manager.process(image)
+         """
+         self.extract_tracks(im0)  # extract tracks from im0
+         es, fs = len(self.json), 0  # empty slots, filled slots
+         annotator = SolutionAnnotator(im0, self.line_width)  # init annotator
+
+         for region in self.json:
+             # Convert points to a NumPy array with the correct dtype and reshape properly
+             pts_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
+             rg_occupied = False  # occupied region initialization
+             for box, cls in zip(self.boxes, self.clss):
+                 xc, yc = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
+                 dist = cv2.pointPolygonTest(pts_array, (xc, yc), False)
+                 if dist >= 0:
+                     # cv2.circle(im0, (xc, yc), radius=self.line_width * 4, color=self.dc, thickness=-1)
+                     annotator.display_objects_labels(
+                         im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
+                     )
+                     rg_occupied = True
+                     break
+             fs, es = (fs + 1, es - 1) if rg_occupied else (fs, es)
+             # Plotting regions
+             cv2.polylines(im0, [pts_array], isClosed=True, color=self.occ if rg_occupied else self.arc, thickness=2)
+
+         self.pr_info["Occupancy"], self.pr_info["Available"] = fs, es
+
+         annotator.display_analytics(im0, self.pr_info, (104, 31, 17), (255, 255, 255), 10)
+
+         plot_im = annotator.result()
+         self.display_output(plot_im)  # display output with base class function
+
+         # Return SolutionResults
+         return SolutionResults(
+             plot_im=plot_im,
+             filled_slots=self.pr_info["Occupancy"],
+             available_slots=self.pr_info["Available"],
+             total_tracks=len(self.track_ids),
+         )
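Taken together, ParkingPtsSelection produces the bounding_boxes.json region file that ParkingManagement consumes. Below is a minimal end-to-end usage sketch, not part of the packaged diff: the video path and the yolo11n.pt weights are placeholders, and it assumes the keywords and SolutionResults attributes shown in the docstrings above (model, json_file, filled_slots, available_slots).

import cv2

from ultralytics import solutions

# Step 1 (one-off, needs a desktop session): open the Tkinter tool and save regions to bounding_boxes.json
# solutions.ParkingPtsSelection()

# Step 2: run occupancy monitoring on a video using the saved regions
parking_manager = solutions.ParkingManagement(model="yolo11n.pt", json_file="bounding_boxes.json")

cap = cv2.VideoCapture("parking_lot.mp4")  # placeholder video path
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    results = parking_manager.process(frame)  # annotated frame available as results.plot_im
    print(f"Occupied: {results.filled_slots}, Available: {results.available_slots}")
cap.release()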
ultralytics/solutions/queue_management.py
@@ -0,0 +1,93 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+ from ultralytics.utils.plotting import colors
+
+
+ class QueueManager(BaseSolution):
+     """
+     Manages queue counting in real-time video streams based on object tracks.
+
+     This class extends BaseSolution to provide functionality for tracking and counting objects within a specified
+     region in video frames.
+
+     Attributes:
+         counts (int): The current count of objects in the queue.
+         rect_color (Tuple[int, int, int]): RGB color tuple for drawing the queue region rectangle.
+         region_length (int): The number of points defining the queue region.
+         track_line (List[Tuple[int, int]]): List of track line coordinates.
+         track_history (Dict[int, List[Tuple[int, int]]]): Dictionary storing tracking history for each object.
+
+     Methods:
+         initialize_region: Initializes the queue region.
+         process: Processes a single frame for queue management.
+         extract_tracks: Extracts object tracks from the current frame.
+         store_tracking_history: Stores the tracking history for an object.
+         display_output: Displays the processed output.
+
+     Examples:
+         >>> cap = cv2.VideoCapture("path/to/video.mp4")
+         >>> queue_manager = QueueManager(region=[100, 100, 200, 200, 300, 300])
+         >>> while cap.isOpened():
+         >>>     success, im0 = cap.read()
+         >>>     if not success:
+         >>>         break
+         >>>     results = queue_manager.process(im0)
+     """
+
+     def __init__(self, **kwargs):
+         """Initializes the QueueManager with parameters for tracking and counting objects in a video stream."""
+         super().__init__(**kwargs)
+         self.initialize_region()
+         self.counts = 0  # Queue counts information
+         self.rect_color = (255, 255, 255)  # Rectangle color for visualization
+         self.region_length = len(self.region)  # Store region length for further usage
+
+     def process(self, im0):
+         """
+         Process queue management for a single frame of video.
+
+         Args:
+             im0 (numpy.ndarray): Input image for processing, typically a frame from a video stream.
+
+         Returns:
+             (SolutionResults): Contains processed image `im0`, 'queue_count' (int, number of objects in the queue) and
+                 'total_tracks' (int, total number of tracked objects).
+
+         Examples:
+             >>> queue_manager = QueueManager()
+             >>> frame = cv2.imread("frame.jpg")
+             >>> results = queue_manager.process(frame)
+         """
+         self.counts = 0  # Reset counts every frame
+         self.extract_tracks(im0)  # Extract tracks from the current frame
+         annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator
+         annotator.draw_region(reg_pts=self.region, color=self.rect_color, thickness=self.line_width * 2)  # Draw region
+
+         for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
+             # Draw bounding box and counting region
+             annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(track_id, True))
+             self.store_tracking_history(track_id, box)  # Store track history
+
+             # Cache frequently accessed attributes
+             track_history = self.track_history.get(track_id, [])
+
+             # Store previous position of track and check if the object is inside the counting region
+             prev_position = None
+             if len(track_history) > 1:
+                 prev_position = track_history[-2]
+             if self.region_length >= 3 and prev_position and self.r_s.contains(self.Point(self.track_line[-1])):
+                 self.counts += 1
+
+         # Display queue counts
+         annotator.queue_counts_display(
+             f"Queue Counts : {str(self.counts)}",
+             points=self.region,
+             region_color=self.rect_color,
+             txt_color=(104, 31, 17),
+         )
+         plot_im = annotator.result()
+         self.display_output(plot_im)  # Display output with base class function
+
+         # Return a SolutionResults object with processed data
+         return SolutionResults(plot_im=plot_im, queue_count=self.counts, total_tracks=len(self.track_ids))
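For context, a minimal QueueManager usage sketch (not part of the packaged diff): the video path and weights are placeholders, the region points are an assumed example polygon of (x, y) tuples, and per the code above a region of three or more points is needed for counting; queue_count is read from the SolutionResults keyword of the same name.

import cv2

from ultralytics import solutions

queue_manager = solutions.QueueManager(
    model="yolo11n.pt",  # placeholder weights
    region=[(20, 400), (1080, 400), (1080, 360), (20, 360)],  # assumed 4-point queue region
)

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder video path
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    results = queue_manager.process(im0)
    print(f"Objects currently in queue: {results.queue_count}")
cap.release()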
ultralytics/solutions/region_counter.py
@@ -0,0 +1,120 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import numpy as np
+
+ from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+ from ultralytics.utils.plotting import colors
+
+
+ class RegionCounter(BaseSolution):
+     """
+     A class for real-time counting of objects within user-defined regions in a video stream.
+
+     This class inherits from `BaseSolution` and provides functionality to define polygonal regions in a video frame,
+     track objects, and count those objects that pass through each defined region. Useful for applications requiring
+     counting in specified areas, such as monitoring zones or segmented sections.
+
+     Attributes:
+         region_template (dict): Template for creating new counting regions with default attributes including name,
+             polygon coordinates, and display colors.
+         counting_regions (list): List storing all defined regions, where each entry is based on `region_template`
+             and includes specific region settings like name, coordinates, and color.
+         region_counts (dict): Dictionary storing the count of objects for each named region.
+
+     Methods:
+         add_region: Adds a new counting region with specified attributes.
+         process: Processes video frames to count objects in each region.
+     """
+
+     def __init__(self, **kwargs):
+         """Initializes the RegionCounter class for real-time counting in different regions of video streams."""
+         super().__init__(**kwargs)
+         self.region_template = {
+             "name": "Default Region",
+             "polygon": None,
+             "counts": 0,
+             "dragging": False,
+             "region_color": (255, 255, 255),
+             "text_color": (0, 0, 0),
+         }
+         self.region_counts = {}
+         self.counting_regions = []
+
+     def add_region(self, name, polygon_points, region_color, text_color):
+         """
+         Add a new region to the counting list based on the provided template with specific attributes.
+
+         Args:
+             name (str): Name assigned to the new region.
+             polygon_points (List[Tuple]): List of (x, y) coordinates defining the region's polygon.
+             region_color (tuple): BGR color for region visualization.
+             text_color (tuple): BGR color for the text within the region.
+         """
+         region = self.region_template.copy()
+         region.update(
+             {
+                 "name": name,
+                 "polygon": self.Polygon(polygon_points),
+                 "region_color": region_color,
+                 "text_color": text_color,
+             }
+         )
+         self.counting_regions.append(region)
+
+     def process(self, im0):
+         """
+         Process the input frame to detect and count objects within each defined region.
+
+         Args:
+             im0 (np.ndarray): Input image frame where objects and regions are annotated.
+
+         Returns:
+             (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects),
+                 and 'region_counts' (dict, counts of objects per region).
+         """
+         self.extract_tracks(im0)
+         annotator = SolutionAnnotator(im0, line_width=self.line_width)
+
+         # Ensure self.region is initialized and structured as a dictionary
+         if not isinstance(self.region, dict):
+             self.region = {"Region#01": self.region or self.initialize_region()}
+
+         # Draw only valid regions
+         for idx, (region_name, reg_pts) in enumerate(self.region.items(), start=1):
+             color = colors(idx, True)
+             annotator.draw_region(reg_pts, color, self.line_width * 2)
+             self.add_region(region_name, reg_pts, color, annotator.get_txt_color())
+
+         # Prepare regions for containment check (only process valid ones)
+         for region in self.counting_regions:
+             if "prepared_polygon" not in region:
+                 region["prepared_polygon"] = self.prep(region["polygon"])
+
+         # Convert bounding boxes to NumPy array for center points
+         boxes_np = np.array([((box[0] + box[2]) / 2, (box[1] + box[3]) / 2) for box in self.boxes], dtype=np.float32)
+         points = [self.Point(pt) for pt in boxes_np]  # Convert centers to Point objects
+
+         # Process bounding boxes & check containment
+         if points:
+             for point, cls, track_id, box, conf in zip(points, self.clss, self.track_ids, self.boxes, self.confs):
+                 annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(track_id, True))
+
+                 for region in self.counting_regions:
+                     if region["prepared_polygon"].contains(point):
+                         region["counts"] += 1
+                         self.region_counts[region["name"]] = region["counts"]
+
+         # Display region counts
+         for region in self.counting_regions:
+             annotator.text_label(
+                 region["polygon"].bounds,
+                 label=str(region["counts"]),
+                 color=region["region_color"],
+                 txt_color=region["text_color"],
+                 margin=self.line_width * 4,
+             )
+             region["counts"] = 0  # Reset for next frame
+         plot_im = annotator.result()
+         self.display_output(plot_im)
+
+         return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids), region_counts=self.region_counts)
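A minimal RegionCounter usage sketch (not part of the packaged diff): based on the process code above, it assumes region may be passed as a dict mapping region names to point lists; the region names, coordinates, weights, and video path below are all placeholders.

import cv2

from ultralytics import solutions

region_counter = solutions.RegionCounter(
    model="yolo11n.pt",  # placeholder weights
    region={
        "Entrance": [(50, 50), (400, 50), (400, 300), (50, 300)],    # hypothetical zone
        "Checkout": [(500, 50), (900, 50), (900, 300), (500, 300)],  # hypothetical zone
    },
)

cap = cv2.VideoCapture("store.mp4")  # placeholder video path
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    results = region_counter.process(im0)
    print(results.region_counts)  # e.g. {"Entrance": 3, "Checkout": 1}
cap.release()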
ultralytics/solutions/security_alarm.py
@@ -0,0 +1,154 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
+ from ultralytics.utils import LOGGER
+ from ultralytics.utils.plotting import colors
+
+
+ class SecurityAlarm(BaseSolution):
+     """
+     A class to manage security alarm functionalities for real-time monitoring.
+
+     This class extends the BaseSolution class and provides features to monitor objects in a frame, send email
+     notifications when specific thresholds are exceeded for total detections, and annotate the output frame for
+     visualization.
+
+     Attributes:
+         email_sent (bool): Flag to track if an email has already been sent for the current event.
+         records (int): Threshold for the number of detected objects to trigger an alert.
+         server (smtplib.SMTP): SMTP server connection for sending email alerts.
+         to_email (str): Recipient's email address for alerts.
+         from_email (str): Sender's email address for alerts.
+
+     Methods:
+         authenticate: Set up email server authentication for sending alerts.
+         send_email: Send an email notification with details and an image attachment.
+         process: Monitor the frame, process detections, and trigger alerts if thresholds are crossed.
+
+     Examples:
+         >>> security = SecurityAlarm()
+         >>> security.authenticate("abc@gmail.com", "1111222233334444", "xyz@gmail.com")
+         >>> frame = cv2.imread("frame.jpg")
+         >>> results = security.process(frame)
+     """
+
+     def __init__(self, **kwargs):
+         """
+         Initialize the SecurityAlarm class with parameters for real-time object monitoring.
+
+         Args:
+             **kwargs (Any): Additional keyword arguments passed to the parent class.
+         """
+         super().__init__(**kwargs)
+         self.email_sent = False
+         self.records = self.CFG["records"]
+         self.server = None
+         self.to_email = ""
+         self.from_email = ""
+
+     def authenticate(self, from_email, password, to_email):
+         """
+         Authenticate the email server for sending alert notifications.
+
+         Args:
+             from_email (str): Sender's email address.
+             password (str): Password for the sender's email account.
+             to_email (str): Recipient's email address.
+
+         This method initializes a secure connection with the SMTP server and logs in using the provided credentials.
+
+         Examples:
+             >>> alarm = SecurityAlarm()
+             >>> alarm.authenticate("sender@example.com", "password123", "recipient@example.com")
+         """
+         import smtplib
+
+         self.server = smtplib.SMTP("smtp.gmail.com: 587")
+         self.server.starttls()
+         self.server.login(from_email, password)
+         self.to_email = to_email
+         self.from_email = from_email
+
+     def send_email(self, im0, records=5):
+         """
+         Send an email notification with an image attachment indicating the number of objects detected.
+
+         Args:
+             im0 (numpy.ndarray): The input image or frame to be attached to the email.
+             records (int): The number of detected objects to be included in the email message.
+
+         This method encodes the input image, composes the email message with details about the detection, and sends it
+         to the specified recipient.
+
+         Examples:
+             >>> alarm = SecurityAlarm()
+             >>> frame = cv2.imread("path/to/image.jpg")
+             >>> alarm.send_email(frame, records=10)
+         """
+         from email.mime.image import MIMEImage
+         from email.mime.multipart import MIMEMultipart
+         from email.mime.text import MIMEText
+
+         import cv2
+
+         img_bytes = cv2.imencode(".jpg", im0)[1].tobytes()  # Encode the image as JPEG
+
+         # Create the email
+         message = MIMEMultipart()
+         message["From"] = self.from_email
+         message["To"] = self.to_email
+         message["Subject"] = "Security Alert"
+
+         # Add the text message body
+         message_body = f"Ultralytics ALERT!!! {records} objects have been detected!!"
+         message.attach(MIMEText(message_body))
+
+         # Attach the image
+         image_attachment = MIMEImage(img_bytes, name="ultralytics.jpg")
+         message.attach(image_attachment)
+
+         # Send the email
+         try:
+             self.server.send_message(message)
+             LOGGER.info("Email sent successfully!")
+         except Exception as e:
+             LOGGER.error(f"Failed to send email: {e}")
+
+     def process(self, im0):
+         """
+         Monitor the frame, process object detections, and trigger alerts if thresholds are exceeded.
+
+         Args:
+             im0 (numpy.ndarray): The input image or frame to be processed and annotated.
+
+         Returns:
+             (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (total number of tracked objects) and
+                 'email_sent' (whether an email alert was triggered).
+
+         This method processes the input frame, extracts detections, annotates the frame with bounding boxes, and sends
+         an email notification if the number of detected objects surpasses the specified threshold and an alert has not
+         already been sent.
+
+         Examples:
+             >>> alarm = SecurityAlarm()
+             >>> frame = cv2.imread("path/to/image.jpg")
+             >>> results = alarm.process(frame)
+         """
+         self.extract_tracks(im0)  # Extract tracks
+         annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator
+
+         # Iterate over bounding boxes and classes index
+         for box, cls in zip(self.boxes, self.clss):
+             # Draw bounding box
+             annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
+
+         total_det = len(self.clss)
+         if total_det >= self.records and not self.email_sent:  # Only send email if not sent before
+             self.send_email(im0, total_det)
+             self.email_sent = True
+
+         plot_im = annotator.result()
+         self.display_output(plot_im)  # Display output with base class function
+
+         # Return a SolutionResults
+         return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids), email_sent=self.email_sent)
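A minimal SecurityAlarm usage sketch (not part of the packaged diff): the addresses, app password, weights, and video path are placeholders, and passing records as a constructor keyword is an assumption inferred from self.CFG["records"] above. Since authenticate connects to Gmail's SMTP server, a Gmail app password (not the account password) is typically required.

import cv2

from ultralytics import solutions

alarm = solutions.SecurityAlarm(model="yolo11n.pt", records=5)  # alert when 5 or more objects are detected
alarm.authenticate("sender@example.com", "app-password-here", "recipient@example.com")  # placeholder credentials

cap = cv2.VideoCapture("entrance.mp4")  # placeholder video path
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    results = alarm.process(im0)  # sends at most one email once the threshold is crossed
cap.release()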