dgenerate-ultralytics-headless 8.3.134__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
  2. dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
  3. dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +22 -0
  8. tests/conftest.py +83 -0
  9. tests/test_cli.py +138 -0
  10. tests/test_cuda.py +215 -0
  11. tests/test_engine.py +131 -0
  12. tests/test_exports.py +236 -0
  13. tests/test_integrations.py +154 -0
  14. tests/test_python.py +694 -0
  15. tests/test_solutions.py +187 -0
  16. ultralytics/__init__.py +30 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1023 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +77 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +443 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/VOC.yaml +106 -0
  29. ultralytics/cfg/datasets/VisDrone.yaml +77 -0
  30. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  31. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  32. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  33. ultralytics/cfg/datasets/coco-pose.yaml +42 -0
  34. ultralytics/cfg/datasets/coco.yaml +118 -0
  35. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  36. ultralytics/cfg/datasets/coco128.yaml +101 -0
  37. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  38. ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
  39. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  40. ultralytics/cfg/datasets/coco8.yaml +101 -0
  41. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  42. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  43. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  44. ultralytics/cfg/datasets/dota8.yaml +35 -0
  45. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  46. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  47. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  48. ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
  49. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  50. ultralytics/cfg/datasets/signature.yaml +21 -0
  51. ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
  52. ultralytics/cfg/datasets/xView.yaml +155 -0
  53. ultralytics/cfg/default.yaml +127 -0
  54. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  55. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  56. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  57. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  58. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  59. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  60. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  61. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  62. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  63. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  64. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  65. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  68. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  69. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  70. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  71. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  74. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  75. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  77. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  78. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  79. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  80. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  81. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  82. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  83. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
  84. ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
  85. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  86. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  87. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  88. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  89. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  90. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  91. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  92. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  93. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  94. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  95. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  96. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  97. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  99. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  100. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  102. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  103. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  105. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  106. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  109. ultralytics/cfg/trackers/botsort.yaml +22 -0
  110. ultralytics/cfg/trackers/bytetrack.yaml +14 -0
  111. ultralytics/data/__init__.py +26 -0
  112. ultralytics/data/annotator.py +66 -0
  113. ultralytics/data/augment.py +2945 -0
  114. ultralytics/data/base.py +438 -0
  115. ultralytics/data/build.py +258 -0
  116. ultralytics/data/converter.py +754 -0
  117. ultralytics/data/dataset.py +834 -0
  118. ultralytics/data/loaders.py +676 -0
  119. ultralytics/data/scripts/download_weights.sh +18 -0
  120. ultralytics/data/scripts/get_coco.sh +61 -0
  121. ultralytics/data/scripts/get_coco128.sh +18 -0
  122. ultralytics/data/scripts/get_imagenet.sh +52 -0
  123. ultralytics/data/split.py +125 -0
  124. ultralytics/data/split_dota.py +325 -0
  125. ultralytics/data/utils.py +777 -0
  126. ultralytics/engine/__init__.py +1 -0
  127. ultralytics/engine/exporter.py +1519 -0
  128. ultralytics/engine/model.py +1156 -0
  129. ultralytics/engine/predictor.py +502 -0
  130. ultralytics/engine/results.py +1840 -0
  131. ultralytics/engine/trainer.py +853 -0
  132. ultralytics/engine/tuner.py +243 -0
  133. ultralytics/engine/validator.py +377 -0
  134. ultralytics/hub/__init__.py +168 -0
  135. ultralytics/hub/auth.py +137 -0
  136. ultralytics/hub/google/__init__.py +176 -0
  137. ultralytics/hub/session.py +446 -0
  138. ultralytics/hub/utils.py +248 -0
  139. ultralytics/models/__init__.py +9 -0
  140. ultralytics/models/fastsam/__init__.py +7 -0
  141. ultralytics/models/fastsam/model.py +61 -0
  142. ultralytics/models/fastsam/predict.py +181 -0
  143. ultralytics/models/fastsam/utils.py +24 -0
  144. ultralytics/models/fastsam/val.py +40 -0
  145. ultralytics/models/nas/__init__.py +7 -0
  146. ultralytics/models/nas/model.py +102 -0
  147. ultralytics/models/nas/predict.py +58 -0
  148. ultralytics/models/nas/val.py +39 -0
  149. ultralytics/models/rtdetr/__init__.py +7 -0
  150. ultralytics/models/rtdetr/model.py +63 -0
  151. ultralytics/models/rtdetr/predict.py +84 -0
  152. ultralytics/models/rtdetr/train.py +85 -0
  153. ultralytics/models/rtdetr/val.py +191 -0
  154. ultralytics/models/sam/__init__.py +6 -0
  155. ultralytics/models/sam/amg.py +260 -0
  156. ultralytics/models/sam/build.py +358 -0
  157. ultralytics/models/sam/model.py +170 -0
  158. ultralytics/models/sam/modules/__init__.py +1 -0
  159. ultralytics/models/sam/modules/blocks.py +1129 -0
  160. ultralytics/models/sam/modules/decoders.py +515 -0
  161. ultralytics/models/sam/modules/encoders.py +854 -0
  162. ultralytics/models/sam/modules/memory_attention.py +299 -0
  163. ultralytics/models/sam/modules/sam.py +1006 -0
  164. ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
  165. ultralytics/models/sam/modules/transformer.py +351 -0
  166. ultralytics/models/sam/modules/utils.py +394 -0
  167. ultralytics/models/sam/predict.py +1605 -0
  168. ultralytics/models/utils/__init__.py +1 -0
  169. ultralytics/models/utils/loss.py +455 -0
  170. ultralytics/models/utils/ops.py +268 -0
  171. ultralytics/models/yolo/__init__.py +7 -0
  172. ultralytics/models/yolo/classify/__init__.py +7 -0
  173. ultralytics/models/yolo/classify/predict.py +88 -0
  174. ultralytics/models/yolo/classify/train.py +233 -0
  175. ultralytics/models/yolo/classify/val.py +215 -0
  176. ultralytics/models/yolo/detect/__init__.py +7 -0
  177. ultralytics/models/yolo/detect/predict.py +124 -0
  178. ultralytics/models/yolo/detect/train.py +217 -0
  179. ultralytics/models/yolo/detect/val.py +451 -0
  180. ultralytics/models/yolo/model.py +354 -0
  181. ultralytics/models/yolo/obb/__init__.py +7 -0
  182. ultralytics/models/yolo/obb/predict.py +66 -0
  183. ultralytics/models/yolo/obb/train.py +81 -0
  184. ultralytics/models/yolo/obb/val.py +283 -0
  185. ultralytics/models/yolo/pose/__init__.py +7 -0
  186. ultralytics/models/yolo/pose/predict.py +79 -0
  187. ultralytics/models/yolo/pose/train.py +154 -0
  188. ultralytics/models/yolo/pose/val.py +394 -0
  189. ultralytics/models/yolo/segment/__init__.py +7 -0
  190. ultralytics/models/yolo/segment/predict.py +113 -0
  191. ultralytics/models/yolo/segment/train.py +123 -0
  192. ultralytics/models/yolo/segment/val.py +428 -0
  193. ultralytics/models/yolo/world/__init__.py +5 -0
  194. ultralytics/models/yolo/world/train.py +119 -0
  195. ultralytics/models/yolo/world/train_world.py +176 -0
  196. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  197. ultralytics/models/yolo/yoloe/predict.py +169 -0
  198. ultralytics/models/yolo/yoloe/train.py +298 -0
  199. ultralytics/models/yolo/yoloe/train_seg.py +124 -0
  200. ultralytics/models/yolo/yoloe/val.py +191 -0
  201. ultralytics/nn/__init__.py +29 -0
  202. ultralytics/nn/autobackend.py +842 -0
  203. ultralytics/nn/modules/__init__.py +182 -0
  204. ultralytics/nn/modules/activation.py +53 -0
  205. ultralytics/nn/modules/block.py +1966 -0
  206. ultralytics/nn/modules/conv.py +712 -0
  207. ultralytics/nn/modules/head.py +880 -0
  208. ultralytics/nn/modules/transformer.py +713 -0
  209. ultralytics/nn/modules/utils.py +164 -0
  210. ultralytics/nn/tasks.py +1627 -0
  211. ultralytics/nn/text_model.py +351 -0
  212. ultralytics/solutions/__init__.py +41 -0
  213. ultralytics/solutions/ai_gym.py +116 -0
  214. ultralytics/solutions/analytics.py +252 -0
  215. ultralytics/solutions/config.py +106 -0
  216. ultralytics/solutions/distance_calculation.py +124 -0
  217. ultralytics/solutions/heatmap.py +127 -0
  218. ultralytics/solutions/instance_segmentation.py +84 -0
  219. ultralytics/solutions/object_blurrer.py +90 -0
  220. ultralytics/solutions/object_counter.py +195 -0
  221. ultralytics/solutions/object_cropper.py +84 -0
  222. ultralytics/solutions/parking_management.py +273 -0
  223. ultralytics/solutions/queue_management.py +93 -0
  224. ultralytics/solutions/region_counter.py +120 -0
  225. ultralytics/solutions/security_alarm.py +154 -0
  226. ultralytics/solutions/similarity_search.py +172 -0
  227. ultralytics/solutions/solutions.py +724 -0
  228. ultralytics/solutions/speed_estimation.py +110 -0
  229. ultralytics/solutions/streamlit_inference.py +196 -0
  230. ultralytics/solutions/templates/similarity-search.html +160 -0
  231. ultralytics/solutions/trackzone.py +88 -0
  232. ultralytics/solutions/vision_eye.py +68 -0
  233. ultralytics/trackers/__init__.py +7 -0
  234. ultralytics/trackers/basetrack.py +124 -0
  235. ultralytics/trackers/bot_sort.py +260 -0
  236. ultralytics/trackers/byte_tracker.py +480 -0
  237. ultralytics/trackers/track.py +125 -0
  238. ultralytics/trackers/utils/__init__.py +1 -0
  239. ultralytics/trackers/utils/gmc.py +376 -0
  240. ultralytics/trackers/utils/kalman_filter.py +493 -0
  241. ultralytics/trackers/utils/matching.py +157 -0
  242. ultralytics/utils/__init__.py +1435 -0
  243. ultralytics/utils/autobatch.py +106 -0
  244. ultralytics/utils/autodevice.py +174 -0
  245. ultralytics/utils/benchmarks.py +695 -0
  246. ultralytics/utils/callbacks/__init__.py +5 -0
  247. ultralytics/utils/callbacks/base.py +234 -0
  248. ultralytics/utils/callbacks/clearml.py +153 -0
  249. ultralytics/utils/callbacks/comet.py +552 -0
  250. ultralytics/utils/callbacks/dvc.py +205 -0
  251. ultralytics/utils/callbacks/hub.py +108 -0
  252. ultralytics/utils/callbacks/mlflow.py +138 -0
  253. ultralytics/utils/callbacks/neptune.py +140 -0
  254. ultralytics/utils/callbacks/raytune.py +43 -0
  255. ultralytics/utils/callbacks/tensorboard.py +132 -0
  256. ultralytics/utils/callbacks/wb.py +185 -0
  257. ultralytics/utils/checks.py +897 -0
  258. ultralytics/utils/dist.py +119 -0
  259. ultralytics/utils/downloads.py +499 -0
  260. ultralytics/utils/errors.py +43 -0
  261. ultralytics/utils/export.py +219 -0
  262. ultralytics/utils/files.py +221 -0
  263. ultralytics/utils/instance.py +499 -0
  264. ultralytics/utils/loss.py +813 -0
  265. ultralytics/utils/metrics.py +1356 -0
  266. ultralytics/utils/ops.py +885 -0
  267. ultralytics/utils/patches.py +143 -0
  268. ultralytics/utils/plotting.py +1011 -0
  269. ultralytics/utils/tal.py +416 -0
  270. ultralytics/utils/torch_utils.py +990 -0
  271. ultralytics/utils/triton.py +116 -0
  272. ultralytics/utils/tuner.py +159 -0
ultralytics/solutions/solutions.py
@@ -0,0 +1,724 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import math
+from collections import defaultdict
+
+import cv2
+import numpy as np
+
+from ultralytics import YOLO
+from ultralytics.solutions.config import SolutionConfig
+from ultralytics.utils import ASSETS_URL, LOGGER
+from ultralytics.utils.checks import check_imshow, check_requirements
+from ultralytics.utils.plotting import Annotator
+
+
+class BaseSolution:
+    """
+    A base class for managing Ultralytics Solutions.
+
+    This class provides core functionality for various Ultralytics Solutions, including model loading, object tracking,
+    and region initialization.
+
+    Attributes:
+        LineString (shapely.geometry.LineString): Class for creating line string geometries.
+        Polygon (shapely.geometry.Polygon): Class for creating polygon geometries.
+        Point (shapely.geometry.Point): Class for creating point geometries.
+        CFG (dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
+        region (List[Tuple[int, int]]): List of coordinate tuples defining a region of interest.
+        line_width (int): Width of lines used in visualizations.
+        model (ultralytics.YOLO): Loaded YOLO model instance.
+        names (Dict[int, str]): Dictionary mapping class indices to class names.
+        env_check (bool): Flag indicating whether the environment supports image display.
+        track_history (collections.defaultdict): Dictionary to store tracking history for each object.
+
+    Methods:
+        extract_tracks: Apply object tracking and extract tracks from an input image.
+        store_tracking_history: Store object tracking history for a given track ID and bounding box.
+        initialize_region: Initialize the counting region and line segment based on configuration.
+        display_output: Display the results of processing, including showing frames or saving results.
+
+    Examples:
+        >>> solution = BaseSolution(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+        >>> solution.initialize_region()
+        >>> image = cv2.imread("image.jpg")
+        >>> solution.extract_tracks(image)
+        >>> solution.display_output(image)
+    """
+
+    def __init__(self, is_cli=False, **kwargs):
+        """
+        Initializes the BaseSolution class with configuration settings and the YOLO model.
+
+        Args:
+            is_cli (bool): Enables CLI mode if set to True.
+            **kwargs (Any): Additional configuration parameters that override defaults.
+        """
+        self.CFG = vars(SolutionConfig().update(**kwargs))
+        self.LOGGER = LOGGER  # Store logger object to be used in multiple solution classes
+
+        if self.__class__.__name__ != "VisualAISearch":
+            check_requirements("shapely>=2.0.0")
+            from shapely.geometry import LineString, Point, Polygon
+            from shapely.prepared import prep
+
+            self.LineString = LineString
+            self.Polygon = Polygon
+            self.Point = Point
+            self.prep = prep
+            self.annotator = None  # Initialize annotator
+            self.tracks = None
+            self.track_data = None
+            self.boxes = []
+            self.clss = []
+            self.track_ids = []
+            self.track_line = None
+            self.masks = None
+            self.r_s = None
+
+            self.LOGGER.info(f"Ultralytics Solutions: ✅ {self.CFG}")
+            self.region = self.CFG["region"]  # Store region data for other classes usage
+            self.line_width = self.CFG["line_width"]
+
+            # Load Model and store additional information (classes, show_conf, show_label)
+            if self.CFG["model"] is None:
+                self.CFG["model"] = "yolo11n.pt"
+            self.model = YOLO(self.CFG["model"])
+            self.names = self.model.names
+            self.classes = self.CFG["classes"]
+            self.show_conf = self.CFG["show_conf"]
+            self.show_labels = self.CFG["show_labels"]
+
+            self.track_add_args = {  # Tracker additional arguments for advanced configuration
+                k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker", "verbose"]
+            }  # verbose must be passed to the track method; setting it False in YOLO still logs the track information
+
+            if is_cli and self.CFG["source"] is None:
+                d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
+                self.LOGGER.warning(f"source not provided. using default source {ASSETS_URL}/{d_s}")
+                from ultralytics.utils.downloads import safe_download
+
+                safe_download(f"{ASSETS_URL}/{d_s}")  # download source from ultralytics assets
+                self.CFG["source"] = d_s  # set default source
+
+            # Initialize environment and region setup
+            self.env_check = check_imshow(warn=True)
+            self.track_history = defaultdict(list)
+
+    def adjust_box_label(self, cls, conf, track_id=None):
+        """
+        Generates a formatted label for a bounding box.
+
+        This method constructs a label string for a bounding box using the class index and confidence score.
+        Optionally includes the track ID if provided. The label format adapts based on the display settings
+        defined in `self.show_conf` and `self.show_labels`.
+
+        Args:
+            cls (int): The class index of the detected object.
+            conf (float): The confidence score of the detection.
+            track_id (int, optional): The unique identifier for the tracked object. Defaults to None.
+
+        Returns:
+            (str or None): The formatted label string if `self.show_labels` is True; otherwise, None.
+        """
+        name = ("" if track_id is None else f"{track_id} ") + self.names[cls]
+        return (f"{name} {conf:.2f}" if self.show_conf else name) if self.show_labels else None
+
+    def extract_tracks(self, im0):
+        """
+        Applies object tracking and extracts tracks from an input image or frame.
+
+        Args:
+            im0 (np.ndarray): The input image or frame.
+
+        Examples:
+            >>> solution = BaseSolution()
+            >>> frame = cv2.imread("path/to/image.jpg")
+            >>> solution.extract_tracks(frame)
+        """
+        self.tracks = self.model.track(source=im0, persist=True, classes=self.classes, **self.track_add_args)
+        self.track_data = self.tracks[0].obb or self.tracks[0].boxes  # Extract tracks for OBB or object detection
+
+        if self.track_data and self.track_data.id is not None:
+            self.boxes = self.track_data.xyxy.cpu()
+            self.clss = self.track_data.cls.cpu().tolist()
+            self.track_ids = self.track_data.id.int().cpu().tolist()
+            self.confs = self.track_data.conf.cpu().tolist()
+        else:
+            self.LOGGER.warning("no tracks found!")
+            self.boxes, self.clss, self.track_ids, self.confs = [], [], [], []
+
+    def store_tracking_history(self, track_id, box, is_obb=False):
+        """
+        Stores the tracking history of an object.
+
+        This method updates the tracking history for a given object by appending the center point of its
+        bounding box to the track line. It maintains a maximum of 30 points in the tracking history.
+
+        Args:
+            track_id (int): The unique identifier for the tracked object.
+            box (List[float]): The bounding box coordinates of the object in the format [x1, y1, x2, y2].
+            is_obb (bool): True if OBB model is used (applies to object counting only).
+
+        Examples:
+            >>> solution = BaseSolution()
+            >>> solution.store_tracking_history(1, [100, 200, 300, 400])
+        """
+        # Store tracking history
+        self.track_line = self.track_history[track_id]
+        self.track_line.append(tuple(box.mean(dim=0)) if is_obb else (box[:4:2].mean(), box[1:4:2].mean()))
+        if len(self.track_line) > 30:
+            self.track_line.pop(0)
+
+    def initialize_region(self):
+        """Initialize the counting region and line segment based on configuration settings."""
+        if self.region is None:
+            self.region = [(10, 200), (540, 200), (540, 180), (10, 180)]
+        self.r_s = (
+            self.Polygon(self.region) if len(self.region) >= 3 else self.LineString(self.region)
+        )  # region or line
+
+    def display_output(self, plot_im):
+        """
+        Display the results of the processing, which could involve showing frames, printing counts, or saving results.
+
+        This method is responsible for visualizing the output of the object detection and tracking process. It displays
+        the processed frame with annotations, and allows for user interaction to close the display.
+
+        Args:
+            plot_im (numpy.ndarray): The image or frame that has been processed and annotated.
+
+        Examples:
+            >>> solution = BaseSolution()
+            >>> frame = cv2.imread("path/to/image.jpg")
+            >>> solution.display_output(frame)
+
+        Notes:
+            - This method will only display output if the 'show' configuration is set to True and the environment
+              supports image display.
+            - The display can be closed by pressing the 'q' key.
+        """
+        if self.CFG.get("show") and self.env_check:
+            cv2.imshow("Ultralytics Solutions", plot_im)
+            if cv2.waitKey(1) & 0xFF == ord("q"):
+                cv2.destroyAllWindows()  # Closes current frame window
+                return
+
+    def process(self, *args, **kwargs):
+        """Process method should be implemented by each Solution subclass."""
+
+    def __call__(self, *args, **kwargs):
+        """Allow instances to be called like a function with flexible arguments."""
+        result = self.process(*args, **kwargs)  # Call the subclass-specific process method
+        if self.CFG["verbose"]:  # extract verbose value to display the output logs if True
+            LOGGER.info(f"🚀 Results: {result}")
+        return result
+
+
+class SolutionAnnotator(Annotator):
+    """
+    A specialized annotator class for visualizing and analyzing computer vision tasks.
+
+    This class extends the base Annotator class, providing additional methods for drawing regions, centroids, tracking
+    trails, and visual annotations for Ultralytics Solutions (https://docs.ultralytics.com/solutions/), including
+    queue management, workout monitoring, and parking management.
+
+    Attributes:
+        im (np.ndarray): The image being annotated.
+        line_width (int): Thickness of lines used in annotations.
+        font_size (int): Size of the font used for text annotations.
+        font (str): Path to the font file used for text rendering.
+        pil (bool): Whether to use PIL for text rendering.
+        example (str): An example attribute for demonstration purposes.
+
+    Methods:
+        draw_region: Draws a region using specified points, colors, and thickness.
+        queue_counts_display: Displays queue counts in the specified region.
+        display_analytics: Displays overall statistics for parking lot management.
+        estimate_pose_angle: Calculates the angle between three points in an object pose.
+        draw_specific_kpts: Draws specific keypoints on the image.
+        plot_workout_information: Draws a labeled text box on the image.
+        plot_angle_and_count_and_stage: Visualizes angle, step count, and stage for workout monitoring.
+        plot_distance_and_line: Displays the distance between centroids and connects them with a line.
+        display_objects_labels: Annotates bounding boxes with object class labels.
+        sweep_annotator: Visualizes a vertical sweep line and optional label.
+        visioneye: Maps and connects object centroids to a visual "eye" point.
+        circle_label: Draws a circular label within a bounding box.
+        text_label: Draws a rectangular label within a bounding box.
+
+    Examples:
+        >>> annotator = SolutionAnnotator(image)
+        >>> annotator.draw_region([(0, 0), (100, 100)], color=(0, 255, 0), thickness=5)
+        >>> annotator.display_analytics(
+        ...     image, text={"Available Spots": 5}, txt_color=(0, 0, 0), bg_color=(255, 255, 255), margin=10
+        ... )
+    """
+
+    def __init__(self, im, line_width=None, font_size=None, font="Arial.ttf", pil=False, example="abc"):
+        """
+        Initializes the SolutionAnnotator class with an image for annotation.
+
+        Args:
+            im (np.ndarray): The image to be annotated.
+            line_width (int, optional): Line thickness for drawing on the image.
+            font_size (int, optional): Font size for text annotations.
+            font (str, optional): Path to the font file.
+            pil (bool, optional): Indicates whether to use PIL for rendering text.
+            example (str, optional): An example parameter for demonstration purposes.
+        """
+        super().__init__(im, line_width, font_size, font, pil, example)
+
+    def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5):
+        """
+        Draw a region or line on the image.
+
+        Args:
+            reg_pts (List[Tuple[int, int]]): Region points (for line 2 points, for region 4+ points).
+            color (Tuple[int, int, int]): RGB color value for the region.
+            thickness (int): Line thickness for drawing the region.
+        """
+        cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness)
+
+        # Draw small circles at the corner points
+        for point in reg_pts:
+            cv2.circle(self.im, (point[0], point[1]), thickness * 2, color, -1)  # -1 fills the circle
+
+    def queue_counts_display(self, label, points=None, region_color=(255, 255, 255), txt_color=(0, 0, 0)):
+        """
+        Displays queue counts on an image centered at the points with customizable font size and colors.
+
+        Args:
+            label (str): Queue counts label.
+            points (List[Tuple[int, int]]): Region points for center point calculation to display text.
+            region_color (Tuple[int, int, int]): RGB queue region color.
+            txt_color (Tuple[int, int, int]): RGB text display color.
+        """
+        x_values = [point[0] for point in points]
+        y_values = [point[1] for point in points]
+        center_x = sum(x_values) // len(points)
+        center_y = sum(y_values) // len(points)
+
+        text_size = cv2.getTextSize(label, 0, fontScale=self.sf, thickness=self.tf)[0]
+        text_width = text_size[0]
+        text_height = text_size[1]
+
+        rect_width = text_width + 20
+        rect_height = text_height + 20
+        rect_top_left = (center_x - rect_width // 2, center_y - rect_height // 2)
+        rect_bottom_right = (center_x + rect_width // 2, center_y + rect_height // 2)
+        cv2.rectangle(self.im, rect_top_left, rect_bottom_right, region_color, -1)
+
+        text_x = center_x - text_width // 2
+        text_y = center_y + text_height // 2
+
+        # Draw text
+        cv2.putText(
+            self.im,
+            label,
+            (text_x, text_y),
+            0,
+            fontScale=self.sf,
+            color=txt_color,
+            thickness=self.tf,
+            lineType=cv2.LINE_AA,
+        )
+
+    def display_analytics(self, im0, text, txt_color, bg_color, margin):
+        """
+        Display overall statistics for parking lots, object counters, etc.
+
+        Args:
+            im0 (np.ndarray): Inference image.
+            text (Dict[str, Any]): Labels dictionary.
+            txt_color (Tuple[int, int, int]): Display color for text foreground.
+            bg_color (Tuple[int, int, int]): Display color for text background.
+            margin (int): Gap between text and rectangle for better display.
+        """
+        horizontal_gap = int(im0.shape[1] * 0.02)
+        vertical_gap = int(im0.shape[0] * 0.01)
+        text_y_offset = 0
+        for label, value in text.items():
+            txt = f"{label}: {value}"
+            text_size = cv2.getTextSize(txt, 0, self.sf, self.tf)[0]
+            if text_size[0] < 5 or text_size[1] < 5:
+                text_size = (5, 5)
+            text_x = im0.shape[1] - text_size[0] - margin * 2 - horizontal_gap
+            text_y = text_y_offset + text_size[1] + margin * 2 + vertical_gap
+            rect_x1 = text_x - margin * 2
+            rect_y1 = text_y - text_size[1] - margin * 2
+            rect_x2 = text_x + text_size[0] + margin * 2
+            rect_y2 = text_y + margin * 2
+            cv2.rectangle(im0, (rect_x1, rect_y1), (rect_x2, rect_y2), bg_color, -1)
+            cv2.putText(im0, txt, (text_x, text_y), 0, self.sf, txt_color, self.tf, lineType=cv2.LINE_AA)
+            text_y_offset = rect_y2
+
+    @staticmethod
+    def estimate_pose_angle(a, b, c):
+        """
+        Calculate the angle between three points for workout monitoring.
+
+        Args:
+            a (List[float]): The coordinates of the first point.
+            b (List[float]): The coordinates of the second point (vertex).
+            c (List[float]): The coordinates of the third point.
+
+        Returns:
+            (float): The angle in degrees between the three points.
+        """
+        radians = math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0])
+        angle = abs(radians * 180.0 / math.pi)
+        return angle if angle <= 180.0 else (360 - angle)
+
+    def draw_specific_kpts(self, keypoints, indices=None, radius=2, conf_thresh=0.25):
+        """
+        Draw specific keypoints for gym steps counting.
+
+        Args:
+            keypoints (List[List[float]]): Keypoints data to be plotted, each in format [x, y, confidence].
+            indices (List[int], optional): Keypoint indices to be plotted.
+            radius (int, optional): Keypoint radius.
+            conf_thresh (float, optional): Confidence threshold for keypoints.
+
+        Returns:
+            (np.ndarray): Image with drawn keypoints.
+
+        Note:
+            Keypoint format: [x, y] or [x, y, confidence].
+            Modifies self.im in-place.
+        """
+        indices = indices or [2, 5, 7]
+        points = [(int(k[0]), int(k[1])) for i, k in enumerate(keypoints) if i in indices and k[2] >= conf_thresh]
+
+        # Draw lines between consecutive points
+        for start, end in zip(points[:-1], points[1:]):
+            cv2.line(self.im, start, end, (0, 255, 0), 2, lineType=cv2.LINE_AA)
+
+        # Draw circles for keypoints
+        for pt in points:
+            cv2.circle(self.im, pt, radius, (0, 0, 255), -1, lineType=cv2.LINE_AA)
+
+        return self.im
+
+    def plot_workout_information(self, display_text, position, color=(104, 31, 17), txt_color=(255, 255, 255)):
+        """
+        Draw workout text with a background on the image.
+
+        Args:
+            display_text (str): The text to be displayed.
+            position (Tuple[int, int]): Coordinates (x, y) on the image where the text will be placed.
+            color (Tuple[int, int, int], optional): Text background color.
+            txt_color (Tuple[int, int, int], optional): Text foreground color.
+
+        Returns:
+            (int): The height of the text.
+        """
+        (text_width, text_height), _ = cv2.getTextSize(display_text, 0, self.sf, self.tf)
+
+        # Draw background rectangle
+        cv2.rectangle(
+            self.im,
+            (position[0], position[1] - text_height - 5),
+            (position[0] + text_width + 10, position[1] - text_height - 5 + text_height + 10 + self.tf),
+            color,
+            -1,
+        )
+        # Draw text
+        cv2.putText(self.im, display_text, position, 0, self.sf, txt_color, self.tf)
+
+        return text_height
+
+    def plot_angle_and_count_and_stage(
+        self, angle_text, count_text, stage_text, center_kpt, color=(104, 31, 17), txt_color=(255, 255, 255)
+    ):
+        """
+        Plot the pose angle, count value, and step stage for workout monitoring.
+
+        Args:
+            angle_text (str): Angle value for workout monitoring.
+            count_text (str): Counts value for workout monitoring.
+            stage_text (str): Stage decision for workout monitoring.
+            center_kpt (List[int]): Centroid pose index for workout monitoring.
+            color (Tuple[int, int, int], optional): Text background color.
+            txt_color (Tuple[int, int, int], optional): Text foreground color.
+        """
+        # Format text
+        angle_text, count_text, stage_text = f" {angle_text:.2f}", f"Steps : {count_text}", f" {stage_text}"
+
+        # Draw angle, count and stage text
+        angle_height = self.plot_workout_information(
+            angle_text, (int(center_kpt[0]), int(center_kpt[1])), color, txt_color
+        )
+        count_height = self.plot_workout_information(
+            count_text, (int(center_kpt[0]), int(center_kpt[1]) + angle_height + 20), color, txt_color
+        )
+        self.plot_workout_information(
+            stage_text, (int(center_kpt[0]), int(center_kpt[1]) + angle_height + count_height + 40), color, txt_color
+        )
+
+    def plot_distance_and_line(
+        self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
+    ):
+        """
+        Plot the distance and line between two centroids on the frame.
+
+        Args:
+            pixels_distance (float): Pixels distance between two bbox centroids.
+            centroids (List[Tuple[int, int]]): Bounding box centroids data.
+            line_color (Tuple[int, int, int], optional): Distance line color.
+            centroid_color (Tuple[int, int, int], optional): Bounding box centroid color.
+        """
+        # Get the text size
+        text = f"Pixels Distance: {pixels_distance:.2f}"
+        (text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)
+
+        # Define corners with 10-pixel margin and draw rectangle
+        cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)
+
+        # Calculate the position for the text with a 10-pixel margin and draw text
+        text_position = (25, 25 + text_height_m + 10)
+        cv2.putText(
+            self.im,
+            text,
+            text_position,
+            0,
+            self.sf,
+            (255, 255, 255),
+            self.tf,
+            cv2.LINE_AA,
+        )
+
+        cv2.line(self.im, centroids[0], centroids[1], line_color, 3)
+        cv2.circle(self.im, centroids[0], 6, centroid_color, -1)
+        cv2.circle(self.im, centroids[1], 6, centroid_color, -1)
+
+    def display_objects_labels(self, im0, text, txt_color, bg_color, x_center, y_center, margin):
+        """
+        Display bounding box labels in the parking management app.
+
+        Args:
+            im0 (np.ndarray): Inference image.
+            text (str): Object/class name.
+            txt_color (Tuple[int, int, int]): Display color for text foreground.
+            bg_color (Tuple[int, int, int]): Display color for text background.
+            x_center (float): The x position center point for bounding box.
+            y_center (float): The y position center point for bounding box.
+            margin (int): The gap between text and rectangle for better display.
+        """
+        text_size = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0]
+        text_x = x_center - text_size[0] // 2
+        text_y = y_center + text_size[1] // 2
+
+        rect_x1 = text_x - margin
+        rect_y1 = text_y - text_size[1] - margin
+        rect_x2 = text_x + text_size[0] + margin
+        rect_y2 = text_y + margin
+        cv2.rectangle(
+            im0,
+            (int(rect_x1), int(rect_y1)),
+            (int(rect_x2), int(rect_y2)),
+            tuple(map(int, bg_color)),  # Ensure color values are int
+            -1,
+        )
+
+        cv2.putText(
+            im0,
+            text,
+            (int(text_x), int(text_y)),
+            0,
+            self.sf,
+            tuple(map(int, txt_color)),  # Ensure color values are int
+            self.tf,
+            lineType=cv2.LINE_AA,
+        )
+
+    def sweep_annotator(self, line_x=0, line_y=0, label=None, color=(221, 0, 186), txt_color=(255, 255, 255)):
+        """
+        Draw a sweep annotation line and an optional label.
+
+        Args:
+            line_x (int): The x-coordinate of the sweep line.
+            line_y (int): The y-coordinate limit of the sweep line.
+            label (str, optional): Text label to be drawn in center of sweep line. If None, no label is drawn.
+            color (Tuple[int, int, int]): RGB color for the line and label background.
+            txt_color (Tuple[int, int, int]): RGB color for the label text.
+        """
+        # Draw the sweep line
+        cv2.line(self.im, (line_x, 0), (line_x, line_y), color, self.tf * 2)
+
+        # Draw label, if provided
+        if label:
+            (text_width, text_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf, self.tf)
+            cv2.rectangle(
+                self.im,
+                (line_x - text_width // 2 - 10, line_y // 2 - text_height // 2 - 10),
+                (line_x + text_width // 2 + 10, line_y // 2 + text_height // 2 + 10),
+                color,
+                -1,
+            )
+            cv2.putText(
+                self.im,
+                label,
+                (line_x - text_width // 2, line_y // 2 + text_height // 2),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                self.sf,
+                txt_color,
+                self.tf,
+            )
+
+    def visioneye(self, box, center_point, color=(235, 219, 11), pin_color=(255, 0, 255)):
+        """
+        Perform pinpoint human-vision eye mapping and plotting.
+
+        Args:
+            box (List[float]): Bounding box coordinates in format [x1, y1, x2, y2].
+            center_point (Tuple[int, int]): Center point for vision eye view.
+            color (Tuple[int, int, int]): Object centroid and line color.
+            pin_color (Tuple[int, int, int]): Visioneye point color.
+        """
+        center_bbox = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
+        cv2.circle(self.im, center_point, self.tf * 2, pin_color, -1)
+        cv2.circle(self.im, center_bbox, self.tf * 2, color, -1)
+        cv2.line(self.im, center_point, center_bbox, color, self.tf)
+
+    def circle_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), margin=2):
+        """
+        Draw a label with a background circle centered within a given bounding box.
+
+        Args:
+            box (Tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
+            label (str): The text label to be displayed.
+            color (Tuple[int, int, int]): The background color of the circle (B, G, R).
+            txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
+            margin (int): The margin between the text and the circle border.
+        """
+        if len(label) > 3:
+            LOGGER.warning(f"Length of label is {len(label)}, only first 3 letters will be used for circle annotation.")
+            label = label[:3]
+
+        # Calculate the center of the box
+        x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
+        # Get the text size
+        text_size = cv2.getTextSize(str(label), cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.15, self.tf)[0]
+        # Calculate the required radius to fit the text with the margin
+        required_radius = int(((text_size[0] ** 2 + text_size[1] ** 2) ** 0.5) / 2) + margin
+        # Draw the circle with the required radius
+        cv2.circle(self.im, (x_center, y_center), required_radius, color, -1)
+        # Calculate the position for the text
+        text_x = x_center - text_size[0] // 2
+        text_y = y_center + text_size[1] // 2
+        # Draw the text
+        cv2.putText(
+            self.im,
+            str(label),
+            (text_x, text_y),
+            cv2.FONT_HERSHEY_SIMPLEX,
+            self.sf - 0.15,
+            self.get_txt_color(color, txt_color),
+            self.tf,
+            lineType=cv2.LINE_AA,
+        )
+
+    def text_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), margin=5):
+        """
+        Draw a label with a background rectangle centered within a given bounding box.
+
+        Args:
+            box (Tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
+            label (str): The text label to be displayed.
+            color (Tuple[int, int, int]): The background color of the rectangle (B, G, R).
+            txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
+            margin (int): The margin between the text and the rectangle border.
+        """
+        # Calculate the center of the bounding box
+        x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
+        # Get the size of the text
+        text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.1, self.tf)[0]
+        # Calculate the top-left corner of the text (to center it)
+        text_x = x_center - text_size[0] // 2
+        text_y = y_center + text_size[1] // 2
+        # Calculate the coordinates of the background rectangle
+        rect_x1 = text_x - margin
+        rect_y1 = text_y - text_size[1] - margin
+        rect_x2 = text_x + text_size[0] + margin
+        rect_y2 = text_y + margin
+        # Draw the background rectangle
+        cv2.rectangle(self.im, (rect_x1, rect_y1), (rect_x2, rect_y2), color, -1)
+        # Draw the text on top of the rectangle
+        cv2.putText(
+            self.im,
+            label,
+            (text_x, text_y),
+            cv2.FONT_HERSHEY_SIMPLEX,
+            self.sf - 0.1,
+            self.get_txt_color(color, txt_color),
+            self.tf,
+            lineType=cv2.LINE_AA,
+        )
+
+
+class SolutionResults:
+    """
+    A class to encapsulate the results of Ultralytics Solutions.
+
+    This class is designed to store and manage various outputs generated by the solution pipeline, including counts,
+    angles, and workout stages.
+
+    Attributes:
+        plot_im (np.ndarray): Processed image with counts, blurred, or other effects from solutions.
+        in_count (int): The total number of "in" counts in a video stream.
+        out_count (int): The total number of "out" counts in a video stream.
+        classwise_count (Dict[str, int]): A dictionary containing counts of objects categorized by class.
+        queue_count (int): The count of objects in a queue or waiting area.
+        workout_count (int): The count of workout repetitions.
+        workout_angle (float): The angle calculated during a workout exercise.
+        workout_stage (str): The current stage of the workout.
+        pixels_distance (float): The calculated distance in pixels between two points or objects.
+        available_slots (int): The number of available slots in a monitored area.
+        filled_slots (int): The number of filled slots in a monitored area.
+        email_sent (bool): A flag indicating whether an email notification was sent.
+        total_tracks (int): The total number of tracked objects.
+        region_counts (dict): The count of objects within a specific region.
+        speed_dict (Dict[str, float]): A dictionary containing speed information for tracked objects.
+        total_crop_objects (int): Total number of cropped objects using ObjectCropper class.
+    """
+
+    def __init__(self, **kwargs):
+        """
+        Initialize a SolutionResults object with default or user-specified values.
+
+        Args:
+            **kwargs (Any): Optional arguments to override default attribute values.
+        """
+        self.plot_im = None
+        self.in_count = 0
+        self.out_count = 0
+        self.classwise_count = {}
+        self.queue_count = 0
+        self.workout_count = 0
+        self.workout_angle = 0.0
+        self.workout_stage = None
+        self.pixels_distance = 0.0
+        self.available_slots = 0
+        self.filled_slots = 0
+        self.email_sent = False
+        self.total_tracks = 0
+        self.region_counts = {}
+        self.speed_dict = {}
+        self.total_crop_objects = 0
+
+        # Override with user-defined values
+        self.__dict__.update(kwargs)
+
+    def __str__(self):
+        """
+        Return a formatted string representation of the SolutionResults object.
+
+        Returns:
+            (str): A string representation listing non-null attributes.
+        """
+        attrs = {
+            k: v
+            for k, v in self.__dict__.items()
+            if k != "plot_im" and v not in [None, {}, 0, 0.0, False]  # Exclude `plot_im` explicitly
+        }
+        return f"SolutionResults({', '.join(f'{k}={v}' for k, v in attrs.items())})"