dgenerate-ultralytics-headless 8.3.253__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (299) hide show
  1. dgenerate_ultralytics_headless-8.3.253.dist-info/METADATA +405 -0
  2. dgenerate_ultralytics_headless-8.3.253.dist-info/RECORD +299 -0
  3. dgenerate_ultralytics_headless-8.3.253.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.253.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.253.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.253.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +23 -0
  8. tests/conftest.py +59 -0
  9. tests/test_cli.py +131 -0
  10. tests/test_cuda.py +216 -0
  11. tests/test_engine.py +157 -0
  12. tests/test_exports.py +309 -0
  13. tests/test_integrations.py +151 -0
  14. tests/test_python.py +777 -0
  15. tests/test_solutions.py +371 -0
  16. ultralytics/__init__.py +48 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1028 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +78 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +447 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/TT100K.yaml +346 -0
  29. ultralytics/cfg/datasets/VOC.yaml +102 -0
  30. ultralytics/cfg/datasets/VisDrone.yaml +87 -0
  31. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  32. ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
  33. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  34. ultralytics/cfg/datasets/coco-pose.yaml +64 -0
  35. ultralytics/cfg/datasets/coco.yaml +118 -0
  36. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  37. ultralytics/cfg/datasets/coco128.yaml +101 -0
  38. ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
  39. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  40. ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
  41. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  42. ultralytics/cfg/datasets/coco8.yaml +101 -0
  43. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  44. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  45. ultralytics/cfg/datasets/dog-pose.yaml +52 -0
  46. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  47. ultralytics/cfg/datasets/dota8.yaml +35 -0
  48. ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
  49. ultralytics/cfg/datasets/kitti.yaml +27 -0
  50. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  51. ultralytics/cfg/datasets/medical-pills.yaml +21 -0
  52. ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
  53. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  54. ultralytics/cfg/datasets/signature.yaml +21 -0
  55. ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
  56. ultralytics/cfg/datasets/xView.yaml +155 -0
  57. ultralytics/cfg/default.yaml +130 -0
  58. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  59. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  60. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  61. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  62. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  63. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  64. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  65. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  67. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  68. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  69. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  70. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  71. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  72. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  73. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  74. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  75. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  77. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  78. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  79. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  80. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  81. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  82. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  83. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  84. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  85. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  86. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  87. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
  88. ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
  89. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  90. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  91. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  92. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  93. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  94. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  95. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  96. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  97. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  99. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  100. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  102. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  103. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  104. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  105. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  106. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  109. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  110. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  111. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  112. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  113. ultralytics/cfg/trackers/botsort.yaml +21 -0
  114. ultralytics/cfg/trackers/bytetrack.yaml +12 -0
  115. ultralytics/data/__init__.py +26 -0
  116. ultralytics/data/annotator.py +66 -0
  117. ultralytics/data/augment.py +2801 -0
  118. ultralytics/data/base.py +435 -0
  119. ultralytics/data/build.py +437 -0
  120. ultralytics/data/converter.py +855 -0
  121. ultralytics/data/dataset.py +834 -0
  122. ultralytics/data/loaders.py +704 -0
  123. ultralytics/data/scripts/download_weights.sh +18 -0
  124. ultralytics/data/scripts/get_coco.sh +61 -0
  125. ultralytics/data/scripts/get_coco128.sh +18 -0
  126. ultralytics/data/scripts/get_imagenet.sh +52 -0
  127. ultralytics/data/split.py +138 -0
  128. ultralytics/data/split_dota.py +344 -0
  129. ultralytics/data/utils.py +798 -0
  130. ultralytics/engine/__init__.py +1 -0
  131. ultralytics/engine/exporter.py +1580 -0
  132. ultralytics/engine/model.py +1125 -0
  133. ultralytics/engine/predictor.py +508 -0
  134. ultralytics/engine/results.py +1522 -0
  135. ultralytics/engine/trainer.py +977 -0
  136. ultralytics/engine/tuner.py +449 -0
  137. ultralytics/engine/validator.py +387 -0
  138. ultralytics/hub/__init__.py +166 -0
  139. ultralytics/hub/auth.py +151 -0
  140. ultralytics/hub/google/__init__.py +174 -0
  141. ultralytics/hub/session.py +422 -0
  142. ultralytics/hub/utils.py +162 -0
  143. ultralytics/models/__init__.py +9 -0
  144. ultralytics/models/fastsam/__init__.py +7 -0
  145. ultralytics/models/fastsam/model.py +79 -0
  146. ultralytics/models/fastsam/predict.py +169 -0
  147. ultralytics/models/fastsam/utils.py +23 -0
  148. ultralytics/models/fastsam/val.py +38 -0
  149. ultralytics/models/nas/__init__.py +7 -0
  150. ultralytics/models/nas/model.py +98 -0
  151. ultralytics/models/nas/predict.py +56 -0
  152. ultralytics/models/nas/val.py +38 -0
  153. ultralytics/models/rtdetr/__init__.py +7 -0
  154. ultralytics/models/rtdetr/model.py +63 -0
  155. ultralytics/models/rtdetr/predict.py +88 -0
  156. ultralytics/models/rtdetr/train.py +89 -0
  157. ultralytics/models/rtdetr/val.py +216 -0
  158. ultralytics/models/sam/__init__.py +25 -0
  159. ultralytics/models/sam/amg.py +275 -0
  160. ultralytics/models/sam/build.py +365 -0
  161. ultralytics/models/sam/build_sam3.py +377 -0
  162. ultralytics/models/sam/model.py +169 -0
  163. ultralytics/models/sam/modules/__init__.py +1 -0
  164. ultralytics/models/sam/modules/blocks.py +1067 -0
  165. ultralytics/models/sam/modules/decoders.py +495 -0
  166. ultralytics/models/sam/modules/encoders.py +794 -0
  167. ultralytics/models/sam/modules/memory_attention.py +298 -0
  168. ultralytics/models/sam/modules/sam.py +1160 -0
  169. ultralytics/models/sam/modules/tiny_encoder.py +979 -0
  170. ultralytics/models/sam/modules/transformer.py +344 -0
  171. ultralytics/models/sam/modules/utils.py +512 -0
  172. ultralytics/models/sam/predict.py +3940 -0
  173. ultralytics/models/sam/sam3/__init__.py +3 -0
  174. ultralytics/models/sam/sam3/decoder.py +546 -0
  175. ultralytics/models/sam/sam3/encoder.py +529 -0
  176. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  177. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  178. ultralytics/models/sam/sam3/model_misc.py +199 -0
  179. ultralytics/models/sam/sam3/necks.py +129 -0
  180. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  181. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  182. ultralytics/models/sam/sam3/vitdet.py +547 -0
  183. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  184. ultralytics/models/utils/__init__.py +1 -0
  185. ultralytics/models/utils/loss.py +466 -0
  186. ultralytics/models/utils/ops.py +315 -0
  187. ultralytics/models/yolo/__init__.py +7 -0
  188. ultralytics/models/yolo/classify/__init__.py +7 -0
  189. ultralytics/models/yolo/classify/predict.py +90 -0
  190. ultralytics/models/yolo/classify/train.py +202 -0
  191. ultralytics/models/yolo/classify/val.py +216 -0
  192. ultralytics/models/yolo/detect/__init__.py +7 -0
  193. ultralytics/models/yolo/detect/predict.py +122 -0
  194. ultralytics/models/yolo/detect/train.py +227 -0
  195. ultralytics/models/yolo/detect/val.py +507 -0
  196. ultralytics/models/yolo/model.py +430 -0
  197. ultralytics/models/yolo/obb/__init__.py +7 -0
  198. ultralytics/models/yolo/obb/predict.py +56 -0
  199. ultralytics/models/yolo/obb/train.py +79 -0
  200. ultralytics/models/yolo/obb/val.py +302 -0
  201. ultralytics/models/yolo/pose/__init__.py +7 -0
  202. ultralytics/models/yolo/pose/predict.py +65 -0
  203. ultralytics/models/yolo/pose/train.py +110 -0
  204. ultralytics/models/yolo/pose/val.py +248 -0
  205. ultralytics/models/yolo/segment/__init__.py +7 -0
  206. ultralytics/models/yolo/segment/predict.py +109 -0
  207. ultralytics/models/yolo/segment/train.py +69 -0
  208. ultralytics/models/yolo/segment/val.py +307 -0
  209. ultralytics/models/yolo/world/__init__.py +5 -0
  210. ultralytics/models/yolo/world/train.py +173 -0
  211. ultralytics/models/yolo/world/train_world.py +178 -0
  212. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  213. ultralytics/models/yolo/yoloe/predict.py +162 -0
  214. ultralytics/models/yolo/yoloe/train.py +287 -0
  215. ultralytics/models/yolo/yoloe/train_seg.py +122 -0
  216. ultralytics/models/yolo/yoloe/val.py +206 -0
  217. ultralytics/nn/__init__.py +27 -0
  218. ultralytics/nn/autobackend.py +964 -0
  219. ultralytics/nn/modules/__init__.py +182 -0
  220. ultralytics/nn/modules/activation.py +54 -0
  221. ultralytics/nn/modules/block.py +1947 -0
  222. ultralytics/nn/modules/conv.py +669 -0
  223. ultralytics/nn/modules/head.py +1183 -0
  224. ultralytics/nn/modules/transformer.py +793 -0
  225. ultralytics/nn/modules/utils.py +159 -0
  226. ultralytics/nn/tasks.py +1768 -0
  227. ultralytics/nn/text_model.py +356 -0
  228. ultralytics/py.typed +1 -0
  229. ultralytics/solutions/__init__.py +41 -0
  230. ultralytics/solutions/ai_gym.py +108 -0
  231. ultralytics/solutions/analytics.py +264 -0
  232. ultralytics/solutions/config.py +107 -0
  233. ultralytics/solutions/distance_calculation.py +123 -0
  234. ultralytics/solutions/heatmap.py +125 -0
  235. ultralytics/solutions/instance_segmentation.py +86 -0
  236. ultralytics/solutions/object_blurrer.py +89 -0
  237. ultralytics/solutions/object_counter.py +190 -0
  238. ultralytics/solutions/object_cropper.py +87 -0
  239. ultralytics/solutions/parking_management.py +280 -0
  240. ultralytics/solutions/queue_management.py +93 -0
  241. ultralytics/solutions/region_counter.py +133 -0
  242. ultralytics/solutions/security_alarm.py +151 -0
  243. ultralytics/solutions/similarity_search.py +219 -0
  244. ultralytics/solutions/solutions.py +828 -0
  245. ultralytics/solutions/speed_estimation.py +114 -0
  246. ultralytics/solutions/streamlit_inference.py +260 -0
  247. ultralytics/solutions/templates/similarity-search.html +156 -0
  248. ultralytics/solutions/trackzone.py +88 -0
  249. ultralytics/solutions/vision_eye.py +67 -0
  250. ultralytics/trackers/__init__.py +7 -0
  251. ultralytics/trackers/basetrack.py +115 -0
  252. ultralytics/trackers/bot_sort.py +257 -0
  253. ultralytics/trackers/byte_tracker.py +469 -0
  254. ultralytics/trackers/track.py +116 -0
  255. ultralytics/trackers/utils/__init__.py +1 -0
  256. ultralytics/trackers/utils/gmc.py +339 -0
  257. ultralytics/trackers/utils/kalman_filter.py +482 -0
  258. ultralytics/trackers/utils/matching.py +154 -0
  259. ultralytics/utils/__init__.py +1450 -0
  260. ultralytics/utils/autobatch.py +118 -0
  261. ultralytics/utils/autodevice.py +205 -0
  262. ultralytics/utils/benchmarks.py +728 -0
  263. ultralytics/utils/callbacks/__init__.py +5 -0
  264. ultralytics/utils/callbacks/base.py +233 -0
  265. ultralytics/utils/callbacks/clearml.py +146 -0
  266. ultralytics/utils/callbacks/comet.py +625 -0
  267. ultralytics/utils/callbacks/dvc.py +197 -0
  268. ultralytics/utils/callbacks/hub.py +110 -0
  269. ultralytics/utils/callbacks/mlflow.py +134 -0
  270. ultralytics/utils/callbacks/neptune.py +126 -0
  271. ultralytics/utils/callbacks/platform.py +453 -0
  272. ultralytics/utils/callbacks/raytune.py +42 -0
  273. ultralytics/utils/callbacks/tensorboard.py +123 -0
  274. ultralytics/utils/callbacks/wb.py +188 -0
  275. ultralytics/utils/checks.py +1020 -0
  276. ultralytics/utils/cpu.py +85 -0
  277. ultralytics/utils/dist.py +123 -0
  278. ultralytics/utils/downloads.py +529 -0
  279. ultralytics/utils/errors.py +35 -0
  280. ultralytics/utils/events.py +113 -0
  281. ultralytics/utils/export/__init__.py +7 -0
  282. ultralytics/utils/export/engine.py +237 -0
  283. ultralytics/utils/export/imx.py +325 -0
  284. ultralytics/utils/export/tensorflow.py +231 -0
  285. ultralytics/utils/files.py +219 -0
  286. ultralytics/utils/git.py +137 -0
  287. ultralytics/utils/instance.py +484 -0
  288. ultralytics/utils/logger.py +506 -0
  289. ultralytics/utils/loss.py +849 -0
  290. ultralytics/utils/metrics.py +1563 -0
  291. ultralytics/utils/nms.py +337 -0
  292. ultralytics/utils/ops.py +664 -0
  293. ultralytics/utils/patches.py +201 -0
  294. ultralytics/utils/plotting.py +1047 -0
  295. ultralytics/utils/tal.py +404 -0
  296. ultralytics/utils/torch_utils.py +984 -0
  297. ultralytics/utils/tqdm.py +443 -0
  298. ultralytics/utils/triton.py +112 -0
  299. ultralytics/utils/tuner.py +168 -0
@@ -0,0 +1,264 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ from __future__ import annotations
4
+
5
+ from itertools import cycle
6
+ from typing import Any
7
+
8
+ import cv2
9
+ import numpy as np
10
+
11
+ from ultralytics.solutions.solutions import BaseSolution, SolutionResults # Import a parent class
12
+ from ultralytics.utils import plt_settings
13
+
14
+
15
class Analytics(BaseSolution):
    """A class for creating and updating various types of charts for visual analytics.

    This class extends BaseSolution to provide functionality for generating line, bar, pie, and area charts based on
    object detection and tracking data.

    Attributes:
        type (str): The type of analytics chart to generate ('line', 'bar', 'pie', or 'area').
        x_label (str): Label for the x-axis.
        y_label (str): Label for the y-axis.
        bg_color (str): Background color of the chart frame.
        fg_color (str): Foreground color of the chart frame.
        title (str): Title of the chart window.
        max_points (int): Maximum number of data points to display on the chart.
        fontsize (int): Font size for text display.
        color_cycle (cycle): Cyclic iterator for chart colors.
        total_counts (int): Total count of detected objects (used for line charts).
        clswise_count (dict[str, int]): Dictionary for class-wise object counts.
        update_every (int): Re-render the chart only every N frames; cached image is reused in between.
        last_plot_im (np.ndarray | None): Cache of the most recently rendered chart image.
        fig (Figure): Matplotlib figure object for the chart.
        ax (Axes): Matplotlib axes object for the chart.
        canvas (FigureCanvasAgg): Canvas for rendering the chart.
        lines (dict): Dictionary to store line objects for area charts.
        color_mapping (dict[str, str]): Dictionary mapping class labels to colors for consistent visualization.

    Methods:
        process: Process image data and update the chart.
        update_graph: Update the chart with new data points.

    Examples:
        >>> analytics = Analytics(analytics_type="line")
        >>> frame = cv2.imread("image.jpg")
        >>> results = analytics.process(frame, frame_number=1)
        >>> cv2.imshow("Analytics", results.plot_im)
    """

    @plt_settings()
    def __init__(self, **kwargs: Any) -> None:
        """Initialize Analytics class with various chart types for visual data representation."""
        super().__init__(**kwargs)

        import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
        from matplotlib.backends.backend_agg import FigureCanvasAgg
        from matplotlib.figure import Figure

        self.type = self.CFG["analytics_type"]  # Chart type: "line", "pie", "bar", or "area".
        self.x_label = "Classes" if self.type in {"bar", "pie"} else "Frame#"
        self.y_label = "Total Counts"

        # Predefined data
        self.bg_color = "#F3F3F3"  # background color of frame
        self.fg_color = "#111E68"  # foreground color of frame
        self.title = "Ultralytics Solutions"  # window name
        self.max_points = 45  # maximum points to be drawn on window
        self.fontsize = 25  # text font size for display
        figsize = self.CFG["figsize"]  # Output size, e.g. (12.8, 7.2) -> 1280x720.
        self.color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])

        self.total_counts = 0  # Stores total counts for line charts.
        self.clswise_count = {}  # dictionary for class-wise counts
        self.update_every = kwargs.get("update_every", 30)  # Only update graph every 30 frames by default
        self.last_plot_im = None  # Cache of the last rendered chart

        # Ensure line and area chart
        if self.type in {"line", "area"}:
            self.lines = {}
            self.fig = Figure(facecolor=self.bg_color, figsize=figsize)
            self.canvas = FigureCanvasAgg(self.fig)  # Set common axis properties
            self.ax = self.fig.add_subplot(111, facecolor=self.bg_color)
            if self.type == "line":
                (self.line,) = self.ax.plot([], [], color="cyan", linewidth=self.line_width)
        elif self.type in {"bar", "pie"}:
            # Initialize bar or pie plot
            self.fig, self.ax = plt.subplots(figsize=figsize, facecolor=self.bg_color)
            self.canvas = FigureCanvasAgg(self.fig)  # Set common axis properties
            self.ax.set_facecolor(self.bg_color)
            self.color_mapping = {}

            if self.type == "pie":  # Ensure pie chart is circular
                self.ax.axis("equal")

    def process(self, im0: np.ndarray, frame_number: int) -> SolutionResults:
        """Process image data and run object tracking to update analytics charts.

        Args:
            im0 (np.ndarray): Input image for processing.
            frame_number (int): Video frame number for plotting the data.

        Returns:
            (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects)
            and 'classwise_count' (dict, per-class object count).

        Raises:
            ValueError: If an unsupported chart type is specified.

        Examples:
            >>> analytics = Analytics(analytics_type="line")
            >>> frame = np.zeros((480, 640, 3), dtype=np.uint8)
            >>> results = analytics.process(frame, frame_number=1)
        """
        self.extract_tracks(im0)  # Extract tracks
        if self.type == "line":
            self.total_counts += len(self.boxes)  # one count per detected box this frame
            # Re-render only every `update_every` frames (or on the very first call); reuse the cached image otherwise.
            update_required = frame_number % self.update_every == 0 or self.last_plot_im is None
            if update_required:
                self.last_plot_im = self.update_graph(frame_number=frame_number)
            plot_im = self.last_plot_im
            self.total_counts = 0  # reset per-frame counter
        elif self.type in {"pie", "bar", "area"}:
            from collections import Counter  # scope to keep module import light

            self.clswise_count = Counter(self.names[int(cls)] for cls in self.clss)
            update_required = frame_number % self.update_every == 0 or self.last_plot_im is None
            if update_required:
                self.last_plot_im = self.update_graph(
                    frame_number=frame_number, count_dict=self.clswise_count, plot=self.type
                )
            plot_im = self.last_plot_im
        else:
            raise ValueError(f"Unsupported analytics_type='{self.type}'. Supported types: line, bar, pie, area.")

        # Return results for downstream use.
        return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids), classwise_count=self.clswise_count)

    def update_graph(
        self, frame_number: int, count_dict: dict[str, int] | None = None, plot: str = "line"
    ) -> np.ndarray:
        """Update the graph with new data for single or multiple classes.

        Args:
            frame_number (int): The current frame number.
            count_dict (dict[str, int], optional): Dictionary with class names as keys and counts as values for multiple
                classes. If None, updates a single line graph.
            plot (str): Type of the plot. Options are 'line', 'bar', 'pie', or 'area'.

        Returns:
            (np.ndarray): Updated BGR image containing the graph.

        Examples:
            >>> analytics = Analytics(analytics_type="bar")
            >>> frame_num = 10
            >>> results_dict = {"person": 5, "car": 3}
            >>> updated_image = analytics.update_graph(frame_num, results_dict, plot="bar")
        """
        if count_dict is None:
            # Single line update: append the new point and trim to the last `max_points` samples.
            x_data = np.append(self.line.get_xdata(), float(frame_number))
            y_data = np.append(self.line.get_ydata(), float(self.total_counts))

            if len(x_data) > self.max_points:
                x_data, y_data = x_data[-self.max_points :], y_data[-self.max_points :]

            self.line.set_data(x_data, y_data)
            self.line.set_label("Counts")
            self.line.set_color("#7b0068")  # Pink color
            self.line.set_marker("*")
            self.line.set_markersize(self.line_width * 5)
        else:
            labels = list(count_dict.keys())
            counts = list(count_dict.values())
            if plot == "area":
                color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])
                # Multiple lines or area update: recover existing per-class series from the axes.
                x_data = self.ax.lines[0].get_xdata() if self.ax.lines else np.array([])
                y_data_dict = {key: np.array([]) for key in count_dict.keys()}
                if self.ax.lines:
                    for line, key in zip(self.ax.lines, count_dict.keys()):
                        y_data_dict[key] = line.get_ydata()

                x_data = np.append(x_data, float(frame_number))
                max_length = len(x_data)
                for key in count_dict.keys():
                    y_data_dict[key] = np.append(y_data_dict[key], float(count_dict[key]))
                    if len(y_data_dict[key]) < max_length:
                        # Zero-pad series for classes that appeared late so all series align with x_data.
                        y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])))
                if len(x_data) > self.max_points:
                    x_data = x_data[1:]
                    for key in count_dict.keys():
                        y_data_dict[key] = y_data_dict[key][1:]

                self.ax.clear()
                for key, y_data in y_data_dict.items():
                    color = next(color_cycle)
                    self.ax.fill_between(x_data, y_data, color=color, alpha=0.55)
                    self.ax.plot(
                        x_data,
                        y_data,
                        color=color,
                        linewidth=self.line_width,
                        marker="o",
                        markersize=self.line_width * 5,
                        label=f"{key} Data Points",
                    )
            elif plot == "bar":
                self.ax.clear()  # clear bar data
                for label in labels:  # Map labels to colors
                    if label not in self.color_mapping:
                        self.color_mapping[label] = next(self.color_cycle)
                colors = [self.color_mapping[label] for label in labels]
                bars = self.ax.bar(labels, counts, color=colors)
                for bar, count in zip(bars, counts):
                    self.ax.text(
                        bar.get_x() + bar.get_width() / 2,
                        bar.get_height(),
                        str(count),
                        ha="center",
                        va="bottom",
                        color=self.fg_color,
                    )
                # Create the legend using labels from the bars
                for bar, label in zip(bars, labels):
                    bar.set_label(label)  # Assign label to each bar
                self.ax.legend(loc="upper left", fontsize=13, facecolor=self.fg_color, edgecolor=self.fg_color)
            elif plot == "pie":
                total = sum(counts)
                percentages = [size / total * 100 for size in counts]
                self.ax.clear()

                start_angle = 90
                # Create pie chart and create legend labels with percentages
                wedges, _ = self.ax.pie(
                    counts, labels=labels, startangle=start_angle, textprops={"color": self.fg_color}, autopct=None
                )
                legend_labels = [f"{label} ({percentage:.1f}%)" for label, percentage in zip(labels, percentages)]

                # Assign the legend using the wedges and manually created labels
                self.ax.legend(wedges, legend_labels, title="Classes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
                self.fig.subplots_adjust(left=0.1, right=0.75)  # Adjust layout to fit the legend

        # Common plot settings
        self.ax.set_facecolor("#f0f0f0")  # Set to light gray or any other color you like
        self.ax.grid(True, linestyle="--", linewidth=0.5, alpha=0.5)  # Display grid for more data insights
        self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
        self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
        self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)

        # Add and format legend
        legend = self.ax.legend(loc="upper left", fontsize=13, facecolor=self.bg_color, edgecolor=self.bg_color)
        for text in legend.get_texts():
            text.set_color(self.fg_color)

        # Redraw graph, update view, capture, and display the updated plot
        self.ax.relim()
        self.ax.autoscale_view()
        self.canvas.draw()
        im0 = np.array(self.canvas.renderer.buffer_rgba())
        # FIX: the [:, :, :3] slice drops the alpha channel, leaving a 3-channel RGB image;
        # cv2.COLOR_RGBA2BGR asserts 4 input channels and would raise cv2.error here, so use RGB2BGR.
        im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGB2BGR)
        self.display_output(im0)

        return im0  # Return the image
@@ -0,0 +1,107 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import Any
7
+
8
+ import cv2
9
+
10
+
11
@dataclass
class SolutionConfig:
    """Manages configuration parameters for Ultralytics Vision AI solutions.

    The SolutionConfig class serves as a centralized configuration container for all the Ultralytics solution modules:
    https://docs.ultralytics.com/solutions/#solutions. It leverages Python `dataclass` for clear, type-safe, and
    maintainable parameter definitions.

    Attributes:
        source (str, optional): Path to the input source (video, RTSP, etc.). Only usable with Solutions CLI.
        model (str, optional): Path to the Ultralytics YOLO model to be used for inference.
        classes (list[int], optional): List of class indices to filter detections.
        show_conf (bool): Whether to show confidence scores on the visual output.
        show_labels (bool): Whether to display class labels on visual output.
        region (list[tuple[int, int]], optional): Polygonal region or line for object counting.
        colormap (int, optional): OpenCV colormap constant for visual overlays (e.g., cv2.COLORMAP_JET).
        show_in (bool): Whether to display count number for objects entering the region.
        show_out (bool): Whether to display count number for objects leaving the region.
        up_angle (float): Upper angle threshold used in pose-based workouts monitoring.
        down_angle (int): Lower angle threshold used in pose-based workouts monitoring.
        kpts (list[int]): Keypoint indices to monitor, e.g., for pose analytics.
        analytics_type (str): Type of analytics to perform ("line", "area", "bar", "pie", etc.).
        figsize (tuple[float, float], optional): Size of the matplotlib figure in inches used for analytical plots
            (width, height), e.g. (12.8, 7.2) -> 1280x720 at 100 dpi.
        blur_ratio (float): Ratio used to blur objects in the video frames (0.0 to 1.0).
        vision_point (tuple[int, int]): Reference point for directional tracking or perspective drawing.
        crop_dir (str): Directory path to save cropped detection images.
        json_file (str, optional): Path to a JSON file containing data for parking areas.
        line_width (int): Width for visual display, e.g. bounding boxes, keypoints, and counts.
        records (int): Number of detection records to send email alerts.
        fps (float): Frame rate (Frames Per Second) for speed estimation calculation.
        max_hist (int): Maximum number of historical points or states stored per tracked object for speed estimation.
        meter_per_pixel (float): Scale for real-world measurement, used in speed or distance calculations.
        max_speed (int): Maximum speed limit (e.g., km/h or mph) used in visual alerts or constraints.
        show (bool): Whether to display the visual output on screen.
        iou (float): Intersection-over-Union threshold for detection filtering.
        conf (float): Confidence threshold for keeping predictions.
        device (str, optional): Device to run inference on (e.g., 'cpu', '0' for CUDA GPU).
        max_det (int): Maximum number of detections allowed per video frame.
        half (bool): Whether to use FP16 precision (requires a supported CUDA device).
        tracker (str): Path to tracking configuration YAML file (e.g., 'botsort.yaml').
        verbose (bool): Enable verbose logging output for debugging or diagnostics.
        data (str): Path to image directory used for similarity search.

    Methods:
        update: Update the configuration with user-defined keyword arguments and raise error on invalid keys.

    Examples:
        >>> from ultralytics.solutions.config import SolutionConfig
        >>> cfg = SolutionConfig(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
        >>> cfg.update(show=False, conf=0.3)
        >>> print(cfg.model)
    """

    source: str | None = None
    model: str | None = None
    classes: list[int] | None = None
    show_conf: bool = True
    show_labels: bool = True
    region: list[tuple[int, int]] | None = None
    colormap: int | None = cv2.COLORMAP_DEEPGREEN
    show_in: bool = True
    show_out: bool = True
    up_angle: float = 145.0
    down_angle: int = 90
    kpts: list[int] = field(default_factory=lambda: [6, 8, 10])
    analytics_type: str = "line"
    # FIX: default values are floats (figure size in inches), so the annotation must be float, not int.
    figsize: tuple[float, float] | None = (12.8, 7.2)
    blur_ratio: float = 0.5
    vision_point: tuple[int, int] = (20, 20)
    crop_dir: str = "cropped-detections"
    # FIX: default is None, so the annotation must be optional (`str | None`), not plain `str`.
    json_file: str | None = None
    line_width: int = 2
    records: int = 5
    fps: float = 30.0
    max_hist: int = 5
    meter_per_pixel: float = 0.05
    max_speed: int = 120
    show: bool = False
    iou: float = 0.7
    conf: float = 0.25
    device: str | None = None
    max_det: int = 300
    half: bool = False
    tracker: str = "botsort.yaml"
    verbose: bool = True
    data: str = "images"

    def update(self, **kwargs: Any):
        """Update configuration parameters with new values provided as keyword arguments.

        Args:
            **kwargs (Any): Field names and their new values; each key must match an existing attribute.

        Returns:
            (SolutionConfig): This instance, to allow fluent chaining.

        Raises:
            ValueError: If a keyword does not correspond to a valid configuration field.
        """
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                url = "https://docs.ultralytics.com/solutions/#solutions-arguments"
                raise ValueError(f"{key} is not a valid solution argument, see {url}")

        return self
@@ -0,0 +1,123 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ import math
4
+ from typing import Any
5
+
6
+ import cv2
7
+
8
+ from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
9
+ from ultralytics.utils.plotting import colors
10
+
11
+
12
+ class DistanceCalculation(BaseSolution):
13
+ """A class to calculate distance between two objects in a real-time video stream based on their tracks.
14
+
15
+ This class extends BaseSolution to provide functionality for selecting objects and calculating the distance between
16
+ them in a video stream using YOLO object detection and tracking.
17
+
18
+ Attributes:
19
+ left_mouse_count (int): Counter for left mouse button clicks.
20
+ selected_boxes (dict[int, Any]): Dictionary to store selected bounding boxes keyed by track ID.
21
+ centroids (list[list[int]]): List to store centroids of selected bounding boxes.
22
+
23
+ Methods:
24
+ mouse_event_for_distance: Handle mouse events for selecting objects in the video stream.
25
+ process: Process video frames and calculate the distance between selected objects.
26
+
27
+ Examples:
28
+ >>> distance_calc = DistanceCalculation()
29
+ >>> frame = cv2.imread("frame.jpg")
30
+ >>> results = distance_calc.process(frame)
31
+ >>> cv2.imshow("Distance Calculation", results.plot_im)
32
+ >>> cv2.waitKey(0)
33
+ """
34
+
35
+ def __init__(self, **kwargs: Any) -> None:
36
+ """Initialize the DistanceCalculation class for measuring object distances in video streams."""
37
+ super().__init__(**kwargs)
38
+
39
+ # Mouse event information
40
+ self.left_mouse_count = 0
41
+ self.selected_boxes: dict[int, list[float]] = {}
42
+ self.centroids: list[list[int]] = [] # Store centroids of selected objects
43
+
44
+ def mouse_event_for_distance(self, event: int, x: int, y: int, flags: int, param: Any) -> None:
45
+ """Handle mouse events to select regions in a real-time video stream for distance calculation.
46
+
47
+ Args:
48
+ event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN).
49
+ x (int): X-coordinate of the mouse pointer.
50
+ y (int): Y-coordinate of the mouse pointer.
51
+ flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY).
52
+ param (Any): Additional parameters passed to the function.
53
+
54
+ Examples:
55
+ >>> # Assuming 'dc' is an instance of DistanceCalculation
56
+ >>> cv2.setMouseCallback("window_name", dc.mouse_event_for_distance)
57
+ """
58
+ if event == cv2.EVENT_LBUTTONDOWN:
59
+ self.left_mouse_count += 1
60
+ if self.left_mouse_count <= 2:
61
+ for box, track_id in zip(self.boxes, self.track_ids):
62
+ if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
63
+ self.selected_boxes[track_id] = box
64
+
65
+ elif event == cv2.EVENT_RBUTTONDOWN:
66
+ self.selected_boxes = {}
67
+ self.left_mouse_count = 0
68
+
69
+ def process(self, im0) -> SolutionResults:
70
+ """Process a video frame and calculate the distance between two selected bounding boxes.
71
+
72
+ This method extracts tracks from the input frame, annotates bounding boxes, and calculates the distance between
73
+ two user-selected objects if they have been chosen.
74
+
75
+ Args:
76
+ im0 (np.ndarray): The input image frame to process.
77
+
78
+ Returns:
79
+ (SolutionResults): Contains processed image `plot_im`, `total_tracks` (int) representing the total number of
80
+ tracked objects, and `pixels_distance` (float) representing the distance between selected objects
81
+ in pixels.
82
+
83
+ Examples:
84
+ >>> import numpy as np
85
+ >>> from ultralytics.solutions import DistanceCalculation
86
+ >>> dc = DistanceCalculation()
87
+ >>> frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
88
+ >>> results = dc.process(frame)
89
+ >>> print(f"Distance: {results.pixels_distance:.2f} pixels")
90
+ """
91
+ self.extract_tracks(im0) # Extract tracks
92
+ annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
93
+
94
+ pixels_distance = 0
95
+ # Iterate over bounding boxes, track ids and classes index
96
+ for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
97
+ annotator.box_label(box, color=colors(int(cls), True), label=self.adjust_box_label(cls, conf, track_id))
98
+
99
+ # Update selected boxes if they're being tracked
100
+ if len(self.selected_boxes) == 2:
101
+ for trk_id in self.selected_boxes.keys():
102
+ if trk_id == track_id:
103
+ self.selected_boxes[track_id] = box
104
+
105
+ if len(self.selected_boxes) == 2:
106
+ # Calculate centroids of selected boxes
107
+ self.centroids.extend(
108
+ [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
109
+ )
110
+ # Calculate Euclidean distance between centroids
111
+ pixels_distance = math.sqrt(
112
+ (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
113
+ )
114
+ annotator.plot_distance_and_line(pixels_distance, self.centroids)
115
+
116
+ self.centroids = [] # Reset centroids for next frame
117
+ plot_im = annotator.result()
118
+ self.display_output(plot_im) # Display output with base class function
119
+ if self.CFG.get("show") and self.env_check:
120
+ cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
121
+
122
+ # Return SolutionResults with processed image and calculated metrics
123
+ return SolutionResults(plot_im=plot_im, pixels_distance=pixels_distance, total_tracks=len(self.track_ids))
@@ -0,0 +1,125 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ import cv2
8
+ import numpy as np
9
+
10
+ from ultralytics.solutions.object_counter import ObjectCounter
11
+ from ultralytics.solutions.solutions import SolutionAnnotator, SolutionResults
12
+
13
+
14
+ class Heatmap(ObjectCounter):
15
+ """A class to draw heatmaps in real-time video streams based on object tracks.
16
+
17
+ This class extends the ObjectCounter class to generate and visualize heatmaps of object movements in video
18
+ streams. It uses tracked object positions to create a cumulative heatmap effect over time.
19
+
20
+ Attributes:
21
+ initialized (bool): Flag indicating whether the heatmap has been initialized.
22
+ colormap (int): OpenCV colormap used for heatmap visualization.
23
+ heatmap (np.ndarray): Array storing the cumulative heatmap data.
24
+ annotator (SolutionAnnotator): Object for drawing annotations on the image.
25
+
26
+ Methods:
27
+ heatmap_effect: Calculate and update the heatmap effect for a given bounding box.
28
+ process: Generate and apply the heatmap effect to each frame.
29
+
30
+ Examples:
31
+ >>> from ultralytics.solutions import Heatmap
32
+ >>> heatmap = Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET)
33
+ >>> frame = cv2.imread("frame.jpg")
34
+ >>> processed_frame = heatmap.process(frame)
35
+ """
36
+
37
+ def __init__(self, **kwargs: Any) -> None:
38
+ """Initialize the Heatmap class for real-time video stream heatmap generation based on object tracks.
39
+
40
+ Args:
41
+ **kwargs (Any): Keyword arguments passed to the parent ObjectCounter class.
42
+ """
43
+ super().__init__(**kwargs)
44
+
45
+ self.initialized = False # Flag for heatmap initialization
46
+ if self.region is not None: # Check if user provided the region coordinates
47
+ self.initialize_region()
48
+
49
+ # Store colormap
50
+ self.colormap = self.CFG["colormap"]
51
+ self.heatmap = None
52
+
53
+ def heatmap_effect(self, box: list[float]) -> None:
54
+ """Efficiently calculate heatmap area and effect location for applying colormap.
55
+
56
+ Args:
57
+ box (list[float]): Bounding box coordinates [x0, y0, x1, y1].
58
+ """
59
+ x0, y0, x1, y1 = map(int, box)
60
+ radius_squared = (min(x1 - x0, y1 - y0) // 2) ** 2
61
+
62
+ # Create a meshgrid with region of interest (ROI) for vectorized distance calculations
63
+ xv, yv = np.meshgrid(np.arange(x0, x1), np.arange(y0, y1))
64
+
65
+ # Calculate squared distances from the center
66
+ dist_squared = (xv - ((x0 + x1) // 2)) ** 2 + (yv - ((y0 + y1) // 2)) ** 2
67
+
68
+ # Create a mask of points within the radius
69
+ within_radius = dist_squared <= radius_squared
70
+
71
+ # Update only the values within the bounding box in a single vectorized operation
72
+ self.heatmap[y0:y1, x0:x1][within_radius] += 2
73
+
74
+ def process(self, im0: np.ndarray) -> SolutionResults:
75
+ """Generate heatmap for each frame using Ultralytics tracking.
76
+
77
+ Args:
78
+ im0 (np.ndarray): Input image array for processing.
79
+
80
+ Returns:
81
+ (SolutionResults): Contains processed image `plot_im`, 'in_count' (int, count of objects entering the
82
+ region), 'out_count' (int, count of objects exiting the region), 'classwise_count' (dict, per-class
83
+ object count), and 'total_tracks' (int, total number of tracked objects).
84
+ """
85
+ if not self.initialized:
86
+ self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
87
+ self.initialized = True # Initialize heatmap only once
88
+
89
+ self.extract_tracks(im0) # Extract tracks
90
+ self.annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
91
+
92
+ # Iterate over bounding boxes, track ids and classes index
93
+ for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
94
+ # Apply heatmap effect for the bounding box
95
+ self.heatmap_effect(box)
96
+
97
+ if self.region is not None:
98
+ self.annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
99
+ self.store_tracking_history(track_id, box) # Store track history
100
+ # Get previous position if available
101
+ prev_position = None
102
+ if len(self.track_history[track_id]) > 1:
103
+ prev_position = self.track_history[track_id][-2]
104
+ self.count_objects(self.track_history[track_id][-1], track_id, prev_position, cls) # object counting
105
+
106
+ plot_im = self.annotator.result()
107
+ if self.region is not None:
108
+ self.display_counts(plot_im) # Display the counts on the frame
109
+
110
+ # Normalize, apply colormap to heatmap and combine with original image
111
+ if self.track_data.is_track:
112
+ normalized_heatmap = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
113
+ colored_heatmap = cv2.applyColorMap(normalized_heatmap, self.colormap)
114
+ plot_im = cv2.addWeighted(plot_im, 0.5, colored_heatmap, 0.5, 0)
115
+
116
+ self.display_output(plot_im) # Display output with base class function
117
+
118
+ # Return SolutionResults
119
+ return SolutionResults(
120
+ plot_im=plot_im,
121
+ in_count=self.in_count,
122
+ out_count=self.out_count,
123
+ classwise_count=dict(self.classwise_count),
124
+ total_tracks=len(self.track_ids),
125
+ )