dgenerate_ultralytics_headless-8.3.134-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
- tests/__init__.py +22 -0
- tests/conftest.py +83 -0
- tests/test_cli.py +138 -0
- tests/test_cuda.py +215 -0
- tests/test_engine.py +131 -0
- tests/test_exports.py +236 -0
- tests/test_integrations.py +154 -0
- tests/test_python.py +694 -0
- tests/test_solutions.py +187 -0
- ultralytics/__init__.py +30 -0
- ultralytics/assets/bus.jpg +0 -0
- ultralytics/assets/zidane.jpg +0 -0
- ultralytics/cfg/__init__.py +1023 -0
- ultralytics/cfg/datasets/Argoverse.yaml +77 -0
- ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
- ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
- ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
- ultralytics/cfg/datasets/Objects365.yaml +443 -0
- ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
- ultralytics/cfg/datasets/VOC.yaml +106 -0
- ultralytics/cfg/datasets/VisDrone.yaml +77 -0
- ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
- ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
- ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
- ultralytics/cfg/datasets/coco-pose.yaml +42 -0
- ultralytics/cfg/datasets/coco.yaml +118 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco128.yaml +101 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
- ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
- ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco8.yaml +101 -0
- ultralytics/cfg/datasets/crack-seg.yaml +22 -0
- ultralytics/cfg/datasets/dog-pose.yaml +24 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
- ultralytics/cfg/datasets/dota8.yaml +35 -0
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
- ultralytics/cfg/datasets/lvis.yaml +1240 -0
- ultralytics/cfg/datasets/medical-pills.yaml +22 -0
- ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
- ultralytics/cfg/datasets/package-seg.yaml +22 -0
- ultralytics/cfg/datasets/signature.yaml +21 -0
- ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
- ultralytics/cfg/datasets/xView.yaml +155 -0
- ultralytics/cfg/default.yaml +127 -0
- ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
- ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
- ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
- ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
- ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
- ultralytics/cfg/models/11/yolo11.yaml +50 -0
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
- ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
- ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
- ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
- ultralytics/cfg/models/12/yolo12.yaml +48 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
- ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
- ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
- ultralytics/cfg/models/v3/yolov3.yaml +49 -0
- ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
- ultralytics/cfg/models/v5/yolov5.yaml +51 -0
- ultralytics/cfg/models/v6/yolov6.yaml +56 -0
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
- ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
- ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8.yaml +49 -0
- ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
- ultralytics/cfg/trackers/botsort.yaml +22 -0
- ultralytics/cfg/trackers/bytetrack.yaml +14 -0
- ultralytics/data/__init__.py +26 -0
- ultralytics/data/annotator.py +66 -0
- ultralytics/data/augment.py +2945 -0
- ultralytics/data/base.py +438 -0
- ultralytics/data/build.py +258 -0
- ultralytics/data/converter.py +754 -0
- ultralytics/data/dataset.py +834 -0
- ultralytics/data/loaders.py +676 -0
- ultralytics/data/scripts/download_weights.sh +18 -0
- ultralytics/data/scripts/get_coco.sh +61 -0
- ultralytics/data/scripts/get_coco128.sh +18 -0
- ultralytics/data/scripts/get_imagenet.sh +52 -0
- ultralytics/data/split.py +125 -0
- ultralytics/data/split_dota.py +325 -0
- ultralytics/data/utils.py +777 -0
- ultralytics/engine/__init__.py +1 -0
- ultralytics/engine/exporter.py +1519 -0
- ultralytics/engine/model.py +1156 -0
- ultralytics/engine/predictor.py +502 -0
- ultralytics/engine/results.py +1840 -0
- ultralytics/engine/trainer.py +853 -0
- ultralytics/engine/tuner.py +243 -0
- ultralytics/engine/validator.py +377 -0
- ultralytics/hub/__init__.py +168 -0
- ultralytics/hub/auth.py +137 -0
- ultralytics/hub/google/__init__.py +176 -0
- ultralytics/hub/session.py +446 -0
- ultralytics/hub/utils.py +248 -0
- ultralytics/models/__init__.py +9 -0
- ultralytics/models/fastsam/__init__.py +7 -0
- ultralytics/models/fastsam/model.py +61 -0
- ultralytics/models/fastsam/predict.py +181 -0
- ultralytics/models/fastsam/utils.py +24 -0
- ultralytics/models/fastsam/val.py +40 -0
- ultralytics/models/nas/__init__.py +7 -0
- ultralytics/models/nas/model.py +102 -0
- ultralytics/models/nas/predict.py +58 -0
- ultralytics/models/nas/val.py +39 -0
- ultralytics/models/rtdetr/__init__.py +7 -0
- ultralytics/models/rtdetr/model.py +63 -0
- ultralytics/models/rtdetr/predict.py +84 -0
- ultralytics/models/rtdetr/train.py +85 -0
- ultralytics/models/rtdetr/val.py +191 -0
- ultralytics/models/sam/__init__.py +6 -0
- ultralytics/models/sam/amg.py +260 -0
- ultralytics/models/sam/build.py +358 -0
- ultralytics/models/sam/model.py +170 -0
- ultralytics/models/sam/modules/__init__.py +1 -0
- ultralytics/models/sam/modules/blocks.py +1129 -0
- ultralytics/models/sam/modules/decoders.py +515 -0
- ultralytics/models/sam/modules/encoders.py +854 -0
- ultralytics/models/sam/modules/memory_attention.py +299 -0
- ultralytics/models/sam/modules/sam.py +1006 -0
- ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
- ultralytics/models/sam/modules/transformer.py +351 -0
- ultralytics/models/sam/modules/utils.py +394 -0
- ultralytics/models/sam/predict.py +1605 -0
- ultralytics/models/utils/__init__.py +1 -0
- ultralytics/models/utils/loss.py +455 -0
- ultralytics/models/utils/ops.py +268 -0
- ultralytics/models/yolo/__init__.py +7 -0
- ultralytics/models/yolo/classify/__init__.py +7 -0
- ultralytics/models/yolo/classify/predict.py +88 -0
- ultralytics/models/yolo/classify/train.py +233 -0
- ultralytics/models/yolo/classify/val.py +215 -0
- ultralytics/models/yolo/detect/__init__.py +7 -0
- ultralytics/models/yolo/detect/predict.py +124 -0
- ultralytics/models/yolo/detect/train.py +217 -0
- ultralytics/models/yolo/detect/val.py +451 -0
- ultralytics/models/yolo/model.py +354 -0
- ultralytics/models/yolo/obb/__init__.py +7 -0
- ultralytics/models/yolo/obb/predict.py +66 -0
- ultralytics/models/yolo/obb/train.py +81 -0
- ultralytics/models/yolo/obb/val.py +283 -0
- ultralytics/models/yolo/pose/__init__.py +7 -0
- ultralytics/models/yolo/pose/predict.py +79 -0
- ultralytics/models/yolo/pose/train.py +154 -0
- ultralytics/models/yolo/pose/val.py +394 -0
- ultralytics/models/yolo/segment/__init__.py +7 -0
- ultralytics/models/yolo/segment/predict.py +113 -0
- ultralytics/models/yolo/segment/train.py +123 -0
- ultralytics/models/yolo/segment/val.py +428 -0
- ultralytics/models/yolo/world/__init__.py +5 -0
- ultralytics/models/yolo/world/train.py +119 -0
- ultralytics/models/yolo/world/train_world.py +176 -0
- ultralytics/models/yolo/yoloe/__init__.py +22 -0
- ultralytics/models/yolo/yoloe/predict.py +169 -0
- ultralytics/models/yolo/yoloe/train.py +298 -0
- ultralytics/models/yolo/yoloe/train_seg.py +124 -0
- ultralytics/models/yolo/yoloe/val.py +191 -0
- ultralytics/nn/__init__.py +29 -0
- ultralytics/nn/autobackend.py +842 -0
- ultralytics/nn/modules/__init__.py +182 -0
- ultralytics/nn/modules/activation.py +53 -0
- ultralytics/nn/modules/block.py +1966 -0
- ultralytics/nn/modules/conv.py +712 -0
- ultralytics/nn/modules/head.py +880 -0
- ultralytics/nn/modules/transformer.py +713 -0
- ultralytics/nn/modules/utils.py +164 -0
- ultralytics/nn/tasks.py +1627 -0
- ultralytics/nn/text_model.py +351 -0
- ultralytics/solutions/__init__.py +41 -0
- ultralytics/solutions/ai_gym.py +116 -0
- ultralytics/solutions/analytics.py +252 -0
- ultralytics/solutions/config.py +106 -0
- ultralytics/solutions/distance_calculation.py +124 -0
- ultralytics/solutions/heatmap.py +127 -0
- ultralytics/solutions/instance_segmentation.py +84 -0
- ultralytics/solutions/object_blurrer.py +90 -0
- ultralytics/solutions/object_counter.py +195 -0
- ultralytics/solutions/object_cropper.py +84 -0
- ultralytics/solutions/parking_management.py +273 -0
- ultralytics/solutions/queue_management.py +93 -0
- ultralytics/solutions/region_counter.py +120 -0
- ultralytics/solutions/security_alarm.py +154 -0
- ultralytics/solutions/similarity_search.py +172 -0
- ultralytics/solutions/solutions.py +724 -0
- ultralytics/solutions/speed_estimation.py +110 -0
- ultralytics/solutions/streamlit_inference.py +196 -0
- ultralytics/solutions/templates/similarity-search.html +160 -0
- ultralytics/solutions/trackzone.py +88 -0
- ultralytics/solutions/vision_eye.py +68 -0
- ultralytics/trackers/__init__.py +7 -0
- ultralytics/trackers/basetrack.py +124 -0
- ultralytics/trackers/bot_sort.py +260 -0
- ultralytics/trackers/byte_tracker.py +480 -0
- ultralytics/trackers/track.py +125 -0
- ultralytics/trackers/utils/__init__.py +1 -0
- ultralytics/trackers/utils/gmc.py +376 -0
- ultralytics/trackers/utils/kalman_filter.py +493 -0
- ultralytics/trackers/utils/matching.py +157 -0
- ultralytics/utils/__init__.py +1435 -0
- ultralytics/utils/autobatch.py +106 -0
- ultralytics/utils/autodevice.py +174 -0
- ultralytics/utils/benchmarks.py +695 -0
- ultralytics/utils/callbacks/__init__.py +5 -0
- ultralytics/utils/callbacks/base.py +234 -0
- ultralytics/utils/callbacks/clearml.py +153 -0
- ultralytics/utils/callbacks/comet.py +552 -0
- ultralytics/utils/callbacks/dvc.py +205 -0
- ultralytics/utils/callbacks/hub.py +108 -0
- ultralytics/utils/callbacks/mlflow.py +138 -0
- ultralytics/utils/callbacks/neptune.py +140 -0
- ultralytics/utils/callbacks/raytune.py +43 -0
- ultralytics/utils/callbacks/tensorboard.py +132 -0
- ultralytics/utils/callbacks/wb.py +185 -0
- ultralytics/utils/checks.py +897 -0
- ultralytics/utils/dist.py +119 -0
- ultralytics/utils/downloads.py +499 -0
- ultralytics/utils/errors.py +43 -0
- ultralytics/utils/export.py +219 -0
- ultralytics/utils/files.py +221 -0
- ultralytics/utils/instance.py +499 -0
- ultralytics/utils/loss.py +813 -0
- ultralytics/utils/metrics.py +1356 -0
- ultralytics/utils/ops.py +885 -0
- ultralytics/utils/patches.py +143 -0
- ultralytics/utils/plotting.py +1011 -0
- ultralytics/utils/tal.py +416 -0
- ultralytics/utils/torch_utils.py +990 -0
- ultralytics/utils/triton.py +116 -0
- ultralytics/utils/tuner.py +159 -0
ultralytics/solutions/analytics.py
@@ -0,0 +1,252 @@

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from itertools import cycle

import cv2
import numpy as np

from ultralytics.solutions.solutions import BaseSolution, SolutionResults  # Import a parent class


class Analytics(BaseSolution):
    """
    A class for creating and updating various types of charts for visual analytics.

    This class extends BaseSolution to provide functionality for generating line, bar, pie, and area charts
    based on object detection and tracking data.

    Attributes:
        type (str): The type of analytics chart to generate ('line', 'bar', 'pie', or 'area').
        x_label (str): Label for the x-axis.
        y_label (str): Label for the y-axis.
        bg_color (str): Background color of the chart frame.
        fg_color (str): Foreground color of the chart frame.
        title (str): Title of the chart window.
        max_points (int): Maximum number of data points to display on the chart.
        fontsize (int): Font size for text display.
        color_cycle (cycle): Cyclic iterator for chart colors.
        total_counts (int): Total count of detected objects (used for line charts).
        clswise_count (Dict[str, int]): Dictionary for class-wise object counts.
        fig (Figure): Matplotlib figure object for the chart.
        ax (Axes): Matplotlib axes object for the chart.
        canvas (FigureCanvasAgg): Canvas for rendering the chart.
        lines (dict): Dictionary to store line objects for area charts.
        color_mapping (Dict[str, str]): Dictionary mapping class labels to colors for consistent visualization.

    Methods:
        process: Process image data and update the chart.
        update_graph: Update the chart with new data points.

    Examples:
        >>> analytics = Analytics(analytics_type="line")
        >>> frame = cv2.imread("image.jpg")
        >>> results = analytics.process(frame, frame_number=1)
        >>> cv2.imshow("Analytics", results.plot_im)
    """

    def __init__(self, **kwargs):
        """Initialize Analytics class with various chart types for visual data representation."""
        super().__init__(**kwargs)

        import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
        from matplotlib.backends.backend_agg import FigureCanvasAgg
        from matplotlib.figure import Figure

        self.type = self.CFG["analytics_type"]  # type of analytics i.e "line", "pie", "bar" or "area" charts.
        self.x_label = "Classes" if self.type in {"bar", "pie"} else "Frame#"
        self.y_label = "Total Counts"

        # Predefined data
        self.bg_color = "#F3F3F3"  # background color of frame
        self.fg_color = "#111E68"  # foreground color of frame
        self.title = "Ultralytics Solutions"  # window name
        self.max_points = 45  # maximum points to be drawn on window
        self.fontsize = 25  # text font size for display
        figsize = self.CFG["figsize"]  # set output image size i.e (12.8, 7.2) -> w = 1280, h = 720
        self.color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])

        self.total_counts = 0  # count variable for storing total counts i.e. for line
        self.clswise_count = {}  # dictionary for class-wise counts

        # Ensure line and area chart
        if self.type in {"line", "area"}:
            self.lines = {}
            self.fig = Figure(facecolor=self.bg_color, figsize=figsize)
            self.canvas = FigureCanvasAgg(self.fig)  # Set common axis properties
            self.ax = self.fig.add_subplot(111, facecolor=self.bg_color)
            if self.type == "line":
                (self.line,) = self.ax.plot([], [], color="cyan", linewidth=self.line_width)
        elif self.type in {"bar", "pie"}:
            # Initialize bar or pie plot
            self.fig, self.ax = plt.subplots(figsize=figsize, facecolor=self.bg_color)
            self.canvas = FigureCanvasAgg(self.fig)  # Set common axis properties
            self.ax.set_facecolor(self.bg_color)
            self.color_mapping = {}

            if self.type == "pie":  # Ensure pie chart is circular
                self.ax.axis("equal")

    def process(self, im0, frame_number):
        """
        Process image data and run object tracking to update analytics charts.

        Args:
            im0 (np.ndarray): Input image for processing.
            frame_number (int): Video frame number for plotting the data.

        Returns:
            (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects)
                and 'classwise_count' (dict, per-class object count).

        Raises:
            ModuleNotFoundError: If an unsupported chart type is specified.

        Examples:
            >>> analytics = Analytics(analytics_type="line")
            >>> frame = np.zeros((480, 640, 3), dtype=np.uint8)
            >>> results = analytics.process(frame, frame_number=1)
        """
        self.extract_tracks(im0)  # Extract tracks
        if self.type == "line":
            for _ in self.boxes:
                self.total_counts += 1
            plot_im = self.update_graph(frame_number=frame_number)
            self.total_counts = 0
        elif self.type in {"pie", "bar", "area"}:
            self.clswise_count = {}
            for cls in self.clss:
                if self.names[int(cls)] in self.clswise_count:
                    self.clswise_count[self.names[int(cls)]] += 1
                else:
                    self.clswise_count[self.names[int(cls)]] = 1
            plot_im = self.update_graph(frame_number=frame_number, count_dict=self.clswise_count, plot=self.type)
        else:
            raise ModuleNotFoundError(f"{self.type} chart is not supported ❌")

        # return output dictionary with summary for more usage
        return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids), classwise_count=self.clswise_count)

    def update_graph(self, frame_number, count_dict=None, plot="line"):
        """
        Update the graph with new data for single or multiple classes.

        Args:
            frame_number (int): The current frame number.
            count_dict (Dict[str, int] | None): Dictionary with class names as keys and counts as values for multiple
                classes. If None, updates a single line graph.
            plot (str): Type of the plot. Options are 'line', 'bar', 'pie', or 'area'.

        Returns:
            (np.ndarray): Updated image containing the graph.

        Examples:
            >>> analytics = Analytics(analytics_type="bar")
            >>> frame_num = 10
            >>> results_dict = {"person": 5, "car": 3}
            >>> updated_image = analytics.update_graph(frame_num, results_dict, plot="bar")
        """
        if count_dict is None:
            # Single line update
            x_data = np.append(self.line.get_xdata(), float(frame_number))
            y_data = np.append(self.line.get_ydata(), float(self.total_counts))

            if len(x_data) > self.max_points:
                x_data, y_data = x_data[-self.max_points :], y_data[-self.max_points :]

            self.line.set_data(x_data, y_data)
            self.line.set_label("Counts")
            self.line.set_color("#7b0068")  # Pink color
            self.line.set_marker("*")
            self.line.set_markersize(self.line_width * 5)
        else:
            labels = list(count_dict.keys())
            counts = list(count_dict.values())
            if plot == "area":
                color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])
                # Multiple lines or area update
                x_data = self.ax.lines[0].get_xdata() if self.ax.lines else np.array([])
                y_data_dict = {key: np.array([]) for key in count_dict.keys()}
                if self.ax.lines:
                    for line, key in zip(self.ax.lines, count_dict.keys()):
                        y_data_dict[key] = line.get_ydata()

                x_data = np.append(x_data, float(frame_number))
                max_length = len(x_data)
                for key in count_dict.keys():
                    y_data_dict[key] = np.append(y_data_dict[key], float(count_dict[key]))
                    if len(y_data_dict[key]) < max_length:
                        y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])))
                if len(x_data) > self.max_points:
                    x_data = x_data[1:]
                    for key in count_dict.keys():
                        y_data_dict[key] = y_data_dict[key][1:]

                self.ax.clear()
                for key, y_data in y_data_dict.items():
                    color = next(color_cycle)
                    self.ax.fill_between(x_data, y_data, color=color, alpha=0.7)
                    self.ax.plot(
                        x_data,
                        y_data,
                        color=color,
                        linewidth=self.line_width,
                        marker="o",
                        markersize=self.line_width * 5,
                        label=f"{key} Data Points",
                    )
            if plot == "bar":
                self.ax.clear()  # clear bar data
                for label in labels:  # Map labels to colors
                    if label not in self.color_mapping:
                        self.color_mapping[label] = next(self.color_cycle)
                colors = [self.color_mapping[label] for label in labels]
                bars = self.ax.bar(labels, counts, color=colors)
                for bar, count in zip(bars, counts):
                    self.ax.text(
                        bar.get_x() + bar.get_width() / 2,
                        bar.get_height(),
                        str(count),
                        ha="center",
                        va="bottom",
                        color=self.fg_color,
                    )
                # Create the legend using labels from the bars
                for bar, label in zip(bars, labels):
                    bar.set_label(label)  # Assign label to each bar
                self.ax.legend(loc="upper left", fontsize=13, facecolor=self.fg_color, edgecolor=self.fg_color)
            if plot == "pie":
                total = sum(counts)
                percentages = [size / total * 100 for size in counts]
                start_angle = 90
                self.ax.clear()

                # Create pie chart and create legend labels with percentages
                wedges, _ = self.ax.pie(
                    counts, labels=labels, startangle=start_angle, textprops={"color": self.fg_color}, autopct=None
                )
                legend_labels = [f"{label} ({percentage:.1f}%)" for label, percentage in zip(labels, percentages)]

                # Assign the legend using the wedges and manually created labels
                self.ax.legend(wedges, legend_labels, title="Classes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
                self.fig.subplots_adjust(left=0.1, right=0.75)  # Adjust layout to fit the legend

        # Common plot settings
        self.ax.set_facecolor("#f0f0f0")  # Set to light gray or any other color you like
        self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
        self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
        self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)

        # Add and format legend
        legend = self.ax.legend(loc="upper left", fontsize=13, facecolor=self.bg_color, edgecolor=self.bg_color)
        for text in legend.get_texts():
            text.set_color(self.fg_color)

        # Redraw graph, update view, capture, and display the updated plot
        self.ax.relim()
        self.ax.autoscale_view()
        self.canvas.draw()
        im0 = np.array(self.canvas.renderer.buffer_rgba())
        im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
        self.display_output(im0)

        return im0  # Return the image
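
As a quick orientation for the Analytics class above, a minimal driver loop might look like the sketch below. It assumes Analytics is re-exported from ultralytics.solutions (the wheel ships ultralytics/solutions/__init__.py) and uses placeholder video and model paths; since this is a headless build, it keeps show=False and consumes results programmatically rather than via cv2.imshow.

import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder source
assert cap.isOpened(), "Error reading video file"

# "line" charts total detections per frame; "pie", "bar", "area" chart per-class counts
analytics = solutions.Analytics(analytics_type="line", model="yolo11n.pt", show=False)

frame_count = 0
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    frame_count += 1
    results = analytics.process(im0, frame_count)  # SolutionResults with the rendered chart
    chart = results.plot_im  # BGR np.ndarray, e.g. for cv2.VideoWriter

cap.release()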

ultralytics/solutions/config.py
@@ -0,0 +1,106 @@

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from dataclasses import dataclass, field
from typing import List, Optional, Tuple

import cv2


@dataclass
class SolutionConfig:
    """
    Manages configuration parameters for Ultralytics Vision AI solutions.

    The SolutionConfig class serves as a centralized configuration container for all the
    Ultralytics solution modules: https://docs.ultralytics.com/solutions/#solutions.
    It leverages Python `dataclass` for clear, type-safe, and maintainable parameter definitions.

    Attributes:
        source (Optional[str]): Path to the input source (video, RTSP, etc.). Only usable with Solutions CLI.
        model (Optional[str]): Path to the Ultralytics YOLO model to be used for inference.
        classes (Optional[List[int]]): List of class indices to filter detections.
        show_conf (bool): Whether to show confidence scores on the visual output.
        show_labels (bool): Whether to display class labels on visual output.
        region (Optional[List[Tuple[int, int]]]): Polygonal region or line for object counting.
        colormap (Optional[int]): OpenCV colormap constant for visual overlays (e.g., cv2.COLORMAP_JET).
        show_in (bool): Whether to display count number for objects entering the region.
        show_out (bool): Whether to display count number for objects leaving the region.
        up_angle (float): Upper angle threshold used in pose-based workouts monitoring.
        down_angle (int): Lower angle threshold used in pose-based workouts monitoring.
        kpts (List[int]): Keypoint indices to monitor, e.g., for pose analytics.
        analytics_type (str): Type of analytics to perform ("line", "area", "bar", "pie", etc.).
        figsize (Optional[Tuple[int, int]]): Size of the matplotlib figure used for analytical plots (width, height).
        blur_ratio (float): Ratio used to blur objects in the video frames (0.0 to 1.0).
        vision_point (Tuple[int, int]): Reference point for directional tracking or perspective drawing.
        crop_dir (str): Directory path to save cropped detection images.
        json_file (str): Path to a JSON file containing data for parking areas.
        line_width (int): Width for visual display i.e. bounding boxes, keypoints, counts.
        records (int): Number of detection records to send email alerts.
        fps (float): Frame rate (Frames Per Second) for speed estimation calculation.
        max_hist (int): Maximum number of historical points or states stored per tracked object for speed estimation.
        meter_per_pixel (float): Scale for real-world measurement, used in speed or distance calculations.
        max_speed (int): Maximum speed limit (e.g., km/h or mph) used in visual alerts or constraints.
        show (bool): Whether to display the visual output on screen.
        iou (float): Intersection-over-Union threshold for detection filtering.
        conf (float): Confidence threshold for keeping predictions.
        device (Optional[str]): Device to run inference on (e.g., 'cpu', '0' for CUDA GPU).
        max_det (int): Maximum number of detections allowed per video frame.
        half (bool): Whether to use FP16 precision (requires a supported CUDA device).
        tracker (str): Path to tracking configuration YAML file (e.g., 'botsort.yaml').
        verbose (bool): Enable verbose logging output for debugging or diagnostics.
        data (str): Path to image directory used for similarity search.

    Methods:
        update: Update the configuration with user-defined keyword arguments and raise error on invalid keys.

    Examples:
        >>> from ultralytics.solutions.config import SolutionConfig
        >>> cfg = SolutionConfig(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
        >>> cfg.update(show=False, conf=0.3)
        >>> print(cfg.model)
    """

    source: Optional[str] = None
    model: Optional[str] = None
    classes: Optional[List[int]] = None
    show_conf: bool = True
    show_labels: bool = True
    region: Optional[List[Tuple[int, int]]] = None
    colormap: Optional[int] = cv2.COLORMAP_DEEPGREEN
    show_in: bool = True
    show_out: bool = True
    up_angle: float = 145.0
    down_angle: int = 90
    kpts: List[int] = field(default_factory=lambda: [6, 8, 10])
    analytics_type: str = "line"
    figsize: Optional[Tuple[int, int]] = (12.8, 7.2)
    blur_ratio: float = 0.5
    vision_point: Tuple[int, int] = (20, 20)
    crop_dir: str = "cropped-detections"
    json_file: str = None
    line_width: int = 2
    records: int = 5
    fps: float = 30.0
    max_hist: int = 5
    meter_per_pixel: float = 0.05
    max_speed: int = 120
    show: bool = False
    iou: float = 0.7
    conf: float = 0.25
    device: Optional[str] = None
    max_det: int = 300
    half: bool = False
    tracker: str = "botsort.yaml"
    verbose: bool = True
    data: str = "images"

    def update(self, **kwargs):
        """Update configuration parameters with new values provided as keyword arguments."""
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError(
                    f"❌ {key} is not a valid solution argument, available arguments here: https://docs.ultralytics.com/solutions/#solutions-arguments"
                )
        return self
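
The update() contract above is easy to exercise directly. This short sketch reuses the docstring's own example values and shows the ValueError raised for an unknown key ("confidence" here is a deliberately invalid name for illustration):

from ultralytics.solutions.config import SolutionConfig

cfg = SolutionConfig(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
cfg.update(show=False, conf=0.3)  # valid keys are set via setattr; update() returns self
print(cfg.model, cfg.conf)  # yolo11n.pt 0.3

try:
    cfg.update(confidence=0.3)  # "confidence" is not a field, so update() raises
except ValueError as err:
    print(err)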

ultralytics/solutions/distance_calculation.py
@@ -0,0 +1,124 @@

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import math

import cv2

from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
from ultralytics.utils.plotting import colors


class DistanceCalculation(BaseSolution):
    """
    A class to calculate distance between two objects in a real-time video stream based on their tracks.

    This class extends BaseSolution to provide functionality for selecting objects and calculating the distance
    between them in a video stream using YOLO object detection and tracking.

    Attributes:
        left_mouse_count (int): Counter for left mouse button clicks.
        selected_boxes (Dict[int, List[float]]): Dictionary to store selected bounding boxes and their track IDs.
        centroids (List[List[int]]): List to store centroids of selected bounding boxes.

    Methods:
        mouse_event_for_distance: Handles mouse events for selecting objects in the video stream.
        process: Processes video frames and calculates the distance between selected objects.

    Examples:
        >>> distance_calc = DistanceCalculation()
        >>> frame = cv2.imread("frame.jpg")
        >>> results = distance_calc.process(frame)
        >>> cv2.imshow("Distance Calculation", results.plot_im)
        >>> cv2.waitKey(0)
    """

    def __init__(self, **kwargs):
        """Initializes the DistanceCalculation class for measuring object distances in video streams."""
        super().__init__(**kwargs)

        # Mouse event information
        self.left_mouse_count = 0
        self.selected_boxes = {}
        self.centroids = []  # Store centroids of selected objects

    def mouse_event_for_distance(self, event, x, y, flags, param):
        """
        Handles mouse events to select regions in a real-time video stream for distance calculation.

        Args:
            event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN).
            x (int): X-coordinate of the mouse pointer.
            y (int): Y-coordinate of the mouse pointer.
            flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY).
            param (Any): Additional parameters passed to the function.

        Examples:
            >>> # Assuming 'dc' is an instance of DistanceCalculation
            >>> cv2.setMouseCallback("window_name", dc.mouse_event_for_distance)
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            self.left_mouse_count += 1
            if self.left_mouse_count <= 2:
                for box, track_id in zip(self.boxes, self.track_ids):
                    if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
                        self.selected_boxes[track_id] = box

        elif event == cv2.EVENT_RBUTTONDOWN:
            self.selected_boxes = {}
            self.left_mouse_count = 0

    def process(self, im0):
        """
        Processes a video frame and calculates the distance between two selected bounding boxes.

        This method extracts tracks from the input frame, annotates bounding boxes, and calculates the distance
        between two user-selected objects if they have been chosen.

        Args:
            im0 (numpy.ndarray): The input image frame to process.

        Returns:
            (SolutionResults): Contains processed image `plot_im`, `total_tracks` (int) representing the total number
                of tracked objects, and `pixels_distance` (float) representing the distance between selected objects
                in pixels.

        Examples:
            >>> import numpy as np
            >>> from ultralytics.solutions import DistanceCalculation
            >>> dc = DistanceCalculation()
            >>> frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
            >>> results = dc.process(frame)
            >>> print(f"Distance: {results.pixels_distance:.2f} pixels")
        """
        self.extract_tracks(im0)  # Extract tracks
        annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator

        pixels_distance = 0
        # Iterate over bounding boxes, track ids and classes index
        for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
            annotator.box_label(box, color=colors(int(cls), True), label=self.adjust_box_label(cls, conf, track_id))

            # Update selected boxes if they're being tracked
            if len(self.selected_boxes) == 2:
                for trk_id in self.selected_boxes.keys():
                    if trk_id == track_id:
                        self.selected_boxes[track_id] = box

        if len(self.selected_boxes) == 2:
            # Calculate centroids of selected boxes
            self.centroids.extend(
                [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
            )
            # Calculate Euclidean distance between centroids
            pixels_distance = math.sqrt(
                (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
            )
            annotator.plot_distance_and_line(pixels_distance, self.centroids)

        self.centroids = []  # Reset centroids for next frame
        plot_im = annotator.result()
        self.display_output(plot_im)  # Display output with base class function
        cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)

        # Return SolutionResults with processed image and calculated metrics
        return SolutionResults(plot_im=plot_im, pixels_distance=pixels_distance, total_tracks=len(self.track_ids))
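
A possible driver for DistanceCalculation is sketched below. It assumes the class is re-exported from ultralytics.solutions and that a GUI-enabled OpenCV is available: process() binds its mouse callback to a window named "Ultralytics Solutions", so interactive selection (left-click two tracked boxes, right-click to reset) will not work with this headless wheel unless a GUI build of OpenCV is installed.

import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder source
distance_calc = solutions.DistanceCalculation(model="yolo11n.pt", show=True)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    # pixels_distance stays 0 until exactly two objects have been selected
    results = distance_calc.process(im0)
    if results.pixels_distance:
        print(f"Distance: {results.pixels_distance:.2f} pixels")
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()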

ultralytics/solutions/heatmap.py
@@ -0,0 +1,127 @@

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import cv2
import numpy as np

from ultralytics.solutions.object_counter import ObjectCounter
from ultralytics.solutions.solutions import SolutionAnnotator, SolutionResults


class Heatmap(ObjectCounter):
    """
    A class to draw heatmaps in real-time video streams based on object tracks.

    This class extends the ObjectCounter class to generate and visualize heatmaps of object movements in video
    streams. It uses tracked object positions to create a cumulative heatmap effect over time.

    Attributes:
        initialized (bool): Flag indicating whether the heatmap has been initialized.
        colormap (int): OpenCV colormap used for heatmap visualization.
        heatmap (np.ndarray): Array storing the cumulative heatmap data.
        annotator (SolutionAnnotator): Object for drawing annotations on the image.

    Methods:
        heatmap_effect: Calculate and update the heatmap effect for a given bounding box.
        process: Generate and apply the heatmap effect to each frame.

    Examples:
        >>> from ultralytics.solutions import Heatmap
        >>> heatmap = Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET)
        >>> frame = cv2.imread("frame.jpg")
        >>> processed_frame = heatmap.process(frame)
    """

    def __init__(self, **kwargs):
        """
        Initialize the Heatmap class for real-time video stream heatmap generation based on object tracks.

        Args:
            **kwargs (Any): Keyword arguments passed to the parent ObjectCounter class.
        """
        super().__init__(**kwargs)

        self.initialized = False  # Flag for heatmap initialization
        if self.region is not None:  # Check if user provided the region coordinates
            self.initialize_region()

        # Store colormap
        self.colormap = self.CFG["colormap"]
        self.heatmap = None

    def heatmap_effect(self, box):
        """
        Efficiently calculate heatmap area and effect location for applying colormap.

        Args:
            box (List[float]): Bounding box coordinates [x0, y0, x1, y1].
        """
        x0, y0, x1, y1 = map(int, box)
        radius_squared = (min(x1 - x0, y1 - y0) // 2) ** 2

        # Create a meshgrid with region of interest (ROI) for vectorized distance calculations
        xv, yv = np.meshgrid(np.arange(x0, x1), np.arange(y0, y1))

        # Calculate squared distances from the center
        dist_squared = (xv - ((x0 + x1) // 2)) ** 2 + (yv - ((y0 + y1) // 2)) ** 2

        # Create a mask of points within the radius
        within_radius = dist_squared <= radius_squared

        # Update only the values within the bounding box in a single vectorized operation
        self.heatmap[y0:y1, x0:x1][within_radius] += 2

    def process(self, im0):
        """
        Generate heatmap for each frame using Ultralytics.

        Args:
            im0 (np.ndarray): Input image array for processing.

        Returns:
            (SolutionResults): Contains processed image `plot_im`,
                'in_count' (int, count of objects entering the region),
                'out_count' (int, count of objects exiting the region),
                'classwise_count' (dict, per-class object count), and
                'total_tracks' (int, total number of tracked objects).
        """
        if not self.initialized:
            self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
            self.initialized = True  # Initialize heatmap only once

        self.extract_tracks(im0)  # Extract tracks
        self.annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator

        # Iterate over bounding boxes, track ids and classes index
        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
            # Apply heatmap effect for the bounding box
            self.heatmap_effect(box)

            if self.region is not None:
                self.annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
                self.store_tracking_history(track_id, box)  # Store track history
                # Get previous position if available
                prev_position = None
                if len(self.track_history[track_id]) > 1:
                    prev_position = self.track_history[track_id][-2]
                self.count_objects(self.track_history[track_id][-1], track_id, prev_position, cls)  # object counting

        plot_im = self.annotator.result()
        if self.region is not None:
            self.display_counts(plot_im)  # Display the counts on the frame

        # Normalize, apply colormap to heatmap and combine with original image
        if self.track_data.id is not None:
            normalized_heatmap = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
            colored_heatmap = cv2.applyColorMap(normalized_heatmap, self.colormap)
            plot_im = cv2.addWeighted(plot_im, 0.5, colored_heatmap, 0.5, 0)

        self.display_output(plot_im)  # Display output with base class function

        # Return SolutionResults
        return SolutionResults(
            plot_im=plot_im,
            in_count=self.in_count,
            out_count=self.out_count,
            classwise_count=dict(self.classwise_counts),
            total_tracks=len(self.track_ids),
        )
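
To close the loop on Heatmap: the docstring example feeds a single frame, but the cumulative effect only emerges over a stream. Below is a sketch under the same assumptions as the earlier examples (solutions re-export, placeholder paths); the VideoWriter settings are illustrative, and note that process() returns a SolutionResults, so the blended frame to save is results.plot_im.

import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder source
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

# Without an explicit colormap, the SolutionConfig default (cv2.COLORMAP_DEEPGREEN) applies
heatmap = solutions.Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET, show=False)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    results = heatmap.process(im0)  # heatmap accumulates across frames
    writer.write(results.plot_im)

cap.release()
writer.release()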