dgenerate-ultralytics-headless 8.3.137__py3-none-any.whl → 8.3.224__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/METADATA +41 -34
- dgenerate_ultralytics_headless-8.3.224.dist-info/RECORD +285 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/WHEEL +1 -1
- tests/__init__.py +7 -6
- tests/conftest.py +15 -39
- tests/test_cli.py +17 -17
- tests/test_cuda.py +17 -8
- tests/test_engine.py +36 -10
- tests/test_exports.py +98 -37
- tests/test_integrations.py +12 -15
- tests/test_python.py +126 -82
- tests/test_solutions.py +319 -135
- ultralytics/__init__.py +27 -9
- ultralytics/cfg/__init__.py +83 -87
- ultralytics/cfg/datasets/Argoverse.yaml +4 -4
- ultralytics/cfg/datasets/DOTAv1.5.yaml +2 -2
- ultralytics/cfg/datasets/DOTAv1.yaml +2 -2
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +2 -2
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +4 -5
- ultralytics/cfg/datasets/ImageNet.yaml +3 -3
- ultralytics/cfg/datasets/Objects365.yaml +24 -20
- ultralytics/cfg/datasets/SKU-110K.yaml +9 -9
- ultralytics/cfg/datasets/VOC.yaml +10 -13
- ultralytics/cfg/datasets/VisDrone.yaml +43 -33
- ultralytics/cfg/datasets/african-wildlife.yaml +5 -5
- ultralytics/cfg/datasets/brain-tumor.yaml +4 -5
- ultralytics/cfg/datasets/carparts-seg.yaml +5 -5
- ultralytics/cfg/datasets/coco-pose.yaml +26 -4
- ultralytics/cfg/datasets/coco.yaml +4 -4
- ultralytics/cfg/datasets/coco128-seg.yaml +2 -2
- ultralytics/cfg/datasets/coco128.yaml +2 -2
- ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +2 -2
- ultralytics/cfg/datasets/coco8-pose.yaml +23 -2
- ultralytics/cfg/datasets/coco8-seg.yaml +2 -2
- ultralytics/cfg/datasets/coco8.yaml +2 -2
- ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
- ultralytics/cfg/datasets/crack-seg.yaml +5 -5
- ultralytics/cfg/datasets/dog-pose.yaml +32 -4
- ultralytics/cfg/datasets/dota8-multispectral.yaml +2 -2
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +29 -4
- ultralytics/cfg/datasets/lvis.yaml +9 -9
- ultralytics/cfg/datasets/medical-pills.yaml +4 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +7 -10
- ultralytics/cfg/datasets/package-seg.yaml +5 -5
- ultralytics/cfg/datasets/signature.yaml +4 -4
- ultralytics/cfg/datasets/tiger-pose.yaml +20 -4
- ultralytics/cfg/datasets/xView.yaml +5 -5
- ultralytics/cfg/default.yaml +96 -93
- ultralytics/cfg/trackers/botsort.yaml +16 -17
- ultralytics/cfg/trackers/bytetrack.yaml +9 -11
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +12 -12
- ultralytics/data/augment.py +531 -564
- ultralytics/data/base.py +76 -81
- ultralytics/data/build.py +206 -42
- ultralytics/data/converter.py +179 -78
- ultralytics/data/dataset.py +121 -121
- ultralytics/data/loaders.py +114 -91
- ultralytics/data/split.py +28 -15
- ultralytics/data/split_dota.py +67 -48
- ultralytics/data/utils.py +110 -89
- ultralytics/engine/exporter.py +422 -460
- ultralytics/engine/model.py +224 -252
- ultralytics/engine/predictor.py +94 -89
- ultralytics/engine/results.py +345 -595
- ultralytics/engine/trainer.py +231 -134
- ultralytics/engine/tuner.py +279 -73
- ultralytics/engine/validator.py +53 -46
- ultralytics/hub/__init__.py +26 -28
- ultralytics/hub/auth.py +30 -16
- ultralytics/hub/google/__init__.py +34 -36
- ultralytics/hub/session.py +53 -77
- ultralytics/hub/utils.py +23 -109
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +36 -18
- ultralytics/models/fastsam/predict.py +33 -44
- ultralytics/models/fastsam/utils.py +4 -5
- ultralytics/models/fastsam/val.py +12 -14
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +16 -20
- ultralytics/models/nas/predict.py +12 -14
- ultralytics/models/nas/val.py +4 -5
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +9 -9
- ultralytics/models/rtdetr/predict.py +22 -17
- ultralytics/models/rtdetr/train.py +20 -16
- ultralytics/models/rtdetr/val.py +79 -59
- ultralytics/models/sam/__init__.py +8 -2
- ultralytics/models/sam/amg.py +53 -38
- ultralytics/models/sam/build.py +29 -31
- ultralytics/models/sam/model.py +33 -38
- ultralytics/models/sam/modules/blocks.py +159 -182
- ultralytics/models/sam/modules/decoders.py +38 -47
- ultralytics/models/sam/modules/encoders.py +114 -133
- ultralytics/models/sam/modules/memory_attention.py +38 -31
- ultralytics/models/sam/modules/sam.py +114 -93
- ultralytics/models/sam/modules/tiny_encoder.py +268 -291
- ultralytics/models/sam/modules/transformer.py +59 -66
- ultralytics/models/sam/modules/utils.py +55 -72
- ultralytics/models/sam/predict.py +745 -341
- ultralytics/models/utils/loss.py +118 -107
- ultralytics/models/utils/ops.py +118 -71
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +28 -26
- ultralytics/models/yolo/classify/train.py +50 -81
- ultralytics/models/yolo/classify/val.py +68 -61
- ultralytics/models/yolo/detect/predict.py +12 -15
- ultralytics/models/yolo/detect/train.py +56 -46
- ultralytics/models/yolo/detect/val.py +279 -223
- ultralytics/models/yolo/model.py +167 -86
- ultralytics/models/yolo/obb/predict.py +7 -11
- ultralytics/models/yolo/obb/train.py +23 -25
- ultralytics/models/yolo/obb/val.py +107 -99
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +12 -14
- ultralytics/models/yolo/pose/train.py +31 -69
- ultralytics/models/yolo/pose/val.py +119 -254
- ultralytics/models/yolo/segment/predict.py +21 -25
- ultralytics/models/yolo/segment/train.py +12 -66
- ultralytics/models/yolo/segment/val.py +126 -305
- ultralytics/models/yolo/world/train.py +53 -45
- ultralytics/models/yolo/world/train_world.py +51 -32
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +30 -37
- ultralytics/models/yolo/yoloe/train.py +89 -71
- ultralytics/models/yolo/yoloe/train_seg.py +15 -17
- ultralytics/models/yolo/yoloe/val.py +56 -41
- ultralytics/nn/__init__.py +9 -11
- ultralytics/nn/autobackend.py +179 -107
- ultralytics/nn/modules/__init__.py +67 -67
- ultralytics/nn/modules/activation.py +8 -7
- ultralytics/nn/modules/block.py +302 -323
- ultralytics/nn/modules/conv.py +61 -104
- ultralytics/nn/modules/head.py +488 -186
- ultralytics/nn/modules/transformer.py +183 -123
- ultralytics/nn/modules/utils.py +15 -20
- ultralytics/nn/tasks.py +327 -203
- ultralytics/nn/text_model.py +81 -65
- ultralytics/py.typed +1 -0
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +19 -27
- ultralytics/solutions/analytics.py +36 -26
- ultralytics/solutions/config.py +29 -28
- ultralytics/solutions/distance_calculation.py +23 -24
- ultralytics/solutions/heatmap.py +17 -19
- ultralytics/solutions/instance_segmentation.py +21 -19
- ultralytics/solutions/object_blurrer.py +16 -17
- ultralytics/solutions/object_counter.py +48 -53
- ultralytics/solutions/object_cropper.py +22 -16
- ultralytics/solutions/parking_management.py +61 -58
- ultralytics/solutions/queue_management.py +19 -19
- ultralytics/solutions/region_counter.py +63 -50
- ultralytics/solutions/security_alarm.py +22 -25
- ultralytics/solutions/similarity_search.py +107 -60
- ultralytics/solutions/solutions.py +343 -262
- ultralytics/solutions/speed_estimation.py +35 -31
- ultralytics/solutions/streamlit_inference.py +104 -40
- ultralytics/solutions/templates/similarity-search.html +31 -24
- ultralytics/solutions/trackzone.py +24 -24
- ultralytics/solutions/vision_eye.py +11 -12
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +18 -27
- ultralytics/trackers/bot_sort.py +48 -39
- ultralytics/trackers/byte_tracker.py +94 -94
- ultralytics/trackers/track.py +7 -16
- ultralytics/trackers/utils/gmc.py +37 -69
- ultralytics/trackers/utils/kalman_filter.py +68 -76
- ultralytics/trackers/utils/matching.py +13 -17
- ultralytics/utils/__init__.py +251 -275
- ultralytics/utils/autobatch.py +19 -7
- ultralytics/utils/autodevice.py +68 -38
- ultralytics/utils/benchmarks.py +169 -130
- ultralytics/utils/callbacks/base.py +12 -13
- ultralytics/utils/callbacks/clearml.py +14 -15
- ultralytics/utils/callbacks/comet.py +139 -66
- ultralytics/utils/callbacks/dvc.py +19 -27
- ultralytics/utils/callbacks/hub.py +8 -6
- ultralytics/utils/callbacks/mlflow.py +6 -10
- ultralytics/utils/callbacks/neptune.py +11 -19
- ultralytics/utils/callbacks/platform.py +73 -0
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +9 -12
- ultralytics/utils/callbacks/wb.py +33 -30
- ultralytics/utils/checks.py +163 -114
- ultralytics/utils/cpu.py +89 -0
- ultralytics/utils/dist.py +24 -20
- ultralytics/utils/downloads.py +176 -146
- ultralytics/utils/errors.py +11 -13
- ultralytics/utils/events.py +113 -0
- ultralytics/utils/export/__init__.py +7 -0
- ultralytics/utils/{export.py → export/engine.py} +81 -63
- ultralytics/utils/export/imx.py +294 -0
- ultralytics/utils/export/tensorflow.py +217 -0
- ultralytics/utils/files.py +33 -36
- ultralytics/utils/git.py +137 -0
- ultralytics/utils/instance.py +105 -120
- ultralytics/utils/logger.py +404 -0
- ultralytics/utils/loss.py +99 -61
- ultralytics/utils/metrics.py +649 -478
- ultralytics/utils/nms.py +337 -0
- ultralytics/utils/ops.py +263 -451
- ultralytics/utils/patches.py +70 -31
- ultralytics/utils/plotting.py +253 -223
- ultralytics/utils/tal.py +48 -61
- ultralytics/utils/torch_utils.py +244 -251
- ultralytics/utils/tqdm.py +438 -0
- ultralytics/utils/triton.py +22 -23
- ultralytics/utils/tuner.py +11 -10
- dgenerate_ultralytics_headless-8.3.137.dist-info/RECORD +0 -272
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/top_level.txt +0 -0
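
Since wheels are plain zip archives, the listing above can be reproduced locally by comparing the two member lists. A minimal sketch, assuming both wheels were fetched beforehand (e.g., with `pip download dgenerate-ultralytics-headless==8.3.137 --no-deps`) into the working directory:

    import zipfile

    # Assumed local filenames; adjust to wherever the two wheels were downloaded.
    OLD = "dgenerate_ultralytics_headless-8.3.137-py3-none-any.whl"
    NEW = "dgenerate_ultralytics_headless-8.3.224-py3-none-any.whl"

    old_files = set(zipfile.ZipFile(OLD).namelist())  # a wheel is a zip archive
    new_files = set(zipfile.ZipFile(NEW).namelist())

    for name in sorted(new_files - old_files):
        print(f"added:   {name}")  # e.g. ultralytics/utils/tqdm.py
    for name in sorted(old_files - new_files):
        print(f"removed: {name}")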
ultralytics/solutions/config.py
CHANGED
@@ -1,37 +1,38 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 from dataclasses import dataclass, field
-from typing import
+from typing import Any
 
 import cv2
 
 
 @dataclass
 class SolutionConfig:
-    """
-    Manages configuration parameters for Ultralytics Vision AI solutions.
+    """Manages configuration parameters for Ultralytics Vision AI solutions.
 
-    The SolutionConfig class serves as a centralized configuration container for all the
-
-
+    The SolutionConfig class serves as a centralized configuration container for all the Ultralytics solution modules:
+    https://docs.ultralytics.com/solutions/#solutions. It leverages Python `dataclass` for clear, type-safe, and
+    maintainable parameter definitions.
 
     Attributes:
-        source (
-        model (
-        classes (
+        source (str, optional): Path to the input source (video, RTSP, etc.). Only usable with Solutions CLI.
+        model (str, optional): Path to the Ultralytics YOLO model to be used for inference.
+        classes (list[int], optional): List of class indices to filter detections.
         show_conf (bool): Whether to show confidence scores on the visual output.
         show_labels (bool): Whether to display class labels on visual output.
-        region (
-        colormap (
+        region (list[tuple[int, int]], optional): Polygonal region or line for object counting.
+        colormap (int, optional): OpenCV colormap constant for visual overlays (e.g., cv2.COLORMAP_JET).
         show_in (bool): Whether to display count number for objects entering the region.
         show_out (bool): Whether to display count number for objects leaving the region.
         up_angle (float): Upper angle threshold used in pose-based workouts monitoring.
         down_angle (int): Lower angle threshold used in pose-based workouts monitoring.
-        kpts (
+        kpts (list[int]): Keypoint indices to monitor, e.g., for pose analytics.
         analytics_type (str): Type of analytics to perform ("line", "area", "bar", "pie", etc.).
-        figsize (
+        figsize (tuple[int, int], optional): Size of the matplotlib figure used for analytical plots (width, height).
         blur_ratio (float): Ratio used to blur objects in the video frames (0.0 to 1.0).
-        vision_point (
+        vision_point (tuple[int, int]): Reference point for directional tracking or perspective drawing.
        crop_dir (str): Directory path to save cropped detection images.
        json_file (str): Path to a JSON file containing data for parking areas.
        line_width (int): Width for visual display i.e. bounding boxes, keypoints, counts.
@@ -43,7 +44,7 @@ class SolutionConfig:
        show (bool): Whether to display the visual output on screen.
        iou (float): Intersection-over-Union threshold for detection filtering.
        conf (float): Confidence threshold for keeping predictions.
-        device (
+        device (str, optional): Device to run inference on (e.g., 'cpu', '0' for CUDA GPU).
        max_det (int): Maximum number of detections allowed per video frame.
        half (bool): Whether to use FP16 precision (requires a supported CUDA device).
        tracker (str): Path to tracking configuration YAML file (e.g., 'botsort.yaml').
@@ -60,22 +61,22 @@ class SolutionConfig:
        >>> print(cfg.model)
     """
 
-    source:
-    model:
-    classes:
+    source: str | None = None
+    model: str | None = None
+    classes: list[int] | None = None
     show_conf: bool = True
     show_labels: bool = True
-    region:
-    colormap:
+    region: list[tuple[int, int]] | None = None
+    colormap: int | None = cv2.COLORMAP_DEEPGREEN
     show_in: bool = True
     show_out: bool = True
     up_angle: float = 145.0
     down_angle: int = 90
-    kpts:
+    kpts: list[int] = field(default_factory=lambda: [6, 8, 10])
     analytics_type: str = "line"
-    figsize:
+    figsize: tuple[int, int] | None = (12.8, 7.2)
     blur_ratio: float = 0.5
-    vision_point:
+    vision_point: tuple[int, int] = (20, 20)
     crop_dir: str = "cropped-detections"
     json_file: str = None
     line_width: int = 2
@@ -87,20 +88,20 @@ class SolutionConfig:
     show: bool = False
     iou: float = 0.7
     conf: float = 0.25
-    device:
+    device: str | None = None
     max_det: int = 300
     half: bool = False
     tracker: str = "botsort.yaml"
     verbose: bool = True
     data: str = "images"
 
-    def update(self, **kwargs):
+    def update(self, **kwargs: Any):
         """Update configuration parameters with new values provided as keyword arguments."""
         for key, value in kwargs.items():
             if hasattr(self, key):
                 setattr(self, key, value)
             else:
-
-
-
+                url = "https://docs.ultralytics.com/solutions/#solutions-arguments"
+                raise ValueError(f"{key} is not a valid solution argument, see {url}")
+
         return self
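
Read together, the hunks above give `update()` a clear contract: recognized keys are assigned via `setattr`, and anything else raises a `ValueError` linking to the solutions-arguments docs. A minimal sketch of that behavior (the misspelled key is illustrative):

    from ultralytics.solutions.config import SolutionConfig

    cfg = SolutionConfig()
    cfg.update(conf=0.4, show=True)  # valid dataclass fields are set via setattr
    print(cfg.conf)  # 0.4

    try:
        cfg.update(confidence=0.4)  # illustrative typo: not a SolutionConfig field
    except ValueError as err:
        print(err)  # confidence is not a valid solution argument, see https://docs.ultralytics.com/solutions/#solutions-arguments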
ultralytics/solutions/distance_calculation.py
CHANGED

@@ -1,6 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 import math
+from typing import Any
 
 import cv2
 
@@ -9,20 +10,19 @@ from ultralytics.utils.plotting import colors
 
 
 class DistanceCalculation(BaseSolution):
-    """
-    A class to calculate distance between two objects in a real-time video stream based on their tracks.
+    """A class to calculate distance between two objects in a real-time video stream based on their tracks.
 
-    This class extends BaseSolution to provide functionality for selecting objects and calculating the distance
-
+    This class extends BaseSolution to provide functionality for selecting objects and calculating the distance between
+    them in a video stream using YOLO object detection and tracking.
 
     Attributes:
         left_mouse_count (int): Counter for left mouse button clicks.
-        selected_boxes (
-        centroids (
+        selected_boxes (dict[int, list[float]]): Dictionary to store selected bounding boxes and their track IDs.
+        centroids (list[list[int]]): List to store centroids of selected bounding boxes.
 
     Methods:
-        mouse_event_for_distance:
-        process:
+        mouse_event_for_distance: Handle mouse events for selecting objects in the video stream.
+        process: Process video frames and calculate the distance between selected objects.
 
     Examples:
         >>> distance_calc = DistanceCalculation()
@@ -32,18 +32,17 @@ class DistanceCalculation(BaseSolution):
         >>> cv2.waitKey(0)
     """
 
-    def __init__(self, **kwargs):
-        """
+    def __init__(self, **kwargs: Any) -> None:
+        """Initialize the DistanceCalculation class for measuring object distances in video streams."""
         super().__init__(**kwargs)
 
         # Mouse event information
         self.left_mouse_count = 0
-        self.selected_boxes = {}
-        self.centroids = []  # Store centroids of selected objects
+        self.selected_boxes: dict[int, list[float]] = {}
+        self.centroids: list[list[int]] = []  # Store centroids of selected objects
 
-    def mouse_event_for_distance(self, event, x, y, flags, param):
-        """
-        Handles mouse events to select regions in a real-time video stream for distance calculation.
+    def mouse_event_for_distance(self, event: int, x: int, y: int, flags: int, param: Any) -> None:
+        """Handle mouse events to select regions in a real-time video stream for distance calculation.
 
         Args:
             event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN).
@@ -67,19 +66,18 @@ class DistanceCalculation(BaseSolution):
             self.selected_boxes = {}
             self.left_mouse_count = 0
 
-    def process(self, im0):
-        """
-        Processes a video frame and calculates the distance between two selected bounding boxes.
+    def process(self, im0) -> SolutionResults:
+        """Process a video frame and calculate the distance between two selected bounding boxes.
 
-        This method extracts tracks from the input frame, annotates bounding boxes, and calculates the distance
-
+        This method extracts tracks from the input frame, annotates bounding boxes, and calculates the distance between
+        two user-selected objects if they have been chosen.
 
         Args:
-            im0 (
+            im0 (np.ndarray): The input image frame to process.
 
         Returns:
-            (SolutionResults): Contains processed image `plot_im`, `total_tracks` (int) representing the total number
-
+            (SolutionResults): Contains processed image `plot_im`, `total_tracks` (int) representing the total number of
+            tracked objects, and `pixels_distance` (float) representing the distance between selected objects
             in pixels.
 
         Examples:
@@ -118,7 +116,8 @@ class DistanceCalculation(BaseSolution):
         self.centroids = []  # Reset centroids for next frame
         plot_im = annotator.result()
         self.display_output(plot_im)  # Display output with base class function
-
+        if self.CFG.get("show") and self.env_check:
+            cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
 
         # Return SolutionResults with processed image and calculated metrics
         return SolutionResults(plot_im=plot_im, pixels_distance=pixels_distance, total_tracks=len(self.track_ids))
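
Pieced together from the docstring and hunks above, a minimal end-to-end sketch; the model filename and video path are placeholders, and `DistanceCalculation` is assumed to be re-exported from `ultralytics.solutions` as the updated `ultralytics/solutions/__init__.py` in this release suggests:

    import cv2

    from ultralytics import solutions

    distance_calc = solutions.DistanceCalculation(model="yolo11n.pt", show=True)  # placeholder model path
    cap = cv2.VideoCapture("video.mp4")  # placeholder video source
    while cap.isOpened():
        ok, im0 = cap.read()
        if not ok:
            break
        results = distance_calc.process(im0)  # SolutionResults: plot_im, pixels_distance, total_tracks
        print(results.pixels_distance, results.total_tracks)
    cap.release()
    cv2.destroyAllWindows()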
ultralytics/solutions/heatmap.py
CHANGED
@@ -1,5 +1,9 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
+from typing import Any
+
 import cv2
 import numpy as np
 
@@ -8,8 +12,7 @@ from ultralytics.solutions.solutions import SolutionAnnotator, SolutionResults
 
 
 class Heatmap(ObjectCounter):
-    """
-    A class to draw heatmaps in real-time video streams based on object tracks.
+    """A class to draw heatmaps in real-time video streams based on object tracks.
 
     This class extends the ObjectCounter class to generate and visualize heatmaps of object movements in video
     streams. It uses tracked object positions to create a cumulative heatmap effect over time.
@@ -31,9 +34,8 @@ class Heatmap(ObjectCounter):
         >>> processed_frame = heatmap.process(frame)
     """
 
-    def __init__(self, **kwargs):
-        """
-        Initialize the Heatmap class for real-time video stream heatmap generation based on object tracks.
+    def __init__(self, **kwargs: Any) -> None:
+        """Initialize the Heatmap class for real-time video stream heatmap generation based on object tracks.
 
         Args:
             **kwargs (Any): Keyword arguments passed to the parent ObjectCounter class.
@@ -48,12 +50,11 @@ class Heatmap(ObjectCounter):
         self.colormap = self.CFG["colormap"]
         self.heatmap = None
 
-    def heatmap_effect(self, box):
-        """
-        Efficiently calculate heatmap area and effect location for applying colormap.
+    def heatmap_effect(self, box: list[float]) -> None:
+        """Efficiently calculate heatmap area and effect location for applying colormap.
 
         Args:
-            box (
+            box (list[float]): Bounding box coordinates [x0, y0, x1, y1].
         """
         x0, y0, x1, y1 = map(int, box)
         radius_squared = (min(x1 - x0, y1 - y0) // 2) ** 2
@@ -70,19 +71,16 @@ class Heatmap(ObjectCounter):
         # Update only the values within the bounding box in a single vectorized operation
         self.heatmap[y0:y1, x0:x1][within_radius] += 2
 
-    def process(self, im0):
-        """
-        Generate heatmap for each frame using Ultralytics.
+    def process(self, im0: np.ndarray) -> SolutionResults:
+        """Generate heatmap for each frame using Ultralytics tracking.
 
         Args:
             im0 (np.ndarray): Input image array for processing.
 
         Returns:
-            (SolutionResults): Contains processed image `plot_im`,
-            '
-            '
-            'classwise_count' (dict, per-class object count), and
-            'total_tracks' (int, total number of tracked objects).
+            (SolutionResults): Contains processed image `plot_im`, 'in_count' (int, count of objects entering the
+            region), 'out_count' (int, count of objects exiting the region), 'classwise_count' (dict, per-class
+            object count), and 'total_tracks' (int, total number of tracked objects).
         """
         if not self.initialized:
             self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
@@ -110,7 +108,7 @@ class Heatmap(ObjectCounter):
         self.display_counts(plot_im)  # Display the counts on the frame
 
         # Normalize, apply colormap to heatmap and combine with original image
-        if self.track_data.
+        if self.track_data.is_track:
             normalized_heatmap = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
             colored_heatmap = cv2.applyColorMap(normalized_heatmap, self.colormap)
             plot_im = cv2.addWeighted(plot_im, 0.5, colored_heatmap, 0.5, 0)
@@ -122,6 +120,6 @@ class Heatmap(ObjectCounter):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=dict(self.
+            classwise_count=dict(self.classwise_count),
             total_tracks=len(self.track_ids),
         )
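
A short usage sketch consistent with the class docstring above; the model and frame path are placeholders, and `cv2.COLORMAP_JET` stands in for any OpenCV colormap constant:

    import cv2

    from ultralytics import solutions

    heatmap = solutions.Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET, show=False)  # placeholders
    frame = cv2.imread("frame.jpg")  # placeholder input frame
    results = heatmap.process(frame)  # SolutionResults per the Returns section above
    print(results.in_count, results.out_count, results.classwise_count, results.total_tracks)
    cv2.imwrite("heatmap_overlay.jpg", results.plot_im)  # heatmap blended over the frame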
ultralytics/solutions/instance_segmentation.py
CHANGED

@@ -1,12 +1,13 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from typing import Any
+
 from ultralytics.engine.results import Results
 from ultralytics.solutions.solutions import BaseSolution, SolutionResults
 
 
 class InstanceSegmentation(BaseSolution):
-    """
-    A class to manage instance segmentation in images or video streams.
+    """A class to manage instance segmentation in images or video streams.
 
     This class extends the BaseSolution class and provides functionality for performing instance segmentation, including
     drawing segmented masks with bounding boxes and labels.
@@ -14,10 +15,13 @@ class InstanceSegmentation(BaseSolution):
     Attributes:
         model (str): The segmentation model to use for inference.
         line_width (int): Width of the bounding box and text lines.
-        names (
-        clss (
-        track_ids (
-        masks (
+        names (dict[int, str]): Dictionary mapping class indices to class names.
+        clss (list[int]): List of detected class indices.
+        track_ids (list[int]): List of track IDs for detected instances.
+        masks (list[np.ndarray]): List of segmentation masks for detected instances.
+        show_conf (bool): Whether to display confidence scores.
+        show_labels (bool): Whether to display class labels.
+        show_boxes (bool): Whether to display bounding boxes.
 
     Methods:
         process: Process the input image to perform instance segmentation and annotate results.
@@ -26,17 +30,16 @@ class InstanceSegmentation(BaseSolution):
     Examples:
         >>> segmenter = InstanceSegmentation()
         >>> frame = cv2.imread("frame.jpg")
-        >>> results = segmenter.
-        >>> print(f"Total segmented instances: {results
+        >>> results = segmenter.process(frame)
+        >>> print(f"Total segmented instances: {results.total_tracks}")
     """
 
-    def __init__(self, **kwargs):
-        """
-        Initialize the InstanceSegmentation class for detecting and annotating segmented instances.
+    def __init__(self, **kwargs: Any) -> None:
+        """Initialize the InstanceSegmentation class for detecting and annotating segmented instances.
 
         Args:
-            **kwargs (Any): Keyword arguments passed to the BaseSolution parent class
-                model (str): Model name or path, defaults to "yolo11n-seg.pt".
+            **kwargs (Any): Keyword arguments passed to the BaseSolution parent class including:
+                - model (str): Model name or path, defaults to "yolo11n-seg.pt".
         """
         kwargs["model"] = kwargs.get("model", "yolo11n-seg.pt")
         super().__init__(**kwargs)
@@ -45,12 +48,11 @@ class InstanceSegmentation(BaseSolution):
         self.show_labels = self.CFG.get("show_labels", True)
         self.show_boxes = self.CFG.get("show_boxes", True)
 
-    def process(self, im0):
-        """
-        Perform instance segmentation on the input image and annotate the results.
+    def process(self, im0) -> SolutionResults:
+        """Perform instance segmentation on the input image and annotate the results.
 
         Args:
-            im0 (
+            im0 (np.ndarray): The input image for segmentation.
 
         Returns:
             (SolutionResults): Object containing the annotated image and total number of tracked instances.
@@ -58,11 +60,11 @@ class InstanceSegmentation(BaseSolution):
         Examples:
             >>> segmenter = InstanceSegmentation()
             >>> frame = cv2.imread("image.jpg")
-            >>> summary = segmenter.
+            >>> summary = segmenter.process(frame)
             >>> print(summary)
         """
         self.extract_tracks(im0)  # Extract tracks (bounding boxes, classes, and masks)
-        self.masks = getattr(self.tracks
+        self.masks = getattr(self.tracks, "masks", None)
 
         # Iterate over detected classes, track IDs, and segmentation masks
         if self.masks is None:
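
The new docstring examples above translate directly into a runnable sketch (the image path is a placeholder; the model defaults to "yolo11n-seg.pt" per `__init__`):

    import cv2

    from ultralytics import solutions

    segmenter = solutions.InstanceSegmentation()  # model defaults to "yolo11n-seg.pt"
    frame = cv2.imread("frame.jpg")  # placeholder image path
    results = segmenter.process(frame)
    print(f"Total segmented instances: {results.total_tracks}")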
ultralytics/solutions/object_blurrer.py
CHANGED

@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from typing import Any
+
 import cv2
 
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
@@ -8,8 +10,7 @@ from ultralytics.utils.plotting import colors
 
 
 class ObjectBlurrer(BaseSolution):
-    """
-    A class to manage the blurring of detected objects in a real-time video stream.
+    """A class to manage the blurring of detected objects in a real-time video stream.
 
     This class extends the BaseSolution class and provides functionality for blurring objects based on detected bounding
     boxes. The blurred areas are updated directly in the input image, allowing for privacy preservation or other effects.
@@ -20,9 +21,9 @@ class ObjectBlurrer(BaseSolution):
         conf (float): Confidence threshold for object detection.
 
     Methods:
-        process:
-        extract_tracks:
-        display_output:
+        process: Apply a blurring effect to detected objects in the input image.
+        extract_tracks: Extract tracking information from detected objects.
+        display_output: Display the processed output image.
 
     Examples:
         >>> blurrer = ObjectBlurrer()
@@ -31,13 +32,12 @@ class ObjectBlurrer(BaseSolution):
         >>> print(f"Total blurred objects: {processed_results.total_tracks}")
     """
 
-    def __init__(self, **kwargs):
-        """
-        Initialize the ObjectBlurrer class for applying a blur effect to objects detected in video streams or images.
+    def __init__(self, **kwargs: Any) -> None:
+        """Initialize the ObjectBlurrer class for applying a blur effect to objects detected in video streams or images.
 
         Args:
-            **kwargs (Any): Keyword arguments passed to the parent class and for configuration
-                blur_ratio (float): Intensity of the blur effect (0.1-1.0, default=0.5).
+            **kwargs (Any): Keyword arguments passed to the parent class and for configuration including:
+                - blur_ratio (float): Intensity of the blur effect (0.1-1.0, default=0.5).
         """
         super().__init__(**kwargs)
         blur_ratio = self.CFG["blur_ratio"]
@@ -46,19 +46,18 @@ class ObjectBlurrer(BaseSolution):
             blur_ratio = 0.5
         self.blur_ratio = int(blur_ratio * 100)
 
-    def process(self, im0):
-        """
-        Apply a blurring effect to detected objects in the input image.
+    def process(self, im0) -> SolutionResults:
+        """Apply a blurring effect to detected objects in the input image.
 
-        This method extracts tracking information, applies blur to regions corresponding to detected objects,
-
+        This method extracts tracking information, applies blur to regions corresponding to detected objects, and
+        annotates the image with bounding boxes.
 
         Args:
-            im0 (
+            im0 (np.ndarray): The input image containing detected objects.
 
         Returns:
             (SolutionResults): Object containing the processed image and number of tracked objects.
-                - plot_im (
+                - plot_im (np.ndarray): The annotated output image with blurred objects.
                 - total_tracks (int): The total number of tracked objects in the frame.
 
         Examples: