dgenerate_ultralytics_headless-8.3.134-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
- tests/__init__.py +22 -0
- tests/conftest.py +83 -0
- tests/test_cli.py +138 -0
- tests/test_cuda.py +215 -0
- tests/test_engine.py +131 -0
- tests/test_exports.py +236 -0
- tests/test_integrations.py +154 -0
- tests/test_python.py +694 -0
- tests/test_solutions.py +187 -0
- ultralytics/__init__.py +30 -0
- ultralytics/assets/bus.jpg +0 -0
- ultralytics/assets/zidane.jpg +0 -0
- ultralytics/cfg/__init__.py +1023 -0
- ultralytics/cfg/datasets/Argoverse.yaml +77 -0
- ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
- ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
- ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
- ultralytics/cfg/datasets/Objects365.yaml +443 -0
- ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
- ultralytics/cfg/datasets/VOC.yaml +106 -0
- ultralytics/cfg/datasets/VisDrone.yaml +77 -0
- ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
- ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
- ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
- ultralytics/cfg/datasets/coco-pose.yaml +42 -0
- ultralytics/cfg/datasets/coco.yaml +118 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco128.yaml +101 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
- ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
- ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco8.yaml +101 -0
- ultralytics/cfg/datasets/crack-seg.yaml +22 -0
- ultralytics/cfg/datasets/dog-pose.yaml +24 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
- ultralytics/cfg/datasets/dota8.yaml +35 -0
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
- ultralytics/cfg/datasets/lvis.yaml +1240 -0
- ultralytics/cfg/datasets/medical-pills.yaml +22 -0
- ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
- ultralytics/cfg/datasets/package-seg.yaml +22 -0
- ultralytics/cfg/datasets/signature.yaml +21 -0
- ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
- ultralytics/cfg/datasets/xView.yaml +155 -0
- ultralytics/cfg/default.yaml +127 -0
- ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
- ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
- ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
- ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
- ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
- ultralytics/cfg/models/11/yolo11.yaml +50 -0
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
- ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
- ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
- ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
- ultralytics/cfg/models/12/yolo12.yaml +48 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
- ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
- ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
- ultralytics/cfg/models/v3/yolov3.yaml +49 -0
- ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
- ultralytics/cfg/models/v5/yolov5.yaml +51 -0
- ultralytics/cfg/models/v6/yolov6.yaml +56 -0
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
- ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
- ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8.yaml +49 -0
- ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
- ultralytics/cfg/trackers/botsort.yaml +22 -0
- ultralytics/cfg/trackers/bytetrack.yaml +14 -0
- ultralytics/data/__init__.py +26 -0
- ultralytics/data/annotator.py +66 -0
- ultralytics/data/augment.py +2945 -0
- ultralytics/data/base.py +438 -0
- ultralytics/data/build.py +258 -0
- ultralytics/data/converter.py +754 -0
- ultralytics/data/dataset.py +834 -0
- ultralytics/data/loaders.py +676 -0
- ultralytics/data/scripts/download_weights.sh +18 -0
- ultralytics/data/scripts/get_coco.sh +61 -0
- ultralytics/data/scripts/get_coco128.sh +18 -0
- ultralytics/data/scripts/get_imagenet.sh +52 -0
- ultralytics/data/split.py +125 -0
- ultralytics/data/split_dota.py +325 -0
- ultralytics/data/utils.py +777 -0
- ultralytics/engine/__init__.py +1 -0
- ultralytics/engine/exporter.py +1519 -0
- ultralytics/engine/model.py +1156 -0
- ultralytics/engine/predictor.py +502 -0
- ultralytics/engine/results.py +1840 -0
- ultralytics/engine/trainer.py +853 -0
- ultralytics/engine/tuner.py +243 -0
- ultralytics/engine/validator.py +377 -0
- ultralytics/hub/__init__.py +168 -0
- ultralytics/hub/auth.py +137 -0
- ultralytics/hub/google/__init__.py +176 -0
- ultralytics/hub/session.py +446 -0
- ultralytics/hub/utils.py +248 -0
- ultralytics/models/__init__.py +9 -0
- ultralytics/models/fastsam/__init__.py +7 -0
- ultralytics/models/fastsam/model.py +61 -0
- ultralytics/models/fastsam/predict.py +181 -0
- ultralytics/models/fastsam/utils.py +24 -0
- ultralytics/models/fastsam/val.py +40 -0
- ultralytics/models/nas/__init__.py +7 -0
- ultralytics/models/nas/model.py +102 -0
- ultralytics/models/nas/predict.py +58 -0
- ultralytics/models/nas/val.py +39 -0
- ultralytics/models/rtdetr/__init__.py +7 -0
- ultralytics/models/rtdetr/model.py +63 -0
- ultralytics/models/rtdetr/predict.py +84 -0
- ultralytics/models/rtdetr/train.py +85 -0
- ultralytics/models/rtdetr/val.py +191 -0
- ultralytics/models/sam/__init__.py +6 -0
- ultralytics/models/sam/amg.py +260 -0
- ultralytics/models/sam/build.py +358 -0
- ultralytics/models/sam/model.py +170 -0
- ultralytics/models/sam/modules/__init__.py +1 -0
- ultralytics/models/sam/modules/blocks.py +1129 -0
- ultralytics/models/sam/modules/decoders.py +515 -0
- ultralytics/models/sam/modules/encoders.py +854 -0
- ultralytics/models/sam/modules/memory_attention.py +299 -0
- ultralytics/models/sam/modules/sam.py +1006 -0
- ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
- ultralytics/models/sam/modules/transformer.py +351 -0
- ultralytics/models/sam/modules/utils.py +394 -0
- ultralytics/models/sam/predict.py +1605 -0
- ultralytics/models/utils/__init__.py +1 -0
- ultralytics/models/utils/loss.py +455 -0
- ultralytics/models/utils/ops.py +268 -0
- ultralytics/models/yolo/__init__.py +7 -0
- ultralytics/models/yolo/classify/__init__.py +7 -0
- ultralytics/models/yolo/classify/predict.py +88 -0
- ultralytics/models/yolo/classify/train.py +233 -0
- ultralytics/models/yolo/classify/val.py +215 -0
- ultralytics/models/yolo/detect/__init__.py +7 -0
- ultralytics/models/yolo/detect/predict.py +124 -0
- ultralytics/models/yolo/detect/train.py +217 -0
- ultralytics/models/yolo/detect/val.py +451 -0
- ultralytics/models/yolo/model.py +354 -0
- ultralytics/models/yolo/obb/__init__.py +7 -0
- ultralytics/models/yolo/obb/predict.py +66 -0
- ultralytics/models/yolo/obb/train.py +81 -0
- ultralytics/models/yolo/obb/val.py +283 -0
- ultralytics/models/yolo/pose/__init__.py +7 -0
- ultralytics/models/yolo/pose/predict.py +79 -0
- ultralytics/models/yolo/pose/train.py +154 -0
- ultralytics/models/yolo/pose/val.py +394 -0
- ultralytics/models/yolo/segment/__init__.py +7 -0
- ultralytics/models/yolo/segment/predict.py +113 -0
- ultralytics/models/yolo/segment/train.py +123 -0
- ultralytics/models/yolo/segment/val.py +428 -0
- ultralytics/models/yolo/world/__init__.py +5 -0
- ultralytics/models/yolo/world/train.py +119 -0
- ultralytics/models/yolo/world/train_world.py +176 -0
- ultralytics/models/yolo/yoloe/__init__.py +22 -0
- ultralytics/models/yolo/yoloe/predict.py +169 -0
- ultralytics/models/yolo/yoloe/train.py +298 -0
- ultralytics/models/yolo/yoloe/train_seg.py +124 -0
- ultralytics/models/yolo/yoloe/val.py +191 -0
- ultralytics/nn/__init__.py +29 -0
- ultralytics/nn/autobackend.py +842 -0
- ultralytics/nn/modules/__init__.py +182 -0
- ultralytics/nn/modules/activation.py +53 -0
- ultralytics/nn/modules/block.py +1966 -0
- ultralytics/nn/modules/conv.py +712 -0
- ultralytics/nn/modules/head.py +880 -0
- ultralytics/nn/modules/transformer.py +713 -0
- ultralytics/nn/modules/utils.py +164 -0
- ultralytics/nn/tasks.py +1627 -0
- ultralytics/nn/text_model.py +351 -0
- ultralytics/solutions/__init__.py +41 -0
- ultralytics/solutions/ai_gym.py +116 -0
- ultralytics/solutions/analytics.py +252 -0
- ultralytics/solutions/config.py +106 -0
- ultralytics/solutions/distance_calculation.py +124 -0
- ultralytics/solutions/heatmap.py +127 -0
- ultralytics/solutions/instance_segmentation.py +84 -0
- ultralytics/solutions/object_blurrer.py +90 -0
- ultralytics/solutions/object_counter.py +195 -0
- ultralytics/solutions/object_cropper.py +84 -0
- ultralytics/solutions/parking_management.py +273 -0
- ultralytics/solutions/queue_management.py +93 -0
- ultralytics/solutions/region_counter.py +120 -0
- ultralytics/solutions/security_alarm.py +154 -0
- ultralytics/solutions/similarity_search.py +172 -0
- ultralytics/solutions/solutions.py +724 -0
- ultralytics/solutions/speed_estimation.py +110 -0
- ultralytics/solutions/streamlit_inference.py +196 -0
- ultralytics/solutions/templates/similarity-search.html +160 -0
- ultralytics/solutions/trackzone.py +88 -0
- ultralytics/solutions/vision_eye.py +68 -0
- ultralytics/trackers/__init__.py +7 -0
- ultralytics/trackers/basetrack.py +124 -0
- ultralytics/trackers/bot_sort.py +260 -0
- ultralytics/trackers/byte_tracker.py +480 -0
- ultralytics/trackers/track.py +125 -0
- ultralytics/trackers/utils/__init__.py +1 -0
- ultralytics/trackers/utils/gmc.py +376 -0
- ultralytics/trackers/utils/kalman_filter.py +493 -0
- ultralytics/trackers/utils/matching.py +157 -0
- ultralytics/utils/__init__.py +1435 -0
- ultralytics/utils/autobatch.py +106 -0
- ultralytics/utils/autodevice.py +174 -0
- ultralytics/utils/benchmarks.py +695 -0
- ultralytics/utils/callbacks/__init__.py +5 -0
- ultralytics/utils/callbacks/base.py +234 -0
- ultralytics/utils/callbacks/clearml.py +153 -0
- ultralytics/utils/callbacks/comet.py +552 -0
- ultralytics/utils/callbacks/dvc.py +205 -0
- ultralytics/utils/callbacks/hub.py +108 -0
- ultralytics/utils/callbacks/mlflow.py +138 -0
- ultralytics/utils/callbacks/neptune.py +140 -0
- ultralytics/utils/callbacks/raytune.py +43 -0
- ultralytics/utils/callbacks/tensorboard.py +132 -0
- ultralytics/utils/callbacks/wb.py +185 -0
- ultralytics/utils/checks.py +897 -0
- ultralytics/utils/dist.py +119 -0
- ultralytics/utils/downloads.py +499 -0
- ultralytics/utils/errors.py +43 -0
- ultralytics/utils/export.py +219 -0
- ultralytics/utils/files.py +221 -0
- ultralytics/utils/instance.py +499 -0
- ultralytics/utils/loss.py +813 -0
- ultralytics/utils/metrics.py +1356 -0
- ultralytics/utils/ops.py +885 -0
- ultralytics/utils/patches.py +143 -0
- ultralytics/utils/plotting.py +1011 -0
- ultralytics/utils/tal.py +416 -0
- ultralytics/utils/torch_utils.py +990 -0
- ultralytics/utils/triton.py +116 -0
- ultralytics/utils/tuner.py +159 -0
ultralytics/trackers/basetrack.py
@@ -0,0 +1,124 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Module defines the base classes and structures for object tracking in YOLO."""
+
+from collections import OrderedDict
+
+import numpy as np
+
+
+class TrackState:
+    """
+    Enumeration class representing the possible states of an object being tracked.
+
+    Attributes:
+        New (int): State when the object is newly detected.
+        Tracked (int): State when the object is successfully tracked in subsequent frames.
+        Lost (int): State when the object is no longer tracked.
+        Removed (int): State when the object is removed from tracking.
+
+    Examples:
+        >>> state = TrackState.New
+        >>> if state == TrackState.New:
+        >>>     print("Object is newly detected.")
+    """
+
+    New = 0
+    Tracked = 1
+    Lost = 2
+    Removed = 3
+
+
+class BaseTrack:
+    """
+    Base class for object tracking, providing foundational attributes and methods.
+
+    Attributes:
+        _count (int): Class-level counter for unique track IDs.
+        track_id (int): Unique identifier for the track.
+        is_activated (bool): Flag indicating whether the track is currently active.
+        state (TrackState): Current state of the track.
+        history (OrderedDict): Ordered history of the track's states.
+        features (list): List of features extracted from the object for tracking.
+        curr_feature (Any): The current feature of the object being tracked.
+        score (float): The confidence score of the tracking.
+        start_frame (int): The frame number where tracking started.
+        frame_id (int): The most recent frame ID processed by the track.
+        time_since_update (int): Frames passed since the last update.
+        location (tuple): The location of the object in the context of multi-camera tracking.
+
+    Methods:
+        end_frame: Returns the ID of the last frame where the object was tracked.
+        next_id: Increments and returns the next global track ID.
+        activate: Abstract method to activate the track.
+        predict: Abstract method to predict the next state of the track.
+        update: Abstract method to update the track with new data.
+        mark_lost: Marks the track as lost.
+        mark_removed: Marks the track as removed.
+        reset_id: Resets the global track ID counter.
+
+    Examples:
+        Initialize a new track and mark it as lost:
+        >>> track = BaseTrack()
+        >>> track.mark_lost()
+        >>> print(track.state)  # Output: 2 (TrackState.Lost)
+    """
+
+    _count = 0
+
+    def __init__(self):
+        """
+        Initialize a new track with a unique ID and foundational tracking attributes.
+
+        Examples:
+            Initialize a new track
+            >>> track = BaseTrack()
+            >>> print(track.track_id)
+            0
+        """
+        self.track_id = 0
+        self.is_activated = False
+        self.state = TrackState.New
+        self.history = OrderedDict()
+        self.features = []
+        self.curr_feature = None
+        self.score = 0
+        self.start_frame = 0
+        self.frame_id = 0
+        self.time_since_update = 0
+        self.location = (np.inf, np.inf)
+
+    @property
+    def end_frame(self):
+        """Returns the ID of the most recent frame where the object was tracked."""
+        return self.frame_id
+
+    @staticmethod
+    def next_id():
+        """Increment and return the next unique global track ID for object tracking."""
+        BaseTrack._count += 1
+        return BaseTrack._count
+
+    def activate(self, *args):
+        """Activates the track with provided arguments, initializing necessary attributes for tracking."""
+        raise NotImplementedError
+
+    def predict(self):
+        """Predicts the next state of the track based on the current state and tracking model."""
+        raise NotImplementedError
+
+    def update(self, *args, **kwargs):
+        """Updates the track with new observations and data, modifying its state and attributes accordingly."""
+        raise NotImplementedError
+
+    def mark_lost(self):
+        """Marks the track as lost by updating its state to TrackState.Lost."""
+        self.state = TrackState.Lost
+
+    def mark_removed(self):
+        """Marks the track as removed by setting its state to TrackState.Removed."""
+        self.state = TrackState.Removed
+
+    @staticmethod
+    def reset_id():
+        """Reset the global track ID counter to its initial value."""
+        BaseTrack._count = 0
ultralytics/trackers/bot_sort.py
@@ -0,0 +1,260 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from collections import deque
+
+import numpy as np
+import torch
+
+from ultralytics.utils.ops import xywh2xyxy
+from ultralytics.utils.plotting import save_one_box
+
+from .basetrack import TrackState
+from .byte_tracker import BYTETracker, STrack
+from .utils import matching
+from .utils.gmc import GMC
+from .utils.kalman_filter import KalmanFilterXYWH
+
+
+class BOTrack(STrack):
+    """
+    An extended version of the STrack class for YOLO, adding object tracking features.
+
+    This class extends the STrack class to include additional functionalities for object tracking, such as feature
+    smoothing, Kalman filter prediction, and reactivation of tracks.
+
+    Attributes:
+        shared_kalman (KalmanFilterXYWH): A shared Kalman filter for all instances of BOTrack.
+        smooth_feat (np.ndarray): Smoothed feature vector.
+        curr_feat (np.ndarray): Current feature vector.
+        features (deque): A deque to store feature vectors with a maximum length defined by `feat_history`.
+        alpha (float): Smoothing factor for the exponential moving average of features.
+        mean (np.ndarray): The mean state of the Kalman filter.
+        covariance (np.ndarray): The covariance matrix of the Kalman filter.
+
+    Methods:
+        update_features: Update features vector and smooth it using exponential moving average.
+        predict: Predict the mean and covariance using Kalman filter.
+        re_activate: Reactivate a track with updated features and optionally new ID.
+        update: Update the track with new detection and frame ID.
+        tlwh: Property that gets the current position in tlwh format `(top left x, top left y, width, height)`.
+        multi_predict: Predict the mean and covariance of multiple object tracks using shared Kalman filter.
+        convert_coords: Convert tlwh bounding box coordinates to xywh format.
+        tlwh_to_xywh: Convert bounding box to xywh format `(center x, center y, width, height)`.
+
+    Examples:
+        Create a BOTrack instance and update its features
+        >>> bo_track = BOTrack(tlwh=[100, 50, 80, 40], score=0.9, cls=1, feat=np.random.rand(128))
+        >>> bo_track.predict()
+        >>> new_track = BOTrack(tlwh=[110, 60, 80, 40], score=0.85, cls=1, feat=np.random.rand(128))
+        >>> bo_track.update(new_track, frame_id=2)
+    """
+
+    shared_kalman = KalmanFilterXYWH()
+
+    def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
+        """
+        Initialize a BOTrack object with temporal parameters, such as feature history, alpha, and current features.
+
+        Args:
+            tlwh (np.ndarray): Bounding box coordinates in tlwh format (top left x, top left y, width, height).
+            score (float): Confidence score of the detection.
+            cls (int): Class ID of the detected object.
+            feat (np.ndarray | None): Feature vector associated with the detection.
+            feat_history (int): Maximum length of the feature history deque.
+
+        Examples:
+            Initialize a BOTrack object with bounding box, score, class ID, and feature vector
+            >>> tlwh = np.array([100, 50, 80, 120])
+            >>> score = 0.9
+            >>> cls = 1
+            >>> feat = np.random.rand(128)
+            >>> bo_track = BOTrack(tlwh, score, cls, feat)
+        """
+        super().__init__(tlwh, score, cls)
+
+        self.smooth_feat = None
+        self.curr_feat = None
+        if feat is not None:
+            self.update_features(feat)
+        self.features = deque([], maxlen=feat_history)
+        self.alpha = 0.9
+
+    def update_features(self, feat):
+        """Update the feature vector and apply exponential moving average smoothing."""
+        feat /= np.linalg.norm(feat)
+        self.curr_feat = feat
+        if self.smooth_feat is None:
+            self.smooth_feat = feat
+        else:
+            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
+        self.features.append(feat)
+        self.smooth_feat /= np.linalg.norm(self.smooth_feat)
+
+    def predict(self):
+        """Predict the object's future state using the Kalman filter to update its mean and covariance."""
+        mean_state = self.mean.copy()
+        if self.state != TrackState.Tracked:
+            mean_state[6] = 0
+            mean_state[7] = 0
+
+        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
+
+    def re_activate(self, new_track, frame_id, new_id=False):
+        """Reactivate a track with updated features and optionally assign a new ID."""
+        if new_track.curr_feat is not None:
+            self.update_features(new_track.curr_feat)
+        super().re_activate(new_track, frame_id, new_id)
+
+    def update(self, new_track, frame_id):
+        """Update the track with new detection information and the current frame ID."""
+        if new_track.curr_feat is not None:
+            self.update_features(new_track.curr_feat)
+        super().update(new_track, frame_id)
+
+    @property
+    def tlwh(self):
+        """Return the current bounding box position in `(top left x, top left y, width, height)` format."""
+        if self.mean is None:
+            return self._tlwh.copy()
+        ret = self.mean[:4].copy()
+        ret[:2] -= ret[2:] / 2
+        return ret
+
+    @staticmethod
+    def multi_predict(stracks):
+        """Predict the mean and covariance for multiple object tracks using a shared Kalman filter."""
+        if len(stracks) <= 0:
+            return
+        multi_mean = np.asarray([st.mean.copy() for st in stracks])
+        multi_covariance = np.asarray([st.covariance for st in stracks])
+        for i, st in enumerate(stracks):
+            if st.state != TrackState.Tracked:
+                multi_mean[i][6] = 0
+                multi_mean[i][7] = 0
+        multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
+        for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
+            stracks[i].mean = mean
+            stracks[i].covariance = cov
+
+    def convert_coords(self, tlwh):
+        """Convert tlwh bounding box coordinates to xywh format."""
+        return self.tlwh_to_xywh(tlwh)
+
+    @staticmethod
+    def tlwh_to_xywh(tlwh):
+        """Convert bounding box from tlwh (top-left-width-height) to xywh (center-x-center-y-width-height) format."""
+        ret = np.asarray(tlwh).copy()
+        ret[:2] += ret[2:] / 2
+        return ret
+
+
+class BOTSORT(BYTETracker):
+    """
+    An extended version of the BYTETracker class for YOLO, designed for object tracking with ReID and GMC algorithm.
+
+    Attributes:
+        proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections.
+        appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections.
+        encoder (Any): Object to handle ReID embeddings, set to None if ReID is not enabled.
+        gmc (GMC): An instance of the GMC algorithm for data association.
+        args (Any): Parsed command-line arguments containing tracking parameters.
+
+    Methods:
+        get_kalmanfilter: Return an instance of KalmanFilterXYWH for object tracking.
+        init_track: Initialize track with detections, scores, and classes.
+        get_dists: Get distances between tracks and detections using IoU and (optionally) ReID.
+        multi_predict: Predict and track multiple objects with a YOLO model.
+        reset: Reset the BOTSORT tracker to its initial state.
+
+    Examples:
+        Initialize BOTSORT and process detections
+        >>> bot_sort = BOTSORT(args, frame_rate=30)
+        >>> bot_sort.init_track(dets, scores, cls, img)
+        >>> bot_sort.multi_predict(tracks)
+
+    Note:
+        The class is designed to work with a YOLO object detection model and supports ReID only if enabled via args.
+    """
+
+    def __init__(self, args, frame_rate=30):
+        """
+        Initialize BOTSORT object with ReID module and GMC algorithm.
+
+        Args:
+            args (object): Parsed command-line arguments containing tracking parameters.
+            frame_rate (int): Frame rate of the video being processed.
+
+        Examples:
+            Initialize BOTSORT with command-line arguments and a specified frame rate:
+            >>> args = parse_args()
+            >>> bot_sort = BOTSORT(args, frame_rate=30)
+        """
+        super().__init__(args, frame_rate)
+        self.gmc = GMC(method=args.gmc_method)
+
+        # ReID module
+        self.proximity_thresh = args.proximity_thresh
+        self.appearance_thresh = args.appearance_thresh
+        self.encoder = (
+            (lambda feats, s: [f.cpu().numpy() for f in feats])  # native features do not require any model
+            if args.with_reid and self.args.model == "auto"
+            else ReID(args.model)
+            if args.with_reid
+            else None
+        )
+
+    def get_kalmanfilter(self):
+        """Return an instance of KalmanFilterXYWH for predicting and updating object states in the tracking process."""
+        return KalmanFilterXYWH()
+
+    def init_track(self, dets, scores, cls, img=None):
+        """Initialize object tracks using detection bounding boxes, scores, class labels, and optional ReID features."""
+        if len(dets) == 0:
+            return []
+        if self.args.with_reid and self.encoder is not None:
+            features_keep = self.encoder(img, dets)
+            return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)]  # detections
+        else:
+            return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)]  # detections
+
+    def get_dists(self, tracks, detections):
+        """Calculate distances between tracks and detections using IoU and optionally ReID embeddings."""
+        dists = matching.iou_distance(tracks, detections)
+        dists_mask = dists > (1 - self.proximity_thresh)
+
+        if self.args.fuse_score:
+            dists = matching.fuse_score(dists, detections)
+
+        if self.args.with_reid and self.encoder is not None:
+            emb_dists = matching.embedding_distance(tracks, detections) / 2.0
+            emb_dists[emb_dists > (1 - self.appearance_thresh)] = 1.0
+            emb_dists[dists_mask] = 1.0
+            dists = np.minimum(dists, emb_dists)
+        return dists
+
+    def multi_predict(self, tracks):
+        """Predict the mean and covariance of multiple object tracks using a shared Kalman filter."""
+        BOTrack.multi_predict(tracks)
+
+    def reset(self):
+        """Reset the BOTSORT tracker to its initial state, clearing all tracked objects and internal states."""
+        super().reset()
+        self.gmc.reset_params()
+
+
+class ReID:
+    """YOLO model as encoder for re-identification."""
+
+    def __init__(self, model):
+        """Initialize encoder for re-identification."""
+        from ultralytics import YOLO
+
+        self.model = YOLO(model)
+        self.model(embed=[len(self.model.model.model) - 2 if ".pt" in model else -1], verbose=False)  # initialize
+
+    def __call__(self, img, dets):
+        """Extract embeddings for detected objects."""
+        feats = self.model([save_one_box(det, img, save=False) for det in xywh2xyxy(torch.from_numpy(dets[:, :4]))])
+        if len(feats) != dets.shape[0] and feats[0].shape[0] == dets.shape[0]:
+            feats = feats[0]  # batched prediction with non-PyTorch backend
+        return [f.cpu().numpy() for f in feats]