ultralytics-8.2.76-py3-none-any.whl → ultralytics-8.2.77-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ultralytics might be problematic. Click here for more details.
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +1 -1
- ultralytics/engine/results.py +19 -5
- ultralytics/engine/trainer.py +3 -1
- ultralytics/models/yolo/detect/train.py +1 -1
- ultralytics/trackers/basetrack.py +31 -12
- ultralytics/trackers/bot_sort.py +58 -24
- ultralytics/trackers/byte_tracker.py +75 -42
- ultralytics/trackers/track.py +17 -2
- ultralytics/trackers/utils/gmc.py +52 -38
- ultralytics/trackers/utils/kalman_filter.py +162 -31
- ultralytics/trackers/utils/matching.py +38 -14
- ultralytics/utils/__init__.py +1 -1
- ultralytics/utils/files.py +69 -34
- ultralytics/utils/plotting.py +11 -3
- {ultralytics-8.2.76.dist-info → ultralytics-8.2.77.dist-info}/METADATA +2 -2
- {ultralytics-8.2.76.dist-info → ultralytics-8.2.77.dist-info}/RECORD +21 -21
- {ultralytics-8.2.76.dist-info → ultralytics-8.2.77.dist-info}/WHEEL +1 -1
- {ultralytics-8.2.76.dist-info → ultralytics-8.2.77.dist-info}/LICENSE +0 -0
- {ultralytics-8.2.76.dist-info → ultralytics-8.2.77.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.2.76.dist-info → ultralytics-8.2.77.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
|
@@ -81,7 +81,7 @@ CLI_HELP_MSG = f"""
|
|
|
81
81
|
5. Explore your datasets using semantic search and SQL with a simple GUI powered by Ultralytics Explorer API
|
|
82
82
|
yolo explorer data=data.yaml model=yolov8n.pt
|
|
83
83
|
|
|
84
|
-
6. Streamlit real-time
|
|
84
|
+
6. Streamlit real-time webcam inference GUI
|
|
85
85
|
yolo streamlit-predict
|
|
86
86
|
|
|
87
87
|
7. Run special commands:
|
ultralytics/engine/results.py
CHANGED
|
@@ -460,6 +460,7 @@ class Results(SimpleClass):
|
|
|
460
460
|
show=False,
|
|
461
461
|
save=False,
|
|
462
462
|
filename=None,
|
|
463
|
+
color_mode="class",
|
|
463
464
|
):
|
|
464
465
|
"""
|
|
465
466
|
Plots detection results on an input RGB image.
|
|
@@ -481,6 +482,7 @@ class Results(SimpleClass):
|
|
|
481
482
|
show (bool): Whether to display the annotated image.
|
|
482
483
|
save (bool): Whether to save the annotated image.
|
|
483
484
|
filename (str | None): Filename to save image if save is True.
|
|
485
|
+
color_mode (str): Specify the color mode, e.g., 'instance' or 'class'. Defaults to 'class'.
|
|
484
486
|
|
|
485
487
|
Returns:
|
|
486
488
|
(np.ndarray): Annotated image as a numpy array.
|
|
@@ -491,6 +493,7 @@ class Results(SimpleClass):
|
|
|
491
493
|
... im = result.plot()
|
|
492
494
|
... im.show()
|
|
493
495
|
"""
|
|
496
|
+
assert color_mode in {"instance", "class"}, f"Expected color_mode='instance' or 'class', not {color_mode}."
|
|
494
497
|
if img is None and isinstance(self.orig_img, torch.Tensor):
|
|
495
498
|
img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).to(torch.uint8).cpu().numpy()
|
|
496
499
|
|
|
@@ -519,17 +522,22 @@ class Results(SimpleClass):
|
|
|
519
522
|
.contiguous()
|
|
520
523
|
/ 255
|
|
521
524
|
)
|
|
522
|
-
idx = pred_boxes.cls if pred_boxes else range(len(pred_masks))
|
|
525
|
+
idx = pred_boxes.cls if pred_boxes and color_mode == "class" else reversed(range(len(pred_masks)))
|
|
523
526
|
annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu)
|
|
524
527
|
|
|
525
528
|
# Plot Detect results
|
|
526
529
|
if pred_boxes is not None and show_boxes:
|
|
527
|
-
for d in reversed(pred_boxes):
|
|
530
|
+
for i, d in enumerate(reversed(pred_boxes)):
|
|
528
531
|
c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
|
|
529
532
|
name = ("" if id is None else f"id:{id} ") + names[c]
|
|
530
533
|
label = (f"{name} {conf:.2f}" if conf else name) if labels else None
|
|
531
534
|
box = d.xyxyxyxy.reshape(-1, 4, 2).squeeze() if is_obb else d.xyxy.squeeze()
|
|
532
|
-
annotator.box_label(
|
|
535
|
+
annotator.box_label(
|
|
536
|
+
box,
|
|
537
|
+
label,
|
|
538
|
+
color=colors(i if color_mode == "instance" else c, True),
|
|
539
|
+
rotated=is_obb,
|
|
540
|
+
)
|
|
533
541
|
|
|
534
542
|
# Plot Classify results
|
|
535
543
|
if pred_probs is not None and show_probs:
|
|
@@ -539,8 +547,14 @@ class Results(SimpleClass):
|
|
|
539
547
|
|
|
540
548
|
# Plot Pose results
|
|
541
549
|
if self.keypoints is not None:
|
|
542
|
-
for k in reversed(self.keypoints.data):
|
|
543
|
-
annotator.kpts(
|
|
550
|
+
for i, k in enumerate(reversed(self.keypoints.data)):
|
|
551
|
+
annotator.kpts(
|
|
552
|
+
k,
|
|
553
|
+
self.orig_shape,
|
|
554
|
+
radius=kpt_radius,
|
|
555
|
+
kpt_line=kpt_line,
|
|
556
|
+
kpt_color=colors(i, True) if color_mode == "instance" else None,
|
|
557
|
+
)
|
|
544
558
|
|
|
545
559
|
# Show results
|
|
546
560
|
if show:
|
ultralytics/engine/trainer.py
CHANGED
|
@@ -174,9 +174,11 @@ class BaseTrainer:
|
|
|
174
174
|
world_size = len(self.args.device.split(","))
|
|
175
175
|
elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
|
|
176
176
|
world_size = len(self.args.device)
|
|
177
|
+
elif self.args.device in {"cpu", "mps"}: # i.e. device='cpu' or 'mps'
|
|
178
|
+
world_size = 0
|
|
177
179
|
elif torch.cuda.is_available(): # i.e. device=None or device='' or device=number
|
|
178
180
|
world_size = 1 # default to device 0
|
|
179
|
-
else: # i.e. device=
|
|
181
|
+
else: # i.e. device=None or device=''
|
|
180
182
|
world_size = 0
|
|
181
183
|
|
|
182
184
|
# Run subprocess if DDP training, else train normally
|
|
@@ -60,7 +60,7 @@ class DetectionTrainer(BaseTrainer):
|
|
|
60
60
|
if self.args.multi_scale:
|
|
61
61
|
imgs = batch["img"]
|
|
62
62
|
sz = (
|
|
63
|
-
random.randrange(self.args.imgsz * 0.5, self.args.imgsz * 1.5 + self.stride)
|
|
63
|
+
random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1.5 + self.stride))
|
|
64
64
|
// self.stride
|
|
65
65
|
* self.stride
|
|
66
66
|
) # size
|
|
@@ -15,6 +15,11 @@ class TrackState:
|
|
|
15
15
|
Tracked (int): State when the object is successfully tracked in subsequent frames.
|
|
16
16
|
Lost (int): State when the object is no longer tracked.
|
|
17
17
|
Removed (int): State when the object is removed from tracking.
|
|
18
|
+
|
|
19
|
+
Examples:
|
|
20
|
+
>>> state = TrackState.New
|
|
21
|
+
>>> if state == TrackState.New:
|
|
22
|
+
>>> print("Object is newly detected.")
|
|
18
23
|
"""
|
|
19
24
|
|
|
20
25
|
New = 0
|
|
@@ -33,13 +38,13 @@ class BaseTrack:
|
|
|
33
38
|
is_activated (bool): Flag indicating whether the track is currently active.
|
|
34
39
|
state (TrackState): Current state of the track.
|
|
35
40
|
history (OrderedDict): Ordered history of the track's states.
|
|
36
|
-
features (
|
|
37
|
-
curr_feature (
|
|
41
|
+
features (List): List of features extracted from the object for tracking.
|
|
42
|
+
curr_feature (Any): The current feature of the object being tracked.
|
|
38
43
|
score (float): The confidence score of the tracking.
|
|
39
44
|
start_frame (int): The frame number where tracking started.
|
|
40
45
|
frame_id (int): The most recent frame ID processed by the track.
|
|
41
46
|
time_since_update (int): Frames passed since the last update.
|
|
42
|
-
location (
|
|
47
|
+
location (Tuple): The location of the object in the context of multi-camera tracking.
|
|
43
48
|
|
|
44
49
|
Methods:
|
|
45
50
|
end_frame: Returns the ID of the last frame where the object was tracked.
|
|
@@ -50,12 +55,26 @@ class BaseTrack:
|
|
|
50
55
|
mark_lost: Marks the track as lost.
|
|
51
56
|
mark_removed: Marks the track as removed.
|
|
52
57
|
reset_id: Resets the global track ID counter.
|
|
58
|
+
|
|
59
|
+
Examples:
|
|
60
|
+
Initialize a new track and mark it as lost:
|
|
61
|
+
>>> track = BaseTrack()
|
|
62
|
+
>>> track.mark_lost()
|
|
63
|
+
>>> print(track.state) # Output: 2 (TrackState.Lost)
|
|
53
64
|
"""
|
|
54
65
|
|
|
55
66
|
_count = 0
|
|
56
67
|
|
|
57
68
|
def __init__(self):
|
|
58
|
-
"""
|
|
69
|
+
"""
|
|
70
|
+
Initializes a new track with a unique ID and foundational tracking attributes.
|
|
71
|
+
|
|
72
|
+
Examples:
|
|
73
|
+
Initialize a new track
|
|
74
|
+
>>> track = BaseTrack()
|
|
75
|
+
>>> print(track.track_id)
|
|
76
|
+
0
|
|
77
|
+
"""
|
|
59
78
|
self.track_id = 0
|
|
60
79
|
self.is_activated = False
|
|
61
80
|
self.state = TrackState.New
|
|
@@ -70,36 +89,36 @@ class BaseTrack:
|
|
|
70
89
|
|
|
71
90
|
@property
|
|
72
91
|
def end_frame(self):
|
|
73
|
-
"""
|
|
92
|
+
"""Returns the ID of the most recent frame where the object was tracked."""
|
|
74
93
|
return self.frame_id
|
|
75
94
|
|
|
76
95
|
@staticmethod
|
|
77
96
|
def next_id():
|
|
78
|
-
"""Increment and return the global track ID
|
|
97
|
+
"""Increment and return the next unique global track ID for object tracking."""
|
|
79
98
|
BaseTrack._count += 1
|
|
80
99
|
return BaseTrack._count
|
|
81
100
|
|
|
82
101
|
def activate(self, *args):
|
|
83
|
-
"""
|
|
102
|
+
"""Activates the track with provided arguments, initializing necessary attributes for tracking."""
|
|
84
103
|
raise NotImplementedError
|
|
85
104
|
|
|
86
105
|
def predict(self):
|
|
87
|
-
"""
|
|
106
|
+
"""Predicts the next state of the track based on the current state and tracking model."""
|
|
88
107
|
raise NotImplementedError
|
|
89
108
|
|
|
90
109
|
def update(self, *args, **kwargs):
|
|
91
|
-
"""
|
|
110
|
+
"""Updates the track with new observations and data, modifying its state and attributes accordingly."""
|
|
92
111
|
raise NotImplementedError
|
|
93
112
|
|
|
94
113
|
def mark_lost(self):
|
|
95
|
-
"""
|
|
114
|
+
"""Marks the track as lost by updating its state to TrackState.Lost."""
|
|
96
115
|
self.state = TrackState.Lost
|
|
97
116
|
|
|
98
117
|
def mark_removed(self):
|
|
99
|
-
"""
|
|
118
|
+
"""Marks the track as removed by setting its state to TrackState.Removed."""
|
|
100
119
|
self.state = TrackState.Removed
|
|
101
120
|
|
|
102
121
|
@staticmethod
|
|
103
122
|
def reset_id():
|
|
104
|
-
"""Reset the global track ID counter."""
|
|
123
|
+
"""Reset the global track ID counter to its initial value."""
|
|
105
124
|
BaseTrack._count = 0
|
ultralytics/trackers/bot_sort.py
CHANGED
|
@@ -15,6 +15,9 @@ class BOTrack(STrack):
|
|
|
15
15
|
"""
|
|
16
16
|
An extended version of the STrack class for YOLOv8, adding object tracking features.
|
|
17
17
|
|
|
18
|
+
This class extends the STrack class to include additional functionalities for object tracking, such as feature
|
|
19
|
+
smoothing, Kalman filter prediction, and reactivation of tracks.
|
|
20
|
+
|
|
18
21
|
Attributes:
|
|
19
22
|
shared_kalman (KalmanFilterXYWH): A shared Kalman filter for all instances of BOTrack.
|
|
20
23
|
smooth_feat (np.ndarray): Smoothed feature vector.
|
|
@@ -34,16 +37,35 @@ class BOTrack(STrack):
|
|
|
34
37
|
convert_coords(tlwh): Converts tlwh bounding box coordinates to xywh format.
|
|
35
38
|
tlwh_to_xywh(tlwh): Convert bounding box to xywh format `(center x, center y, width, height)`.
|
|
36
39
|
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
bo_track.
|
|
40
|
-
bo_track.
|
|
40
|
+
Examples:
|
|
41
|
+
Create a BOTrack instance and update its features
|
|
42
|
+
>>> bo_track = BOTrack(tlwh=[100, 50, 80, 40], score=0.9, cls=1, feat=np.random.rand(128))
|
|
43
|
+
>>> bo_track.predict()
|
|
44
|
+
>>> new_track = BOTrack(tlwh=[110, 60, 80, 40], score=0.85, cls=1, feat=np.random.rand(128))
|
|
45
|
+
>>> bo_track.update(new_track, frame_id=2)
|
|
41
46
|
"""
|
|
42
47
|
|
|
43
48
|
shared_kalman = KalmanFilterXYWH()
|
|
44
49
|
|
|
45
50
|
def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
|
|
46
|
-
"""
|
|
51
|
+
"""
|
|
52
|
+
Initialize a BOTrack object with temporal parameters, such as feature history, alpha, and current features.
|
|
53
|
+
|
|
54
|
+
Args:
|
|
55
|
+
tlwh (np.ndarray): Bounding box coordinates in tlwh format (top left x, top left y, width, height).
|
|
56
|
+
score (float): Confidence score of the detection.
|
|
57
|
+
cls (int): Class ID of the detected object.
|
|
58
|
+
feat (np.ndarray | None): Feature vector associated with the detection.
|
|
59
|
+
feat_history (int): Maximum length of the feature history deque.
|
|
60
|
+
|
|
61
|
+
Examples:
|
|
62
|
+
Initialize a BOTrack object with bounding box, score, class ID, and feature vector
|
|
63
|
+
>>> tlwh = np.array([100, 50, 80, 120])
|
|
64
|
+
>>> score = 0.9
|
|
65
|
+
>>> cls = 1
|
|
66
|
+
>>> feat = np.random.rand(128)
|
|
67
|
+
>>> bo_track = BOTrack(tlwh, score, cls, feat)
|
|
68
|
+
"""
|
|
47
69
|
super().__init__(tlwh, score, cls)
|
|
48
70
|
|
|
49
71
|
self.smooth_feat = None
|
|
@@ -54,7 +76,7 @@ class BOTrack(STrack):
|
|
|
54
76
|
self.alpha = 0.9
|
|
55
77
|
|
|
56
78
|
def update_features(self, feat):
|
|
57
|
-
"""Update
|
|
79
|
+
"""Update the feature vector and apply exponential moving average smoothing."""
|
|
58
80
|
feat /= np.linalg.norm(feat)
|
|
59
81
|
self.curr_feat = feat
|
|
60
82
|
if self.smooth_feat is None:
|
|
@@ -65,7 +87,7 @@ class BOTrack(STrack):
|
|
|
65
87
|
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
|
|
66
88
|
|
|
67
89
|
def predict(self):
|
|
68
|
-
"""Predicts the
|
|
90
|
+
"""Predicts the object's future state using the Kalman filter to update its mean and covariance."""
|
|
69
91
|
mean_state = self.mean.copy()
|
|
70
92
|
if self.state != TrackState.Tracked:
|
|
71
93
|
mean_state[6] = 0
|
|
@@ -80,14 +102,14 @@ class BOTrack(STrack):
|
|
|
80
102
|
super().re_activate(new_track, frame_id, new_id)
|
|
81
103
|
|
|
82
104
|
def update(self, new_track, frame_id):
|
|
83
|
-
"""
|
|
105
|
+
"""Updates the YOLOv8 instance with new track information and the current frame ID."""
|
|
84
106
|
if new_track.curr_feat is not None:
|
|
85
107
|
self.update_features(new_track.curr_feat)
|
|
86
108
|
super().update(new_track, frame_id)
|
|
87
109
|
|
|
88
110
|
@property
|
|
89
111
|
def tlwh(self):
|
|
90
|
-
"""
|
|
112
|
+
"""Returns the current bounding box position in `(top left x, top left y, width, height)` format."""
|
|
91
113
|
if self.mean is None:
|
|
92
114
|
return self._tlwh.copy()
|
|
93
115
|
ret = self.mean[:4].copy()
|
|
@@ -96,7 +118,7 @@ class BOTrack(STrack):
|
|
|
96
118
|
|
|
97
119
|
@staticmethod
|
|
98
120
|
def multi_predict(stracks):
|
|
99
|
-
"""Predicts the mean and covariance
|
|
121
|
+
"""Predicts the mean and covariance for multiple object tracks using a shared Kalman filter."""
|
|
100
122
|
if len(stracks) <= 0:
|
|
101
123
|
return
|
|
102
124
|
multi_mean = np.asarray([st.mean.copy() for st in stracks])
|
|
@@ -111,12 +133,12 @@ class BOTrack(STrack):
|
|
|
111
133
|
stracks[i].covariance = cov
|
|
112
134
|
|
|
113
135
|
def convert_coords(self, tlwh):
|
|
114
|
-
"""Converts
|
|
136
|
+
"""Converts tlwh bounding box coordinates to xywh format."""
|
|
115
137
|
return self.tlwh_to_xywh(tlwh)
|
|
116
138
|
|
|
117
139
|
@staticmethod
|
|
118
140
|
def tlwh_to_xywh(tlwh):
|
|
119
|
-
"""Convert bounding box to
|
|
141
|
+
"""Convert bounding box from tlwh (top-left-width-height) to xywh (center-x-center-y-width-height) format."""
|
|
120
142
|
ret = np.asarray(tlwh).copy()
|
|
121
143
|
ret[:2] += ret[2:] / 2
|
|
122
144
|
return ret
|
|
@@ -129,9 +151,9 @@ class BOTSORT(BYTETracker):
|
|
|
129
151
|
Attributes:
|
|
130
152
|
proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections.
|
|
131
153
|
appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections.
|
|
132
|
-
encoder (
|
|
154
|
+
encoder (Any): Object to handle ReID embeddings, set to None if ReID is not enabled.
|
|
133
155
|
gmc (GMC): An instance of the GMC algorithm for data association.
|
|
134
|
-
args (
|
|
156
|
+
args (Any): Parsed command-line arguments containing tracking parameters.
|
|
135
157
|
|
|
136
158
|
Methods:
|
|
137
159
|
get_kalmanfilter(): Returns an instance of KalmanFilterXYWH for object tracking.
|
|
@@ -139,17 +161,29 @@ class BOTSORT(BYTETracker):
|
|
|
139
161
|
get_dists(tracks, detections): Get distances between tracks and detections using IoU and (optionally) ReID.
|
|
140
162
|
multi_predict(tracks): Predict and track multiple objects with YOLOv8 model.
|
|
141
163
|
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
bot_sort
|
|
145
|
-
bot_sort.
|
|
164
|
+
Examples:
|
|
165
|
+
Initialize BOTSORT and process detections
|
|
166
|
+
>>> bot_sort = BOTSORT(args, frame_rate=30)
|
|
167
|
+
>>> bot_sort.init_track(dets, scores, cls, img)
|
|
168
|
+
>>> bot_sort.multi_predict(tracks)
|
|
146
169
|
|
|
147
170
|
Note:
|
|
148
171
|
The class is designed to work with the YOLOv8 object detection model and supports ReID only if enabled via args.
|
|
149
172
|
"""
|
|
150
173
|
|
|
151
174
|
def __init__(self, args, frame_rate=30):
|
|
152
|
-
"""
|
|
175
|
+
"""
|
|
176
|
+
Initialize YOLOv8 object with ReID module and GMC algorithm.
|
|
177
|
+
|
|
178
|
+
Args:
|
|
179
|
+
args (object): Parsed command-line arguments containing tracking parameters.
|
|
180
|
+
frame_rate (int): Frame rate of the video being processed.
|
|
181
|
+
|
|
182
|
+
Examples:
|
|
183
|
+
Initialize BOTSORT with command-line arguments and a specified frame rate:
|
|
184
|
+
>>> args = parse_args()
|
|
185
|
+
>>> bot_sort = BOTSORT(args, frame_rate=30)
|
|
186
|
+
"""
|
|
153
187
|
super().__init__(args, frame_rate)
|
|
154
188
|
# ReID module
|
|
155
189
|
self.proximity_thresh = args.proximity_thresh
|
|
@@ -161,11 +195,11 @@ class BOTSORT(BYTETracker):
|
|
|
161
195
|
self.gmc = GMC(method=args.gmc_method)
|
|
162
196
|
|
|
163
197
|
def get_kalmanfilter(self):
|
|
164
|
-
"""Returns an instance of KalmanFilterXYWH for object tracking."""
|
|
198
|
+
"""Returns an instance of KalmanFilterXYWH for predicting and updating object states in the tracking process."""
|
|
165
199
|
return KalmanFilterXYWH()
|
|
166
200
|
|
|
167
201
|
def init_track(self, dets, scores, cls, img=None):
|
|
168
|
-
"""Initialize
|
|
202
|
+
"""Initialize object tracks using detection bounding boxes, scores, class labels, and optional ReID features."""
|
|
169
203
|
if len(dets) == 0:
|
|
170
204
|
return []
|
|
171
205
|
if self.args.with_reid and self.encoder is not None:
|
|
@@ -175,7 +209,7 @@ class BOTSORT(BYTETracker):
|
|
|
175
209
|
return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections
|
|
176
210
|
|
|
177
211
|
def get_dists(self, tracks, detections):
|
|
178
|
-
"""
|
|
212
|
+
"""Calculates distances between tracks and detections using IoU and optionally ReID embeddings."""
|
|
179
213
|
dists = matching.iou_distance(tracks, detections)
|
|
180
214
|
dists_mask = dists > self.proximity_thresh
|
|
181
215
|
|
|
@@ -190,10 +224,10 @@ class BOTSORT(BYTETracker):
|
|
|
190
224
|
return dists
|
|
191
225
|
|
|
192
226
|
def multi_predict(self, tracks):
|
|
193
|
-
"""
|
|
227
|
+
"""Predicts the mean and covariance of multiple object tracks using a shared Kalman filter."""
|
|
194
228
|
BOTrack.multi_predict(tracks)
|
|
195
229
|
|
|
196
230
|
def reset(self):
|
|
197
|
-
"""
|
|
231
|
+
"""Resets the BOTSORT tracker to its initial state, clearing all tracked objects and internal states."""
|
|
198
232
|
super().reset()
|
|
199
233
|
self.gmc.reset_params()
|