dgenerate-ultralytics-headless 8.3.145__py3-none-any.whl → 8.3.146__py3-none-any.whl
This diff represents the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {dgenerate_ultralytics_headless-8.3.145.dist-info → dgenerate_ultralytics_headless-8.3.146.dist-info}/METADATA +1 -1
- {dgenerate_ultralytics_headless-8.3.145.dist-info → dgenerate_ultralytics_headless-8.3.146.dist-info}/RECORD +31 -30
- {dgenerate_ultralytics_headless-8.3.145.dist-info → dgenerate_ultralytics_headless-8.3.146.dist-info}/WHEEL +1 -1
- tests/__init__.py +3 -0
- tests/test_cli.py +2 -7
- tests/test_python.py +42 -12
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +0 -1
- ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
- ultralytics/data/augment.py +2 -2
- ultralytics/engine/model.py +3 -3
- ultralytics/engine/validator.py +1 -1
- ultralytics/models/nas/model.py +0 -8
- ultralytics/models/yolo/classify/val.py +1 -5
- ultralytics/models/yolo/detect/val.py +9 -16
- ultralytics/models/yolo/obb/val.py +24 -17
- ultralytics/models/yolo/pose/val.py +19 -14
- ultralytics/models/yolo/segment/val.py +52 -44
- ultralytics/solutions/analytics.py +17 -9
- ultralytics/solutions/object_counter.py +2 -4
- ultralytics/trackers/bot_sort.py +4 -2
- ultralytics/utils/__init__.py +1 -2
- ultralytics/utils/benchmarks.py +15 -15
- ultralytics/utils/checks.py +10 -5
- ultralytics/utils/downloads.py +1 -0
- ultralytics/utils/metrics.py +25 -26
- ultralytics/utils/plotting.py +10 -7
- ultralytics/utils/torch_utils.py +2 -2
- {dgenerate_ultralytics_headless-8.3.145.dist-info → dgenerate_ultralytics_headless-8.3.146.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.145.dist-info → dgenerate_ultralytics_headless-8.3.146.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.145.dist-info → dgenerate_ultralytics_headless-8.3.146.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/obb/val.py
CHANGED
@@ -1,7 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 from pathlib import Path
-from typing import Dict, List, Tuple, Union
+from typing import Any, Dict, List, Tuple, Union
 
 import torch
 
@@ -40,7 +40,7 @@ class OBBValidator(DetectionValidator):
         >>> validator(model=args["model"])
         """
 
-    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
         """
         Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics.
 
@@ -58,8 +58,13 @@ class OBBValidator(DetectionValidator):
         self.args.task = "obb"
         self.metrics = OBBMetrics(save_dir=self.save_dir, plot=True)
 
-    def init_metrics(self, model):
-        """
+    def init_metrics(self, model: torch.nn.Module) -> None:
+        """
+        Initialize evaluation metrics for YOLO obb validation.
+
+        Args:
+            model (torch.nn.Module): Model to validate.
+        """
         super().init_metrics(model)
         val = self.data.get(self.args.split, "")  # validation path
         self.is_dota = isinstance(val, str) and "DOTA" in val  # check if dataset is DOTA format
@@ -94,7 +99,7 @@ class OBBValidator(DetectionValidator):
 
         Args:
             si (int): Batch index to process.
-            batch (
+            batch (Dict[str, Any]): Dictionary containing batch data with keys:
                 - batch_idx: Tensor of batch indices
                 - cls: Tensor of class labels
                 - bboxes: Tensor of bounding boxes
@@ -103,7 +108,7 @@ class OBBValidator(DetectionValidator):
                 - ratio_pad: Ratio and padding information
 
         Returns:
-            (
+            (Dict[str, Any]): Prepared batch data with scaled bounding boxes and metadata.
         """
         idx = batch["batch_idx"] == si
         cls = batch["cls"][idx].squeeze(-1)
@@ -116,7 +121,7 @@ class OBBValidator(DetectionValidator):
             ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True)  # native-space labels
         return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
 
-    def _prepare_pred(self, pred: torch.Tensor, pbatch: Dict) -> torch.Tensor:
+    def _prepare_pred(self, pred: torch.Tensor, pbatch: Dict[str, Any]) -> torch.Tensor:
         """
         Prepare predictions by scaling bounding boxes to original image dimensions.
 
@@ -125,7 +130,7 @@ class OBBValidator(DetectionValidator):
 
         Args:
             pred (torch.Tensor): Prediction tensor containing bounding box coordinates and other information.
-            pbatch (
+            pbatch (Dict[str, Any]): Dictionary containing batch information with keys:
                 - imgsz (tuple): Model input image size.
                 - ori_shape (tuple): Original image shape.
                 - ratio_pad (tuple): Ratio and padding information for scaling.
@@ -139,13 +144,13 @@ class OBBValidator(DetectionValidator):
         )  # native-space pred
         return predn
 
-    def plot_predictions(self, batch: Dict, preds: List[torch.Tensor], ni: int):
+    def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int) -> None:
         """
         Plot predicted bounding boxes on input images and save the result.
 
         Args:
-            batch (
-            preds (
+            batch (Dict[str, Any]): Batch data containing images, file paths, and other metadata.
+            preds (List[torch.Tensor]): List of prediction tensors for each image in the batch.
             ni (int): Batch index used for naming the output file.
 
         Examples:
@@ -163,7 +168,7 @@ class OBBValidator(DetectionValidator):
             on_plot=self.on_plot,
         )  # pred
 
-    def pred_to_json(self, predn: torch.Tensor, filename: Union[str, Path]):
+    def pred_to_json(self, predn: torch.Tensor, filename: Union[str, Path]) -> None:
         """
         Convert YOLO predictions to COCO JSON format with rotated bounding box information.
 
@@ -192,7 +197,9 @@ class OBBValidator(DetectionValidator):
             }
         )
 
-    def save_one_txt(
+    def save_one_txt(
+        self, predn: torch.Tensor, save_conf: bool, shape: Tuple[int, int], file: Union[Path, str]
+    ) -> None:
         """
         Save YOLO OBB detections to a text file in normalized coordinates.
 
@@ -200,7 +207,7 @@ class OBBValidator(DetectionValidator):
             predn (torch.Tensor): Predicted detections with shape (N, 7) containing bounding boxes, confidence scores,
                 class predictions, and angles in format (x, y, w, h, conf, cls, angle).
             save_conf (bool): Whether to save confidence scores in the text file.
-            shape (
+            shape (Tuple[int, int]): Original image shape in format (height, width).
             file (Path | str): Output file path to save detections.
 
         Examples:
@@ -222,15 +229,15 @@ class OBBValidator(DetectionValidator):
             obb=obb,
         ).save_txt(file, save_conf=save_conf)
 
-    def eval_json(self, stats: Dict) -> Dict:
+    def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
         """
         Evaluate YOLO output in JSON format and save predictions in DOTA format.
 
         Args:
-            stats (
+            stats (Dict[str, Any]): Performance statistics dictionary.
 
         Returns:
-            (
+            (Dict[str, Any]): Updated performance statistics.
         """
         if self.args.save_json and self.is_dota and len(self.jdict):
             import json
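For orientation, the typed OBBValidator above is normally driven through the public val API rather than instantiated directly. A minimal sketch, assuming the standard Ultralytics entry point and a DOTA8-style dataset config (checkpoint and dataset names illustrative):

from ultralytics import YOLO

model = YOLO("yolo11n-obb.pt")  # task "obb" routes validation through OBBValidator
metrics = model.val(data="dota8.yaml", imgsz=640, save_json=True)  # init_metrics -> update_metrics -> eval_json
print(metrics.box.map)  # mAP50-95 over oriented boxes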
ultralytics/models/yolo/pose/val.py
CHANGED
@@ -49,7 +49,7 @@ class PoseValidator(DetectionValidator):
         >>> validator()
         """
 
-    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
         """
         Initialize a PoseValidator object for pose estimation validation.
 
@@ -107,8 +107,13 @@ class PoseValidator(DetectionValidator):
             "mAP50-95)",
         )
 
-    def init_metrics(self, model):
-        """
+    def init_metrics(self, model: torch.nn.Module) -> None:
+        """
+        Initialize evaluation metrics for YOLO pose validation.
+
+        Args:
+            model (torch.nn.Module): Model to validate.
+        """
         super().init_metrics(model)
         self.kpt_shape = self.data["kpt_shape"]
         is_pose = self.kpt_shape == [17, 3]
@@ -122,10 +127,10 @@ class PoseValidator(DetectionValidator):
 
         Args:
             si (int): Batch index.
-            batch (
+            batch (Dict[str, Any]): Dictionary containing batch data with keys like 'keypoints', 'batch_idx', etc.
 
         Returns:
-            (
+            (Dict[str, Any]): Prepared batch with keypoints scaled to original image dimensions.
 
         Notes:
             This method extends the parent class's _prepare_batch method by adding keypoint processing.
@@ -151,7 +156,7 @@ class PoseValidator(DetectionValidator):
 
         Args:
             pred (torch.Tensor): Raw prediction tensor from the model.
-            pbatch (
+            pbatch (Dict[str, Any]): Processed batch dictionary containing image information including:
                 - imgsz: Image size used for inference
                 - ori_shape: Original image shape
                 - ratio_pad: Ratio and padding information for coordinate scaling
@@ -166,7 +171,7 @@ class PoseValidator(DetectionValidator):
         ops.scale_coords(pbatch["imgsz"], pred_kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
         return predn, pred_kpts
 
-    def update_metrics(self, preds: List[torch.Tensor], batch: Dict[str, Any]):
+    def update_metrics(self, preds: List[torch.Tensor], batch: Dict[str, Any]) -> None:
         """
         Update metrics with new predictions and ground truth data.
 
@@ -175,7 +180,7 @@ class PoseValidator(DetectionValidator):
 
         Args:
             preds (List[torch.Tensor]): List of prediction tensors from the model.
-            batch (
+            batch (Dict[str, Any]): Batch data containing images and ground truth annotations.
         """
         for si, pred in enumerate(preds):
             self.seen += 1
@@ -266,12 +271,12 @@ class PoseValidator(DetectionValidator):
 
         return self.match_predictions(detections[:, 5], gt_cls, iou)
 
-    def plot_val_samples(self, batch: Dict[str, Any], ni: int):
+    def plot_val_samples(self, batch: Dict[str, Any], ni: int) -> None:
         """
         Plot and save validation set samples with ground truth bounding boxes and keypoints.
 
         Args:
-            batch (
+            batch (Dict[str, Any]): Dictionary containing batch data with keys:
                 - img (torch.Tensor): Batch of images
                 - batch_idx (torch.Tensor): Batch indices for each image
                 - cls (torch.Tensor): Class labels
@@ -292,12 +297,12 @@ class PoseValidator(DetectionValidator):
             on_plot=self.on_plot,
         )
 
-    def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int):
+    def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int) -> None:
         """
         Plot and save model predictions with bounding boxes and keypoints.
 
         Args:
-            batch (
+            batch (Dict[str, Any]): Dictionary containing batch data including images, file paths, and other metadata.
             preds (List[torch.Tensor]): List of prediction tensors from the model, each containing bounding boxes,
                 confidence scores, class predictions, and keypoints.
             ni (int): Batch index used for naming the output file.
@@ -323,7 +328,7 @@ class PoseValidator(DetectionValidator):
         save_conf: bool,
         shape: Tuple[int, int],
         file: Path,
-    ):
+    ) -> None:
         """
         Save YOLO pose detections to a text file in normalized coordinates.
 
@@ -349,7 +354,7 @@ class PoseValidator(DetectionValidator):
             keypoints=pred_kpts,
         ).save_txt(file, save_conf=save_conf)
 
-    def pred_to_json(self, predn: torch.Tensor, filename: str):
+    def pred_to_json(self, predn: torch.Tensor, filename: str) -> None:
         """
         Convert YOLO predictions to COCO JSON format.
 
ultralytics/models/yolo/segment/val.py
CHANGED
@@ -2,6 +2,7 @@
 
 from multiprocessing.pool import ThreadPool
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
 
 import numpy as np
 import torch
@@ -35,7 +36,7 @@ class SegmentationValidator(DetectionValidator):
         >>> validator()
         """
 
-    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
         """
         Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.
 
@@ -52,13 +53,21 @@ class SegmentationValidator(DetectionValidator):
         self.args.task = "segment"
         self.metrics = SegmentMetrics(save_dir=self.save_dir)
 
-    def preprocess(self, batch):
-        """
+    def preprocess(self, batch: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Preprocess batch of images for YOLO segmentation validation.
+
+        Args:
+            batch (Dict[str, Any]): Batch containing images and annotations.
+
+        Returns:
+            (Dict[str, Any]): Preprocessed batch.
+        """
         batch = super().preprocess(batch)
         batch["masks"] = batch["masks"].to(self.device).float()
         return batch
 
-    def init_metrics(self, model):
+    def init_metrics(self, model: torch.nn.Module) -> None:
         """
         Initialize metrics and select mask processing function based on save_json flag.
 
@@ -73,7 +82,7 @@ class SegmentationValidator(DetectionValidator):
         self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
         self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
 
-    def get_desc(self):
+    def get_desc(self) -> str:
         """Return a formatted description of evaluation metrics."""
         return ("%22s" + "%11s" * 10) % (
             "Class",
@@ -89,44 +98,46 @@ class SegmentationValidator(DetectionValidator):
             "mAP50-95)",
         )
 
-    def postprocess(self, preds):
+    def postprocess(self, preds: List[torch.Tensor]) -> Tuple[List[torch.Tensor], torch.Tensor]:
         """
         Post-process YOLO predictions and return output detections with proto.
 
         Args:
-            preds (
+            preds (List[torch.Tensor]): Raw predictions from the model.
 
         Returns:
-            p (torch.Tensor): Processed detection predictions.
+            p (List[torch.Tensor]): Processed detection predictions.
             proto (torch.Tensor): Prototype masks for segmentation.
         """
         p = super().postprocess(preds[0])
         proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
         return p, proto
 
-    def _prepare_batch(self, si, batch):
+    def _prepare_batch(self, si: int, batch: Dict[str, Any]) -> Dict[str, Any]:
         """
         Prepare a batch for training or inference by processing images and targets.
 
         Args:
             si (int): Batch index.
-            batch (
+            batch (Dict[str, Any]): Batch data containing images and annotations.
 
         Returns:
-            (
+            (Dict[str, Any]): Prepared batch with processed annotations.
         """
         prepared_batch = super()._prepare_batch(si, batch)
         midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
         prepared_batch["masks"] = batch["masks"][midx]
         return prepared_batch
 
-    def _prepare_pred(
+    def _prepare_pred(
+        self, pred: torch.Tensor, pbatch: Dict[str, Any], proto: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
         """
         Prepare predictions for evaluation by processing bounding boxes and masks.
 
         Args:
             pred (torch.Tensor): Raw predictions from the model.
-            pbatch (
+            pbatch (Dict[str, Any]): Prepared batch information.
             proto (torch.Tensor): Prototype masks for segmentation.
 
         Returns:
@@ -137,13 +148,13 @@ class SegmentationValidator(DetectionValidator):
         pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=pbatch["imgsz"])
         return predn, pred_masks
 
-    def update_metrics(self, preds, batch):
+    def update_metrics(self, preds: Tuple[List[torch.Tensor], torch.Tensor], batch: Dict[str, Any]) -> None:
         """
         Update metrics with the current batch predictions and targets.
 
         Args:
-            preds (
-            batch (
+            preds (Tuple[List[torch.Tensor], torch.Tensor]): List of predictions from the model.
+            batch (Dict[str, Any]): Batch data containing ground truth.
         """
         for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
             self.seen += 1
@@ -214,21 +225,16 @@ class SegmentationValidator(DetectionValidator):
                 self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
             )
 
-    def
-
-
-
-
-
-
-
-
-        """
-        self.metrics.speed = self.speed
-        self.metrics.confusion_matrix = self.confusion_matrix
-
-    def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
+    def _process_batch(
+        self,
+        detections: torch.Tensor,
+        gt_bboxes: torch.Tensor,
+        gt_cls: torch.Tensor,
+        pred_masks: Optional[torch.Tensor] = None,
+        gt_masks: Optional[torch.Tensor] = None,
+        overlap: Optional[bool] = False,
+        masks: Optional[bool] = False,
+    ) -> torch.Tensor:
         """
         Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
 
@@ -241,8 +247,8 @@ class SegmentationValidator(DetectionValidator):
             pred_masks (torch.Tensor, optional): Tensor representing predicted masks, if available. The shape should
                 match the ground truth masks.
             gt_masks (torch.Tensor, optional): Tensor of shape (M, H, W) representing ground truth masks, if available.
-            overlap (bool): Flag indicating if overlapping masks should be considered.
-            masks (bool): Flag indicating if the batch contains mask data.
+            overlap (bool, optional): Flag indicating if overlapping masks should be considered.
+            masks (bool, optional): Flag indicating if the batch contains mask data.
 
         Returns:
             (torch.Tensor): A correct prediction matrix of shape (N, 10), where 10 represents different IoU levels.
@@ -272,12 +278,12 @@ class SegmentationValidator(DetectionValidator):
 
         return self.match_predictions(detections[:, 5], gt_cls, iou)
 
-    def plot_val_samples(self, batch, ni):
+    def plot_val_samples(self, batch: Dict[str, Any], ni: int) -> None:
         """
         Plot validation samples with bounding box labels and masks.
 
        Args:
-            batch (
+            batch (Dict[str, Any]): Batch containing images and annotations.
             ni (int): Batch index.
         """
         plot_images(
@@ -292,13 +298,13 @@ class SegmentationValidator(DetectionValidator):
             on_plot=self.on_plot,
         )
 
-    def plot_predictions(self, batch, preds, ni):
+    def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int) -> None:
         """
         Plot batch predictions with masks and bounding boxes.
 
         Args:
-            batch (
-            preds (
+            batch (Dict[str, Any]): Batch containing images and annotations.
+            preds (List[torch.Tensor]): List of predictions from the model.
             ni (int): Batch index.
         """
         plot_images(
@@ -312,15 +318,17 @@ class SegmentationValidator(DetectionValidator):
         )  # pred
         self.plot_masks.clear()
 
-    def save_one_txt(
+    def save_one_txt(
+        self, predn: torch.Tensor, pred_masks: torch.Tensor, save_conf: bool, shape: Tuple[int, int], file: Path
+    ) -> None:
         """
         Save YOLO detections to a txt file in normalized coordinates in a specific format.
 
         Args:
-            predn (torch.Tensor): Predictions in the format
+            predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
             pred_masks (torch.Tensor): Predicted masks.
             save_conf (bool): Whether to save confidence scores.
-            shape (
+            shape (Tuple[int, int]): Shape of the original image.
             file (Path): File path to save the detections.
         """
         from ultralytics.engine.results import Results
@@ -333,7 +341,7 @@ class SegmentationValidator(DetectionValidator):
             masks=pred_masks,
         ).save_txt(file, save_conf=save_conf)
 
-    def pred_to_json(self, predn, filename, pred_masks):
+    def pred_to_json(self, predn: torch.Tensor, filename: str, pred_masks: torch.Tensor) -> None:
         """
         Save one JSON result for COCO evaluation.
 
@@ -371,8 +379,8 @@ class SegmentationValidator(DetectionValidator):
             }
         )
 
-    def eval_json(self, stats):
-        """Return COCO-style
+    def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
+        """Return COCO-style instance segmentation evaluation metrics."""
         if self.args.save_json and (self.is_lvis or self.is_coco) and len(self.jdict):
             pred_json = self.save_dir / "predictions.json"  # predictions
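The save_json/save_txt branch in init_metrics picks ops.process_mask_native (full input-resolution masks) over ops.process_mask (crop at prototype resolution). A rough standalone sketch of the fast path, with made-up tensor shapes:

import torch
from ultralytics.utils import ops

protos = torch.randn(32, 160, 160)  # prototype masks from the segmentation head
coefs = torch.randn(5, 32)          # per-detection mask coefficients (pred[:, 6:])
boxes = torch.tensor([[10.0, 20.0, 200.0, 300.0]]).repeat(5, 1)  # xyxy boxes (pred[:, :4])

masks = ops.process_mask(protos, coefs, boxes, shape=(640, 640), upsample=True)
print(masks.shape)  # torch.Size([5, 640, 640])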
ultralytics/solutions/analytics.py
CHANGED
@@ -68,6 +68,8 @@ class Analytics(BaseSolution):
 
         self.total_counts = 0  # count variable for storing total counts i.e. for line
         self.clswise_count = {}  # dictionary for class-wise counts
+        self.update_every = kwargs.get("update_every", 30)  # Only update graph every 30 frames by default
+        self.last_plot_im = None  # Cache of the last rendered chart
 
         # Ensure line and area chart
         if self.type in {"line", "area"}:
@@ -111,16 +113,21 @@ class Analytics(BaseSolution):
         if self.type == "line":
             for _ in self.boxes:
                 self.total_counts += 1
-
+            update_required = frame_number % self.update_every == 0 or self.last_plot_im is None
+            if update_required:
+                self.last_plot_im = self.update_graph(frame_number=frame_number)
+            plot_im = self.last_plot_im
             self.total_counts = 0
         elif self.type in {"pie", "bar", "area"}:
-
-
-
-
-
-
-
+            from collections import Counter
+
+            self.clswise_count = Counter(self.names[int(cls)] for cls in self.clss)
+            update_required = frame_number % self.update_every == 0 or self.last_plot_im is None
+            if update_required:
+                self.last_plot_im = self.update_graph(
+                    frame_number=frame_number, count_dict=self.clswise_count, plot=self.type
+                )
+            plot_im = self.last_plot_im
         else:
             raise ModuleNotFoundError(f"{self.type} chart is not supported ❌")
 
@@ -187,7 +194,7 @@ class Analytics(BaseSolution):
         self.ax.clear()
         for key, y_data in y_data_dict.items():
             color = next(color_cycle)
-            self.ax.fill_between(x_data, y_data, color=color, alpha=0.
+            self.ax.fill_between(x_data, y_data, color=color, alpha=0.55)
             self.ax.plot(
                 x_data,
                 y_data,
@@ -235,6 +242,7 @@ class Analytics(BaseSolution):
 
         # Common plot settings
         self.ax.set_facecolor("#f0f0f0")  # Set to light gray or any other color you like
+        self.ax.grid(True, linestyle="--", linewidth=0.5, alpha=0.5)  # Display grid for more data insights
         self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
         self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
         self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)
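The new update_every / last_plot_im pair throttles the Matplotlib re-render, which dominates per-frame cost; between updates the cached chart image is reused. A hedged usage sketch (the video path is illustrative, and this assumes the solutions entry point accepts the frame and frame number as in current Ultralytics releases):

import cv2
from ultralytics import solutions

analytics = solutions.Analytics(
    model="yolo11n.pt",
    analytics_type="line",
    update_every=30,  # re-draw the chart every 30 frames, per the kwargs.get default above
    show=False,
)

cap = cv2.VideoCapture("traffic.mp4")  # illustrative source
frame_idx = 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = analytics(frame, frame_idx)  # results carry the (possibly cached) chart image
    frame_idx += 1
cap.release()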
ultralytics/solutions/object_counter.py
CHANGED
@@ -79,8 +79,7 @@ class ObjectCounter(BaseSolution):
             return
 
         if len(self.region) == 2:  # Linear region (defined as a line segment)
-
-            if line.intersects(self.LineString([prev_position, current_centroid])):
+            if self.r_s.intersects(self.LineString([prev_position, current_centroid])):
                 # Determine orientation of the region (vertical or horizontal)
                 if abs(self.region[0][0] - self.region[1][0]) < abs(self.region[0][1] - self.region[1][1]):
                     # Vertical region: Compare x-coordinates to determine direction
@@ -100,8 +99,7 @@ class ObjectCounter(BaseSolution):
             self.counted_ids.append(track_id)
 
         elif len(self.region) > 2:  # Polygonal region
-
-            if polygon.contains(self.Point(current_centroid)):
+            if self.r_s.contains(self.Point(current_centroid)):
                 # Determine motion direction for vertical or horizontal polygons
                 region_width = max(p[0] for p in self.region) - min(p[0] for p in self.region)
                 region_height = max(p[1] for p in self.region) - min(p[1] for p in self.region)
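Both branches now reuse self.r_s, the shapely region geometry the solution initializes once from the region points, instead of reconstructing line/polygon objects on every call. The geometry tests themselves, as a standalone shapely sketch with illustrative coordinates:

from shapely.geometry import LineString, Point, Polygon

# Two-point region -> counting line; a crossing is an intersection test
r_s = LineString([(100, 0), (100, 400)])
track = LineString([(80, 200), (120, 210)])  # previous -> current centroid
print(r_s.intersects(track))  # True: the track crossed the line

# Region with 3+ points -> polygon; presence is a containment test
r_s = Polygon([(0, 0), (300, 0), (300, 300), (0, 300)])
print(r_s.contains(Point(150, 150)))  # True: centroid inside the region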
ultralytics/trackers/bot_sort.py
CHANGED
@@ -260,11 +260,13 @@ class ReID:
         from ultralytics import YOLO
 
         self.model = YOLO(model)
-        self.model(embed=[len(self.model.model.model) - 2 if ".pt" in model else -1], verbose=False)  #
+        self.model(embed=[len(self.model.model.model) - 2 if ".pt" in model else -1], verbose=False, save=False)  # init
 
     def __call__(self, img: np.ndarray, dets: np.ndarray) -> List[np.ndarray]:
         """Extract embeddings for detected objects."""
-        feats = self.model
+        feats = self.model.predictor(
+            [save_one_box(det, img, save=False) for det in xywh2xyxy(torch.from_numpy(dets[:, :4]))]
+        )
         if len(feats) != dets.shape[0] and feats[0].shape[0] == dets.shape[0]:
             feats = feats[0]  # batched prediction with non-PyTorch backend
         return [f.cpu().numpy() for f in feats]
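For context, ReID embeds a crop of each detection (save_one_box on the xyxy-converted boxes) through the auxiliary model's predictor, and the added save=False flags keep the warm-up call and crops off disk. A hedged sketch of exercising it through tracking, assuming a botsort.yaml tracker config with ReID enabled as in recent releases:

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# A BoT-SORT config with ReID enabled routes appearance embeddings through the class above
results = model.track("people.mp4", tracker="botsort.yaml", show=False)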
ultralytics/utils/__init__.py
CHANGED
@@ -841,8 +841,7 @@ def is_docker() -> bool:
         (bool): True if the script is running inside a Docker container, False otherwise.
     """
     try:
-
-        return "docker" in f.read()
+        return os.path.exists("/.dockerenv")
     except Exception:
         return False
 
ultralytics/utils/benchmarks.py
CHANGED
@@ -106,41 +106,41 @@ def benchmark(
     if format_arg:
         formats = frozenset(export_formats()["Argument"])
         assert format in formats, f"Expected format to be one of {formats}, but got '{format_arg}'."
-    for
+    for name, format, suffix, cpu, gpu, _ in zip(*export_formats().values()):
         emoji, filename = "❌", None  # export defaults
         try:
             if format_arg and format_arg != format:
                 continue
 
             # Checks
-            if
+            if format == "pb":
                 assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
-            elif
+            elif format == "edgetpu":
                 assert LINUX and not ARM64, "Edge TPU export only supported on non-aarch64 Linux"
-            elif
+            elif format in {"coreml", "tfjs"}:
                 assert MACOS or (LINUX and not ARM64), (
                     "CoreML and TF.js export only supported on macOS and non-aarch64 Linux"
                 )
-            if
+            if format == "coreml":
                 assert not IS_PYTHON_3_13, "CoreML not supported on Python 3.13"
-            if
+            if format in {"saved_model", "pb", "tflite", "edgetpu", "tfjs"}:
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
                 # assert not IS_PYTHON_MINIMUM_3_12, "TFLite exports not supported on Python>=3.12 yet"
-            if
+            if format == "paddle":
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet"
                 assert model.task != "obb", "Paddle OBB bug https://github.com/PaddlePaddle/Paddle/issues/72024"
                 assert not is_end2end, "End-to-end models not supported by PaddlePaddle yet"
                 assert (LINUX and not IS_JETSON) or MACOS, "Windows and Jetson Paddle exports not supported yet"
-            if
+            if format == "mnn":
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 MNN exports not supported yet"
-            if
+            if format == "ncnn":
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 NCNN exports not supported yet"
-            if
+            if format == "imx":
                 assert not is_end2end
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
                 assert model.task == "detect", "IMX only supported for detection task"
                 assert "C2f" in model.__str__(), "IMX only supported for YOLOv8"  # TODO: enable for YOLO11
-            if
+            if format == "rknn":
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
                 assert not is_end2end, "End-to-end models not supported by RKNN yet"
                 assert LINUX, "RKNN only supported on Linux"
@@ -163,10 +163,10 @@ def benchmark(
             emoji = "❎"  # indicates export succeeded
 
             # Predict
-            assert model.task != "pose" or
-            assert
-            assert
-            if
+            assert model.task != "pose" or format != "pb", "GraphDef Pose inference is not supported"
+            assert format not in {"edgetpu", "tfjs"}, "inference not supported"
+            assert format != "coreml" or platform.system() == "Darwin", "inference only supported on macOS>=10.13"
+            if format == "ncnn":
                 assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
             exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half, verbose=False)
 
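The loop now unpacks named columns from export_formats() and compares the format string directly (e.g. "pb", "coreml") rather than positional indices, which is what the rewritten assertions key on. A single-format run, as a sketch (benchmark() accepts a format argument in this release line):

from ultralytics.utils.benchmarks import benchmark

# Restrict the benchmark to one export format; unsupported combinations
# (e.g. CoreML on Python 3.13) are skipped via the assertions above
benchmark(model="yolo11n.pt", imgsz=160, format="onnx")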