dgenerate-ultralytics-headless 8.3.134__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
- tests/__init__.py +22 -0
- tests/conftest.py +83 -0
- tests/test_cli.py +138 -0
- tests/test_cuda.py +215 -0
- tests/test_engine.py +131 -0
- tests/test_exports.py +236 -0
- tests/test_integrations.py +154 -0
- tests/test_python.py +694 -0
- tests/test_solutions.py +187 -0
- ultralytics/__init__.py +30 -0
- ultralytics/assets/bus.jpg +0 -0
- ultralytics/assets/zidane.jpg +0 -0
- ultralytics/cfg/__init__.py +1023 -0
- ultralytics/cfg/datasets/Argoverse.yaml +77 -0
- ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
- ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
- ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
- ultralytics/cfg/datasets/Objects365.yaml +443 -0
- ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
- ultralytics/cfg/datasets/VOC.yaml +106 -0
- ultralytics/cfg/datasets/VisDrone.yaml +77 -0
- ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
- ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
- ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
- ultralytics/cfg/datasets/coco-pose.yaml +42 -0
- ultralytics/cfg/datasets/coco.yaml +118 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco128.yaml +101 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
- ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
- ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco8.yaml +101 -0
- ultralytics/cfg/datasets/crack-seg.yaml +22 -0
- ultralytics/cfg/datasets/dog-pose.yaml +24 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
- ultralytics/cfg/datasets/dota8.yaml +35 -0
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
- ultralytics/cfg/datasets/lvis.yaml +1240 -0
- ultralytics/cfg/datasets/medical-pills.yaml +22 -0
- ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
- ultralytics/cfg/datasets/package-seg.yaml +22 -0
- ultralytics/cfg/datasets/signature.yaml +21 -0
- ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
- ultralytics/cfg/datasets/xView.yaml +155 -0
- ultralytics/cfg/default.yaml +127 -0
- ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
- ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
- ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
- ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
- ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
- ultralytics/cfg/models/11/yolo11.yaml +50 -0
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
- ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
- ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
- ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
- ultralytics/cfg/models/12/yolo12.yaml +48 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
- ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
- ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
- ultralytics/cfg/models/v3/yolov3.yaml +49 -0
- ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
- ultralytics/cfg/models/v5/yolov5.yaml +51 -0
- ultralytics/cfg/models/v6/yolov6.yaml +56 -0
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
- ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
- ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8.yaml +49 -0
- ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
- ultralytics/cfg/trackers/botsort.yaml +22 -0
- ultralytics/cfg/trackers/bytetrack.yaml +14 -0
- ultralytics/data/__init__.py +26 -0
- ultralytics/data/annotator.py +66 -0
- ultralytics/data/augment.py +2945 -0
- ultralytics/data/base.py +438 -0
- ultralytics/data/build.py +258 -0
- ultralytics/data/converter.py +754 -0
- ultralytics/data/dataset.py +834 -0
- ultralytics/data/loaders.py +676 -0
- ultralytics/data/scripts/download_weights.sh +18 -0
- ultralytics/data/scripts/get_coco.sh +61 -0
- ultralytics/data/scripts/get_coco128.sh +18 -0
- ultralytics/data/scripts/get_imagenet.sh +52 -0
- ultralytics/data/split.py +125 -0
- ultralytics/data/split_dota.py +325 -0
- ultralytics/data/utils.py +777 -0
- ultralytics/engine/__init__.py +1 -0
- ultralytics/engine/exporter.py +1519 -0
- ultralytics/engine/model.py +1156 -0
- ultralytics/engine/predictor.py +502 -0
- ultralytics/engine/results.py +1840 -0
- ultralytics/engine/trainer.py +853 -0
- ultralytics/engine/tuner.py +243 -0
- ultralytics/engine/validator.py +377 -0
- ultralytics/hub/__init__.py +168 -0
- ultralytics/hub/auth.py +137 -0
- ultralytics/hub/google/__init__.py +176 -0
- ultralytics/hub/session.py +446 -0
- ultralytics/hub/utils.py +248 -0
- ultralytics/models/__init__.py +9 -0
- ultralytics/models/fastsam/__init__.py +7 -0
- ultralytics/models/fastsam/model.py +61 -0
- ultralytics/models/fastsam/predict.py +181 -0
- ultralytics/models/fastsam/utils.py +24 -0
- ultralytics/models/fastsam/val.py +40 -0
- ultralytics/models/nas/__init__.py +7 -0
- ultralytics/models/nas/model.py +102 -0
- ultralytics/models/nas/predict.py +58 -0
- ultralytics/models/nas/val.py +39 -0
- ultralytics/models/rtdetr/__init__.py +7 -0
- ultralytics/models/rtdetr/model.py +63 -0
- ultralytics/models/rtdetr/predict.py +84 -0
- ultralytics/models/rtdetr/train.py +85 -0
- ultralytics/models/rtdetr/val.py +191 -0
- ultralytics/models/sam/__init__.py +6 -0
- ultralytics/models/sam/amg.py +260 -0
- ultralytics/models/sam/build.py +358 -0
- ultralytics/models/sam/model.py +170 -0
- ultralytics/models/sam/modules/__init__.py +1 -0
- ultralytics/models/sam/modules/blocks.py +1129 -0
- ultralytics/models/sam/modules/decoders.py +515 -0
- ultralytics/models/sam/modules/encoders.py +854 -0
- ultralytics/models/sam/modules/memory_attention.py +299 -0
- ultralytics/models/sam/modules/sam.py +1006 -0
- ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
- ultralytics/models/sam/modules/transformer.py +351 -0
- ultralytics/models/sam/modules/utils.py +394 -0
- ultralytics/models/sam/predict.py +1605 -0
- ultralytics/models/utils/__init__.py +1 -0
- ultralytics/models/utils/loss.py +455 -0
- ultralytics/models/utils/ops.py +268 -0
- ultralytics/models/yolo/__init__.py +7 -0
- ultralytics/models/yolo/classify/__init__.py +7 -0
- ultralytics/models/yolo/classify/predict.py +88 -0
- ultralytics/models/yolo/classify/train.py +233 -0
- ultralytics/models/yolo/classify/val.py +215 -0
- ultralytics/models/yolo/detect/__init__.py +7 -0
- ultralytics/models/yolo/detect/predict.py +124 -0
- ultralytics/models/yolo/detect/train.py +217 -0
- ultralytics/models/yolo/detect/val.py +451 -0
- ultralytics/models/yolo/model.py +354 -0
- ultralytics/models/yolo/obb/__init__.py +7 -0
- ultralytics/models/yolo/obb/predict.py +66 -0
- ultralytics/models/yolo/obb/train.py +81 -0
- ultralytics/models/yolo/obb/val.py +283 -0
- ultralytics/models/yolo/pose/__init__.py +7 -0
- ultralytics/models/yolo/pose/predict.py +79 -0
- ultralytics/models/yolo/pose/train.py +154 -0
- ultralytics/models/yolo/pose/val.py +394 -0
- ultralytics/models/yolo/segment/__init__.py +7 -0
- ultralytics/models/yolo/segment/predict.py +113 -0
- ultralytics/models/yolo/segment/train.py +123 -0
- ultralytics/models/yolo/segment/val.py +428 -0
- ultralytics/models/yolo/world/__init__.py +5 -0
- ultralytics/models/yolo/world/train.py +119 -0
- ultralytics/models/yolo/world/train_world.py +176 -0
- ultralytics/models/yolo/yoloe/__init__.py +22 -0
- ultralytics/models/yolo/yoloe/predict.py +169 -0
- ultralytics/models/yolo/yoloe/train.py +298 -0
- ultralytics/models/yolo/yoloe/train_seg.py +124 -0
- ultralytics/models/yolo/yoloe/val.py +191 -0
- ultralytics/nn/__init__.py +29 -0
- ultralytics/nn/autobackend.py +842 -0
- ultralytics/nn/modules/__init__.py +182 -0
- ultralytics/nn/modules/activation.py +53 -0
- ultralytics/nn/modules/block.py +1966 -0
- ultralytics/nn/modules/conv.py +712 -0
- ultralytics/nn/modules/head.py +880 -0
- ultralytics/nn/modules/transformer.py +713 -0
- ultralytics/nn/modules/utils.py +164 -0
- ultralytics/nn/tasks.py +1627 -0
- ultralytics/nn/text_model.py +351 -0
- ultralytics/solutions/__init__.py +41 -0
- ultralytics/solutions/ai_gym.py +116 -0
- ultralytics/solutions/analytics.py +252 -0
- ultralytics/solutions/config.py +106 -0
- ultralytics/solutions/distance_calculation.py +124 -0
- ultralytics/solutions/heatmap.py +127 -0
- ultralytics/solutions/instance_segmentation.py +84 -0
- ultralytics/solutions/object_blurrer.py +90 -0
- ultralytics/solutions/object_counter.py +195 -0
- ultralytics/solutions/object_cropper.py +84 -0
- ultralytics/solutions/parking_management.py +273 -0
- ultralytics/solutions/queue_management.py +93 -0
- ultralytics/solutions/region_counter.py +120 -0
- ultralytics/solutions/security_alarm.py +154 -0
- ultralytics/solutions/similarity_search.py +172 -0
- ultralytics/solutions/solutions.py +724 -0
- ultralytics/solutions/speed_estimation.py +110 -0
- ultralytics/solutions/streamlit_inference.py +196 -0
- ultralytics/solutions/templates/similarity-search.html +160 -0
- ultralytics/solutions/trackzone.py +88 -0
- ultralytics/solutions/vision_eye.py +68 -0
- ultralytics/trackers/__init__.py +7 -0
- ultralytics/trackers/basetrack.py +124 -0
- ultralytics/trackers/bot_sort.py +260 -0
- ultralytics/trackers/byte_tracker.py +480 -0
- ultralytics/trackers/track.py +125 -0
- ultralytics/trackers/utils/__init__.py +1 -0
- ultralytics/trackers/utils/gmc.py +376 -0
- ultralytics/trackers/utils/kalman_filter.py +493 -0
- ultralytics/trackers/utils/matching.py +157 -0
- ultralytics/utils/__init__.py +1435 -0
- ultralytics/utils/autobatch.py +106 -0
- ultralytics/utils/autodevice.py +174 -0
- ultralytics/utils/benchmarks.py +695 -0
- ultralytics/utils/callbacks/__init__.py +5 -0
- ultralytics/utils/callbacks/base.py +234 -0
- ultralytics/utils/callbacks/clearml.py +153 -0
- ultralytics/utils/callbacks/comet.py +552 -0
- ultralytics/utils/callbacks/dvc.py +205 -0
- ultralytics/utils/callbacks/hub.py +108 -0
- ultralytics/utils/callbacks/mlflow.py +138 -0
- ultralytics/utils/callbacks/neptune.py +140 -0
- ultralytics/utils/callbacks/raytune.py +43 -0
- ultralytics/utils/callbacks/tensorboard.py +132 -0
- ultralytics/utils/callbacks/wb.py +185 -0
- ultralytics/utils/checks.py +897 -0
- ultralytics/utils/dist.py +119 -0
- ultralytics/utils/downloads.py +499 -0
- ultralytics/utils/errors.py +43 -0
- ultralytics/utils/export.py +219 -0
- ultralytics/utils/files.py +221 -0
- ultralytics/utils/instance.py +499 -0
- ultralytics/utils/loss.py +813 -0
- ultralytics/utils/metrics.py +1356 -0
- ultralytics/utils/ops.py +885 -0
- ultralytics/utils/patches.py +143 -0
- ultralytics/utils/plotting.py +1011 -0
- ultralytics/utils/tal.py +416 -0
- ultralytics/utils/torch_utils.py +990 -0
- ultralytics/utils/triton.py +116 -0
- ultralytics/utils/tuner.py +159 -0
@@ -0,0 +1,428 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
2
|
+
|
3
|
+
from multiprocessing.pool import ThreadPool
|
4
|
+
from pathlib import Path
|
5
|
+
|
6
|
+
import numpy as np
|
7
|
+
import torch
|
8
|
+
import torch.nn.functional as F
|
9
|
+
|
10
|
+
from ultralytics.models.yolo.detect import DetectionValidator
|
11
|
+
from ultralytics.utils import LOGGER, NUM_THREADS, ops
|
12
|
+
from ultralytics.utils.checks import check_requirements
|
13
|
+
from ultralytics.utils.metrics import SegmentMetrics, box_iou, mask_iou
|
14
|
+
from ultralytics.utils.plotting import output_to_target, plot_images
|
15
|
+
|
16
|
+
|
17
|
+
class SegmentationValidator(DetectionValidator):
|
18
|
+
"""
|
19
|
+
A class extending the DetectionValidator class for validation based on a segmentation model.
|
20
|
+
|
21
|
+
This validator handles the evaluation of segmentation models, processing both bounding box and mask predictions
|
22
|
+
to compute metrics such as mAP for both detection and segmentation tasks.
|
23
|
+
|
24
|
+
Attributes:
|
25
|
+
plot_masks (list): List to store masks for plotting.
|
26
|
+
process (callable): Function to process masks based on save_json and save_txt flags.
|
27
|
+
args (namespace): Arguments for the validator.
|
28
|
+
metrics (SegmentMetrics): Metrics calculator for segmentation tasks.
|
29
|
+
stats (dict): Dictionary to store statistics during validation.
|
30
|
+
|
31
|
+
Examples:
|
32
|
+
>>> from ultralytics.models.yolo.segment import SegmentationValidator
|
33
|
+
>>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
|
34
|
+
>>> validator = SegmentationValidator(args=args)
|
35
|
+
>>> validator()
|
36
|
+
"""
|
37
|
+
|
38
|
+
def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
|
39
|
+
"""
|
40
|
+
Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.
|
41
|
+
|
42
|
+
Args:
|
43
|
+
dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
|
44
|
+
save_dir (Path, optional): Directory to save results.
|
45
|
+
pbar (Any, optional): Progress bar for displaying progress.
|
46
|
+
args (namespace, optional): Arguments for the validator.
|
47
|
+
_callbacks (list, optional): List of callback functions.
|
48
|
+
"""
|
49
|
+
super().__init__(dataloader, save_dir, pbar, args, _callbacks)
|
50
|
+
self.plot_masks = None
|
51
|
+
self.process = None
|
52
|
+
self.args.task = "segment"
|
53
|
+
self.metrics = SegmentMetrics(save_dir=self.save_dir)
|
54
|
+
|
55
|
+
def preprocess(self, batch):
|
56
|
+
"""Preprocess batch by converting masks to float and sending to device."""
|
57
|
+
batch = super().preprocess(batch)
|
58
|
+
batch["masks"] = batch["masks"].to(self.device).float()
|
59
|
+
return batch
|
60
|
+
|
61
|
+
def init_metrics(self, model):
|
62
|
+
"""
|
63
|
+
Initialize metrics and select mask processing function based on save_json flag.
|
64
|
+
|
65
|
+
Args:
|
66
|
+
model (torch.nn.Module): Model to validate.
|
67
|
+
"""
|
68
|
+
super().init_metrics(model)
|
69
|
+
self.plot_masks = []
|
70
|
+
if self.args.save_json:
|
71
|
+
check_requirements("pycocotools>=2.0.6")
|
72
|
+
# more accurate vs faster
|
73
|
+
self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
|
74
|
+
self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
|
75
|
+
|
76
|
+
def get_desc(self):
|
77
|
+
"""Return a formatted description of evaluation metrics."""
|
78
|
+
return ("%22s" + "%11s" * 10) % (
|
79
|
+
"Class",
|
80
|
+
"Images",
|
81
|
+
"Instances",
|
82
|
+
"Box(P",
|
83
|
+
"R",
|
84
|
+
"mAP50",
|
85
|
+
"mAP50-95)",
|
86
|
+
"Mask(P",
|
87
|
+
"R",
|
88
|
+
"mAP50",
|
89
|
+
"mAP50-95)",
|
90
|
+
)
|
91
|
+
|
92
|
+
def postprocess(self, preds):
|
93
|
+
"""
|
94
|
+
Post-process YOLO predictions and return output detections with proto.
|
95
|
+
|
96
|
+
Args:
|
97
|
+
preds (list): Raw predictions from the model.
|
98
|
+
|
99
|
+
Returns:
|
100
|
+
p (torch.Tensor): Processed detection predictions.
|
101
|
+
proto (torch.Tensor): Prototype masks for segmentation.
|
102
|
+
"""
|
103
|
+
p = super().postprocess(preds[0])
|
104
|
+
proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported
|
105
|
+
return p, proto
|
106
|
+
|
107
|
+
def _prepare_batch(self, si, batch):
|
108
|
+
"""
|
109
|
+
Prepare a batch for training or inference by processing images and targets.
|
110
|
+
|
111
|
+
Args:
|
112
|
+
si (int): Batch index.
|
113
|
+
batch (dict): Batch data containing images and targets.
|
114
|
+
|
115
|
+
Returns:
|
116
|
+
(dict): Prepared batch with processed images and targets.
|
117
|
+
"""
|
118
|
+
prepared_batch = super()._prepare_batch(si, batch)
|
119
|
+
midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
|
120
|
+
prepared_batch["masks"] = batch["masks"][midx]
|
121
|
+
return prepared_batch
|
122
|
+
|
123
|
+
def _prepare_pred(self, pred, pbatch, proto):
|
124
|
+
"""
|
125
|
+
Prepare predictions for evaluation by processing bounding boxes and masks.
|
126
|
+
|
127
|
+
Args:
|
128
|
+
pred (torch.Tensor): Raw predictions from the model.
|
129
|
+
pbatch (dict): Prepared batch data.
|
130
|
+
proto (torch.Tensor): Prototype masks for segmentation.
|
131
|
+
|
132
|
+
Returns:
|
133
|
+
predn (torch.Tensor): Processed bounding box predictions.
|
134
|
+
pred_masks (torch.Tensor): Processed mask predictions.
|
135
|
+
"""
|
136
|
+
predn = super()._prepare_pred(pred, pbatch)
|
137
|
+
pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=pbatch["imgsz"])
|
138
|
+
return predn, pred_masks
|
139
|
+
|
140
|
+
    def update_metrics(self, preds, batch):
        """
        Update running metrics with the current batch's predictions and targets.

        Iterates images in the batch, matching predictions against ground truth for both boxes
        and masks, appending per-image statistics to `self.stats`, and optionally collecting
        masks for plotting and exporting results to JSON/TXT.

        Args:
            preds (list): Predictions from the model; preds[0] is per-image detections and
                preds[1] holds the corresponding mask prototypes.
            batch (dict): Batch data containing images and targets.
        """
        for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
            self.seen += 1
            npr = len(pred)  # number of predictions for this image
            # Empty/zeroed defaults so images with no predictions still contribute consistent stats.
            stat = dict(
                conf=torch.zeros(0, device=self.device),
                pred_cls=torch.zeros(0, device=self.device),
                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
                tp_m=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
            )
            pbatch = self._prepare_batch(si, batch)
            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
            nl = len(cls)  # number of ground-truth labels
            stat["target_cls"] = cls
            stat["target_img"] = cls.unique()
            if npr == 0:
                # No predictions: record stats only if there are labels (all become misses).
                if nl:
                    for k in self.stats.keys():
                        self.stats[k].append(stat[k])
                    if self.args.plots:
                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                continue

            # Masks
            gt_masks = pbatch.pop("masks")
            # Predictions
            if self.args.single_cls:
                pred[:, 5] = 0  # collapse all predicted classes to class 0
            predn, pred_masks = self._prepare_pred(pred, pbatch, proto)
            stat["conf"] = predn[:, 4]
            stat["pred_cls"] = predn[:, 5]

            # Evaluate: true-positive matrices for boxes (tp) and masks (tp_m)
            if nl:
                stat["tp"] = self._process_batch(predn, bbox, cls)
                stat["tp_m"] = self._process_batch(
                    predn, bbox, cls, pred_masks, gt_masks, self.args.overlap_mask, masks=True
                )
                if self.args.plots:
                    self.confusion_matrix.process_batch(predn, bbox, cls)

            for k in self.stats.keys():
                self.stats[k].append(stat[k])

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if self.args.plots and self.batch_i < 3:
                self.plot_masks.append(pred_masks[:50].cpu())  # Limit plotted items for speed
                if pred_masks.shape[0] > 50:
                    LOGGER.warning("Limiting validation plots to first 50 items per image for speed...")

            # Save
            if self.args.save_json:
                self.pred_to_json(
                    predn,
                    batch["im_file"][si],
                    # Rescale masks to the original image shape before RLE encoding.
                    ops.scale_image(
                        pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
                        pbatch["ori_shape"],
                        ratio_pad=batch["ratio_pad"][si],
                    ),
                )
            if self.args.save_txt:
                self.save_one_txt(
                    predn,
                    pred_masks,
                    self.args.save_conf,
                    pbatch["ori_shape"],
                    self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
                )
|
216
|
+
|
217
|
+
def finalize_metrics(self, *args, **kwargs):
|
218
|
+
"""
|
219
|
+
Finalize evaluation metrics by setting the speed attribute in the metrics object.
|
220
|
+
|
221
|
+
This method is called at the end of validation to set the processing speed for the metrics calculations.
|
222
|
+
It transfers the validator's speed measurement to the metrics object for reporting.
|
223
|
+
|
224
|
+
Args:
|
225
|
+
*args (Any): Variable length argument list.
|
226
|
+
**kwargs (Any): Arbitrary keyword arguments.
|
227
|
+
"""
|
228
|
+
self.metrics.speed = self.speed
|
229
|
+
self.metrics.confusion_matrix = self.confusion_matrix
|
230
|
+
|
231
|
+
    def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
        """
        Compute correct prediction matrix for a batch based on bounding boxes and optional masks.

        Args:
            detections (torch.Tensor): Tensor of shape (N, 6) representing detected bounding boxes and
                associated confidence scores and class indices. Each row is of the format [x1, y1, x2, y2, conf, class].
            gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground truth bounding box coordinates.
                Each row is of the format [x1, y1, x2, y2].
            gt_cls (torch.Tensor): Tensor of shape (M,) representing ground truth class indices.
            pred_masks (torch.Tensor, optional): Tensor representing predicted masks, if available. The shape should
                match the ground truth masks.
            gt_masks (torch.Tensor, optional): Tensor of shape (M, H, W) representing ground truth masks, if available.
            overlap (bool): Flag indicating if overlapping masks should be considered.
            masks (bool): Flag indicating if the batch contains mask data.

        Returns:
            (torch.Tensor): A correct prediction matrix of shape (N, 10), where 10 represents different IoU levels.

        Note:
            - If `masks` is True, the function computes IoU between predicted and ground truth masks.
            - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.

        Examples:
            >>> detections = torch.tensor([[25, 30, 200, 300, 0.8, 1], [50, 60, 180, 290, 0.75, 0]])
            >>> gt_bboxes = torch.tensor([[24, 29, 199, 299], [55, 65, 185, 295]])
            >>> gt_cls = torch.tensor([1, 0])
            >>> correct_preds = validator._process_batch(detections, gt_bboxes, gt_cls)
        """
        if masks:
            if overlap:
                # Overlap encoding: a single mask plane stores instance i as pixel value i + 1.
                # Expand it to one binary plane per instance before computing IoU.
                nl = len(gt_cls)
                index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
                gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
                gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
            if gt_masks.shape[1:] != pred_masks.shape[1:]:
                # Resize GT masks to prediction resolution, then re-binarize after interpolation.
                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
                gt_masks = gt_masks.gt_(0.5)
            iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
        else:  # boxes
            iou = box_iou(gt_bboxes, detections[:, :4])

        return self.match_predictions(detections[:, 5], gt_cls, iou)
|
274
|
+
|
275
|
+
def plot_val_samples(self, batch, ni):
|
276
|
+
"""
|
277
|
+
Plot validation samples with bounding box labels and masks.
|
278
|
+
|
279
|
+
Args:
|
280
|
+
batch (dict): Batch data containing images and targets.
|
281
|
+
ni (int): Batch index.
|
282
|
+
"""
|
283
|
+
plot_images(
|
284
|
+
batch["img"],
|
285
|
+
batch["batch_idx"],
|
286
|
+
batch["cls"].squeeze(-1),
|
287
|
+
batch["bboxes"],
|
288
|
+
masks=batch["masks"],
|
289
|
+
paths=batch["im_file"],
|
290
|
+
fname=self.save_dir / f"val_batch{ni}_labels.jpg",
|
291
|
+
names=self.names,
|
292
|
+
on_plot=self.on_plot,
|
293
|
+
)
|
294
|
+
|
295
|
+
def plot_predictions(self, batch, preds, ni):
|
296
|
+
"""
|
297
|
+
Plot batch predictions with masks and bounding boxes.
|
298
|
+
|
299
|
+
Args:
|
300
|
+
batch (dict): Batch data containing images.
|
301
|
+
preds (list): Predictions from the model.
|
302
|
+
ni (int): Batch index.
|
303
|
+
"""
|
304
|
+
plot_images(
|
305
|
+
batch["img"],
|
306
|
+
*output_to_target(preds[0], max_det=50), # not set to self.args.max_det due to slow plotting speed
|
307
|
+
torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
|
308
|
+
paths=batch["im_file"],
|
309
|
+
fname=self.save_dir / f"val_batch{ni}_pred.jpg",
|
310
|
+
names=self.names,
|
311
|
+
on_plot=self.on_plot,
|
312
|
+
) # pred
|
313
|
+
self.plot_masks.clear()
|
314
|
+
|
315
|
+
def save_one_txt(self, predn, pred_masks, save_conf, shape, file):
|
316
|
+
"""
|
317
|
+
Save YOLO detections to a txt file in normalized coordinates in a specific format.
|
318
|
+
|
319
|
+
Args:
|
320
|
+
predn (torch.Tensor): Predictions in the format [x1, y1, x2, y2, conf, cls].
|
321
|
+
pred_masks (torch.Tensor): Predicted masks.
|
322
|
+
save_conf (bool): Whether to save confidence scores.
|
323
|
+
shape (tuple): Original image shape.
|
324
|
+
file (Path): File path to save the detections.
|
325
|
+
"""
|
326
|
+
from ultralytics.engine.results import Results
|
327
|
+
|
328
|
+
Results(
|
329
|
+
np.zeros((shape[0], shape[1]), dtype=np.uint8),
|
330
|
+
path=None,
|
331
|
+
names=self.names,
|
332
|
+
boxes=predn[:, :6],
|
333
|
+
masks=pred_masks,
|
334
|
+
).save_txt(file, save_conf=save_conf)
|
335
|
+
|
336
|
+
    def pred_to_json(self, predn, filename, pred_masks):
        """
        Save one JSON result for COCO evaluation.

        Args:
            predn (torch.Tensor): Predictions in the format [x1, y1, x2, y2, conf, cls].
            filename (str): Image filename.
            pred_masks (numpy.ndarray): Predicted masks.

        Examples:
            >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
        """
        from pycocotools.mask import encode  # noqa

        def single_encode(x):
            """RLE-encode one binary mask (COCO format, Fortran order) and decode counts to str for JSON."""
            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
            rle["counts"] = rle["counts"].decode("utf-8")
            return rle

        stem = Path(filename).stem
        # COCO image ids are numeric filenames; keep the stem as-is otherwise.
        image_id = int(stem) if stem.isnumeric() else stem
        box = ops.xyxy2xywh(predn[:, :4])  # xywh
        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner (COCO bbox convention)
        # (H, W, N) -> (N, H, W) so each mask can be encoded independently.
        pred_masks = np.transpose(pred_masks, (2, 0, 1))
        # RLE encoding is CPU-bound per mask; parallelize across a thread pool.
        with ThreadPool(NUM_THREADS) as pool:
            rles = pool.map(single_encode, pred_masks)
        for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
            self.jdict.append(
                {
                    "image_id": image_id,
                    "category_id": self.class_map[int(p[5])],
                    "bbox": [round(x, 3) for x in b],
                    "score": round(p[4], 5),
                    "segmentation": rles[i],
                }
            )
|
373
|
+
|
374
|
+
    def eval_json(self, stats):
        """
        Return COCO-style object detection evaluation metrics.

        Runs official pycocotools (COCO) or lvis (LVIS) evaluation on the saved predictions.json,
        updating `stats` in place with box and mask mAP values. No-op unless save_json is set,
        the dataset is COCO/LVIS, and predictions were collected.

        Args:
            stats (dict): Statistics dictionary to update with official evaluation results.

        Returns:
            (dict): The (possibly updated) statistics dictionary.
        """
        if self.args.save_json and (self.is_lvis or self.is_coco) and len(self.jdict):
            pred_json = self.save_dir / "predictions.json"  # predictions

            anno_json = (
                self.data["path"]
                / "annotations"
                / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
            )  # annotations

            pkg = "pycocotools" if self.is_coco else "lvis"
            LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                for x in anno_json, pred_json:
                    assert x.is_file(), f"{x} file not found"
                check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
                if self.is_coco:
                    from pycocotools.coco import COCO  # noqa
                    from pycocotools.cocoeval import COCOeval  # noqa

                    anno = COCO(str(anno_json))  # init annotations api
                    pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                    # Evaluate boxes first, then masks, in that fixed order.
                    vals = [COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm")]
                else:
                    from lvis import LVIS, LVISEval

                    anno = LVIS(str(anno_json))
                    pred = anno._load_json(str(pred_json))
                    vals = [LVISEval(anno, pred, "bbox"), LVISEval(anno, pred, "segm")]

                for i, eval in enumerate(vals):
                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                    eval.evaluate()
                    eval.accumulate()
                    eval.summarize()
                    if self.is_lvis:
                        eval.print_results()
                    # Map eval index (0=box, 1=mask) to this validator's metric-key positions.
                    idx = i * 4 + 2
                    # update mAP50-95 and mAP50
                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = (
                        eval.stats[:2] if self.is_coco else [eval.results["AP"], eval.results["AP50"]]
                    )
                    if self.is_lvis:
                        # LVIS also reports rare/common/frequent AP; tag B=box, M=mask.
                        tag = "B" if i == 0 else "M"
                        stats[f"metrics/APr({tag})"] = eval.results["APr"]
                        stats[f"metrics/APc({tag})"] = eval.results["APc"]
                        stats[f"metrics/APf({tag})"] = eval.results["APf"]

                if self.is_lvis:
                    stats["fitness"] = stats["metrics/mAP50-95(B)"]

            except Exception as e:
                # Best-effort: official evaluation failures must not abort validation.
                LOGGER.warning(f"{pkg} unable to run: {e}")
        return stats
|
@@ -0,0 +1,119 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
2
|
+
|
3
|
+
import itertools
|
4
|
+
|
5
|
+
from ultralytics.data import build_yolo_dataset
|
6
|
+
from ultralytics.models import yolo
|
7
|
+
from ultralytics.nn.tasks import WorldModel
|
8
|
+
from ultralytics.utils import DEFAULT_CFG, RANK, checks
|
9
|
+
from ultralytics.utils.torch_utils import de_parallel
|
10
|
+
|
11
|
+
|
12
|
+
def on_pretrain_routine_end(trainer):
    """
    Set up model class names and attach a frozen CLIP text encoder at the end of the pretrain routine.

    Args:
        trainer (WorldTrainer): Trainer whose EMA model receives the dataset class names and which gets a
            frozen CLIP text model assigned to `trainer.text_model` for later text-feature extraction.
    """
    if RANK in {-1, 0}:
        # Set class names for evaluation; keep only the first synonym (e.g. "person/human" -> "person").
        # NOTE: .values() is iterated directly; the extra list() wrapper was unnecessary.
        names = [name.split("/")[0] for name in trainer.test_loader.dataset.data["names"].values()]
        de_parallel(trainer.ema.ema).set_classes(names, cache_clip_model=False)
        device = next(trainer.model.parameters()).device
        trainer.text_model, _ = trainer.clip.load("ViT-B/32", device=device)
        # Freeze the text encoder; it is used only for feature extraction, never trained
        for p in trainer.text_model.parameters():
            p.requires_grad_(False)
|
22
|
+
|
23
|
+
|
24
|
+
class WorldTrainer(yolo.detect.DetectionTrainer):
    """
    A class to fine-tune a world model on a close-set dataset.

    This trainer extends the DetectionTrainer to support training YOLO World models, which combine
    visual and textual features for improved object detection and understanding.

    Attributes:
        clip (module): The CLIP module for text-image understanding.
        text_model (module): The text encoder model from CLIP.
        model (WorldModel): The YOLO World model being trained.
        data (dict): Dataset configuration containing class information.
        args (dict): Training arguments and configuration.

    Examples:
        >>> from ultralytics.models.yolo.world import WorldTrainer
        >>> args = dict(model="yolov8s-world.pt", data="coco8.yaml", epochs=3)
        >>> trainer = WorldTrainer(overrides=args)
        >>> trainer.train()
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """
        Initialize a WorldTrainer object with given arguments.

        Args:
            cfg (dict): Configuration for the trainer.
            overrides (dict, optional): Configuration overrides.
            _callbacks (list, optional): List of callback functions.
        """
        if overrides is None:
            overrides = {}
        super().__init__(cfg, overrides, _callbacks)

        # Import and assign clip; install the Ultralytics CLIP fork on first use if missing
        try:
            import clip
        except ImportError:
            checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
            import clip
        self.clip = clip

    def get_model(self, cfg=None, weights=None, verbose=True):
        """
        Return WorldModel initialized with specified config and weights.

        Args:
            cfg (Dict | str, optional): Model configuration.
            weights (str, optional): Path to pretrained weights.
            verbose (bool): Whether to display model info.

        Returns:
            (WorldModel): Initialized WorldModel.
        """
        # NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
        # NOTE: Following the official config, nc hard-coded to 80 for now.
        model = WorldModel(
            cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
            ch=self.data["channels"],
            nc=min(self.data["nc"], 80),
            verbose=verbose and RANK == -1,
        )
        if weights:
            model.load(weights)
        # Text encoder setup is deferred to the pretrain-routine-end callback (rank 0 only)
        self.add_callback("on_pretrain_routine_end", on_pretrain_routine_end)

        return model

    def build_dataset(self, img_path, mode="train", batch=None):
        """
        Build YOLO Dataset for training or validation.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`.

        Returns:
            (Dataset): YOLO dataset configured for training or validation.
        """
        # Grid size: largest model stride, floored at 32
        gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
        return build_yolo_dataset(
            self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs, multi_modal=mode == "train"
        )

    def preprocess_batch(self, batch):
        """Preprocess a batch of images and text for YOLOWorld training."""
        batch = super().preprocess_batch(batch)

        # Add text features: tokenize flattened per-image texts, encode, then L2-normalize embeddings
        texts = list(itertools.chain(*batch["texts"]))
        text_token = self.clip.tokenize(texts).to(batch["img"].device)
        txt_feats = self.text_model.encode_text(text_token).to(dtype=batch["img"].dtype)  # torch.float32
        txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
        # Reshape back to (num_images, texts_per_image, embed_dim)
        batch["txt_feats"] = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])
        return batch
|