bplusplus 0.1.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bplusplus might be problematic; see the advisory linked from the registry page for more details.
- bplusplus/__init__.py +5 -3
- bplusplus/{collect_images.py → collect.py} +3 -3
- bplusplus/prepare.py +573 -0
- bplusplus/train_validate.py +8 -64
- bplusplus/yolov5detect/__init__.py +1 -0
- bplusplus/yolov5detect/detect.py +444 -0
- bplusplus/yolov5detect/export.py +1530 -0
- bplusplus/yolov5detect/insect.yaml +8 -0
- bplusplus/yolov5detect/models/__init__.py +0 -0
- bplusplus/yolov5detect/models/common.py +1109 -0
- bplusplus/yolov5detect/models/experimental.py +130 -0
- bplusplus/yolov5detect/models/hub/anchors.yaml +56 -0
- bplusplus/yolov5detect/models/hub/yolov3-spp.yaml +52 -0
- bplusplus/yolov5detect/models/hub/yolov3-tiny.yaml +42 -0
- bplusplus/yolov5detect/models/hub/yolov3.yaml +52 -0
- bplusplus/yolov5detect/models/hub/yolov5-bifpn.yaml +49 -0
- bplusplus/yolov5detect/models/hub/yolov5-fpn.yaml +43 -0
- bplusplus/yolov5detect/models/hub/yolov5-p2.yaml +55 -0
- bplusplus/yolov5detect/models/hub/yolov5-p34.yaml +42 -0
- bplusplus/yolov5detect/models/hub/yolov5-p6.yaml +57 -0
- bplusplus/yolov5detect/models/hub/yolov5-p7.yaml +68 -0
- bplusplus/yolov5detect/models/hub/yolov5-panet.yaml +49 -0
- bplusplus/yolov5detect/models/hub/yolov5l6.yaml +61 -0
- bplusplus/yolov5detect/models/hub/yolov5m6.yaml +61 -0
- bplusplus/yolov5detect/models/hub/yolov5n6.yaml +61 -0
- bplusplus/yolov5detect/models/hub/yolov5s-LeakyReLU.yaml +50 -0
- bplusplus/yolov5detect/models/hub/yolov5s-ghost.yaml +49 -0
- bplusplus/yolov5detect/models/hub/yolov5s-transformer.yaml +49 -0
- bplusplus/yolov5detect/models/hub/yolov5s6.yaml +61 -0
- bplusplus/yolov5detect/models/hub/yolov5x6.yaml +61 -0
- bplusplus/yolov5detect/models/segment/yolov5l-seg.yaml +49 -0
- bplusplus/yolov5detect/models/segment/yolov5m-seg.yaml +49 -0
- bplusplus/yolov5detect/models/segment/yolov5n-seg.yaml +49 -0
- bplusplus/yolov5detect/models/segment/yolov5s-seg.yaml +49 -0
- bplusplus/yolov5detect/models/segment/yolov5x-seg.yaml +49 -0
- bplusplus/yolov5detect/models/tf.py +797 -0
- bplusplus/yolov5detect/models/yolo.py +495 -0
- bplusplus/yolov5detect/models/yolov5l.yaml +49 -0
- bplusplus/yolov5detect/models/yolov5m.yaml +49 -0
- bplusplus/yolov5detect/models/yolov5n.yaml +49 -0
- bplusplus/yolov5detect/models/yolov5s.yaml +49 -0
- bplusplus/yolov5detect/models/yolov5x.yaml +49 -0
- bplusplus/yolov5detect/utils/__init__.py +97 -0
- bplusplus/yolov5detect/utils/activations.py +134 -0
- bplusplus/yolov5detect/utils/augmentations.py +448 -0
- bplusplus/yolov5detect/utils/autoanchor.py +175 -0
- bplusplus/yolov5detect/utils/autobatch.py +70 -0
- bplusplus/yolov5detect/utils/aws/__init__.py +0 -0
- bplusplus/yolov5detect/utils/aws/mime.sh +26 -0
- bplusplus/yolov5detect/utils/aws/resume.py +41 -0
- bplusplus/yolov5detect/utils/aws/userdata.sh +27 -0
- bplusplus/yolov5detect/utils/callbacks.py +72 -0
- bplusplus/yolov5detect/utils/dataloaders.py +1385 -0
- bplusplus/yolov5detect/utils/docker/Dockerfile +73 -0
- bplusplus/yolov5detect/utils/docker/Dockerfile-arm64 +40 -0
- bplusplus/yolov5detect/utils/docker/Dockerfile-cpu +42 -0
- bplusplus/yolov5detect/utils/downloads.py +136 -0
- bplusplus/yolov5detect/utils/flask_rest_api/README.md +70 -0
- bplusplus/yolov5detect/utils/flask_rest_api/example_request.py +17 -0
- bplusplus/yolov5detect/utils/flask_rest_api/restapi.py +49 -0
- bplusplus/yolov5detect/utils/general.py +1294 -0
- bplusplus/yolov5detect/utils/google_app_engine/Dockerfile +25 -0
- bplusplus/yolov5detect/utils/google_app_engine/additional_requirements.txt +6 -0
- bplusplus/yolov5detect/utils/google_app_engine/app.yaml +16 -0
- bplusplus/yolov5detect/utils/loggers/__init__.py +476 -0
- bplusplus/yolov5detect/utils/loggers/clearml/README.md +222 -0
- bplusplus/yolov5detect/utils/loggers/clearml/__init__.py +0 -0
- bplusplus/yolov5detect/utils/loggers/clearml/clearml_utils.py +230 -0
- bplusplus/yolov5detect/utils/loggers/clearml/hpo.py +90 -0
- bplusplus/yolov5detect/utils/loggers/comet/README.md +250 -0
- bplusplus/yolov5detect/utils/loggers/comet/__init__.py +551 -0
- bplusplus/yolov5detect/utils/loggers/comet/comet_utils.py +151 -0
- bplusplus/yolov5detect/utils/loggers/comet/hpo.py +126 -0
- bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json +135 -0
- bplusplus/yolov5detect/utils/loggers/wandb/__init__.py +0 -0
- bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py +210 -0
- bplusplus/yolov5detect/utils/loss.py +259 -0
- bplusplus/yolov5detect/utils/metrics.py +381 -0
- bplusplus/yolov5detect/utils/plots.py +517 -0
- bplusplus/yolov5detect/utils/segment/__init__.py +0 -0
- bplusplus/yolov5detect/utils/segment/augmentations.py +100 -0
- bplusplus/yolov5detect/utils/segment/dataloaders.py +366 -0
- bplusplus/yolov5detect/utils/segment/general.py +160 -0
- bplusplus/yolov5detect/utils/segment/loss.py +198 -0
- bplusplus/yolov5detect/utils/segment/metrics.py +225 -0
- bplusplus/yolov5detect/utils/segment/plots.py +152 -0
- bplusplus/yolov5detect/utils/torch_utils.py +482 -0
- bplusplus/yolov5detect/utils/triton.py +90 -0
- bplusplus-1.1.0.dist-info/METADATA +179 -0
- bplusplus-1.1.0.dist-info/RECORD +92 -0
- bplusplus/build_model.py +0 -38
- bplusplus-0.1.0.dist-info/METADATA +0 -91
- bplusplus-0.1.0.dist-info/RECORD +0 -8
- {bplusplus-0.1.0.dist-info → bplusplus-1.1.0.dist-info}/LICENSE +0 -0
- {bplusplus-0.1.0.dist-info → bplusplus-1.1.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
|
|
2
|
+
|
|
3
|
+
import torch
|
|
4
|
+
import torch.nn as nn
|
|
5
|
+
import torch.nn.functional as F
|
|
6
|
+
|
|
7
|
+
from ..general import xywh2xyxy
|
|
8
|
+
from ..loss import FocalLoss, smooth_BCE
|
|
9
|
+
from ..metrics import bbox_iou
|
|
10
|
+
from ..torch_utils import de_parallel
|
|
11
|
+
from .general import crop_mask
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ComputeLoss:
    """Computes the YOLOv5 model's loss components including classification, objectness, box, and mask losses."""

    def __init__(self, model, autobalance=False, overlap=False):
        """Initializes the compute loss function for YOLOv5 models with options for autobalancing and overlap
        handling.

        Args:
            model: YOLOv5 segmentation model; its last module must expose na/nc/nl/nm/anchors/stride
                and the model must carry a `hyp` hyperparameter dict.
            autobalance: if True, per-layer objectness loss weights are adapted from running loss values.
            overlap: if True, ground-truth masks are overlap-encoded (one map per image, instance ids as
                pixel values) rather than one binary mask per instance.
        """
        self.sort_obj_iou = False  # when True, targets are written to tobj in ascending-IoU order
        self.overlap = overlap
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria (BCE with class/object positive weights from hyperparameters)
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["obj_pw"]], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0))  # positive, negative BCE targets

        # Focal loss (wraps the BCE criteria only when a positive gamma is configured)
        g = h["fl_gamma"]  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        m = de_parallel(model).model[-1]  # Detect() module
        # Per-layer objectness balance factors; 3-layer models use the first entry, else P3-P7 defaults
        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        self.na = m.na  # number of anchors
        self.nc = m.nc  # number of classes
        self.nl = m.nl  # number of layers
        self.nm = m.nm  # number of masks
        self.anchors = m.anchors
        self.device = device

    def __call__(self, preds, targets, masks):  # predictions, targets, model
        """Evaluates YOLOv5 model's loss for given predictions, targets, and masks; returns total loss components.

        Args:
            preds: (p, proto) tuple — per-layer detection maps and mask prototypes.
            targets: label tensor of rows (image, class, x, y, w, h), normalized coordinates.
            masks: ground-truth masks; overlap-encoded per image when self.overlap, else per instance.

        Returns:
            (total loss scaled by batch size, detached tensor of [lbox, lseg, lobj, lcls]).
        """
        p, proto = preds
        bs, nm, mask_h, mask_w = proto.shape  # batch size, number of masks, mask height, mask width
        lcls = torch.zeros(1, device=self.device)
        lbox = torch.zeros(1, device=self.device)
        lobj = torch.zeros(1, device=self.device)
        lseg = torch.zeros(1, device=self.device)
        tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1)  # subset of predictions

                # Box regression (YOLOv5 decoding: xy in (-0.5, 1.5), wh up to 4x anchor)
                pxy = pxy.sigmoid() * 2 - 0.5
                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness (detached IoU becomes the soft target for the obj branch)
                iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    j = iou.argsort()
                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
                if self.gr < 1:
                    iou = (1.0 - self.gr) + self.gr * iou
                tobj[b, a, gj, gi] = iou  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(pcls, t)  # BCE

                # Mask regression
                if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
                    masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]
                marea = xywhn[i][:, 2:].prod(1)  # mask width, height normalized
                mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device))
                for bi in b.unique():
                    j = b == bi  # matching index
                    if self.overlap:
                        # Overlap encoding: pixel value k marks instance k, so compare against target ids
                        mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0)
                    else:
                        mask_gti = masks[tidxs[i]][j]
                    lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j])

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            # Normalize balance factors relative to the stride-16 layer
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp["box"]
        lobj *= self.hyp["obj"]
        lcls *= self.hyp["cls"]
        lseg *= self.hyp["box"] / bs

        loss = lbox + lobj + lcls + lseg
        return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach()

    def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
        """Calculates and normalizes single mask loss for YOLOv5 between predicted and ground truth masks.

        The per-pixel BCE is cropped to each instance's box and normalized by the
        (normalized) box area so small instances are not under-weighted.
        """
        pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:])  # (n,32) @ (32,80,80) -> (n,80,80)
        loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
        return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()

    def build_targets(self, p, targets):
        """Prepares YOLOv5 targets for loss computation; inputs targets (image, class, x, y, w, h), output target
        classes/boxes.

        Returns lists (one entry per detection layer) of classes, boxes, grid indices,
        matched anchors, per-image instance indices, and normalized xywh.
        """
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], []
        gain = torch.ones(8, device=self.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        if self.overlap:
            batch = p[0].shape[0]
            ti = []
            for i in range(batch):
                num = (targets[:, 0] == i).sum()  # find number of targets of each image
                # 1-based instance ids per image (0 is background in overlap-encoded masks)
                ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1)  # (na, num)
            ti = torch.cat(ti, 1)  # (na, nt)
        else:
            ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = (
            torch.tensor(
                [
                    [0, 0],
                    [1, 0],
                    [0, 1],
                    [-1, 0],
                    [0, -1],  # j,k,l,m
                    # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                ],
                device=self.device,
            ).float()
            * g
        )  # offsets

        for i in range(self.nl):
            anchors, shape = self.anchors[i], p[i].shape
            gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain  # shape(3,n,7)
            if nt:
                # Matches: keep targets whose wh is within anchor_t ratio of the anchor
                r = t[..., 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1 / r).max(2)[0] < self.hyp["anchor_t"]  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets: also assign each target to up to 2 neighboring grid cells
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1 < g) & (gxy > 1)).T
                l, m = ((gxi % 1 < g) & (gxi > 1)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            bc, gxy, gwh, at = t.chunk(4, 1)  # (image, class), grid xy, grid wh, anchors
            (a, tidx), (b, c) = at.long().T, bc.long().T  # anchors, image, class
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid indices

            # Append (grid indices are clamped in place to the layer's valid range)
            indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class
            tidxs.append(tidx)
            xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6])  # xywh normalized

        return tcls, tbox, indices, anch, tidxs, xywhn
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
|
|
2
|
+
"""Model validation metrics."""
|
|
3
|
+
|
|
4
|
+
import numpy as np
|
|
5
|
+
|
|
6
|
+
from ..metrics import ap_per_class
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def fitness(x):
    """Score each row of `x` (an [N, 8] metric matrix) as a weighted sum.

    Only the mAP@0.5 columns (weight 0.1) and the mAP@0.5:0.95 columns
    (weight 0.9) for boxes and masks contribute; the precision and recall
    columns carry zero weight.
    """
    weights = (0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9)
    return (x[:, :8] * weights).sum(axis=1)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def ap_per_class_box_and_mask(
    tp_m,
    tp_b,
    conf,
    pred_cls,
    target_cls,
    plot=False,
    save_dir=".",
    names=(),
):
    """Compute per-class AP statistics separately for boxes and masks.

    Args:
        tp_m: true-positive flags for masks.
        tp_b: true-positive flags for boxes.
        conf, pred_cls, target_cls, plot, save_dir, names: see `func: ap_per_class`.

    Returns:
        Dict with "boxes" and "masks" entries, each holding p/r/ap/f1/ap_class arrays.
    """

    def _summarize(tp, prefix):
        # Drop the first two values returned by ap_per_class; the remainder
        # unpacks as p, r, f1, ap, ap_class (same index mapping as before).
        p, r, f1, ap, ap_class = ap_per_class(
            tp, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix=prefix
        )[2:]
        return {"p": p, "r": r, "ap": ap, "f1": f1, "ap_class": ap_class}

    return {"boxes": _summarize(tp_b, "Box"), "masks": _summarize(tp_m, "Mask")}
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class Metric:
    """Accumulates per-class precision, recall, F1, and AP results and derives summary statistics."""

    def __init__(self) -> None:
        """Create an empty accumulator; populate it via `update()`."""
        self.p = []  # per-class precision, shape (nc, )
        self.r = []  # per-class recall, shape (nc, )
        self.f1 = []  # per-class F1 score, shape (nc, )
        self.all_ap = []  # per-class AP at 10 IoU thresholds, shape (nc, 10)
        self.ap_class_index = []  # class index per row, shape (nc, )

    @property
    def ap50(self):
        """AP@0.5 for every class ((nc, ) array), or [] before any update."""
        if not len(self.all_ap):
            return []
        return self.all_ap[:, 0]

    @property
    def ap(self):
        """AP@0.5:0.95 for every class ((nc, ) array), or [] before any update."""
        if not len(self.all_ap):
            return []
        return self.all_ap.mean(1)

    @property
    def mp(self):
        """Mean precision over all classes (0.0 before any update)."""
        if not len(self.p):
            return 0.0
        return self.p.mean()

    @property
    def mr(self):
        """Mean recall over all classes (0.0 before any update)."""
        if not len(self.r):
            return 0.0
        return self.r.mean()

    @property
    def map50(self):
        """Mean AP@0.5 over all classes (0.0 before any update)."""
        if not len(self.all_ap):
            return 0.0
        return self.all_ap[:, 0].mean()

    @property
    def map(self):
        """Mean AP@0.5:0.95 over all classes (0.0 before any update)."""
        if not len(self.all_ap):
            return 0.0
        return self.all_ap.mean()

    def mean_results(self):
        """Return the summary tuple (mp, mr, map50, map)."""
        return self.mp, self.mr, self.map50, self.map

    def class_result(self, i):
        """Return (p, r, ap50, ap) for the class at row index `i`."""
        return self.p[i], self.r[i], self.ap50[i], self.ap[i]

    def get_maps(self, nc):
        """Per-class mAP vector of length `nc`; classes with no results fall back to the overall mAP."""
        maps = np.full(nc, self.map)
        per_class_ap = self.ap
        for row, cls in enumerate(self.ap_class_index):
            maps[cls] = per_class_ap[row]
        return maps

    def update(self, results):
        """Store a `(p, r, ap, f1, ap_class)` tuple as produced by `ap_per_class`."""
        self.p, self.r, self.all_ap, self.f1, self.ap_class_index = results
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class Metrics:
    """Pairs a box `Metric` and a mask `Metric` behind one interface."""

    def __init__(self) -> None:
        """Create one Metric accumulator for boxes and one for masks."""
        self.metric_box = Metric()
        self.metric_mask = Metric()

    def update(self, results):
        """Feed a `{'boxes': {...}, 'masks': {...}}` dict (see `ap_per_class_box_and_mask`) into both accumulators."""
        for key, metric in (("boxes", self.metric_box), ("masks", self.metric_mask)):
            metric.update(list(results[key].values()))

    def mean_results(self):
        """Concatenated (mp, mr, map50, map) tuples: boxes first, then masks."""
        box, mask = self.metric_box, self.metric_mask
        return box.mean_results() + mask.mean_results()

    def class_result(self, i):
        """Concatenated per-class result tuples for class row `i`: boxes first, then masks."""
        box, mask = self.metric_box, self.metric_mask
        return box.class_result(i) + mask.class_result(i)

    def get_maps(self, nc):
        """Sum of the box and mask per-class mAP vectors of length `nc`."""
        return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)

    @property
    def ap_class_index(self):
        """Class indices from the last update (boxes and masks share the same indices)."""
        return self.metric_box.ap_class_index
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
# Per-epoch logging columns: 4 training losses, 8 detection/segmentation
# metrics ((B) = boxes, (M) = masks), 4 validation losses, and 3 learning rates.
KEYS = [
    "train/box_loss",
    "train/seg_loss",  # train loss
    "train/obj_loss",
    "train/cls_loss",
    "metrics/precision(B)",
    "metrics/recall(B)",
    "metrics/mAP_0.5(B)",
    "metrics/mAP_0.5:0.95(B)",  # metrics
    "metrics/precision(M)",
    "metrics/recall(M)",
    "metrics/mAP_0.5(M)",
    "metrics/mAP_0.5:0.95(M)",  # metrics
    "val/box_loss",
    "val/seg_loss",  # val loss
    "val/obj_loss",
    "val/cls_loss",
    "x/lr0",
    "x/lr1",
    "x/lr2",
]

# Columns recorded for the best epoch: the epoch number plus the eight
# box/mask metric columns, prefixed with "best/".
BEST_KEYS = [
    "best/epoch",
    "best/precision(B)",
    "best/recall(B)",
    "best/mAP_0.5(B)",
    "best/mAP_0.5:0.95(B)",
    "best/precision(M)",
    "best/recall(M)",
    "best/mAP_0.5(M)",
    "best/mAP_0.5:0.95(M)",
]
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
|
|
2
|
+
|
|
3
|
+
import contextlib
|
|
4
|
+
import math
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
import cv2
|
|
8
|
+
import matplotlib.pyplot as plt
|
|
9
|
+
import numpy as np
|
|
10
|
+
import pandas as pd
|
|
11
|
+
import torch
|
|
12
|
+
|
|
13
|
+
from .. import threaded
|
|
14
|
+
from ..general import xywh2xyxy
|
|
15
|
+
from ..plots import Annotator, colors
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@threaded
def plot_images_and_masks(images, targets, masks, paths=None, fname="images.jpg", names=None):
    """Plots a grid of images, their labels, and masks with optional resizing and annotations, saving to fname.

    Args:
        images: batch of images, (bs, c, h, w); torch tensors are converted to numpy.
        targets: label/prediction rows (image, class, x, y, w, h[, conf]); a 7th column is treated as confidence.
        masks: segmentation masks; values > 1 are interpreted as overlap-encoded instance ids.
        paths: optional per-image file paths drawn as captions.
        fname: output image filename.
        names: optional class-index -> name mapping for labels.
    """
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    if isinstance(masks, torch.Tensor):
        masks = masks.cpu().numpy().astype(int)

    max_size = 1920  # max image size
    max_subplots = 16  # max image subplots, i.e. 4x4
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs**0.5)  # number of subplots (square)
    if np.max(images[0]) <= 1:
        images *= 255  # de-normalise (optional)

    # Build Image: paste each image into its cell of an ns x ns white mosaic
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, im in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        im = im.transpose(1, 2, 0)  # CHW -> HWC
        mosaic[y : y + h, x : x + w, :] = im

    # Resize (optional)
    scale = max_size / ns / max(h, w)
    if scale < 1:
        h = math.ceil(scale * h)
        w = math.ceil(scale * w)
        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))

    # Annotate
    fs = int((h + w) * ns * 0.01)  # font size
    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
    # NOTE(review): reuses `i` from the mosaic loop above, so only cells that
    # actually received an image are annotated.
    for i in range(i + 1):
        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
        if paths:
            annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
        if len(targets) > 0:
            idx = targets[:, 0] == i
            ti = targets[idx]  # image targets

            boxes = xywh2xyxy(ti[:, 2:6]).T
            classes = ti[:, 1].astype("int")
            labels = ti.shape[1] == 6  # labels if no conf column
            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale < 1:  # absolute coords need scale if image scales
                    boxes *= scale
                boxes[[0, 2]] += x
                boxes[[1, 3]] += y
                for j, box in enumerate(boxes.T.tolist()):
                    cls = classes[j]
                    color = colors(cls)
                    cls = names[cls] if names else cls
                    if labels or conf[j] > 0.25:  # 0.25 conf thresh
                        label = f"{cls}" if labels else f"{cls} {conf[j]:.1f}"
                        annotator.box_label(box, label, color=color)

            # Plot masks
            if len(masks):
                if masks.max() > 1.0:  # mean that masks are overlap
                    # Expand the single overlap-encoded map into one binary mask per target
                    image_masks = masks[[i]]  # (1, 640, 640)
                    nl = len(ti)
                    index = np.arange(nl).reshape(nl, 1, 1) + 1
                    image_masks = np.repeat(image_masks, nl, axis=0)
                    image_masks = np.where(image_masks == index, 1.0, 0.0)
                else:
                    image_masks = masks[idx]

                im = np.asarray(annotator.im).copy()
                for j, box in enumerate(boxes.T.tolist()):
                    if labels or conf[j] > 0.25:  # 0.25 conf thresh
                        color = colors(classes[j])
                        mh, mw = image_masks[j].shape
                        if mh != h or mw != w:  # resize mask to the (possibly rescaled) cell size
                            mask = image_masks[j].astype(np.uint8)
                            mask = cv2.resize(mask, (w, h))
                            mask = mask.astype(bool)
                        else:
                            mask = image_masks[j].astype(bool)
                        # Blend mask color into the cell; suppress shape mismatches at mosaic edges
                        with contextlib.suppress(Exception):
                            im[y : y + h, x : x + w, :][mask] = (
                                im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6
                            )
                annotator.fromarray(im)
    annotator.im.save(fname)  # save
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):
    """
    Plots training results from CSV files, plotting best or last result highlights based on `best` parameter.

    Example: from utils.plots import *; plot_results('path/to/results.csv')
    """
    save_dir = Path(file).parent if file else Path(dir)
    fig, axes = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
    axes = axes.ravel()
    csv_files = list(save_dir.glob("results*.csv"))
    assert len(csv_files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
    # CSV column order -> subplot order (losses/metrics first, val losses last)
    column_order = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]
    for csv_file in csv_files:
        try:
            data = pd.read_csv(csv_file)
            values = data.values
            # Fitness-style score: 0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95 for boxes and masks
            index = np.argmax(
                0.9 * values[:, 8] + 0.1 * values[:, 7] + 0.9 * values[:, 12] + 0.1 * values[:, 11]
            )
            headers = [c.strip() for c in data.columns]
            epochs = values[:, 0]
            for plot_idx, col in enumerate(column_order):
                series = values[:, col]
                # y[y == 0] = np.nan  # don't show zero values
                axes[plot_idx].plot(epochs, series, marker=".", label=csv_file.stem, linewidth=2, markersize=2)
                if best:
                    # Highlight the best-fitness epoch
                    mark_x, mark_y, tag = index, series[index], f"best:{index}"
                else:
                    # Highlight the final epoch
                    mark_x, mark_y, tag = epochs[-1], series[-1], "last"
                axes[plot_idx].scatter(mark_x, mark_y, color="r", label=tag, marker="*", linewidth=3)
                axes[plot_idx].set_title(headers[col] + f"\n{round(mark_y, 5)}")
                # if j in [8, 9, 10]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print(f"Warning: Plotting error for {csv_file}: {e}")
    axes[1].legend()
    fig.savefig(save_dir / "results.png", dpi=200)
    plt.close()