bplusplus-1.1.0-py3-none-any.whl → bplusplus-1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bplusplus might be problematic.
- bplusplus/__init__.py +4 -2
- bplusplus/collect.py +69 -5
- bplusplus/hierarchical/test.py +670 -0
- bplusplus/hierarchical/train.py +676 -0
- bplusplus/prepare.py +228 -64
- bplusplus/resnet/test.py +473 -0
- bplusplus/resnet/train.py +329 -0
- bplusplus-1.2.0.dist-info/METADATA +249 -0
- bplusplus-1.2.0.dist-info/RECORD +12 -0
- bplusplus/yolov5detect/__init__.py +0 -1
- bplusplus/yolov5detect/detect.py +0 -444
- bplusplus/yolov5detect/export.py +0 -1530
- bplusplus/yolov5detect/insect.yaml +0 -8
- bplusplus/yolov5detect/models/__init__.py +0 -0
- bplusplus/yolov5detect/models/common.py +0 -1109
- bplusplus/yolov5detect/models/experimental.py +0 -130
- bplusplus/yolov5detect/models/hub/anchors.yaml +0 -56
- bplusplus/yolov5detect/models/hub/yolov3-spp.yaml +0 -52
- bplusplus/yolov5detect/models/hub/yolov3-tiny.yaml +0 -42
- bplusplus/yolov5detect/models/hub/yolov3.yaml +0 -52
- bplusplus/yolov5detect/models/hub/yolov5-bifpn.yaml +0 -49
- bplusplus/yolov5detect/models/hub/yolov5-fpn.yaml +0 -43
- bplusplus/yolov5detect/models/hub/yolov5-p2.yaml +0 -55
- bplusplus/yolov5detect/models/hub/yolov5-p34.yaml +0 -42
- bplusplus/yolov5detect/models/hub/yolov5-p6.yaml +0 -57
- bplusplus/yolov5detect/models/hub/yolov5-p7.yaml +0 -68
- bplusplus/yolov5detect/models/hub/yolov5-panet.yaml +0 -49
- bplusplus/yolov5detect/models/hub/yolov5l6.yaml +0 -61
- bplusplus/yolov5detect/models/hub/yolov5m6.yaml +0 -61
- bplusplus/yolov5detect/models/hub/yolov5n6.yaml +0 -61
- bplusplus/yolov5detect/models/hub/yolov5s-LeakyReLU.yaml +0 -50
- bplusplus/yolov5detect/models/hub/yolov5s-ghost.yaml +0 -49
- bplusplus/yolov5detect/models/hub/yolov5s-transformer.yaml +0 -49
- bplusplus/yolov5detect/models/hub/yolov5s6.yaml +0 -61
- bplusplus/yolov5detect/models/hub/yolov5x6.yaml +0 -61
- bplusplus/yolov5detect/models/segment/yolov5l-seg.yaml +0 -49
- bplusplus/yolov5detect/models/segment/yolov5m-seg.yaml +0 -49
- bplusplus/yolov5detect/models/segment/yolov5n-seg.yaml +0 -49
- bplusplus/yolov5detect/models/segment/yolov5s-seg.yaml +0 -49
- bplusplus/yolov5detect/models/segment/yolov5x-seg.yaml +0 -49
- bplusplus/yolov5detect/models/tf.py +0 -797
- bplusplus/yolov5detect/models/yolo.py +0 -495
- bplusplus/yolov5detect/models/yolov5l.yaml +0 -49
- bplusplus/yolov5detect/models/yolov5m.yaml +0 -49
- bplusplus/yolov5detect/models/yolov5n.yaml +0 -49
- bplusplus/yolov5detect/models/yolov5s.yaml +0 -49
- bplusplus/yolov5detect/models/yolov5x.yaml +0 -49
- bplusplus/yolov5detect/utils/__init__.py +0 -97
- bplusplus/yolov5detect/utils/activations.py +0 -134
- bplusplus/yolov5detect/utils/augmentations.py +0 -448
- bplusplus/yolov5detect/utils/autoanchor.py +0 -175
- bplusplus/yolov5detect/utils/autobatch.py +0 -70
- bplusplus/yolov5detect/utils/aws/__init__.py +0 -0
- bplusplus/yolov5detect/utils/aws/mime.sh +0 -26
- bplusplus/yolov5detect/utils/aws/resume.py +0 -41
- bplusplus/yolov5detect/utils/aws/userdata.sh +0 -27
- bplusplus/yolov5detect/utils/callbacks.py +0 -72
- bplusplus/yolov5detect/utils/dataloaders.py +0 -1385
- bplusplus/yolov5detect/utils/docker/Dockerfile +0 -73
- bplusplus/yolov5detect/utils/docker/Dockerfile-arm64 +0 -40
- bplusplus/yolov5detect/utils/docker/Dockerfile-cpu +0 -42
- bplusplus/yolov5detect/utils/downloads.py +0 -136
- bplusplus/yolov5detect/utils/flask_rest_api/README.md +0 -70
- bplusplus/yolov5detect/utils/flask_rest_api/example_request.py +0 -17
- bplusplus/yolov5detect/utils/flask_rest_api/restapi.py +0 -49
- bplusplus/yolov5detect/utils/general.py +0 -1294
- bplusplus/yolov5detect/utils/google_app_engine/Dockerfile +0 -25
- bplusplus/yolov5detect/utils/google_app_engine/additional_requirements.txt +0 -6
- bplusplus/yolov5detect/utils/google_app_engine/app.yaml +0 -16
- bplusplus/yolov5detect/utils/loggers/__init__.py +0 -476
- bplusplus/yolov5detect/utils/loggers/clearml/README.md +0 -222
- bplusplus/yolov5detect/utils/loggers/clearml/__init__.py +0 -0
- bplusplus/yolov5detect/utils/loggers/clearml/clearml_utils.py +0 -230
- bplusplus/yolov5detect/utils/loggers/clearml/hpo.py +0 -90
- bplusplus/yolov5detect/utils/loggers/comet/README.md +0 -250
- bplusplus/yolov5detect/utils/loggers/comet/__init__.py +0 -551
- bplusplus/yolov5detect/utils/loggers/comet/comet_utils.py +0 -151
- bplusplus/yolov5detect/utils/loggers/comet/hpo.py +0 -126
- bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json +0 -135
- bplusplus/yolov5detect/utils/loggers/wandb/__init__.py +0 -0
- bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py +0 -210
- bplusplus/yolov5detect/utils/loss.py +0 -259
- bplusplus/yolov5detect/utils/metrics.py +0 -381
- bplusplus/yolov5detect/utils/plots.py +0 -517
- bplusplus/yolov5detect/utils/segment/__init__.py +0 -0
- bplusplus/yolov5detect/utils/segment/augmentations.py +0 -100
- bplusplus/yolov5detect/utils/segment/dataloaders.py +0 -366
- bplusplus/yolov5detect/utils/segment/general.py +0 -160
- bplusplus/yolov5detect/utils/segment/loss.py +0 -198
- bplusplus/yolov5detect/utils/segment/metrics.py +0 -225
- bplusplus/yolov5detect/utils/segment/plots.py +0 -152
- bplusplus/yolov5detect/utils/torch_utils.py +0 -482
- bplusplus/yolov5detect/utils/triton.py +0 -90
- bplusplus-1.1.0.dist-info/METADATA +0 -179
- bplusplus-1.1.0.dist-info/RECORD +0 -92
- {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/LICENSE +0 -0
- {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/WHEEL +0 -0
bplusplus/yolov5detect/utils/plots.py
@@ -1,517 +0,0 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
-"""Plotting utils."""
-
-import contextlib
-import math
-import os
-from copy import copy
-from pathlib import Path
-
-import cv2
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import seaborn as sn
-import torch
-from PIL import Image, ImageDraw
-from scipy.ndimage.filters import gaussian_filter1d
-from ultralytics.utils.plotting import Annotator
-
-from utils import TryExcept, threaded
-from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
-from utils.metrics import fitness
-
-# Settings
-RANK = int(os.getenv("RANK", -1))
-matplotlib.rc("font", **{"size": 11})
-matplotlib.use("Agg") # for writing to files only
-
-
-class Colors:
-    """Provides an RGB color palette derived from Ultralytics color scheme for visualization tasks."""
-
-    def __init__(self):
-        """
-        Initializes the Colors class with a palette derived from Ultralytics color scheme, converting hex codes to RGB.
-
-        Colors derived from `hex = matplotlib.colors.TABLEAU_COLORS.values()`.
-        """
-        hexs = (
-            "FF3838",
-            "FF9D97",
-            "FF701F",
-            "FFB21D",
-            "CFD231",
-            "48F90A",
-            "92CC17",
-            "3DDB86",
-            "1A9334",
-            "00D4BB",
-            "2C99A8",
-            "00C2FF",
-            "344593",
-            "6473FF",
-            "0018EC",
-            "8438FF",
-            "520085",
-            "CB38FF",
-            "FF95C8",
-            "FF37C7",
-        )
-        self.palette = [self.hex2rgb(f"#{c}") for c in hexs]
-        self.n = len(self.palette)
-
-    def __call__(self, i, bgr=False):
-        """Returns color from palette by index `i`, in BGR format if `bgr=True`, else RGB; `i` is an integer index."""
-        c = self.palette[int(i) % self.n]
-        return (c[2], c[1], c[0]) if bgr else c
-
-    @staticmethod
-    def hex2rgb(h):
-        """Converts hexadecimal color `h` to an RGB tuple (PIL-compatible) with order (R, G, B)."""
-        return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))
-
-
-colors = Colors() # create instance for 'from utils.plots import colors'
-
-
-def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")):
-    """
-    x: Features to be visualized
-    module_type: Module type
-    stage: Module stage within model
-    n: Maximum number of feature maps to plot
-    save_dir: Directory to save results.
-    """
-    if ("Detect" not in module_type) and (
-        "Segment" not in module_type
-    ): # 'Detect' for Object Detect task,'Segment' for Segment task
-        batch, channels, height, width = x.shape # batch, channels, height, width
-        if height > 1 and width > 1:
-            f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
-
-            blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
-            n = min(n, channels) # number of plots
-            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
-            ax = ax.ravel()
-            plt.subplots_adjust(wspace=0.05, hspace=0.05)
-            for i in range(n):
-                ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
-                ax[i].axis("off")
-
-            LOGGER.info(f"Saving {f}... ({n}/{channels})")
-            plt.savefig(f, dpi=300, bbox_inches="tight")
-            plt.close()
-            np.save(str(f.with_suffix(".npy")), x[0].cpu().numpy()) # npy save
-
-
-def hist2d(x, y, n=100):
-    """
-    Generates a logarithmic 2D histogram, useful for visualizing label or evolution distributions.
-
-    Used in used in labels.png and evolve.png.
-    """
-    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
-    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
-    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
-    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
-    return np.log(hist[xidx, yidx])
-
-
-def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
-    """Applies a low-pass Butterworth filter to `data` with specified `cutoff`, `fs`, and `order`."""
-    from scipy.signal import butter, filtfilt
-
-    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
-    def butter_lowpass(cutoff, fs, order):
-        """Applies a low-pass Butterworth filter to a signal with specified cutoff frequency, sample rate, and filter
-        order.
-        """
-        nyq = 0.5 * fs
-        normal_cutoff = cutoff / nyq
-        return butter(order, normal_cutoff, btype="low", analog=False)
-
-    b, a = butter_lowpass(cutoff, fs, order=order)
-    return filtfilt(b, a, data) # forward-backward filter
-
-
-def output_to_target(output, max_det=300):
-    """Converts YOLOv5 model output to [batch_id, class_id, x, y, w, h, conf] format for plotting, limiting detections
-    to `max_det`.
-    """
-    targets = []
-    for i, o in enumerate(output):
-        box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)
-        j = torch.full((conf.shape[0], 1), i)
-        targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))
-    return torch.cat(targets, 0).numpy()
-
-
-@threaded
-def plot_images(images, targets, paths=None, fname="images.jpg", names=None):
-    """Plots an image grid with labels from YOLOv5 predictions or targets, saving to `fname`."""
-    if isinstance(images, torch.Tensor):
-        images = images.cpu().float().numpy()
-    if isinstance(targets, torch.Tensor):
-        targets = targets.cpu().numpy()
-
-    max_size = 1920 # max image size
-    max_subplots = 16 # max image subplots, i.e. 4x4
-    bs, _, h, w = images.shape # batch size, _, height, width
-    bs = min(bs, max_subplots) # limit plot images
-    ns = np.ceil(bs**0.5) # number of subplots (square)
-    if np.max(images[0]) <= 1:
-        images *= 255 # de-normalise (optional)
-
-    # Build Image
-    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
-    for i, im in enumerate(images):
-        if i == max_subplots: # if last batch has fewer images than we expect
-            break
-        x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
-        im = im.transpose(1, 2, 0)
-        mosaic[y : y + h, x : x + w, :] = im
-
-    # Resize (optional)
-    scale = max_size / ns / max(h, w)
-    if scale < 1:
-        h = math.ceil(scale * h)
-        w = math.ceil(scale * w)
-        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
-
-    # Annotate
-    fs = int((h + w) * ns * 0.01) # font size
-    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
-    for i in range(i + 1):
-        x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
-        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
-        if paths:
-            annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
-        if len(targets) > 0:
-            ti = targets[targets[:, 0] == i] # image targets
-            boxes = xywh2xyxy(ti[:, 2:6]).T
-            classes = ti[:, 1].astype("int")
-            labels = ti.shape[1] == 6 # labels if no conf column
-            conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
-
-            if boxes.shape[1]:
-                if boxes.max() <= 1.01: # if normalized with tolerance 0.01
-                    boxes[[0, 2]] *= w # scale to pixels
-                    boxes[[1, 3]] *= h
-                elif scale < 1: # absolute coords need scale if image scales
-                    boxes *= scale
-            boxes[[0, 2]] += x
-            boxes[[1, 3]] += y
-            for j, box in enumerate(boxes.T.tolist()):
-                cls = classes[j]
-                color = colors(cls)
-                cls = names[cls] if names else cls
-                if labels or conf[j] > 0.25: # 0.25 conf thresh
-                    label = f"{cls}" if labels else f"{cls} {conf[j]:.1f}"
-                    annotator.box_label(box, label, color=color)
-    annotator.im.save(fname) # save
-
-
-def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""):
-    """Plots learning rate schedule for given optimizer and scheduler, saving plot to `save_dir`."""
-    optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
-    y = []
-    for _ in range(epochs):
-        scheduler.step()
-        y.append(optimizer.param_groups[0]["lr"])
-    plt.plot(y, ".-", label="LR")
-    plt.xlabel("epoch")
-    plt.ylabel("LR")
-    plt.grid()
-    plt.xlim(0, epochs)
-    plt.ylim(0)
-    plt.savefig(Path(save_dir) / "LR.png", dpi=200)
-    plt.close()
-
-
-def plot_val_txt():
-    """
-    Plots 2D and 1D histograms of bounding box centers from 'val.txt' using matplotlib, saving as 'hist2d.png' and
-    'hist1d.png'.
-
-    Example: from utils.plots import *; plot_val()
-    """
-    x = np.loadtxt("val.txt", dtype=np.float32)
-    box = xyxy2xywh(x[:, :4])
-    cx, cy = box[:, 0], box[:, 1]
-
-    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
-    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
-    ax.set_aspect("equal")
-    plt.savefig("hist2d.png", dpi=300)
-
-    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
-    ax[0].hist(cx, bins=600)
-    ax[1].hist(cy, bins=600)
-    plt.savefig("hist1d.png", dpi=200)
-
-
-def plot_targets_txt():
-    """
-    Plots histograms of object detection targets from 'targets.txt', saving the figure as 'targets.jpg'.
-
-    Example: from utils.plots import *; plot_targets_txt()
-    """
-    x = np.loadtxt("targets.txt", dtype=np.float32).T
-    s = ["x targets", "y targets", "width targets", "height targets"]
-    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
-    ax = ax.ravel()
-    for i in range(4):
-        ax[i].hist(x[i], bins=100, label=f"{x[i].mean():.3g} +/- {x[i].std():.3g}")
-        ax[i].legend()
-        ax[i].set_title(s[i])
-    plt.savefig("targets.jpg", dpi=200)
-
-
-def plot_val_study(file="", dir="", x=None):
-    """
-    Plots validation study results from 'study*.txt' files in a directory or a specific file, comparing model
-    performance and speed.
-
-    Example: from utils.plots import *; plot_val_study()
-    """
-    save_dir = Path(file).parent if file else Path(dir)
-    plot2 = False # plot additional results
-    if plot2:
-        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
-
-    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
-    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
-    for f in sorted(save_dir.glob("study*.txt")):
-        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
-        x = np.arange(y.shape[1]) if x is None else np.array(x)
-        if plot2:
-            s = ["P", "R", "mAP@.5", "mAP@.5:.95", "t_preprocess (ms/img)", "t_inference (ms/img)", "t_NMS (ms/img)"]
-            for i in range(7):
-                ax[i].plot(x, y[i], ".-", linewidth=2, markersize=8)
-                ax[i].set_title(s[i])
-
-        j = y[3].argmax() + 1
-        ax2.plot(
-            y[5, 1:j],
-            y[3, 1:j] * 1e2,
-            ".-",
-            linewidth=2,
-            markersize=8,
-            label=f.stem.replace("study_coco_", "").replace("yolo", "YOLO"),
-        )
-
-    ax2.plot(
-        1e3 / np.array([209, 140, 97, 58, 35, 18]),
-        [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
-        "k.-",
-        linewidth=2,
-        markersize=8,
-        alpha=0.25,
-        label="EfficientDet",
-    )
-
-    ax2.grid(alpha=0.2)
-    ax2.set_yticks(np.arange(20, 60, 5))
-    ax2.set_xlim(0, 57)
-    ax2.set_ylim(25, 55)
-    ax2.set_xlabel("GPU Speed (ms/img)")
-    ax2.set_ylabel("COCO AP val")
-    ax2.legend(loc="lower right")
-    f = save_dir / "study.png"
-    print(f"Saving {f}...")
-    plt.savefig(f, dpi=300)
-
-
-@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395
-def plot_labels(labels, names=(), save_dir=Path("")):
-    """Plots dataset labels, saving correlogram and label images, handles classes, and visualizes bounding boxes."""
-    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
-    c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
-    nc = int(c.max() + 1) # number of classes
-    x = pd.DataFrame(b.transpose(), columns=["x", "y", "width", "height"])
-
-    # seaborn correlogram
-    sn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
-    plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200)
-    plt.close()
-
-    # matplotlib labels
-    matplotlib.use("svg") # faster
-    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
-    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
-    with contextlib.suppress(Exception): # color histogram bars by class
-        [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195
-    ax[0].set_ylabel("instances")
-    if 0 < len(names) < 30:
-        ax[0].set_xticks(range(len(names)))
-        ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
-    else:
-        ax[0].set_xlabel("classes")
-    sn.histplot(x, x="x", y="y", ax=ax[2], bins=50, pmax=0.9)
-    sn.histplot(x, x="width", y="height", ax=ax[3], bins=50, pmax=0.9)
-
-    # rectangles
-    labels[:, 1:3] = 0.5 # center
-    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
-    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
-    for cls, *box in labels[:1000]:
-        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
-    ax[1].imshow(img)
-    ax[1].axis("off")
-
-    for a in [0, 1, 2, 3]:
-        for s in ["top", "right", "left", "bottom"]:
-            ax[a].spines[s].set_visible(False)
-
-    plt.savefig(save_dir / "labels.jpg", dpi=200)
-    matplotlib.use("Agg")
-    plt.close()
-
-
-def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path("images.jpg")):
-    """Displays a grid of images with optional labels and predictions, saving to a file."""
-    from utils.augmentations import denormalize
-
-    names = names or [f"class{i}" for i in range(1000)]
-    blocks = torch.chunk(
-        denormalize(im.clone()).cpu().float(), len(im), dim=0
-    ) # select batch index 0, block by channels
-    n = min(len(blocks), nmax) # number of plots
-    m = min(8, round(n**0.5)) # 8 x 8 default
-    fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols
-    ax = ax.ravel() if m > 1 else [ax]
-    # plt.subplots_adjust(wspace=0.05, hspace=0.05)
-    for i in range(n):
-        ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0))
-        ax[i].axis("off")
-        if labels is not None:
-            s = names[labels[i]] + (f"—{names[pred[i]]}" if pred is not None else "")
-            ax[i].set_title(s, fontsize=8, verticalalignment="top")
-    plt.savefig(f, dpi=300, bbox_inches="tight")
-    plt.close()
-    if verbose:
-        LOGGER.info(f"Saving {f}")
-        if labels is not None:
-            LOGGER.info("True: " + " ".join(f"{names[i]:3s}" for i in labels[:nmax]))
-        if pred is not None:
-            LOGGER.info("Predicted:" + " ".join(f"{names[i]:3s}" for i in pred[:nmax]))
-    return f
-
-
-def plot_evolve(evolve_csv="path/to/evolve.csv"):
-    """
-    Plots hyperparameter evolution results from a given CSV, saving the plot and displaying best results.
-
-    Example: from utils.plots import *; plot_evolve()
-    """
-    evolve_csv = Path(evolve_csv)
-    data = pd.read_csv(evolve_csv)
-    keys = [x.strip() for x in data.columns]
-    x = data.values
-    f = fitness(x)
-    j = np.argmax(f) # max fitness index
-    plt.figure(figsize=(10, 12), tight_layout=True)
-    matplotlib.rc("font", **{"size": 8})
-    print(f"Best results from row {j} of {evolve_csv}:")
-    for i, k in enumerate(keys[7:]):
-        v = x[:, 7 + i]
-        mu = v[j] # best single result
-        plt.subplot(6, 5, i + 1)
-        plt.scatter(v, f, c=hist2d(v, f, 20), cmap="viridis", alpha=0.8, edgecolors="none")
-        plt.plot(mu, f.max(), "k+", markersize=15)
-        plt.title(f"{k} = {mu:.3g}", fontdict={"size": 9}) # limit to 40 characters
-        if i % 5 != 0:
-            plt.yticks([])
-        print(f"{k:>15}: {mu:.3g}")
-    f = evolve_csv.with_suffix(".png") # filename
-    plt.savefig(f, dpi=200)
-    plt.close()
-    print(f"Saved {f}")
-
-
-def plot_results(file="path/to/results.csv", dir=""):
-    """
-    Plots training results from a 'results.csv' file; accepts file path and directory as arguments.
-
-    Example: from utils.plots import *; plot_results('path/to/results.csv')
-    """
-    save_dir = Path(file).parent if file else Path(dir)
-    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
-    ax = ax.ravel()
-    files = list(save_dir.glob("results*.csv"))
-    assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
-    for f in files:
-        try:
-            data = pd.read_csv(f)
-            s = [x.strip() for x in data.columns]
-            x = data.values[:, 0]
-            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
-                y = data.values[:, j].astype("float")
-                # y[y == 0] = np.nan # don't show zero values
-                ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8) # actual results
-                ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2) # smoothing line
-                ax[i].set_title(s[j], fontsize=12)
-                # if j in [8, 9, 10]: # share train and val loss y axes
-                # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
-        except Exception as e:
-            LOGGER.info(f"Warning: Plotting error for {f}: {e}")
-    ax[1].legend()
-    fig.savefig(save_dir / "results.png", dpi=200)
-    plt.close()
-
-
-def profile_idetection(start=0, stop=0, labels=(), save_dir=""):
-    """
-    Plots per-image iDetection logs, comparing metrics like storage and performance over time.
-
-    Example: from utils.plots import *; profile_idetection()
-    """
-    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
-    s = ["Images", "Free Storage (GB)", "RAM Usage (GB)", "Battery", "dt_raw (ms)", "dt_smooth (ms)", "real-world FPS"]
-    files = list(Path(save_dir).glob("frames*.txt"))
-    for fi, f in enumerate(files):
-        try:
-            results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
-            n = results.shape[1] # number of rows
-            x = np.arange(start, min(stop, n) if stop else n)
-            results = results[:, x]
-            t = results[0] - results[0].min() # set t0=0s
-            results[0] = x
-            for i, a in enumerate(ax):
-                if i < len(results):
-                    label = labels[fi] if len(labels) else f.stem.replace("frames_", "")
-                    a.plot(t, results[i], marker=".", label=label, linewidth=1, markersize=5)
-                    a.set_title(s[i])
-                    a.set_xlabel("time (s)")
-                    # if fi == len(files) - 1:
-                    # a.set_ylim(bottom=0)
-                    for side in ["top", "right"]:
-                        a.spines[side].set_visible(False)
-                else:
-                    a.remove()
-        except Exception as e:
-            print(f"Warning: Plotting error for {f}; {e}")
-    ax[1].legend()
-    plt.savefig(Path(save_dir) / "idetection_profile.png", dpi=200)
-
-
-def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True):
-    """Crops and saves an image from bounding box `xyxy`, applied with `gain` and `pad`, optionally squares and adjusts
-    for BGR.
-    """
-    xyxy = torch.tensor(xyxy).view(-1, 4)
-    b = xyxy2xywh(xyxy) # boxes
-    if square:
-        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
-    b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
-    xyxy = xywh2xyxy(b).long()
-    clip_boxes(xyxy, im.shape)
-    crop = im[int(xyxy[0, 1]) : int(xyxy[0, 3]), int(xyxy[0, 0]) : int(xyxy[0, 2]), :: (1 if BGR else -1)]
-    if save:
-        file.parent.mkdir(parents=True, exist_ok=True) # make directory
-        f = str(increment_path(file).with_suffix(".jpg"))
-        # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
-        Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB
-    return crop
File without changes
bplusplus/yolov5detect/utils/segment/augmentations.py
@@ -1,100 +0,0 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
-"""Image augmentation functions."""
-
-import math
-import random
-
-import cv2
-import numpy as np
-
-from ..augmentations import box_candidates
-from ..general import resample_segments, segment2box
-
-
-def mixup(im, labels, segments, im2, labels2, segments2):
-    """
-    Applies MixUp augmentation blending two images, labels, and segments with a random ratio.
-
-    See https://arxiv.org/pdf/1710.09412.pdf
-    """
-    r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
-    im = (im * r + im2 * (1 - r)).astype(np.uint8)
-    labels = np.concatenate((labels, labels2), 0)
-    segments = np.concatenate((segments, segments2), 0)
-    return im, labels, segments
-
-
-def random_perspective(
-    im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0)
-):
-    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
-    # targets = [cls, xyxy]
-    """Applies random perspective, rotation, scale, shear, and translation augmentations to an image and targets."""
-    height = im.shape[0] + border[0] * 2 # shape(h,w,c)
-    width = im.shape[1] + border[1] * 2
-
-    # Center
-    C = np.eye(3)
-    C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
-    C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
-
-    # Perspective
-    P = np.eye(3)
-    P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
-    P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
-
-    # Rotation and Scale
-    R = np.eye(3)
-    a = random.uniform(-degrees, degrees)
-    # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
-    s = random.uniform(1 - scale, 1 + scale)
-    # s = 2 ** random.uniform(-scale, scale)
-    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
-
-    # Shear
-    S = np.eye(3)
-    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
-    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
-
-    # Translation
-    T = np.eye(3)
-    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
-    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
-
-    # Combined rotation matrix
-    M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
-    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
-        if perspective:
-            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
-        else: # affine
-            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
-
-    # Visualize
-    # import matplotlib.pyplot as plt
-    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
-    # ax[0].imshow(im[:, :, ::-1]) # base
-    # ax[1].imshow(im2[:, :, ::-1]) # warped
-
-    # Transform label coordinates
-    n = len(targets)
-    new_segments = []
-    if n:
-        new = np.zeros((n, 4))
-        segments = resample_segments(segments) # upsample
-        for i, segment in enumerate(segments):
-            xy = np.ones((len(segment), 3))
-            xy[:, :2] = segment
-            xy = xy @ M.T # transform
-            xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
-
-            # clip
-            new[i] = segment2box(xy, width, height)
-            new_segments.append(xy)
-
-        # filter candidates
-        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
-        targets = targets[i]
-        targets[:, 1:5] = new[i]
-        new_segments = np.array(new_segments)[i]
-
-    return im, targets, new_segments