ultralytics 8.0.64__py3-none-any.whl → 8.0.66__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ultralytics has been flagged as potentially problematic.
- ultralytics/__init__.py +1 -1
- ultralytics/datasets/coco-pose.yaml +38 -0
- ultralytics/datasets/coco8-pose.yaml +25 -0
- ultralytics/models/v8/yolov8-pose-p6.yaml +57 -0
- ultralytics/models/v8/yolov8-pose.yaml +47 -0
- ultralytics/nn/autobackend.py +7 -2
- ultralytics/nn/modules.py +33 -2
- ultralytics/nn/tasks.py +24 -7
- ultralytics/tracker/track.py +2 -3
- ultralytics/yolo/cfg/__init__.py +4 -4
- ultralytics/yolo/cfg/default.yaml +2 -0
- ultralytics/yolo/data/augment.py +24 -19
- ultralytics/yolo/data/build.py +4 -4
- ultralytics/yolo/data/dataset.py +9 -3
- ultralytics/yolo/data/utils.py +110 -34
- ultralytics/yolo/engine/exporter.py +9 -7
- ultralytics/yolo/engine/model.py +5 -4
- ultralytics/yolo/engine/predictor.py +1 -0
- ultralytics/yolo/engine/results.py +70 -56
- ultralytics/yolo/utils/benchmarks.py +4 -2
- ultralytics/yolo/utils/downloads.py +3 -3
- ultralytics/yolo/utils/instance.py +1 -1
- ultralytics/yolo/utils/loss.py +14 -0
- ultralytics/yolo/utils/metrics.py +111 -13
- ultralytics/yolo/utils/ops.py +30 -50
- ultralytics/yolo/utils/plotting.py +79 -4
- ultralytics/yolo/utils/torch_utils.py +11 -9
- ultralytics/yolo/v8/__init__.py +2 -2
- ultralytics/yolo/v8/detect/train.py +1 -1
- ultralytics/yolo/v8/detect/val.py +2 -2
- ultralytics/yolo/v8/pose/__init__.py +7 -0
- ultralytics/yolo/v8/pose/predict.py +103 -0
- ultralytics/yolo/v8/pose/train.py +170 -0
- ultralytics/yolo/v8/pose/val.py +213 -0
- ultralytics/yolo/v8/segment/val.py +3 -4
- {ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/METADATA +27 -2
- {ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/RECORD +41 -33
- {ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/LICENSE +0 -0
- {ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/WHEEL +0 -0
- {ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/top_level.txt +0 -0
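The headline addition in this release is a complete pose-estimation task: new `coco-pose`/`coco8-pose` dataset YAMLs, `yolov8-pose` model YAMLs, and a pose trainer, validator and predictor. As a quick orientation, here is a minimal sketch of driving the new task through the Python API, using the same default model and dataset names as the `train()`/`val()` helpers in the new `pose/train.py` and `pose/val.py` shown below:

```python
# Minimal sketch of the new pose task, assuming the defaults used by the
# train()/val() helpers added in ultralytics/yolo/v8/pose/ (shown below).
from ultralytics import YOLO

model = YOLO('yolov8n-pose.yaml')    # build a pose model from the new YAML
model.train(data='coco8-pose.yaml')  # train on the new 8-image pose dataset
model.val(data='coco8-pose.yaml')    # validate: reports Box and Pose mAP
```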
ultralytics/yolo/v8/pose/train.py (new file)
@@ -0,0 +1,170 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+
+from copy import copy
+
+import torch
+import torch.nn as nn
+
+from ultralytics.nn.tasks import PoseModel
+from ultralytics.yolo import v8
+from ultralytics.yolo.utils import DEFAULT_CFG
+from ultralytics.yolo.utils.loss import KeypointLoss
+from ultralytics.yolo.utils.metrics import OKS_SIGMA
+from ultralytics.yolo.utils.ops import xyxy2xywh
+from ultralytics.yolo.utils.plotting import plot_images, plot_results
+from ultralytics.yolo.utils.tal import make_anchors
+from ultralytics.yolo.utils.torch_utils import de_parallel
+from ultralytics.yolo.v8.detect.train import Loss
+
+
+# BaseTrainer python usage
+class PoseTrainer(v8.detect.DetectionTrainer):
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides=None):
+        if overrides is None:
+            overrides = {}
+        overrides['task'] = 'pose'
+        super().__init__(cfg, overrides)
+
+    def get_model(self, cfg=None, weights=None, verbose=True):
+        model = PoseModel(cfg, ch=3, nc=self.data['nc'], data_kpt_shape=self.data['kpt_shape'], verbose=verbose)
+        if weights:
+            model.load(weights)
+
+        return model
+
+    def set_model_attributes(self):
+        super().set_model_attributes()
+        self.model.kpt_shape = self.data['kpt_shape']
+
+    def get_validator(self):
+        self.loss_names = 'box_loss', 'pose_loss', 'kobj_loss', 'cls_loss', 'dfl_loss'
+        return v8.pose.PoseValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
+
+    def criterion(self, preds, batch):
+        if not hasattr(self, 'compute_loss'):
+            self.compute_loss = PoseLoss(de_parallel(self.model))
+        return self.compute_loss(preds, batch)
+
+    def plot_training_samples(self, batch, ni):
+        images = batch['img']
+        kpts = batch['keypoints']
+        cls = batch['cls'].squeeze(-1)
+        bboxes = batch['bboxes']
+        paths = batch['im_file']
+        batch_idx = batch['batch_idx']
+        plot_images(images,
+                    batch_idx,
+                    cls,
+                    bboxes,
+                    kpts=kpts,
+                    paths=paths,
+                    fname=self.save_dir / f'train_batch{ni}.jpg')
+
+    def plot_metrics(self):
+        plot_results(file=self.csv, pose=True)  # save results.png
+
+
+# Criterion class for computing training losses
+class PoseLoss(Loss):
+
+    def __init__(self, model):  # model must be de-paralleled
+        super().__init__(model)
+        self.kpt_shape = model.model[-1].kpt_shape
+        self.bce_pose = nn.BCEWithLogitsLoss()
+        is_pose = self.kpt_shape == [17, 3]
+        nkpt = self.kpt_shape[0]  # number of keypoints
+        sigmas = torch.from_numpy(OKS_SIGMA).to(self.device) if is_pose else torch.ones(nkpt, device=self.device) / nkpt
+        self.keypoint_loss = KeypointLoss(sigmas=sigmas)
+
+    def __call__(self, preds, batch):
+        loss = torch.zeros(5, device=self.device)  # box, cls, dfl, kpt_location, kpt_visibility
+        feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1]
+        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
+            (self.reg_max * 4, self.nc), 1)
+
+        # b, grids, ..
+        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
+        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
+        pred_kpts = pred_kpts.permute(0, 2, 1).contiguous()
+
+        dtype = pred_scores.dtype
+        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
+        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)
+
+        # targets
+        batch_size = pred_scores.shape[0]
+        batch_idx = batch['batch_idx'].view(-1, 1)
+        targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
+        targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
+        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
+        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+
+        # pboxes
+        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
+        pred_kpts = self.kpts_decode(anchor_points, pred_kpts.view(batch_size, -1, *self.kpt_shape))  # (b, h*w, 17, 3)
+
+        _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
+            pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
+            anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt)
+
+        target_scores_sum = max(target_scores.sum(), 1)
+
+        # cls loss
+        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
+        loss[3] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE
+
+        # bbox loss
+        if fg_mask.sum():
+            target_bboxes /= stride_tensor
+            loss[0], loss[4] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores,
+                                              target_scores_sum, fg_mask)
+            keypoints = batch['keypoints'].to(self.device).float().clone()
+            keypoints[..., 0] *= imgsz[1]
+            keypoints[..., 1] *= imgsz[0]
+            for i in range(batch_size):
+                if fg_mask[i].sum():
+                    idx = target_gt_idx[i][fg_mask[i]]
+                    gt_kpt = keypoints[batch_idx.view(-1) == i][idx]  # (n, 51)
+                    gt_kpt[..., 0] /= stride_tensor[fg_mask[i]]
+                    gt_kpt[..., 1] /= stride_tensor[fg_mask[i]]
+                    area = xyxy2xywh(target_bboxes[i][fg_mask[i]])[:, 2:].prod(1, keepdim=True)
+                    pred_kpt = pred_kpts[i][fg_mask[i]]
+                    kpt_mask = gt_kpt[..., 2] != 0
+                    loss[1] += self.keypoint_loss(pred_kpt, gt_kpt, kpt_mask, area)  # pose loss
+                    # kpt_score loss
+                    if pred_kpt.shape[-1] == 3:
+                        loss[2] += self.bce_pose(pred_kpt[..., 2], kpt_mask.float())  # keypoint obj loss
+
+        loss[0] *= self.hyp.box  # box gain
+        loss[1] *= self.hyp.pose / batch_size  # pose gain
+        loss[2] *= self.hyp.kobj / batch_size  # kobj gain
+        loss[3] *= self.hyp.cls  # cls gain
+        loss[4] *= self.hyp.dfl  # dfl gain
+
+        return loss.sum() * batch_size, loss.detach()  # loss(box, cls, dfl)
+
+    def kpts_decode(self, anchor_points, pred_kpts):
+        y = pred_kpts.clone()
+        y[..., :2] *= 2.0
+        y[..., 0] += anchor_points[:, [0]] - 0.5
+        y[..., 1] += anchor_points[:, [1]] - 0.5
+        return y
+
+
+def train(cfg=DEFAULT_CFG, use_python=False):
+    model = cfg.model or 'yolov8n-pose.yaml'
+    data = cfg.data or 'coco8-pose.yaml'
+    device = cfg.device if cfg.device is not None else ''
+
+    args = dict(model=model, data=data, device=device)
+    if use_python:
+        from ultralytics import YOLO
+        YOLO(model).train(**args)
+    else:
+        trainer = PoseTrainer(overrides=args)
+        trainer.train()
+
+
+if __name__ == '__main__':
+    train()
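A note on `kpts_decode()` above: raw keypoint offsets are predicted relative to each anchor point in feature-grid units, doubled, then re-centred on the anchor. A standalone re-statement of that arithmetic with one anchor and one keypoint (the values are made up for illustration, not taken from the diff):

```python
# Re-statement of the kpts_decode() arithmetic above; illustrative values only.
import torch

anchor_points = torch.tensor([[4.5, 2.5]])       # one anchor (x, y), grid units
pred_kpts = torch.tensor([[[0.25, -0.5, 0.9]]])  # raw (x, y, visibility)
y = pred_kpts.clone()
y[..., :2] *= 2.0                                # double the raw x/y offsets
y[..., 0] += anchor_points[:, [0]] - 0.5         # re-centre x on the anchor
y[..., 1] += anchor_points[:, [1]] - 0.5         # re-centre y on the anchor
print(y)  # tensor([[[4.5000, 1.0000, 0.9000]]]): x = 0.25*2 + 4.5 - 0.5
```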
ultralytics/yolo/v8/pose/val.py (new file)
@@ -0,0 +1,213 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+
+from pathlib import Path
+
+import numpy as np
+import torch
+
+from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, ops
+from ultralytics.yolo.utils.checks import check_requirements
+from ultralytics.yolo.utils.metrics import OKS_SIGMA, PoseMetrics, box_iou, kpt_iou
+from ultralytics.yolo.utils.plotting import output_to_target, plot_images
+from ultralytics.yolo.v8.detect import DetectionValidator
+
+
+class PoseValidator(DetectionValidator):
+
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None):
+        super().__init__(dataloader, save_dir, pbar, args)
+        self.args.task = 'pose'
+        self.metrics = PoseMetrics(save_dir=self.save_dir)
+
+    def preprocess(self, batch):
+        batch = super().preprocess(batch)
+        batch['keypoints'] = batch['keypoints'].to(self.device).float()
+        return batch
+
+    def get_desc(self):
+        return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Pose(P',
+                                         'R', 'mAP50', 'mAP50-95)')
+
+    def postprocess(self, preds):
+        preds = ops.non_max_suppression(preds,
+                                        self.args.conf,
+                                        self.args.iou,
+                                        labels=self.lb,
+                                        multi_label=True,
+                                        agnostic=self.args.single_cls,
+                                        max_det=self.args.max_det,
+                                        nc=self.nc)
+        return preds
+
+    def init_metrics(self, model):
+        super().init_metrics(model)
+        self.kpt_shape = self.data['kpt_shape']
+        is_pose = self.kpt_shape == [17, 3]
+        nkpt = self.kpt_shape[0]
+        self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt
+
+    def update_metrics(self, preds, batch):
+        # Metrics
+        for si, pred in enumerate(preds):
+            idx = batch['batch_idx'] == si
+            cls = batch['cls'][idx]
+            bbox = batch['bboxes'][idx]
+            kpts = batch['keypoints'][idx]
+            nl, npr = cls.shape[0], pred.shape[0]  # number of labels, predictions
+            nk = kpts.shape[1]  # number of keypoints
+            shape = batch['ori_shape'][si]
+            correct_kpts = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
+            correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
+            self.seen += 1
+
+            if npr == 0:
+                if nl:
+                    self.stats.append((correct_bboxes, correct_kpts, *torch.zeros(
+                        (2, 0), device=self.device), cls.squeeze(-1)))
+                    if self.args.plots:
+                        self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1))
+                continue
+
+            # Predictions
+            if self.args.single_cls:
+                pred[:, 5] = 0
+            predn = pred.clone()
+            ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape,
+                            ratio_pad=batch['ratio_pad'][si])  # native-space pred
+            pred_kpts = predn[:, 6:].view(npr, nk, -1)
+            ops.scale_coords(batch['img'][si].shape[1:], pred_kpts, shape, ratio_pad=batch['ratio_pad'][si])
+
+            # Evaluate
+            if nl:
+                height, width = batch['img'].shape[2:]
+                tbox = ops.xywh2xyxy(bbox) * torch.tensor(
+                    (width, height, width, height), device=self.device)  # target boxes
+                ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape,
+                                ratio_pad=batch['ratio_pad'][si])  # native-space labels
+                tkpts = kpts.clone()
+                tkpts[..., 0] *= width
+                tkpts[..., 1] *= height
+                tkpts = ops.scale_coords(batch['img'][si].shape[1:], tkpts, shape, ratio_pad=batch['ratio_pad'][si])
+                labelsn = torch.cat((cls, tbox), 1)  # native-space labels
+                correct_bboxes = self._process_batch(predn[:, :6], labelsn)
+                correct_kpts = self._process_batch(predn[:, :6], labelsn, pred_kpts, tkpts)
+                if self.args.plots:
+                    self.confusion_matrix.process_batch(predn, labelsn)
+
+            # Append correct_masks, correct_boxes, pconf, pcls, tcls
+            self.stats.append((correct_bboxes, correct_kpts, pred[:, 4], pred[:, 5], cls.squeeze(-1)))
+
+            # Save
+            if self.args.save_json:
+                self.pred_to_json(predn, batch['im_file'][si])
+            # if self.args.save_txt:
+            #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
+
+    def _process_batch(self, detections, labels, pred_kpts=None, gt_kpts=None):
+        """
+        Return correct prediction matrix
+        Arguments:
+            detections (array[N, 6]), x1, y1, x2, y2, conf, class
+            labels (array[M, 5]), class, x1, y1, x2, y2
+            pred_kpts (array[N, 51]), 51 = 17 * 3
+            gt_kpts (array[N, 51])
+        Returns:
+            correct (array[N, 10]), for 10 IoU levels
+        """
+        if pred_kpts is not None and gt_kpts is not None:
+            # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
+            area = ops.xyxy2xywh(labels[:, 1:])[:, 2:].prod(1) * 0.53
+            iou = kpt_iou(gt_kpts, pred_kpts, sigma=self.sigma, area=area)
+        else:  # boxes
+            iou = box_iou(labels[:, 1:], detections[:, :4])
+
+        correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool)
+        correct_class = labels[:, 0:1] == detections[:, 5]
+        for i in range(len(self.iouv)):
+            x = torch.where((iou >= self.iouv[i]) & correct_class)  # IoU > threshold and classes match
+            if x[0].shape[0]:
+                matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]),
+                                    1).cpu().numpy()  # [label, detect, iou]
+                if x[0].shape[0] > 1:
+                    matches = matches[matches[:, 2].argsort()[::-1]]
+                    matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                    # matches = matches[matches[:, 2].argsort()[::-1]]
+                    matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+                correct[matches[:, 1].astype(int), i] = True
+        return torch.tensor(correct, dtype=torch.bool, device=detections.device)
+
+    def plot_val_samples(self, batch, ni):
+        plot_images(batch['img'],
+                    batch['batch_idx'],
+                    batch['cls'].squeeze(-1),
+                    batch['bboxes'],
+                    kpts=batch['keypoints'],
+                    paths=batch['im_file'],
+                    fname=self.save_dir / f'val_batch{ni}_labels.jpg',
+                    names=self.names)
+
+    def plot_predictions(self, batch, preds, ni):
+        pred_kpts = torch.cat([p[:, 6:].view(-1, *self.kpt_shape)[:15] for p in preds], 0)
+        plot_images(batch['img'],
+                    *output_to_target(preds, max_det=15),
+                    kpts=pred_kpts,
+                    paths=batch['im_file'],
+                    fname=self.save_dir / f'val_batch{ni}_pred.jpg',
+                    names=self.names)  # pred
+
+    def pred_to_json(self, predn, filename):
+        stem = Path(filename).stem
+        image_id = int(stem) if stem.isnumeric() else stem
+        box = ops.xyxy2xywh(predn[:, :4])  # xywh
+        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+        for p, b in zip(predn.tolist(), box.tolist()):
+            self.jdict.append({
+                'image_id': image_id,
+                'category_id': self.class_map[int(p[5])],
+                'bbox': [round(x, 3) for x in b],
+                'keypoints': p[6:],
+                'score': round(p[4], 5)})
+
+    def eval_json(self, stats):
+        if self.args.save_json and self.is_coco and len(self.jdict):
+            anno_json = self.data['path'] / 'annotations/person_keypoints_val2017.json'  # annotations
+            pred_json = self.save_dir / 'predictions.json'  # predictions
+            LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+                check_requirements('pycocotools>=2.0.6')
+                from pycocotools.coco import COCO  # noqa
+                from pycocotools.cocoeval import COCOeval  # noqa
+
+                for x in anno_json, pred_json:
+                    assert x.is_file(), f'{x} file not found'
+                anno = COCO(str(anno_json))  # init annotations api
+                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
+                for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'keypoints')]):
+                    if self.is_coco:
+                        eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
+                    eval.evaluate()
+                    eval.accumulate()
+                    eval.summarize()
+                    idx = i * 4 + 2
+                    stats[self.metrics.keys[idx + 1]], stats[
+                        self.metrics.keys[idx]] = eval.stats[:2]  # update mAP50-95 and mAP50
+            except Exception as e:
+                LOGGER.warning(f'pycocotools unable to run: {e}')
+        return stats
+
+
+def val(cfg=DEFAULT_CFG, use_python=False):
+    model = cfg.model or 'yolov8n-pose.pt'
+    data = cfg.data or 'coco128-pose.yaml'
+
+    args = dict(model=model, data=data)
+    if use_python:
+        from ultralytics import YOLO
+        YOLO(model).val(**args)
+    else:
+        validator = PoseValidator(args=args)
+        validator(model=args['model'])
+
+
+if __name__ == '__main__':
+    val()
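`_process_batch()` above scores keypoint predictions with `kpt_iou()` (Object Keypoint Similarity) instead of box IoU, using the per-keypoint `OKS_SIGMA` values and a box area scaled by the `0.53` factor borrowed from xtcocoapi. A hedged sketch of the standard OKS formula that `kpt_iou` is expected to implement for a single GT/prediction pair (the actual implementation lives in `ultralytics/yolo/utils/metrics.py`, also touched in this release, and is vectorised over all pairs):

```python
# Sketch of Object Keypoint Similarity for one GT/pred pair; an assumption
# about kpt_iou()'s internals, not code taken from this diff.
import numpy as np

def oks(gt_kpts, pred_kpts, sigma, area, eps=1e-7):
    """gt_kpts/pred_kpts: (nkpt, 3) arrays of (x, y, visibility)."""
    d2 = (gt_kpts[:, 0] - pred_kpts[:, 0]) ** 2 + (gt_kpts[:, 1] - pred_kpts[:, 1]) ** 2
    kpt_mask = gt_kpts[:, 2] > 0                  # score labelled keypoints only
    e = d2 / (2 * sigma) ** 2 / (area + eps) / 2  # error scaled by sigma and object area
    return (np.exp(-e) * kpt_mask).sum() / (kpt_mask.sum() + eps)
```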
ultralytics/yolo/v8/segment/val.py
@@ -65,7 +65,7 @@ class SegmentationValidator(DetectionValidator):
 
             if npr == 0:
                 if nl:
-                    self.stats.append((
+                    self.stats.append((correct_bboxes, correct_masks, *torch.zeros(
                         (2, 0), device=self.device), cls.squeeze(-1)))
                     if self.args.plots:
                         self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1))
@@ -103,7 +103,7 @@ class SegmentationValidator(DetectionValidator):
                 self.confusion_matrix.process_batch(predn, labelsn)
 
             # Append correct_masks, correct_boxes, pconf, pcls, tcls
-            self.stats.append((
+            self.stats.append((correct_bboxes, correct_masks, pred[:, 4], pred[:, 5], cls.squeeze(-1)))
 
             pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
             if self.args.plots and self.batch_i < 3:
@@ -220,8 +220,7 @@ class SegmentationValidator(DetectionValidator):
             pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
             for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm')]):
                 if self.is_coco:
-                    eval.params.imgIds = [int(Path(x).stem)
-                                          for x in self.dataloader.dataset.im_files]  # images to eval
+                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                 eval.evaluate()
                 eval.accumulate()
                 eval.summarize()
{ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.0.64
+Version: 8.0.66
 Summary: Ultralytics YOLOv8
 Home-page: https://github.com/ultralytics/ultralytics
 Author: Ultralytics
@@ -176,7 +176,10 @@ YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more example
 
 ## <div align="center">Models</div>
 
-All YOLOv8 pretrained models are available here. Detect, Segment and Pose models are pretrained on
+All YOLOv8 pretrained models are available here. Detect, Segment and Pose models are pretrained on
+the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify
+models are pretrained on
+the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
 
 [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest
 Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
@@ -241,6 +244,28 @@ See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usag
 
 </details>
 
+<details><summary>Pose</summary>
+
+See [Pose Docs](https://docs.ultralytics.com/tasks/) for usage examples with these models.
+
+| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>pose<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ---------------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | - | 49.7 | - | - | 3.3 | 9.2 |
+| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | - | 59.2 | - | - | 11.6 | 30.2 |
+| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | - | 63.6 | - | - | 26.4 | 81.0 |
+| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | - | 67.0 | - | - | 44.4 | 168.6 |
+| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | - | 68.9 | - | - | 69.4 | 263.2 |
+| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | - | 71.5 | - | - | 99.1 | 1066.4 |
+
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org)
+  dataset.
+  <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/)
+  instance.
+  <br>Reproduce by `yolo val pose data=coco8-pose.yaml batch=1 device=0|cpu`
+
+</details>
+
 ## <div align="center">Integrations</div>
 
 <br>
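The table rows above map to the new pretrained pose checkpoints. A hedged end-to-end sketch of running one of them (this assumes the `keypoints` field exposed by the updated `results.py` in this release; the image URL is the one used in Ultralytics documentation):

```python
# Hedged usage sketch for a pretrained pose checkpoint from the table above.
from ultralytics import YOLO

model = YOLO('yolov8n-pose.pt')  # auto-downloads from the assets release on first use
results = model('https://ultralytics.com/images/bus.jpg')  # pose inference
print(results[0].keypoints)  # per-person keypoints; assumes the keypoints field
                             # added by this release's results.py changes
```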
{ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/RECORD
@@ -1,4 +1,4 @@
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=oOVc92vx-QWmdYNEm-dfOr4GkkPh9t830W4a5oCLZ5Q,295
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/datasets/Argoverse.yaml,sha256=EOZ3aUOi9mgCVot4TMiUBXVuovIdTAZ1FSOy7G224vA,2750
@@ -8,9 +8,11 @@ ultralytics/datasets/Objects365.yaml,sha256=B4vt7iF_Yr9Qcd8-5udfAf7BYVP75SQBhrSd
 ultralytics/datasets/SKU-110K.yaml,sha256=Iwnz9GoTH1fcd0VmplvWGvW9MPeybG0UKaFEIwH_s68,2436
 ultralytics/datasets/VOC.yaml,sha256=cM9VTm8kd_WmZwZjSZ5zliHTwfXvbe-nPgJfSfD_nFs,3511
 ultralytics/datasets/VisDrone.yaml,sha256=EKuXI2oVQ8sC5wNVzgD_6ZEVv6EWROt7SiNVY7_wk88,3013
+ultralytics/datasets/coco-pose.yaml,sha256=bnFaDJF9ZMCg8t4qjhowfuWGJWFcLAqvo5eAltVTv3M,1546
 ultralytics/datasets/coco.yaml,sha256=VVh6tOkTiXxDm_IYxLPTlecxjCJCvk06DMTfy-VVM30,2525
 ultralytics/datasets/coco128-seg.yaml,sha256=v2ZZhMyxm1dGSXu1p4vJsPywaYfGa-78q3G6NIg1eY4,1861
 ultralytics/datasets/coco128.yaml,sha256=Sb1F9i2lQLC0IsKsAnrOUdpU6TkXWD7eftcre0sfSic,1845
+ultralytics/datasets/coco8-pose.yaml,sha256=esaL4WvRK_NgcRSB9G3WMIDdUiOc0buuGyL6-CJwKzw,894
 ultralytics/datasets/coco8-seg.yaml,sha256=tsdE6xhhQ2EgjXvasn7YWJoAsvo7BpRUvHDs0JLwg2c,1796
 ultralytics/datasets/coco8.yaml,sha256=YIlvnkEGEk9KOX9PtwQkr3gLvLY4CzSCMBXouP66WKQ,1776
 ultralytics/datasets/xView.yaml,sha256=5JIVo4LOvkm8XJZ8Vxp-2SZMNG-K6ED9jfjG305-_cY,5177
@@ -26,15 +28,17 @@ ultralytics/models/v5/yolov5.yaml,sha256=jgUXGol5ynZROkXb9fCUwB5iIzHHrIDBLPVGkAq
 ultralytics/models/v8/yolov8-cls.yaml,sha256=QLTZE6ckxsgrhki-X0S8EqLK0inMBuAV6nn4VCVazmc,919
 ultralytics/models/v8/yolov8-p2.yaml,sha256=CkEyzoowGl4uryD1NJs_IhbszPB74k0jr_-PMgI5EwU,1750
 ultralytics/models/v8/yolov8-p6.yaml,sha256=KrNCp46jg8LVvKftL90BhYjgCSfJ8KP5JgALY0J4wWA,1855
+ultralytics/models/v8/yolov8-pose-p6.yaml,sha256=v_Biz744Hiu1ipkjR0L6FGmucgUThYN8wjDW4lr0Dfw,1952
+ultralytics/models/v8/yolov8-pose.yaml,sha256=DhCAzlGzlMvgolvIeUXzYVRcdyRZE9w-_Cmpxpga4J8,1579
 ultralytics/models/v8/yolov8-seg.yaml,sha256=LbwC_yaSMJrsHQYb2zrlCY0utQJN53L-Z6n-T8MwFRY,1489
 ultralytics/models/v8/yolov8.yaml,sha256=Mt5Sza9rvG0OVJ0XhuZDZXPrqGW6pTbesXXPR0JfGVs,1912
 ultralytics/nn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ultralytics/nn/autobackend.py,sha256=
+ultralytics/nn/autobackend.py,sha256=zR3gnXOiRJHlGJk1lps0EUIVW5pqmSjfQTeUc77A9lI,24083
 ultralytics/nn/autoshape.py,sha256=oB3unCdTIMlvsheWx_q5PQjGh79uPD2m04lvTB5T5Ik,11839
-ultralytics/nn/modules.py,sha256=
-ultralytics/nn/tasks.py,sha256=
+ultralytics/nn/modules.py,sha256=DbaF1MtcPO0qHAYQZ_Ha_cHImYY3rnnMFeR8IsEmgkY,20043
+ultralytics/nn/tasks.py,sha256=b48fDKZKosQn389Hnvhqo1h7irJvq8dG_EIYOMqdDFg,26611
 ultralytics/tracker/__init__.py,sha256=WAkNFWbZYTMiaK63GeBWYJ-HEcKwXOl2pTtmQTfOI5I,201
-ultralytics/tracker/track.py,sha256=
+ultralytics/tracker/track.py,sha256=_Z-8EYRCxi3RwNplTl3zJkiZDkpNXWAxQ_jP70PB2Ik,1509
 ultralytics/tracker/cfg/botsort.yaml,sha256=YPqsA_iuLjLC6HxjgWREvaEBFnSRIR1F9q5Cfrmm0Aw,889
 ultralytics/tracker/cfg/bytetrack.yaml,sha256=l7KWuUNux3GEoCOskwQDVxUvsG8xDKGGzTQ52tjf8sU,693
 ultralytics/tracker/trackers/__init__.py,sha256=nkYYehL4XUhXmSUt7BFAENsYk9CRwB7mAW4yHddxAd8,170
@@ -46,40 +50,40 @@ ultralytics/tracker/utils/gmc.py,sha256=L5r7WAt7_UD6r4AU0XnledolqdeLqHnd4ejMY-7z
 ultralytics/tracker/utils/kalman_filter.py,sha256=kthztWM9R7Gsl9f9FduqY-GuCQIgzVBFpjobCDJjy40,18219
 ultralytics/tracker/utils/matching.py,sha256=o6d-7qXYLBxaV766v3GwUXa9SFO3qvmzdnfq1eBMw2U,7511
 ultralytics/yolo/__init__.py,sha256=GZ9sffmGZl0xK2yjXZFyZroVpdcwmWrvGu6QwnEXY4w,93
-ultralytics/yolo/cfg/__init__.py,sha256=
-ultralytics/yolo/cfg/default.yaml,sha256=
+ultralytics/yolo/cfg/__init__.py,sha256=aA0OtWJHeOzYsTtcPzB4nFH5fsk7CJa-yJ_3HnUo3yI,17596
+ultralytics/yolo/cfg/default.yaml,sha256=fjKgLCJN621kI2XBovo7SGs_zEAfXo1wfh6U7XZa97c,6302
 ultralytics/yolo/data/__init__.py,sha256=ui2V_756otLaIU1E_AtUflt9KsJta4ylLHt8rbKX-Dw,483
-ultralytics/yolo/data/augment.py,sha256=
+ultralytics/yolo/data/augment.py,sha256=ikrc-13omwlhqP3-UWnJlfU3RGLOyfMqJZJrvWDKvN4,31119
 ultralytics/yolo/data/base.py,sha256=HadwteNfW8XxAg4I80IUYxhcVG3e3JcqHpc_2dp-vWw,8819
-ultralytics/yolo/data/build.py,sha256=
-ultralytics/yolo/data/dataset.py,sha256=
+ultralytics/yolo/data/build.py,sha256=kRJaWB6uAtGSePpcob5bcWvPMfIjyL-TYGJcp9Kg9Yc,7872
+ultralytics/yolo/data/dataset.py,sha256=CDZifLTdesDPgaAdIIE1ouiSEeWs2EEtxtUxcp9Au2M,12760
 ultralytics/yolo/data/dataset_wrappers.py,sha256=IyP-MGHGO75LjX9mqL-17rLpT-m9_rHUZfop6W2y_mw,1330
-ultralytics/yolo/data/utils.py,sha256=
+ultralytics/yolo/data/utils.py,sha256=7NRxG6FVyIOmlskfW270t3tzx334-wal2Vnp7oD-gaM,22552
 ultralytics/yolo/data/dataloaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ultralytics/yolo/data/dataloaders/stream_loaders.py,sha256=rkRe98hh_jKIv-bt0tDRqjwbt7dVlKyy3es0FFeIdog,15194
 ultralytics/yolo/data/dataloaders/v5augmentations.py,sha256=1cHLok8gmlgNMsGVIvjLJkkmIwfs2MU4ZJ-PwJEiYyE,17226
 ultralytics/yolo/data/dataloaders/v5loader.py,sha256=vThacCrsXCVtfMiUA1scmEpvjN9ASrdU7Lk_33XFbyI,49861
 ultralytics/yolo/engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ultralytics/yolo/engine/exporter.py,sha256=
-ultralytics/yolo/engine/model.py,sha256=
-ultralytics/yolo/engine/predictor.py,sha256=
-ultralytics/yolo/engine/results.py,sha256=
+ultralytics/yolo/engine/exporter.py,sha256=u9sVuEdJybqXGIQkp1NOmrUcgN1vw932Y2LKlKC4EUc,41715
+ultralytics/yolo/engine/model.py,sha256=k6JX6vIaI4uvdvQanZvu40VMqIYUu9oXdXFJo2k_Oys,17109
+ultralytics/yolo/engine/predictor.py,sha256=v8fHYp0vXecRxE_8zFHl5aJ3QZKj5Pmpo2hswR3Kfx4,13536
+ultralytics/yolo/engine/results.py,sha256=nu44NnAK_tksSImnlbpRCkeUuMmgrHzpXxt4QhNp5Jw,14114
 ultralytics/yolo/engine/trainer.py,sha256=U3eYLYJGfMPOb019OXJTk_T26um4QNv6XQFaHsrYmg0,29877
 ultralytics/yolo/engine/validator.py,sha256=B9Vu6KSQ1jiWOgPPo37hfGgpq4PeGIYlHzvCIaR43q4,10235
 ultralytics/yolo/utils/__init__.py,sha256=8fNLkdZ0aWYxoLxfEYphZ4bkxEDafAgvCNxJPjl0L4I,23705
 ultralytics/yolo/utils/autobatch.py,sha256=I09al9DZbo6aNyf-GS1N1WFBOWC4xmHB2Yt81SvDd1o,3823
-ultralytics/yolo/utils/benchmarks.py,sha256=
+ultralytics/yolo/utils/benchmarks.py,sha256=_fo6TzZ8YLEJOQa7rG9hjjg2XIkKd-NnoiYNas0tB3k,5414
 ultralytics/yolo/utils/checks.py,sha256=tv1Mz4okutFNoRZ_YbUHur1Kf9-Q39hGjTYifRv_Skc,13841
 ultralytics/yolo/utils/dist.py,sha256=6on1z6Jo6VbHwTg5V3yprEwJwkFUdNLJxf1gqDGx_Ks,2412
-ultralytics/yolo/utils/downloads.py,sha256=
+ultralytics/yolo/utils/downloads.py,sha256=_qQrVb8PGPRrvGNOxZqRdwwJdTvEqYMGPJyageL0fas,9562
 ultralytics/yolo/utils/files.py,sha256=41egcQxsUgxN02UbcOGZfobeHQpOjVJu2Sg5fuKbyfo,3406
-ultralytics/yolo/utils/instance.py,sha256=
-ultralytics/yolo/utils/loss.py,sha256=
-ultralytics/yolo/utils/metrics.py,sha256=
-ultralytics/yolo/utils/ops.py,sha256=
-ultralytics/yolo/utils/plotting.py,sha256=
+ultralytics/yolo/utils/instance.py,sha256=rMxybRdrilqpWAOfUtH-YQx4wHNj2Y2lR48u2G8V4bk,11394
+ultralytics/yolo/utils/loss.py,sha256=JTgCauhA-gHhYn7IaMhR5UAUVHG_wx3S8YR5XhkRs88,2889
+ultralytics/yolo/utils/metrics.py,sha256=4nsmjk1-Gp5LLaoT4jo7zfox23qcF2drbqGzzI5EoR8,35011
+ultralytics/yolo/utils/ops.py,sha256=3FX78RW3zZBlkh5ykCMLUDC88YZiE8fp3eDO7tC4JDg,27980
+ultralytics/yolo/utils/plotting.py,sha256=RyCgUwfCKasHLUwgAsyoCvJaWUV1xhIejsZ6XeZD9CY,20717
 ultralytics/yolo/utils/tal.py,sha256=mL-olmUSXUj9bAl1s11K21OopomZif3Q-SwdgBJ6Pvs,10233
-ultralytics/yolo/utils/torch_utils.py,sha256=
+ultralytics/yolo/utils/torch_utils.py,sha256=S8CD1-pPX6k0IGTb6Bue9_syYEftQjayJ7EDIT5l73Y,19694
 ultralytics/yolo/utils/callbacks/__init__.py,sha256=D7j66dJn6N6hqmP8dop75_G0TiKR9a3SD6ceOyc3UNw,123
 ultralytics/yolo/utils/callbacks/base.py,sha256=z2NLGBegE2zdtl_dDVPDK6yO_03o4f9_8JLUb7Kt-6I,3503
 ultralytics/yolo/utils/callbacks/clearml.py,sha256=YwSkhC8ohAoISxDesXVv6k2Bnp9QCvJ1_9sUKhAk0bI,2145
@@ -87,22 +91,26 @@ ultralytics/yolo/utils/callbacks/comet.py,sha256=Y1dsAMovZ40iUXOnFVApuT05Wl9yChz
 ultralytics/yolo/utils/callbacks/hub.py,sha256=TdGS9uA7zDU6IP_GWns76VUQxcE3dgXfUMj_qQR-Nd8,3154
 ultralytics/yolo/utils/callbacks/mlflow.py,sha256=VvpMP4hrF5Tjgx_TNL7BCoo9pd6q-WZaIA6oIkYFDqE,2512
 ultralytics/yolo/utils/callbacks/tensorboard.py,sha256=pCDad52mQSuT5aouEU57izkjGvG2OnAzxdk0mSysoOc,1297
-ultralytics/yolo/v8/__init__.py,sha256=
+ultralytics/yolo/v8/__init__.py,sha256=aEhezkHyW1R-wRF3HyagSCfMy6xhCZj6m4bbIAi6HT0,157
 ultralytics/yolo/v8/classify/__init__.py,sha256=lm4bNTAfDS2ZTljojApMqAKDmDTVlFpq-EC5jjbS31Q,390
 ultralytics/yolo/v8/classify/predict.py,sha256=QUeohoSb1R5sgLKBv_eI1M2vFh8BOe2CcZiepz2Ec4U,3162
 ultralytics/yolo/v8/classify/train.py,sha256=4sajgp7_Tfj0SkV0LdLXwuN2sa1x4av-JNxbeYKMHL0,6359
 ultralytics/yolo/v8/classify/val.py,sha256=vHmqMfJhPm2fGZGKlFWiQ5UsHxkCDYSzl7_D8P3YK20,2554
 ultralytics/yolo/v8/detect/__init__.py,sha256=Z3h2Qu_ebeQFOOrl1OQgpyGkzWBrFJ5ygf7uL8X9Mkc,276
 ultralytics/yolo/v8/detect/predict.py,sha256=XMGriSo24npeiPxNZ81W6BL-jB_qb45VtRWKaky1KY0,4370
-ultralytics/yolo/v8/detect/train.py,sha256
-ultralytics/yolo/v8/detect/val.py,sha256=
+ultralytics/yolo/v8/detect/train.py,sha256=-wVP4smHE7q7K8AS91BoyHymM2Q5ePOUEPYtw5E5NxU,9886
+ultralytics/yolo/v8/detect/val.py,sha256=Vku-GmDXKQBqtnOvcebiArcVjXQSpGfYEfSM8n5rP4A,12487
+ultralytics/yolo/v8/pose/__init__.py,sha256=Q94UAg9QgQqi8EATtkllJUBV9weg7Uig8CcO0-ybLw0,246
+ultralytics/yolo/v8/pose/predict.py,sha256=zohT09vdkrlIDo8qgj616HD5TwJfLRAbsD-sm_tNqQw,4619
+ultralytics/yolo/v8/pose/train.py,sha256=0cZiQu15Fl6jWtxqtm3_Vtq6xgGGbwTsSYUYmahXKqM,7116
+ultralytics/yolo/v8/pose/val.py,sha256=2sO_c9vmqgPFGSjTV25o5x71BxN8K6S2lFJjxzP9vmE,10023
 ultralytics/yolo/v8/segment/__init__.py,sha256=1OK2oVv3A-iWPf_V4Jz0uXhBQZyLd7dlI5rAzxu_IvU,294
 ultralytics/yolo/v8/segment/predict.py,sha256=hmgTGQTnk2nrS6XMLD7VgiLoXd-ZFuwNw7wKGGK0obo,5446
 ultralytics/yolo/v8/segment/train.py,sha256=ryiPZl7F-EuE2pzXWlpgceDy6jK2nyFP8YHAOBBtL4M,7645
-ultralytics/yolo/v8/segment/val.py,sha256=
-ultralytics-8.0.64.dist-info/LICENSE,sha256=
-ultralytics-8.0.64.dist-info/METADATA,sha256=
-ultralytics-8.0.64.dist-info/WHEEL,sha256=
-ultralytics-8.0.64.dist-info/entry_points.txt,sha256=
-ultralytics-8.0.64.dist-info/top_level.txt,sha256=
-ultralytics-8.0.64.dist-info/RECORD,,
+ultralytics/yolo/v8/segment/val.py,sha256=MovmKcVjcN_ZOSu4oyu1EdHxZ5q-0F800hipaxLAysM,11966
+ultralytics-8.0.66.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ultralytics-8.0.66.dist-info/METADATA,sha256=D8LazZc0DYyLZIZ9zhWEiTWSO3ucn0ie5cp08iy_6hs,25956
+ultralytics-8.0.66.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ultralytics-8.0.66.dist-info/entry_points.txt,sha256=Ck1F6qKNokeHozQD5pmaFgXHL6dKyC2qCdyXao2e6Yg,103
+ultralytics-8.0.66.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.0.66.dist-info/RECORD,,
{ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/LICENSE: file without changes
{ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/WHEEL: file without changes
{ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/entry_points.txt: file without changes
{ultralytics-8.0.64.dist-info → ultralytics-8.0.66.dist-info}/top_level.txt: file without changes