megadetector 5.0.5__py3-none-any.whl → 5.0.7__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Potentially problematic release.
This version of megadetector has been flagged as potentially problematic; see the registry's advisory page for details.
- api/batch_processing/data_preparation/manage_local_batch.py +302 -263
- api/batch_processing/data_preparation/manage_video_batch.py +81 -2
- api/batch_processing/postprocessing/add_max_conf.py +1 -0
- api/batch_processing/postprocessing/categorize_detections_by_size.py +50 -19
- api/batch_processing/postprocessing/compare_batch_results.py +110 -60
- api/batch_processing/postprocessing/load_api_results.py +56 -70
- api/batch_processing/postprocessing/md_to_coco.py +1 -1
- api/batch_processing/postprocessing/md_to_labelme.py +2 -1
- api/batch_processing/postprocessing/postprocess_batch_results.py +240 -81
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +625 -0
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +71 -23
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +1 -1
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +227 -75
- api/batch_processing/postprocessing/subset_json_detector_output.py +132 -5
- api/batch_processing/postprocessing/top_folders_to_bottom.py +1 -1
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +2 -2
- classification/prepare_classification_script.py +191 -191
- data_management/coco_to_yolo.py +68 -45
- data_management/databases/integrity_check_json_db.py +7 -5
- data_management/generate_crops_from_cct.py +3 -3
- data_management/get_image_sizes.py +8 -6
- data_management/importers/add_timestamps_to_icct.py +79 -0
- data_management/importers/animl_results_to_md_results.py +160 -0
- data_management/importers/auckland_doc_test_to_json.py +4 -4
- data_management/importers/auckland_doc_to_json.py +1 -1
- data_management/importers/awc_to_json.py +5 -5
- data_management/importers/bellevue_to_json.py +5 -5
- data_management/importers/carrizo_shrubfree_2018.py +5 -5
- data_management/importers/carrizo_trail_cam_2017.py +5 -5
- data_management/importers/cct_field_adjustments.py +2 -3
- data_management/importers/channel_islands_to_cct.py +4 -4
- data_management/importers/ena24_to_json.py +5 -5
- data_management/importers/helena_to_cct.py +10 -10
- data_management/importers/idaho-camera-traps.py +12 -12
- data_management/importers/idfg_iwildcam_lila_prep.py +8 -8
- data_management/importers/jb_csv_to_json.py +4 -4
- data_management/importers/missouri_to_json.py +1 -1
- data_management/importers/noaa_seals_2019.py +1 -1
- data_management/importers/pc_to_json.py +5 -5
- data_management/importers/prepare-noaa-fish-data-for-lila.py +4 -4
- data_management/importers/prepare_zsl_imerit.py +5 -5
- data_management/importers/rspb_to_json.py +4 -4
- data_management/importers/save_the_elephants_survey_A.py +5 -5
- data_management/importers/save_the_elephants_survey_B.py +6 -6
- data_management/importers/snapshot_safari_importer.py +9 -9
- data_management/importers/snapshot_serengeti_lila.py +9 -9
- data_management/importers/timelapse_csv_set_to_json.py +5 -7
- data_management/importers/ubc_to_json.py +4 -4
- data_management/importers/umn_to_json.py +4 -4
- data_management/importers/wellington_to_json.py +1 -1
- data_management/importers/wi_to_json.py +2 -2
- data_management/importers/zamba_results_to_md_results.py +181 -0
- data_management/labelme_to_coco.py +35 -7
- data_management/labelme_to_yolo.py +229 -0
- data_management/lila/add_locations_to_island_camera_traps.py +1 -1
- data_management/lila/add_locations_to_nacti.py +147 -0
- data_management/lila/create_lila_blank_set.py +474 -0
- data_management/lila/create_lila_test_set.py +2 -1
- data_management/lila/create_links_to_md_results_files.py +106 -0
- data_management/lila/download_lila_subset.py +46 -21
- data_management/lila/generate_lila_per_image_labels.py +23 -14
- data_management/lila/get_lila_annotation_counts.py +17 -11
- data_management/lila/lila_common.py +14 -11
- data_management/lila/test_lila_metadata_urls.py +116 -0
- data_management/ocr_tools.py +829 -0
- data_management/resize_coco_dataset.py +13 -11
- data_management/yolo_output_to_md_output.py +84 -12
- data_management/yolo_to_coco.py +38 -20
- detection/process_video.py +36 -14
- detection/pytorch_detector.py +23 -8
- detection/run_detector.py +76 -19
- detection/run_detector_batch.py +178 -63
- detection/run_inference_with_yolov5_val.py +326 -57
- detection/run_tiled_inference.py +153 -43
- detection/video_utils.py +34 -8
- md_utils/ct_utils.py +172 -1
- md_utils/md_tests.py +372 -51
- md_utils/path_utils.py +167 -39
- md_utils/process_utils.py +26 -7
- md_utils/split_locations_into_train_val.py +215 -0
- md_utils/string_utils.py +10 -0
- md_utils/url_utils.py +0 -2
- md_utils/write_html_image_list.py +9 -26
- md_visualization/plot_utils.py +12 -8
- md_visualization/visualization_utils.py +106 -7
- md_visualization/visualize_db.py +16 -8
- md_visualization/visualize_detector_output.py +208 -97
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/METADATA +3 -6
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/RECORD +98 -121
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/WHEEL +1 -1
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +1 -1
- taxonomy_mapping/map_new_lila_datasets.py +43 -39
- taxonomy_mapping/prepare_lila_taxonomy_release.py +5 -2
- taxonomy_mapping/preview_lila_taxonomy.py +27 -27
- taxonomy_mapping/species_lookup.py +33 -13
- taxonomy_mapping/taxonomy_csv_checker.py +7 -5
- api/synchronous/api_core/yolov5/detect.py +0 -252
- api/synchronous/api_core/yolov5/export.py +0 -607
- api/synchronous/api_core/yolov5/hubconf.py +0 -146
- api/synchronous/api_core/yolov5/models/__init__.py +0 -0
- api/synchronous/api_core/yolov5/models/common.py +0 -738
- api/synchronous/api_core/yolov5/models/experimental.py +0 -104
- api/synchronous/api_core/yolov5/models/tf.py +0 -574
- api/synchronous/api_core/yolov5/models/yolo.py +0 -338
- api/synchronous/api_core/yolov5/train.py +0 -670
- api/synchronous/api_core/yolov5/utils/__init__.py +0 -36
- api/synchronous/api_core/yolov5/utils/activations.py +0 -103
- api/synchronous/api_core/yolov5/utils/augmentations.py +0 -284
- api/synchronous/api_core/yolov5/utils/autoanchor.py +0 -170
- api/synchronous/api_core/yolov5/utils/autobatch.py +0 -66
- api/synchronous/api_core/yolov5/utils/aws/__init__.py +0 -0
- api/synchronous/api_core/yolov5/utils/aws/resume.py +0 -40
- api/synchronous/api_core/yolov5/utils/benchmarks.py +0 -148
- api/synchronous/api_core/yolov5/utils/callbacks.py +0 -71
- api/synchronous/api_core/yolov5/utils/dataloaders.py +0 -1087
- api/synchronous/api_core/yolov5/utils/downloads.py +0 -178
- api/synchronous/api_core/yolov5/utils/flask_rest_api/example_request.py +0 -19
- api/synchronous/api_core/yolov5/utils/flask_rest_api/restapi.py +0 -46
- api/synchronous/api_core/yolov5/utils/general.py +0 -1018
- api/synchronous/api_core/yolov5/utils/loggers/__init__.py +0 -187
- api/synchronous/api_core/yolov5/utils/loggers/wandb/__init__.py +0 -0
- api/synchronous/api_core/yolov5/utils/loggers/wandb/log_dataset.py +0 -27
- api/synchronous/api_core/yolov5/utils/loggers/wandb/sweep.py +0 -41
- api/synchronous/api_core/yolov5/utils/loggers/wandb/wandb_utils.py +0 -577
- api/synchronous/api_core/yolov5/utils/loss.py +0 -234
- api/synchronous/api_core/yolov5/utils/metrics.py +0 -355
- api/synchronous/api_core/yolov5/utils/plots.py +0 -489
- api/synchronous/api_core/yolov5/utils/torch_utils.py +0 -314
- api/synchronous/api_core/yolov5/val.py +0 -394
- md_utils/matlab_porting_tools.py +0 -97
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/LICENSE +0 -0
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/top_level.txt +0 -0
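
For readers who want to reproduce a file-level comparison like the listing above, here is a minimal sketch using only the Python standard library. It assumes both wheels have already been downloaded (e.g. via `pip download megadetector==5.0.5 --no-deps`, and likewise for 5.0.7) and that the filenames follow standard PyPI wheel naming; a `.whl` file is just a zip archive.

```python
# Sketch: diff the file listings of two locally downloaded wheels.
# The wheel filenames below are assumptions; adjust to match your downloads.
from zipfile import ZipFile

old_files = set(ZipFile('megadetector-5.0.5-py3-none-any.whl').namelist())
new_files = set(ZipFile('megadetector-5.0.7-py3-none-any.whl').namelist())

for name in sorted(old_files - new_files):
    print('removed:', name)
for name in sorted(new_files - old_files):
    print('added:  ', name)
```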

api/synchronous/api_core/yolov5/val.py
DELETED

@@ -1,394 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Validate a trained YOLOv5 model accuracy on a custom dataset
-
-Usage:
-    $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640
-
-Usage - formats:
-    $ python path/to/val.py --weights yolov5s.pt                 # PyTorch
-                                      yolov5s.torchscript        # TorchScript
-                                      yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
-                                      yolov5s.xml                # OpenVINO
-                                      yolov5s.engine             # TensorRT
-                                      yolov5s.mlmodel            # CoreML (macOS-only)
-                                      yolov5s_saved_model        # TensorFlow SavedModel
-                                      yolov5s.pb                 # TensorFlow GraphDef
-                                      yolov5s.tflite             # TensorFlow Lite
-                                      yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
-"""
-
-import argparse
-import json
-import os
-import sys
-from pathlib import Path
-
-import numpy as np
-import torch
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
-
-from models.common import DetectMultiBackend
-from utils.callbacks import Callbacks
-from utils.dataloaders import create_dataloader
-from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml,
-                           coco80_to_coco91_class, colorstr, emojis, increment_path, non_max_suppression, print_args,
-                           scale_coords, xywh2xyxy, xyxy2xywh)
-from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
-from utils.plots import output_to_target, plot_images, plot_val_study
-from utils.torch_utils import select_device, time_sync
-
-
-def save_one_txt(predn, save_conf, shape, file):
-    # Save one txt result
-    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
-    for *xyxy, conf, cls in predn.tolist():
-        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
-        with open(file, 'a') as f:
-            f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-
-def save_one_json(predn, jdict, path, class_map):
-    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
-    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
-    box = xyxy2xywh(predn[:, :4])  # xywh
-    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
-    for p, b in zip(predn.tolist(), box.tolist()):
-        jdict.append({
-            'image_id': image_id,
-            'category_id': class_map[int(p[5])],
-            'bbox': [round(x, 3) for x in b],
-            'score': round(p[4], 5)})
-
-
-def process_batch(detections, labels, iouv):
-    """
-    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
-    Arguments:
-        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
-        labels (Array[M, 5]), class, x1, y1, x2, y2
-    Returns:
-        correct (Array[N, 10]), for 10 IoU levels
-    """
-    correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
-    iou = box_iou(labels[:, 1:], detections[:, :4])
-    correct_class = labels[:, 0:1] == detections[:, 5]
-    for i in range(len(iouv)):
-        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
-        if x[0].shape[0]:
-            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
-            if x[0].shape[0] > 1:
-                matches = matches[matches[:, 2].argsort()[::-1]]
-                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
-                # matches = matches[matches[:, 2].argsort()[::-1]]
-                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
-            correct[matches[:, 1].astype(int), i] = True
-    return correct
-
-
-@torch.no_grad()
-def run(
-        data,
-        weights=None,  # model.pt path(s)
-        batch_size=32,  # batch size
-        imgsz=640,  # inference size (pixels)
-        conf_thres=0.001,  # confidence threshold
-        iou_thres=0.6,  # NMS IoU threshold
-        task='val',  # train, val, test, speed or study
-        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        workers=8,  # max dataloader workers (per RANK in DDP mode)
-        single_cls=False,  # treat as single-class dataset
-        augment=False,  # augmented inference
-        verbose=False,  # verbose output
-        save_txt=False,  # save results to *.txt
-        save_hybrid=False,  # save label+prediction hybrid results to *.txt
-        save_conf=False,  # save confidences in --save-txt labels
-        save_json=False,  # save a COCO-JSON results file
-        project=ROOT / 'runs/val',  # save to project/name
-        name='exp',  # save to project/name
-        exist_ok=False,  # existing project/name ok, do not increment
-        half=True,  # use FP16 half-precision inference
-        dnn=False,  # use OpenCV DNN for ONNX inference
-        model=None,
-        dataloader=None,
-        save_dir=Path(''),
-        plots=True,
-        callbacks=Callbacks(),
-        compute_loss=None,
-):
-    # Initialize/load model and set device
-    training = model is not None
-    if training:  # called by train.py
-        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
-        half &= device.type != 'cpu'  # half precision only supported on CUDA
-        model.half() if half else model.float()
-    else:  # called directly
-        device = select_device(device, batch_size=batch_size)
-
-        # Directories
-        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
-        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
-
-        # Load model
-        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
-        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
-        imgsz = check_img_size(imgsz, s=stride)  # check image size
-        half = model.fp16  # FP16 supported on limited backends with CUDA
-        if engine:
-            batch_size = model.batch_size
-        else:
-            device = model.device
-            if not (pt or jit):
-                batch_size = 1  # export.py models default to batch-size 1
-                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
-
-        # Data
-        data = check_dataset(data)  # check
-
-    # Configure
-    model.eval()
-    cuda = device.type != 'cpu'
-    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
-    nc = 1 if single_cls else int(data['nc'])  # number of classes
-    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
-    niou = iouv.numel()
-
-    # Dataloader
-    if not training:
-        if pt and not single_cls:  # check --weights are trained on --data
-            ncm = model.model.nc
-            assert ncm == nc, f'{weights[0]} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
-                              f'classes). Pass correct combination of --weights and --data that are trained together.'
-        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
-        pad = 0.0 if task in ('speed', 'benchmark') else 0.5
-        rect = False if task == 'benchmark' else pt  # square inference for benchmarks
-        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
-        dataloader = create_dataloader(data[task],
-                                       imgsz,
-                                       batch_size,
-                                       stride,
-                                       single_cls,
-                                       pad=pad,
-                                       rect=rect,
-                                       workers=workers,
-                                       prefix=colorstr(f'{task}: '))[0]
-
-    seen = 0
-    confusion_matrix = ConfusionMatrix(nc=nc)
-    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
-    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
-    dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
-    loss = torch.zeros(3, device=device)
-    jdict, stats, ap, ap_class = [], [], [], []
-    callbacks.run('on_val_start')
-    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
-        callbacks.run('on_val_batch_start')
-        t1 = time_sync()
-        if cuda:
-            im = im.to(device, non_blocking=True)
-            targets = targets.to(device)
-        im = im.half() if half else im.float()  # uint8 to fp16/32
-        im /= 255  # 0 - 255 to 0.0 - 1.0
-        nb, _, height, width = im.shape  # batch size, channels, height, width
-        t2 = time_sync()
-        dt[0] += t2 - t1
-
-        # Inference
-        out, train_out = model(im) if training else model(im, augment=augment, val=True)  # inference, loss outputs
-        dt[1] += time_sync() - t2
-
-        # Loss
-        if compute_loss:
-            loss += compute_loss([x.float() for x in train_out], targets)[1]  # box, obj, cls
-
-        # NMS
-        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
-        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
-        t3 = time_sync()
-        out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
-        dt[2] += time_sync() - t3
-
-        # Metrics
-        for si, pred in enumerate(out):
-            labels = targets[targets[:, 0] == si, 1:]
-            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
-            path, shape = Path(paths[si]), shapes[si][0]
-            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
-            seen += 1
-
-            if npr == 0:
-                if nl:
-                    stats.append((correct, *torch.zeros((3, 0), device=device)))
-                continue
-
-            # Predictions
-            if single_cls:
-                pred[:, 5] = 0
-            predn = pred.clone()
-            scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred
-
-            # Evaluate
-            if nl:
-                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
-                scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
-                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
-                correct = process_batch(predn, labelsn, iouv)
-                if plots:
-                    confusion_matrix.process_batch(predn, labelsn)
-            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)
-
-            # Save/log
-            if save_txt:
-                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
-            if save_json:
-                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
-            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
-
-        # Plot images
-        if plots and batch_i < 3:
-            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
-            plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred
-
-        callbacks.run('on_val_batch_end')
-
-    # Compute metrics
-    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
-    if len(stats) and stats[0].any():
-        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
-        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
-        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
-        nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class
-    else:
-        nt = torch.zeros(1)
-
-    # Print results
-    pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
-    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
-
-    # Print results per class
-    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
-        for i, c in enumerate(ap_class):
-            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
-
-    # Print speeds
-    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
-    if not training:
-        shape = (batch_size, 3, imgsz, imgsz)
-        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
-
-    # Plots
-    if plots:
-        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
-        callbacks.run('on_val_end')
-
-    # Save JSON
-    if save_json and len(jdict):
-        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
-        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
-        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
-        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
-        with open(pred_json, 'w') as f:
-            json.dump(jdict, f)
-
-        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-            check_requirements(['pycocotools'])
-            from pycocotools.coco import COCO
-            from pycocotools.cocoeval import COCOeval
-
-            anno = COCO(anno_json)  # init annotations api
-            pred = anno.loadRes(pred_json)  # init predictions api
-            eval = COCOeval(anno, pred, 'bbox')
-            if is_coco:
-                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
-            eval.evaluate()
-            eval.accumulate()
-            eval.summarize()
-            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
-        except Exception as e:
-            LOGGER.info(f'pycocotools unable to run: {e}')
-
-    # Return results
-    model.float()  # for training
-    if not training:
-        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
-    maps = np.zeros(nc) + map
-    for i, c in enumerate(ap_class):
-        maps[c] = ap[i]
-    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
-
-
-def parse_opt():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
-    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
-    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
-    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
-    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
-    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
-    parser.add_argument('--augment', action='store_true', help='augmented inference')
-    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
-    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
-    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
-    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
-    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
-    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
-    parser.add_argument('--name', default='exp', help='save to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
-    opt = parser.parse_args()
-    opt.data = check_yaml(opt.data)  # check YAML
-    opt.save_json |= opt.data.endswith('coco.yaml')
-    opt.save_txt |= opt.save_hybrid
-    print_args(vars(opt))
-    return opt
-
-
-def main(opt):
-    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
-
-    if opt.task in ('train', 'val', 'test'):  # run normally
-        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
-            LOGGER.info(emojis(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️'))
-        run(**vars(opt))
-
-    else:
-        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
-        opt.half = True  # FP16 for fastest results
-        if opt.task == 'speed':  # speed benchmarks
-            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
-            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
-            for opt.weights in weights:
-                run(**vars(opt), plots=False)
-
-        elif opt.task == 'study':  # speed vs mAP benchmarks
-            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
-            for opt.weights in weights:
-                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
-                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
-                for opt.imgsz in x:  # img-size
-                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
-                    r, _, t = run(**vars(opt), plots=False)
-                    y.append(r + t)  # results and times
-                np.savetxt(f, y, fmt='%10.4g')  # save
-            os.system('zip -r study.zip study_*.txt')
-            plot_val_study(x=x)  # plot
-
-
-if __name__ == "__main__":
-    opt = parse_opt()
-    main(opt)
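
The removed `process_batch()` above is the heart of YOLOv5's mAP bookkeeping: it builds an N×10 boolean matrix marking, for each of N detections, whether it matches a same-class ground-truth box at each of the 10 IoU thresholds from 0.5 to 0.95. A simplified, torch-free sketch of the same idea (illustrative only: it omits the class check and the unique-match deduplication the original performs):

```python
import numpy as np

def box_iou(a, b):
    # Pairwise IoU between boxes in (x1, y1, x2, y2) format; a is (M, 4), b is (N, 4).
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    lt = np.maximum(a[:, None, :2], b[None, :, :2])  # intersection top-left corners
    rb = np.minimum(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right corners
    wh = np.clip(rb - lt, 0, None)                   # intersection width/height
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

iouv = np.linspace(0.5, 0.95, 10)             # the 10 IoU levels used for mAP@0.5:0.95
labels = np.array([[1.0, 1.0, 11.0, 11.0]])   # one ground-truth box
preds = np.array([[0.0, 0.0, 10.0, 10.0],     # overlaps the label (IoU ~ 0.68)
                  [20.0, 20.0, 30.0, 30.0]])  # no overlap
iou = box_iou(labels, preds)                  # shape (M labels, N predictions)
correct = iou.max(axis=0)[:, None] >= iouv[None, :]  # N x 10 "correct" matrix
print(correct.astype(int))  # row 0: true up through the 0.65 threshold; row 1: all false
```
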
md_utils/matlab_porting_tools.py
DELETED

@@ -1,97 +0,0 @@
-########
-#
-# matlab_porting_tools.py
-#
-# Module containing a few ported Matlab functions that made it easier
-# for me to port other, larger Matlab functions.
-#
-########
-
-#%% Constants and imports
-
-import ntpath
-
-
-#%% sec2hms()
-
-def sec2hms(tSeconds):
-    """
-    function [str,h,m,s] = sec2hms(tSeconds,separator)
-
-    Convert a time in seconds to a string of the form:
-
-    1 hour, 2 minutes, 31.4 seconds
-
-    I prefer using the humanfriendly package for this, but I use this when
-    porting from Matlab.
-    """
-
-    # https://stackoverflow.com/questions/775049/python-time-seconds-to-hms
-    m, s = divmod(tSeconds, 60)
-    h, m = divmod(m, 60)
-
-    # colonString = '%d:%02d:%02d' % (h, m, s)
-    # return (colonString,verboseString)
-
-    hms = ''
-    separator = ', '
-    if (h > 0):
-        pluralString = ''
-        if (h > 1):
-            pluralString = 's'
-        hms = hms + '%d hour%s%s' % (h,pluralString,separator)
-
-    if (m > 0):
-        pluralString = ''
-        if (m > 1):
-            pluralString = 's'
-        hms = hms + '%d min%s%s' % (m,pluralString,separator)
-
-    hms = hms + '%3.3fsec' % s
-
-    return hms
-
-#%% Test driver for sec2hms()
-
-if False:
-
-    pass
-
-    #%%
-
-    TEST_VALUES = [60033, 30.4, 245234523454.1]
-
-    for n in TEST_VALUES:
-        s = sec2hms(n)
-        print('{} - {}'.format(n,s))
-
-
-#%% read_lines_from_file()
-
-def read_lines_from_file(filename):
-
-    with open(filename) as f:
-        content = f.readlines()
-
-    # Remove trailing newlines
-    content = [x.rstrip() for x in content]
-
-    return content
-
-
-#%% write_lines_to_file()
-
-def write_lines_to_file(lines, filename):
-
-    with open(filename,'w') as f:
-        for line in lines:
-            f.write(line+ '\n')
-
-
-#%% string_ends_with()
-
-def string_ends_with(s,query):
-    return s.endswith(query)
-
-def string_starts_with(s,query):
-    return s.startswith(query)
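
The removed `sec2hms()` docstring itself names the `humanfriendly` package as the author's preferred way to format durations; a minimal sketch of that alternative (assumes `pip install humanfriendly`; the exact output wording is humanfriendly's, so it differs slightly from `sec2hms()`):

```python
import humanfriendly

for t in [60033, 30.4]:
    print(t, '->', humanfriendly.format_timespan(t))
# 60033 -> approximately '16 hours, 40 minutes and 33 seconds'
```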