megadetector 5.0.21__py3-none-any.whl → 5.0.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/data_management/cct_json_utils.py +143 -7
- megadetector/data_management/cct_to_md.py +12 -5
- megadetector/data_management/databases/integrity_check_json_db.py +83 -77
- megadetector/data_management/importers/raic_csv_to_md_results.py +416 -0
- megadetector/data_management/importers/zamba_results_to_md_results.py +1 -2
- megadetector/data_management/lila/create_lila_test_set.py +25 -11
- megadetector/data_management/lila/download_lila_subset.py +9 -2
- megadetector/data_management/lila/generate_lila_per_image_labels.py +3 -2
- megadetector/data_management/lila/test_lila_metadata_urls.py +5 -1
- megadetector/data_management/read_exif.py +10 -14
- megadetector/data_management/rename_images.py +1 -1
- megadetector/detection/process_video.py +14 -3
- megadetector/detection/pytorch_detector.py +15 -3
- megadetector/detection/run_detector.py +4 -3
- megadetector/detection/run_detector_batch.py +2 -2
- megadetector/detection/run_inference_with_yolov5_val.py +121 -13
- megadetector/detection/video_utils.py +21 -10
- megadetector/postprocessing/classification_postprocessing.py +1 -1
- megadetector/postprocessing/compare_batch_results.py +931 -142
- megadetector/postprocessing/detector_calibration.py +243 -45
- megadetector/postprocessing/md_to_coco.py +85 -20
- megadetector/postprocessing/postprocess_batch_results.py +0 -1
- megadetector/postprocessing/validate_batch_results.py +65 -15
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -12
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +1 -1
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +3 -1
- megadetector/utils/ct_utils.py +71 -14
- megadetector/utils/md_tests.py +9 -1
- megadetector/utils/path_utils.py +14 -7
- megadetector/utils/process_utils.py +9 -3
- megadetector/utils/write_html_image_list.py +5 -1
- megadetector/visualization/visualization_utils.py +211 -87
- {megadetector-5.0.21.dist-info → megadetector-5.0.23.dist-info}/METADATA +19 -18
- {megadetector-5.0.21.dist-info → megadetector-5.0.23.dist-info}/RECORD +37 -36
- {megadetector-5.0.21.dist-info → megadetector-5.0.23.dist-info}/WHEEL +1 -1
- {megadetector-5.0.21.dist-info → megadetector-5.0.23.dist-info}/LICENSE +0 -0
- {megadetector-5.0.21.dist-info → megadetector-5.0.23.dist-info}/top_level.txt +0 -0
megadetector/postprocessing/detector_calibration.py

@@ -10,6 +10,7 @@ versions of MegaDetector.
 #%% Constants and imports
 
 import random
+import copy
 
 from tqdm import tqdm
 from enum import IntEnum
@@ -21,7 +22,7 @@ import matplotlib.pyplot as plt
 
 from megadetector.postprocessing.validate_batch_results import \
     validate_batch_results, ValidateBatchResultsOptions
-from megadetector.utils.ct_utils import get_iou
+from megadetector.utils.ct_utils import get_iou, max_none, is_iterable
 
 
 #%% Classes
@@ -36,7 +37,7 @@ class CalibrationOptions:
         #: IoU threshold used for determining whether two detections are the same
         #:
         #: When multiple detections match, we will only use the highest-matching IoU.
-        self.iou_threshold = 0.
+        self.iou_threshold = 0.6
 
         #: Minimum confidence threshold to consider for calibration (should be lower than
         #: the lowest value you would use in realistic situations)
@@ -52,9 +53,18 @@ class CalibrationOptions:
         self.model_name_b = 'model_b'
 
         #: Maximum number of samples to use for plotting or calibration per category,
-        #: or None to use all paired values.
+        #: or None to use all paired values. If separate_plots_by_category is False,
+        #: this is the overall number of points sampled.
         self.max_samples_per_category = None
 
+        #: Should we make separate plots for each category? Mutually exclusive with
+        #: separate_plots_by_correctness.
+        self.separate_plots_by_category = True
+
+        #: Should we make separate plots for TPs/FPs? Mutually exclusive with
+        #: separate_plots_by_category.
+        self.separate_plots_by_correctness = False
+
         #: List of category IDs to use for plotting comparisons, or None to plot
         #: all categories.
         self.categories_to_plot = None
@@ -62,15 +72,19 @@ class CalibrationOptions:
         #: Optionally map category ID to name in plot labels
         self.category_id_to_name = None
 
+        #: Enable additional debug output
+        self.verbose = True
+
 # ...class CalibrationOptions
 
-class ConfidenceMatchColumns(IntEnum):
+class CalibrationMatchColumns(IntEnum):
 
     COLUMN_CONF_A = 0
     COLUMN_CONF_B = 1
-
-
-
+    COLUMN_IOU = 2
+    COLUMN_I_IMAGE = 3
+    COLUMN_CATEGORY_ID = 4
+    COLUMN_MATCHES_GT = 5
 
 class CalibrationResults:
     """
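For orientation, here is a minimal sketch (not code from the package) of how the new `CalibrationMatchColumns` enum indexes a match record; the sample values are invented:

```python
from enum import IntEnum

class CalibrationMatchColumns(IntEnum):
    COLUMN_CONF_A = 0
    COLUMN_CONF_B = 1
    COLUMN_IOU = 2
    COLUMN_I_IMAGE = 3
    COLUMN_CATEGORY_ID = 4
    COLUMN_MATCHES_GT = 5

# A match record follows [conf_a, conf_b, iou, i_image, category_id, matches_gt];
# these particular values are made up for illustration
m = [0.92, 0.88, 0.75, 14, '1', True]

# IntEnum members are ints, so they can index a plain list directly
assert m[CalibrationMatchColumns.COLUMN_IOU] == 0.75
assert m[CalibrationMatchColumns.COLUMN_MATCHES_GT] is True
```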
@@ -79,8 +93,12 @@ class CalibrationResults:
 
     def __init__(self):
 
-        #: List of tuples: [conf_a, conf_b, iou, i_image, category_id]
-
+        #: List of tuples: [conf_a, conf_b, iou, i_image, category_id, matches_gt]
+        #:
+        #: If ground truth is supplied, [matches_gt] is a bool indicating whether either
+        #: of the detected boxes matches a ground truth box of the same category. If
+        #: ground truth is not supplied, [matches_gt] is None.
+        self.calibration_matches = []
 
         #: Populated with the data loaded from json_filename_a if options.return_data is True
         self.data_a = None
@@ -93,7 +111,7 @@ class CalibrationResults:
 
 #%% Calibration functions
 
-def compare_model_confidence_values(json_filename_a,json_filename_b,options=None):
+def compare_model_confidence_values(json_filename_a,json_filename_b,json_filename_gt=None,options=None):
     """
     Compare confidence values across two .json results files. Compares only detections that
     can be matched by IoU, i.e., does not do anything with detections that only appear in one file.
@@ -103,6 +121,9 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
             should refer to the same images as [json_filename_b]. Can also be a loaded results dict.
         json_filename_b (str or dict): filename containing results from the second model to be compared;
             should refer to the same images as [json_filename_a]. Can also be a loaded results dict.
+        json_filename_gt (str or dict, optional): filename containing ground truth; should refer to the
+            same images as [json_filename_a] and [json_filename_b]. Can also be a loaded results dict.
+            Should be in COCO format.
         options (CalibrationOptions, optional): all the parameters used to control this process, see
             CalibrationOptions for details
 
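As a usage sketch of the new signature (the file names are placeholders, and this snippet is not from the package), the ground truth file is passed alongside the two results files, and the function populates (and presumably returns) a CalibrationResults object:

```python
from megadetector.postprocessing.detector_calibration import (
    CalibrationOptions, compare_model_confidence_values)

options = CalibrationOptions()
options.model_name_a = 'model_a'
options.model_name_b = 'model_b'

# Placeholder filenames; per the docstring, each argument can also be a
# pre-loaded dict
results = compare_model_confidence_values(
    'md_results_a.json',
    'md_results_b.json',
    json_filename_gt='ground_truth_coco.json',  # optional, new in this version
    options=options)

# Each match record now ends with a matches_gt flag (None when no GT is supplied)
print(len(results.calibration_matches))
```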
@@ -132,6 +153,16 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
         assert isinstance(json_filename_b,dict)
         results_b = json_filename_b
 
+    # Load ground truth, if supplied
+    gt_data = None
+
+    if json_filename_gt is not None:
+        if isinstance(json_filename_gt,str):
+            gt_data = validate_batch_results(json_filename_gt,
+                                             options=validation_options)
+        else:
+            assert isinstance(json_filename_gt,dict)
+            gt_data = json_filename_gt
 
     ## Make sure these results sets are comparable
 
@@ -148,6 +179,32 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
         assert categories_a[k] == categories_b[k], 'Category mismatch'
 
 
+    ## Load ground truth if necessary
+
+    gt_category_name_to_id = None
+    gt_image_id_to_annotations = None
+    image_filename_to_gt_im = None
+
+    if gt_data is not None:
+
+        gt_category_name_to_id = {}
+        for c in gt_data['categories']:
+            gt_category_name_to_id[c['name']] = c['id']
+
+        image_filename_to_gt_im = {}
+        for im in gt_data['images']:
+            assert 'width' in im and 'height' in im, \
+                'I can only compare against GT that has "width" and "height" fields'
+            image_filename_to_gt_im[im['file_name']] = im
+
+        assert set(image_filename_to_gt_im.keys()) == set(image_filenames_a), \
+            'Ground truth filename list does not match image filename list'
+
+        gt_image_id_to_annotations = defaultdict(list)
+        for ann in gt_data['annotations']:
+            gt_image_id_to_annotations[ann['image_id']].append(ann)
+
+
     ## Compare detections
 
     image_filename_b_to_im = {}
@@ -158,10 +215,10 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
     n_detections_a_queried = 0
     n_detections_a_matched = 0
 
-
+    calibration_matches = []
 
     # For each image
-    # im_a = results_a['images'][0]
+    # im_a = results_a['images'][0]
     for i_image,im_a in tqdm(enumerate(results_a['images']),total=len(results_a['images'])):
 
         fn = im_a['file']
@@ -172,6 +229,10 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
         if 'detections' not in im_b or im_b['detections'] is None:
             continue
 
+        im_gt = None
+        if gt_data is not None:
+            im_gt = image_filename_to_gt_im[fn]
+
         # For each detection in result set A...
         #
         # det_a = im_a['detections'][0]
@@ -192,7 +253,8 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
 
             best_iou = None
             best_iou_conf = None
-
+            best_bbox_b = None
+
             # For each detection in result set B...
             #
             # det_b = im_b['detections'][0]
@@ -220,24 +282,99 @@ def compare_model_confidence_values(json_filename_a,json_filename_b,options=None)
                 if best_iou is None or iou > best_iou:
                     best_iou = iou
                     best_iou_conf = conf_b
+                    best_bbox_b = bbox_b
 
             # ...for each detection in im_b
 
+            # If we found a match between A and B
             if best_iou is not None:
+
                 n_detections_a_matched += 1
-
-
+
+                # Does this pair of matched detections also match a ground truth box?
+                matches_gt = None
 
+                if im_gt is not None:
+
+                    def max_iou_between_detection_and_gt(detection_box,category_name,im_gt,gt_annotations):
+
+                        max_iou = None
+
+                        # Which category ID are we looking for?
+                        gt_category_id_for_detected_category_name = \
+                            gt_category_name_to_id[category_name]
+
+                        # For each GT annotation
+                        #
+                        # ann = gt_annotations[0]
+                        for ann in gt_annotations:
+
+                            # Only match against boxes in the same category
+                            if ann['category_id'] != gt_category_id_for_detected_category_name:
+                                continue
+                            if 'bbox' not in ann:
+                                continue
+
+                            # Normalize this box
+                            #
+                            # COCO format: [x,y,width,height]
+                            # normalized format: [x_min, y_min, width_of_box, height_of_box]
+                            normalized_gt_box = [ann['bbox'][0]/im_gt['width'],ann['bbox'][1]/im_gt['height'],
+                                                 ann['bbox'][2]/im_gt['width'],ann['bbox'][3]/im_gt['height']]
+
+                            iou = get_iou(detection_box, normalized_gt_box)
+                            if max_iou is None or iou > max_iou:
+                                max_iou = iou
+
+                        # ...for each gt box
+
+                        return max_iou
+
+                    # ...def max_iou_between_detection_and_gt(...)
+
+                    gt_annotations = gt_image_id_to_annotations[im_gt['id']]
+
+                    # If they matched, the A and B boxes have the same category by definition
+                    category_name = categories_a[det_a['category']]
+
+                    max_iou_with_bbox_a = max_iou_between_detection_and_gt(bbox_a,category_name,im_gt,gt_annotations)
+                    max_iou_with_bbox_b = max_iou_between_detection_and_gt(best_bbox_b,category_name,im_gt,gt_annotations)
+
+                    max_iou_with_either_detection_set = max_none(max_iou_with_bbox_a,
+                                                                 max_iou_with_bbox_b)
+
+                    matches_gt = False
+                    if (max_iou_with_either_detection_set is not None) and \
+                       (max_iou_with_either_detection_set >= options.iou_threshold):
+                        matches_gt = True
+
+                # ...if we have ground truth
+
+                conf_result = [conf_a,best_iou_conf,best_iou,i_image,category_id,matches_gt]
+                calibration_matches.append(conf_result)
+
+            # ...if we had a match between A and B
         # ...for each detection in im_a
 
     # ...for each image in result set A
 
-
-
-
+    if options.verbose:
+
+        print('\nOf {} detections in result set A, queried {}, matched {}'.format(
+            n_detections_a,n_detections_a_queried,n_detections_a_matched))
+
+        if gt_data is not None:
+            n_matches = 0
+            for m in calibration_matches:
+                assert m[CalibrationMatchColumns.COLUMN_MATCHES_GT] is not None
+                if m[CalibrationMatchColumns.COLUMN_MATCHES_GT]:
+                    n_matches += 1
+            print('{} matches also matched ground truth'.format(n_matches))
+
+    assert len(calibration_matches) == n_detections_a_matched
 
     calibration_results = CalibrationResults()
-    calibration_results.
+    calibration_results.calibration_matches = calibration_matches
 
     if options.return_data:
         calibration_results.data_a = results_a
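Two details in this hunk are easy to miss: COCO ground truth boxes are in absolute pixels while MD detections are normalized, hence the per-axis division, and `max_none` (imported above) presumably behaves like a None-tolerant `max`. A minimal standalone sketch, not package code, with the `max_none` behavior inferred from how it is called:

```python
# COCO boxes are absolute pixels: [x, y, width, height]
coco_box = [100, 50, 200, 100]
im_w, im_h = 1000, 500

# MD detections are normalized to [0, 1]: [x_min, y_min, width, height],
# so the GT box is divided by image width/height per axis before computing IoU
normalized_gt_box = [coco_box[0] / im_w, coco_box[1] / im_h,
                     coco_box[2] / im_w, coco_box[3] / im_h]
assert normalized_gt_box == [0.1, 0.1, 0.2, 0.2]

def max_none(a, b):
    # Presumed behavior of the helper imported from ct_utils:
    # a max() that treats None as "no value"
    if a is None:
        return b
    if b is None:
        return a
    return max(a, b)

assert max_none(None, 0.4) == 0.4
assert max_none(0.7, 0.4) == 0.7
assert max_none(None, None) is None
```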
@@ -272,31 +409,87 @@ def plot_matched_confidence_values(calibration_results,output_filename,options=None)
     if options is None:
         options = CalibrationOptions()
 
-
-
+    assert not (options.separate_plots_by_category and \
+                options.separate_plots_by_correctness), \
+        'separate_plots_by_category and separate_plots_by_correctness are mutually exclusive'
 
-
-
-        category_id = m[ConfidenceMatchColumns.COLUMN_CONF_CATEGORY_ID]
-        category_to_matches[category_id].append(m)
+    category_id_to_name = None
+    category_to_samples = None
 
-
-
-
-
+    calibration_matches = calibration_results.calibration_matches
+
+    # If we're just lumping everything into one plot
+    if (not options.separate_plots_by_category) and (not options.separate_plots_by_correctness):
 
-
+        category_id_to_name = {'0':'all_categories'}
+        category_to_samples = {'0': []}
 
-
-
-
-
-
-
+        # Make everything category "0" (arbitrary)
+        calibration_matches = copy.deepcopy(calibration_matches)
+        for m in calibration_matches:
+            m[CalibrationMatchColumns.COLUMN_CATEGORY_ID] = '0'
+        if (options.max_samples_per_category is not None) and \
+           (len(calibration_matches) > options.max_samples_per_category):
+            calibration_matches = \
+                random.sample(calibration_matches,options.max_samples_per_category)
+        category_to_samples['0'] = calibration_matches
+
+    # If we're separating into lines for FPs and TPs (but not separating by category)
+    elif options.separate_plots_by_correctness:
+
+        assert not options.separate_plots_by_category
+
+        category_id_tp = '0'
+        category_id_fp = '1'
+
+        category_id_to_name = {category_id_tp:'TP', category_id_fp:'FP'}
+        category_to_samples = {category_id_tp: [], category_id_fp: []}
+
+        for m in calibration_matches:
+            assert m[CalibrationMatchColumns.COLUMN_MATCHES_GT] is not None, \
+                "Can't plot by correctness when GT status is not available for every match"
+            if m[CalibrationMatchColumns.COLUMN_MATCHES_GT]:
+                category_to_samples[category_id_tp].append(m)
+            else:
+                category_to_samples[category_id_fp].append(m)
+
+    # If we're separating by category
+    else:
+
+        assert options.separate_plots_by_category
 
-
-
+        category_to_samples = defaultdict(list)
+
+        category_to_matches = defaultdict(list)
+        for m in calibration_matches:
+            category_id = m[CalibrationMatchColumns.COLUMN_CATEGORY_ID]
+            category_to_matches[category_id].append(m)
+
+        category_id_to_name = None
+        if options.category_id_to_name is not None:
+            category_id_to_name = options.category_id_to_name
+
+        for i_category,category_id in enumerate(category_to_matches.keys()):
+
+            matches_this_category = category_to_matches[category_id]
+
+            if (options.max_samples_per_category is None) or \
+               (len(matches_this_category) <= options.max_samples_per_category):
+                category_to_samples[category_id] = matches_this_category
+            else:
+                assert len(matches_this_category) > options.max_samples_per_category
+                category_to_samples[category_id] = random.sample(matches_this_category,options.max_samples_per_category)
+
+        del category_to_matches
+
+    del calibration_matches
 
+    if options.verbose:
+        n_samples_for_histogram = 0
+        for c in category_to_samples:
+            n_samples_for_histogram += len(category_to_samples[c])
+        print('Creating a histogram based on {} samples'.format(n_samples_for_histogram))
+
     categories_to_plot = list(category_to_samples.keys())
 
     if options.categories_to_plot is not None:
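To see how the two new plotting modes fit together with ground truth, here is a hedged usage sketch (file names are placeholders; this snippet is not part of the package):

```python
from megadetector.postprocessing.detector_calibration import (
    CalibrationOptions, compare_model_confidence_values,
    plot_matched_confidence_values)

options = CalibrationOptions()

# The two modes are mutually exclusive: this asks for one histogram pair
# per TP/FP bucket rather than one per detection category
options.separate_plots_by_category = False
options.separate_plots_by_correctness = True

# Correctness plots require ground truth, so that matches_gt is
# populated for every match
results = compare_model_confidence_values(
    'md_results_a.json', 'md_results_b.json',
    json_filename_gt='ground_truth_coco.json', options=options)

plot_matched_confidence_values(results, 'confidence_comparison.png',
                               options=options)
```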
@@ -311,16 +504,19 @@ def plot_matched_confidence_values(calibration_results,output_filename,options=None)
     # fig,axes = plt.subplots(nrows=n_subplots,ncols=1)
 
     axes = fig.subplots(n_subplots, 1)
+
+    if not is_iterable(axes):
+        assert n_subplots == 1
+        axes = [axes]
 
     # i_category = 0; category_id = categories_to_plot[i_category]
     for i_category,category_id in enumerate(categories_to_plot):
 
         ax = axes[i_category]
 
-        category_string = category_id
-        if
-
-            category_string = options.category_id_to_name[category_id]
+        category_string = str(category_id)
+        if (category_id_to_name is not None) and (category_id in category_id_to_name):
+            category_string = category_id_to_name[category_id]
 
         samples_this_category = category_to_samples[category_id]
         x = [m[0] for m in samples_this_category]
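The new `is_iterable` guard works around a matplotlib quirk: `Figure.subplots` returns an array of Axes for multiple subplots, but a bare Axes object for a single one. A minimal standalone illustration (not package code):

```python
import matplotlib
matplotlib.use('Agg')  # headless backend, just for this sketch
import matplotlib.pyplot as plt

# With more than one subplot, subplots() returns a numpy array of Axes
fig = plt.figure()
axes = fig.subplots(2, 1)
assert hasattr(axes, '__len__') and len(axes) == 2

# With a single subplot, it returns a bare Axes, so axes[0] would fail
# without wrapping it in a list first, as the code above does
fig2 = plt.figure()
axes2 = fig2.subplots(1, 1)
if not hasattr(axes2, '__iter__'):
    axes2 = [axes2]
ax = axes2[0]
```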
@@ -328,8 +524,11 @@ def plot_matched_confidence_values(calibration_results,output_filename,options=None)
 
         weights_a = np.ones_like(x)/float(len(x))
         weights_b = np.ones_like(y)/float(len(y))
-
-
+
+        # Plot the first line a little thicker so the second line will always show up
+        ax.hist(x,histtype='step',bins=n_hist_bins,density=False,color='red',weights=weights_a,linewidth=3.0)
+        ax.hist(y,histtype='step',bins=n_hist_bins,density=False,color='blue',weights=weights_b,linewidth=1.5)
+
         ax.legend([options.model_name_a,options.model_name_b])
         ax.set_ylabel(category_string)
         # plt.tight_layout()
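The per-sample weights above make each histogram's bar heights sum to 1, so result sets of different sizes remain comparable. A small numeric check of that arithmetic (made-up values):

```python
import numpy as np

x = np.array([0.2, 0.4, 0.4, 0.9])  # made-up confidence values

# Weighting every sample by 1/N turns raw counts into fractions,
# so the bar heights sum to 1.0 regardless of sample count
weights = np.ones_like(x) / float(len(x))
counts, _ = np.histogram(x, bins=4, range=(0, 1), weights=weights)
assert abs(counts.sum() - 1.0) < 1e-9
```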
@@ -364,4 +563,3 @@ if False:
     options.relative_path_base = r'g:\temp\test-videos'
     validate_batch_results(json_filename,options)
 
-
megadetector/postprocessing/md_to_coco.py

@@ -3,7 +3,8 @@
 md_to_coco.py
 
 "Converts" MegaDetector output files to COCO format. "Converts" is in quotes because
-this is an opinionated transformation that requires a confidence threshold
+this is an opinionated transformation that requires a confidence threshold for most
+applications.
 
 Does not currently handle classification information.
 
@@ -18,6 +19,7 @@ import uuid
 from tqdm import tqdm
 
 from megadetector.visualization import visualization_utils as vis_utils
+from megadetector.utils.path_utils import insert_before_extension
 
 default_confidence_threshold = 0.15
 
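Based on the 'auto' output naming described in the next hunk, `insert_before_extension` presumably inserts a token just ahead of a file's extension. A hedged sketch of the expected behavior:

```python
from megadetector.utils.path_utils import insert_before_extension

# Presumed behavior, inferred from the 'auto' naming convention in the
# docstring below: the token goes just before the .json extension
coco_file = insert_before_extension('md_results.json', 'coco')
print(coco_file)  # expected: md_results.coco.json
```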
@@ -33,24 +35,29 @@ def md_to_coco(md_results_file,
                preserve_nonstandard_metadata=True,
                include_failed_images=True,
                include_annotations_without_bounding_boxes=True,
-               empty_category_id='0'
+               empty_category_id='0',
+               overwrite_behavior='skip',
+               verbose=True,
+               image_filename_to_size=None):
     """
     "Converts" MegaDetector output files to COCO format. "Converts" is in quotes because
-    this is an opinionated transformation that requires a confidence threshold.
+    this is an opinionated transformation that typically requires a confidence threshold.
 
     The default confidence threshold is not 0; the assumption is that by default, you are
     going to treat the resulting COCO file as a set of labels. If you are using the resulting COCO
-    file to evaluate a detector,
-    values will be written to the semi-standard "score"
-    preserve_nonstandard_metadata is True.
+    file to *evaluate* a detector, rather than as a set of labels, you likely want a
+    confidence threshold of 0. Confidence values will be written to the semi-standard "score"
+    field for each image (regardless of the threshold) if preserve_nonstandard_metadata is True.
 
     A folder of images is required if width and height information are not available
     in the MD results file.
 
     Args:
-        md_results_file (str): MD results .json file to convert to COCO
+        md_results_file (str): MD results .json file to convert to COCO
+            format
         coco_output_file (str, optional): COCO .json file to write; if this is None, we'll return
-            a COCO-formatted dict, but won't write it to disk
+            a COCO-formatted dict, but won't write it to disk. If this is 'auto', we'll write to
+            [md_results_file_without_extension].coco.json.
         image_folder (str, optional): folder of images, required if 'width' and 'height' are not
             present in the MD results file (they are not required by the format)
         confidence_threshold (float, optional): boxes below this confidence threshold will not be
@@ -60,8 +67,8 @@ def md_to_coco(md_results_file,
         info (dict, optional): arbitrary metadata to include in an "info" field in the COCO-formatted
             output
         preserve_nonstandard_metadata (bool, optional): if this is True, confidence will be preserved in a
-            non-standard "
-            (e.g. EXIF metadata) will be propagated to COCO output
+            non-standard "score" field in each annotation, and any random fields present in each image's
+            data (e.g. EXIF metadata) will be propagated to COCO output
         include_failed_images (bool, optional): if this is True, failed images will be propagated to COCO output
             with a non-empty "failure" field and no other fields, otherwise failed images will be skipped.
         include_annotations_without_bounding_boxes (bool, optional): if this is True, annotations with
@@ -69,22 +76,62 @@ def md_to_coco(md_results_file,
             images will be represented with no annotations.
         empty_category_id (str, optional): category ID reserved for the 'empty' class, should not be
             attached to any bounding boxes
+        overwrite_behavior (str, optional): determines behavior if the output file exists ('skip' to skip conversion,
+            'overwrite' to overwrite the existing file, 'error' to raise an error, 'skip_if_valid' to skip conversion
+            if the .json file appears to be intact (does not verify COCO formatting, just intact-.json-ness))
+        verbose (bool, optional): enable debug output, including the progress bar
+        image_filename_to_size (dict, optional): dictionary mapping relative image paths to (w,h) tuples. Reading
+            image sizes is the slowest step, so if you need to convert many results files at once for the same
+            set of images, things will be gobs faster if you read the image sizes in advance and pass them in
+            via this argument. The format used here is the same format output by parallel_get_image_sizes().
 
     Returns:
         dict: the COCO data dict, identical to what's written to [coco_output_file] if [coco_output_file]
             is not None.
     """
+
+    assert isinstance(md_results_file,str)
+    assert os.path.isfile(md_results_file), \
+        'MD results file {} does not exist'.format(md_results_file)
 
+    if coco_output_file == 'auto':
+        coco_output_file = insert_before_extension(md_results_file,'coco')
+
+    if coco_output_file is not None:
+        if os.path.isfile(coco_output_file):
+            if overwrite_behavior == 'skip':
+                print('Skipping conversion of {}, output file {} exists'.format(
+                    md_results_file,coco_output_file))
+                return None
+            elif overwrite_behavior == 'skip_if_valid':
+                output_file_is_valid = True
+                try:
+                    with open(coco_output_file,'r') as f:
+                        _ = json.load(f)
+                except Exception:
+                    print('COCO file {} is invalid, proceeding with conversion'.format(
+                        coco_output_file))
+                    output_file_is_valid = False
+                if output_file_is_valid:
+                    print('Skipping conversion of {}, output file {} exists and is valid'.format(
+                        md_results_file,coco_output_file))
+                    return None
+            elif overwrite_behavior == 'overwrite':
+                pass
+            elif overwrite_behavior == 'error':
+                raise ValueError('Output file {} exists'.format(coco_output_file))
+
     with open(md_results_file,'r') as f:
         md_results = json.load(f)
 
     coco_images = []
     coco_annotations = []
 
-    print('Converting MD results to COCO...'
+    print('Converting MD results file {} to COCO file {}...'.format(
+        md_results_file, coco_output_file))
 
     # im = md_results['images'][0]
-    for im in tqdm(md_results['images']):
+    for im in tqdm(md_results['images'],disable=(not verbose)):
 
         coco_im = {}
         coco_im['id'] = im['file']
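A hedged usage sketch of the new arguments (the paths are placeholders, not examples from the package):

```python
from megadetector.postprocessing.md_to_coco import md_to_coco

# 'auto' writes to md_results.coco.json; 'skip_if_valid' re-uses an
# existing output file if it still parses as .json
coco_data = md_to_coco('md_results.json',
                       coco_output_file='auto',
                       image_folder='/path/to/images',  # placeholder
                       confidence_threshold=0.0,  # keep all boxes, e.g. for evaluation
                       overwrite_behavior='skip_if_valid',
                       verbose=True)

# Per the code above, None is returned when conversion was skipped
if coco_data is not None:
    print('Converted {} images'.format(len(coco_data['images'])))
```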
@@ -102,18 +149,36 @@ def md_to_coco(md_results_file,
         h = None
 
         if ('width' not in im) or ('height' not in im) or validate_image_sizes:
-            if image_folder is None:
-                raise ValueError('Must provide an image folder when height/width need to be read from images')
-
-
-
-
+            if (image_folder is None) and (image_filename_to_size is None):
+                raise ValueError('Must provide an image folder or a size mapping when height/width need to be read from images')
+
+            w = None; h = None
+
+            if image_filename_to_size is not None:
+
+                if im['file'] not in image_filename_to_size:
+                    print('Warning: file {} not in image size mapping dict, reading from file'.format(im['file']))
+                else:
+                    image_size = image_filename_to_size[im['file']]
+                    if image_size is not None:
+                        assert len(image_size) == 2
+                        w = image_size[0]
+                        h = image_size[1]
+
+            if w is None:
+
+                image_file_abs = os.path.join(image_folder,im['file'])
+                pil_im = vis_utils.open_image(image_file_abs)
+                w = pil_im.width
+                h = pil_im.height
+
             if validate_image_sizes:
                 if 'width' in im:
                     assert im['width'] == w, 'Width mismatch for image {}'.format(im['file'])
                 if 'height' in im:
                     assert im['height'] == h, 'Height mismatch for image {}'.format(im['file'])
         else:
+
             w = im['width']
             h = im['height']
 
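If you are converting many results files for the same image set, the docstring recommends precomputing the size mapping, ideally with `parallel_get_image_sizes()` (whose exact interface is not shown in this diff). A plain-PIL sketch that produces the same relative-path to (width, height) shape; the folder path is a placeholder:

```python
import os
from PIL import Image

image_folder = '/path/to/images'  # placeholder

# Map each image's path (relative to the folder, with forward slashes as
# in MD results files) to a (width, height) tuple, or None if unreadable
image_filename_to_size = {}
for root, _, files in os.walk(image_folder):
    for fn in files:
        if not fn.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        abs_path = os.path.join(root, fn)
        rel_path = os.path.relpath(abs_path, image_folder).replace('\\', '/')
        try:
            with Image.open(abs_path) as pil_im:
                image_filename_to_size[rel_path] = pil_im.size  # (w, h)
        except Exception:
            image_filename_to_size[rel_path] = None

# md_to_coco('md_results.json', coco_output_file='auto',
#            image_filename_to_size=image_filename_to_size)
```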
@@ -203,9 +268,9 @@ def md_to_coco(md_results_file,
         with open(coco_output_file,'w') as f:
             json.dump(output_dict,f,indent=1)
 
-    return output_dict
+    return output_dict
 
-# def md_to_coco(...)
+# ...def md_to_coco(...)
 
 
 #%% Interactive driver