megadetector 5.0.6__py3-none-any.whl → 5.0.8__py3-none-any.whl
This diff shows the changes between publicly released versions of this package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of megadetector might be problematic.
- api/batch_processing/data_preparation/manage_local_batch.py +297 -202
- api/batch_processing/data_preparation/manage_video_batch.py +7 -2
- api/batch_processing/postprocessing/add_max_conf.py +1 -0
- api/batch_processing/postprocessing/combine_api_outputs.py +2 -2
- api/batch_processing/postprocessing/compare_batch_results.py +111 -61
- api/batch_processing/postprocessing/convert_output_format.py +24 -6
- api/batch_processing/postprocessing/load_api_results.py +56 -72
- api/batch_processing/postprocessing/md_to_labelme.py +119 -51
- api/batch_processing/postprocessing/merge_detections.py +30 -5
- api/batch_processing/postprocessing/postprocess_batch_results.py +175 -55
- api/batch_processing/postprocessing/remap_detection_categories.py +163 -0
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +628 -0
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +71 -23
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +1 -1
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +224 -76
- api/batch_processing/postprocessing/subset_json_detector_output.py +132 -5
- api/batch_processing/postprocessing/top_folders_to_bottom.py +1 -1
- classification/prepare_classification_script.py +191 -191
- data_management/cct_json_utils.py +7 -2
- data_management/coco_to_labelme.py +263 -0
- data_management/coco_to_yolo.py +72 -48
- data_management/databases/integrity_check_json_db.py +75 -64
- data_management/databases/subset_json_db.py +1 -1
- data_management/generate_crops_from_cct.py +1 -1
- data_management/get_image_sizes.py +44 -26
- data_management/importers/animl_results_to_md_results.py +3 -5
- data_management/importers/noaa_seals_2019.py +2 -2
- data_management/importers/zamba_results_to_md_results.py +2 -2
- data_management/labelme_to_coco.py +264 -127
- data_management/labelme_to_yolo.py +96 -53
- data_management/lila/create_lila_blank_set.py +557 -0
- data_management/lila/create_lila_test_set.py +2 -1
- data_management/lila/create_links_to_md_results_files.py +1 -1
- data_management/lila/download_lila_subset.py +138 -45
- data_management/lila/generate_lila_per_image_labels.py +23 -14
- data_management/lila/get_lila_annotation_counts.py +16 -10
- data_management/lila/lila_common.py +15 -42
- data_management/lila/test_lila_metadata_urls.py +116 -0
- data_management/read_exif.py +65 -16
- data_management/remap_coco_categories.py +84 -0
- data_management/resize_coco_dataset.py +14 -31
- data_management/wi_download_csv_to_coco.py +239 -0
- data_management/yolo_output_to_md_output.py +40 -13
- data_management/yolo_to_coco.py +313 -100
- detection/process_video.py +36 -14
- detection/pytorch_detector.py +1 -1
- detection/run_detector.py +73 -18
- detection/run_detector_batch.py +116 -27
- detection/run_inference_with_yolov5_val.py +135 -27
- detection/run_tiled_inference.py +153 -43
- detection/tf_detector.py +2 -1
- detection/video_utils.py +4 -2
- md_utils/ct_utils.py +101 -6
- md_utils/md_tests.py +264 -17
- md_utils/path_utils.py +326 -47
- md_utils/process_utils.py +26 -7
- md_utils/split_locations_into_train_val.py +215 -0
- md_utils/string_utils.py +10 -0
- md_utils/url_utils.py +66 -3
- md_utils/write_html_image_list.py +12 -2
- md_visualization/visualization_utils.py +380 -74
- md_visualization/visualize_db.py +41 -10
- md_visualization/visualize_detector_output.py +185 -104
- {megadetector-5.0.6.dist-info → megadetector-5.0.8.dist-info}/METADATA +11 -13
- {megadetector-5.0.6.dist-info → megadetector-5.0.8.dist-info}/RECORD +74 -67
- {megadetector-5.0.6.dist-info → megadetector-5.0.8.dist-info}/WHEEL +1 -1
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +1 -1
- taxonomy_mapping/map_new_lila_datasets.py +43 -39
- taxonomy_mapping/prepare_lila_taxonomy_release.py +5 -2
- taxonomy_mapping/preview_lila_taxonomy.py +27 -27
- taxonomy_mapping/species_lookup.py +33 -13
- taxonomy_mapping/taxonomy_csv_checker.py +7 -5
- md_visualization/visualize_megadb.py +0 -183
- {megadetector-5.0.6.dist-info → megadetector-5.0.8.dist-info}/LICENSE +0 -0
- {megadetector-5.0.6.dist-info → megadetector-5.0.8.dist-info}/top_level.txt +0 -0
api/batch_processing/data_preparation/manage_video_batch.py

@@ -249,8 +249,12 @@ if False:
     import os
     import nbformat as nbf
 
-
-    '
+    if os.name == 'nt':
+        git_base = r'c:\git'
+    else:
+        git_base = os.path.expanduser('~/git')
+
+    input_py_file = git_base + '/MegaDetector/api/batch_processing/data_preparation/manage_video_batch.py'
     assert os.path.isfile(input_py_file)
     output_ipynb_file = input_py_file.replace('.py','.ipynb')
 
@@ -320,3 +324,4 @@ while(True):
     write_code_cell(current_cell)
 
     nbf.write(nb,output_ipynb_file)
+
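For context, the cell in this hunk converts manage_video_batch.py into a notebook with nbformat; the module's own write_code_cell() helper is not shown in this diff. A minimal, self-contained sketch of the same idea, using only standard nbformat calls and a hypothetical cell body:

import nbformat as nbf

# Build a one-cell notebook and write it next to the source file
nb = nbf.v4.new_notebook()
nb['cells'].append(nbf.v4.new_code_cell('import os\nprint(os.getcwd())'))
nbf.write(nb, 'manage_video_batch.ipynb')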
api/batch_processing/postprocessing/combine_api_outputs.py

@@ -48,7 +48,7 @@ def combine_api_output_files(input_files: List[str],
     input_files: list of str, paths to JSON detection files
     output_file: optional str, path to write merged JSON
     require_uniqueness: bool, whether to require that the images in
-        each
+        each list of images be unique
     """
 
     def print_if_verbose(s):
@@ -84,7 +84,7 @@ def combine_api_output_dictionaries(input_dicts: Iterable[Mapping[str, Any]],
     input_dicts: list of dicts, each dict is the JSON of the detections
         output file from the Batch Processing API
     require_uniqueness: bool, whether to require that the images in
-        each
+        each input dict be unique
 
     Returns: dict, represents the merged JSON
     """
api/batch_processing/postprocessing/compare_batch_results.py

@@ -4,7 +4,7 @@
 #
 # Compare sets of batch results; typically used to compare:
 #
-# * MegaDetector versions
+# * Results from different MegaDetector versions
 # * Results before/after RDE
 # * Results with/without augmentation
 #
@@ -36,9 +36,6 @@ from md_utils import path_utils
 
 
 #%% Constants and support classes
-
-# We will confirm that this matches what we load from each file
-default_detection_categories = {'1': 'animal', '2': 'person', '3': 'vehicle'}
 
 class PairwiseBatchComparisonOptions:
     """
@@ -52,8 +49,8 @@ class PairwiseBatchComparisonOptions:
     results_description_a = None
     results_description_b = None
 
-    detection_thresholds_a = {'animal':0.15,'person':0.15,'vehicle':0.15}
-    detection_thresholds_b = {'animal':0.15,'person':0.15,'vehicle':0.15}
+    detection_thresholds_a = {'animal':0.15,'person':0.15,'vehicle':0.15,'default':0.15}
+    detection_thresholds_b = {'animal':0.15,'person':0.15,'vehicle':0.15,'default':0.15}
 
     rendering_confidence_threshold_a = 0.1
     rendering_confidence_threshold_b = 0.1
@@ -71,16 +68,26 @@ class BatchComparisonOptions:
     job_name = ''
 
     max_images_per_category = 1000
+    max_images_per_page = None
     colormap_a = ['Red']
     colormap_b = ['RoyalBlue']
 
     # Process-based parallelization isn't supported yet; this must be "True"
     parallelize_rendering_with_threads = True
 
+    # List of filenames to include in the comparison, or None to use all files
+    filenames_to_include = None
+
+    # Compare only detections/non-detections, ignore categories (still renders categories)
+    class_agnostic_comparison = False
+
     target_width = 800
     n_rendering_workers = 20
     random_seed = 0
 
+    # Default to sorting by filename
+    sort_by_confidence = False
+
     error_on_non_matching_lists = True
 
     pairwise_options = []
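The new BatchComparisonOptions attributes above (max_images_per_page, filenames_to_include, class_agnostic_comparison, sort_by_confidence) are plain class attributes, so callers configure a comparison by simple assignment. A rough sketch with illustrative values only; the import path assumes the package layout shipped in these wheels:

from api.batch_processing.postprocessing.compare_batch_results import BatchComparisonOptions

options = BatchComparisonOptions()
options.max_images_per_category = 500       # cap sampled image pairs per comparison category
options.max_images_per_page = 100           # paginate long HTML galleries
options.filenames_to_include = None         # or a list of relative paths to restrict the comparison
options.class_agnostic_comparison = False   # True compares detection vs. non-detection only
options.sort_by_confidence = True           # sort galleries by confidence rather than filename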
@@ -90,7 +97,7 @@ class BatchComparisonOptions:
 
 class PairwiseBatchComparisonResults:
     """
-    The results from a single pairwise comparison
+    The results from a single pairwise comparison.
     """
 
     html_content = None
@@ -98,7 +105,7 @@ class PairwiseBatchComparisonResults:
 
     # A dictionary with keys including:
    #
-    #
+    # common_detections
     # common_non_detections
     # detections_a_only
     # detections_b_only
@@ -207,7 +214,8 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     # in the options object.
     assert options.pairwise_options is None
 
-
+    if options.random_seed is not None:
+        random.seed(options.random_seed)
 
     # Warn the user if some "detections" might not get rendered
     max_classification_threshold_a = max(list(pairwise_options.detection_thresholds_a.values()))
@@ -241,10 +249,20 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     with open(pairwise_options.results_filename_b,'r') as f:
         results_b = json.load(f)
 
-    #
-
-
-
+    # Don't let path separators confuse things
+    for im in results_a['images']:
+        if 'file' in im:
+            im['file'] = im['file'].replace('\\','/')
+    for im in results_b['images']:
+        if 'file' in im:
+            im['file'] = im['file'].replace('\\','/')
+
+    if not options.class_agnostic_comparison:
+        assert results_a['detection_categories'] == results_b['detection_categories'], \
+            "Cannot perform a class-sensitive comparison across results with different categories"
+
+    detection_categories_a = results_a['detection_categories']
+    detection_categories_b = results_b['detection_categories']
 
     if pairwise_options.results_description_a is None:
         if 'detector' not in results_a['info']:
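The path normalization added above matters because images are matched across the two results files by their 'file' field: results produced on Windows may store backslash-separated paths while results produced on Linux store forward slashes, so the filename sets would otherwise fail to line up. A tiny illustration with hypothetical filenames:

fn_windows = 'camera1\\IMG_0001.JPG'   # as a Windows run might record it
fn_linux = 'camera1/IMG_0001.JPG'      # as a Linux run might record it
assert fn_windows != fn_linux
assert fn_windows.replace('\\','/') == fn_linux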
@@ -273,7 +291,7 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     filenames_b_set = set([im['file'] for im in images_b])
 
     if len(images_a) != len(images_b):
-        s = 'set A has {}
+        s = 'set A has {} images, set B has {}'.format(len(images_a),len(images_b))
         if options.error_on_non_matching_lists:
             raise ValueError(s)
         else:
@@ -286,6 +304,10 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     assert len(filenames_a) == len(images_a)
     assert len(filenames_b_set) == len(images_b)
 
+    if options.filenames_to_include is None:
+        filenames_to_compare = filenames_a
+    else:
+        filenames_to_compare = options.filenames_to_include
 
     ##%% Find differences
 
@@ -298,9 +320,9 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     detections_a_only = {}
     detections_b_only = {}
     class_transitions = {}
-
-    # fn =
-    for fn in tqdm(
+
+    # fn = filenames_to_compare[0]
+    for fn in tqdm(filenames_to_compare):
 
         if fn not in filename_to_image_b:
 
@@ -330,14 +352,19 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
 
             category_id = det['category']
 
-            if category_id not in
+            if category_id not in detection_categories_a:
                 print('Warning: unexpected category {} for model A on file {}'.format(category_id,fn))
                 invalid_category_error = True
                 break
 
             conf = det['conf']
 
-            if
+            if detection_categories_a[category_id] in pairwise_options.detection_thresholds_a:
+                conf_thresh = pairwise_options.detection_thresholds_a[detection_categories_a[category_id]]
+            else:
+                conf_thresh = pairwise_options.detection_thresholds_a['default']
+
+            if conf >= conf_thresh:
                 categories_above_threshold_a.add(category_id)
 
         if invalid_category_error:
@@ -349,14 +376,19 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
 
             category_id = det['category']
 
-            if category_id not in
+            if category_id not in detection_categories_b:
                 print('Warning: unexpected category {} for model B on file {}'.format(category_id,fn))
                 invalid_category_error = True
                 break
 
             conf = det['conf']
 
-            if
+            if detection_categories_b[category_id] in pairwise_options.detection_thresholds_b:
+                conf_thresh = pairwise_options.detection_thresholds_b[detection_categories_b[category_id]]
+            else:
+                conf_thresh = pairwise_options.detection_thresholds_a['default']
+
+            if conf >= conf_thresh:
                 categories_above_threshold_b.add(category_id)
 
         if invalid_category_error:
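The lookups above give each detection category its own confidence threshold, with the new 'default' entry acting as a fallback for category names that have no explicit threshold. A standalone sketch of that lookup pattern, with illustrative categories and thresholds rather than values taken from the package:

detection_categories = {'1': 'animal', '2': 'person', '3': 'vehicle'}
detection_thresholds = {'animal': 0.2, 'default': 0.15}

def threshold_for_category(category_id):
    # Fall back to the 'default' threshold when a category has no explicit entry
    category_name = detection_categories[category_id]
    return detection_thresholds.get(category_name, detection_thresholds['default'])

assert threshold_for_category('1') == 0.2    # explicit 'animal' threshold
assert threshold_for_category('2') == 0.15   # 'person' falls back to 'default'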
@@ -368,7 +400,8 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
         detection_b = (len(categories_above_threshold_b) > 0)
 
         if detection_a and detection_b:
-            if categories_above_threshold_a == categories_above_threshold_b
+            if (categories_above_threshold_a == categories_above_threshold_b) or \
+                options.class_agnostic_comparison:
                 common_detections[fn] = im_pair
             else:
                 class_transitions[fn] = im_pair
@@ -383,7 +416,7 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     # ...for each filename
 
     print('Of {} files:\n{} common detections\n{} common non-detections\n{} A only\n{} B only\n{} class transitions'.format(
-        len(
+        len(filenames_to_compare),len(common_detections),
         len(common_non_detections),len(detections_a_only),
         len(detections_b_only),len(class_transitions)))
 
@@ -453,14 +486,16 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
         # Choose detection pairs we're going to render for this category
         image_pairs = categories_to_image_pairs[category]
         image_filenames = list(image_pairs.keys())
-
-
-
-
-
-
-
-
+
+        if options.max_images_per_category is not None and options.max_images_per_category > 0:
+            if len(image_filenames) > options.max_images_per_category:
+                print('Sampling {} of {} image pairs for category {}'.format(
+                    options.max_images_per_category,
+                    len(image_filenames),
+                    category))
+                image_filenames = random.sample(image_filenames,
+                                                options.max_images_per_category)
+            assert len(image_filenames) <= options.max_images_per_category
 
         input_image_absolute_paths = [os.path.join(options.image_folder,fn) for fn in image_filenames]
 
@@ -492,15 +527,34 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
             max_conf_b = maxempty([det['conf'] for det in image_b['detections']])
 
             title = input_path_relative + ' (max conf {:.2f},{:.2f})'.format(max_conf_a,max_conf_b)
+
+            # Only used if sort_by_confidence is True
+            if category == 'common_detections':
+                sort_conf = max(max_conf_a,max_conf_b)
+            elif category == 'common_non_detections':
+                sort_conf = max(max_conf_a,max_conf_b)
+            elif category == 'detections_a_only':
+                sort_conf = max_conf_a
+            elif category == 'detections_b_only':
+                sort_conf = max_conf_b
+            elif category == 'class_transitions':
+                sort_conf = max(max_conf_a,max_conf_b)
+            else:
+                print('Warning: unknown sort category {}'.format(category))
+                sort_conf = max(max_conf_a,max_conf_b)
+
             info = {
                 'filename': fn,
                 'title': title,
                 'textStyle': 'font-family:verdana,arial,calibri;font-size:' + \
                     '80%;text-align:left;margin-top:20;margin-bottom:5',
-                'linkTarget': urllib.parse.quote(input_image_absolute_paths[i_fn])
+                'linkTarget': urllib.parse.quote(input_image_absolute_paths[i_fn]),
+                'sort_conf':sort_conf
             }
             image_info.append(info)
 
+        # ...for each image
+
         category_page_header_string = '<h1>{}</h1>'.format(categories_to_page_titles[category])
         category_page_header_string += '<p style="font-weight:bold;">\n'
         category_page_header_string += 'Model A: {}<br/>\n'.format(
@@ -521,11 +575,18 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
             str(pairwise_options.rendering_confidence_threshold_b))
         category_page_header_string += '</p>\n'
 
+        # Default to sorting by filename
+        if options.sort_by_confidence:
+            image_info = sorted(image_info, key=lambda d: d['sort_conf'], reverse=True)
+        else:
+            image_info = sorted(image_info, key=lambda d: d['filename'])
+
         write_html_image_list(
             category_html_filename,
             images=image_info,
             options={
-                'headerHtml': category_page_header_string
+                'headerHtml': category_page_header_string,
+                'maxFiguresPerHtmlFile': options.max_images_per_page
             })
 
     # ...for each category
@@ -559,7 +620,7 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
     html_output_string += '<br/>'
 
     html_output_string += ('Of {} total files:<br/><br/><div style="margin-left:15px;">{} common detections<br/>{} common non-detections<br/>{} A only<br/>{} B only<br/>{} class transitions</div><br/>'.format(
-        len(
+        len(filenames_to_compare),len(common_detections),
         len(common_non_detections),len(detections_a_only),
         len(detections_b_only),len(class_transitions)))
 
@@ -583,7 +644,7 @@ def pairwise_compare_batch_results(options,output_index,pairwise_options):
 
     return pairwise_results
 
-# ...def
+# ...def pairwise_compare_batch_results()
 
 
 def compare_batch_results(options):
@@ -663,12 +724,9 @@ def n_way_comparison(filenames,options,detection_thresholds=None,rendering_thres
             pairwise_options.rendering_confidence_threshold_a = rendering_thresholds[i]
             pairwise_options.rendering_confidence_threshold_b = rendering_thresholds[j]
 
-            pairwise_options.detection_thresholds_a = {'
-
-
-            pairwise_options.detection_thresholds_b = {'animal':detection_thresholds[j],
-                                                       'person':detection_thresholds[j],
-                                                       'vehicle':detection_thresholds[j]}
+            pairwise_options.detection_thresholds_a = {'default':detection_thresholds[i]}
+            pairwise_options.detection_thresholds_b = {'default':detection_thresholds[j]}
+
             options.pairwise_options.append(pairwise_options)
 
     return compare_batch_results(options)
@@ -679,32 +737,25 @@ def n_way_comparison(filenames,options,detection_thresholds=None,rendering_thres
 #%% Interactive driver
 
 if False:
-
-    #%% Running KGA test
-
-    # CUDA_VISIBLE_DEVICES=0 python run_detector_batch.py ~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt ~/data/KGA/ ~/data/KGA-5a.json --recursive --output_relative_filenames --quiet
-    # CUDA_VISIBLE_DEVICES=1 python run_detector_batch.py ~/models/camera_traps/megadetector/md_v5.0.0/md_v5b.0.0.pt ~/data/KGA/ ~/data/KGA-5b.json --recursive --output_relative_filenames --quiet
-
-    # python run_detector_batch.py ~/models/camera_traps/megadetector/md_v4.1.0/md_v4.1.0.pb ~/data/KGA ~/data/KGA-4.json --recursive --output_relative_filenames --quiet
-
-    # CUDA_VISIBLE_DEVICES=0 python run_detector_batch.py ~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt ~/data/KGA/ ~/data/KGA-5a-pillow-9.2.0.json --recursive --output_relative_filenames --quiet
-
-
+
     #%% Test two-way comparison
 
     options = BatchComparisonOptions()
 
-    options.parallelize_rendering_with_threads =
+    options.parallelize_rendering_with_threads = True
+
+    options.job_name = 'BCT'
+    options.output_folder = r'g:\temp\comparisons'
+    options.image_folder = r'g:\camera_traps\camera_trap_images'
+    options.max_images_per_category = 100
+    options.sort_by_confidence = True
 
-    options.job_name = 'KGA-test'
-    options.output_folder = os.path.expanduser('~/tmp/md-comparison-test')
-    options.image_folder = os.path.expanduser('~/data/KGA')
-
     options.pairwise_options = []
 
-
-
-    os.path.
+    results_base = os.path.expanduser('~/postprocessing/bellevue-camera-traps')
+    filenames = [
+        os.path.join(results_base,r'bellevue-camera-traps-2023-12-05-v5a.0.0\combined_api_outputs\bellevue-camera-traps-2023-12-05-v5a.0.0_detections.json'),
+        os.path.join(results_base,r'bellevue-camera-traps-2023-12-05-aug-v5a.0.0\combined_api_outputs\bellevue-camera-traps-2023-12-05-aug-v5a.0.0_detections.json')
     ]
 
     detection_thresholds = [0.15,0.15]
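The driver cell above prepares two results files and a pair of thresholds; the hunk headers show that this module also provides n_way_comparison(filenames, options, detection_thresholds=None, ...), which builds one pairwise comparison per pair of files. The call that presumably follows is not visible in this diff; a hedged sketch of what it would look like:

# Assumes the filenames, options, and detection_thresholds defined in the driver cell above
results = n_way_comparison(filenames, options, detection_thresholds=detection_thresholds)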
@@ -835,4 +886,3 @@ def main():
 if __name__ == '__main__':
 
     main()
-
api/batch_processing/postprocessing/convert_output_format.py

@@ -4,8 +4,8 @@
 #
 # Converts between file formats output by our batch processing API. Currently
 # supports json <--> csv conversion, but this should be the landing place for any
-# conversion - including between
-# future.
+# conversion - including between hypothetical alternative .json versions - that we support
+# in the future.
 #
 ########
 
@@ -30,10 +30,13 @@ CONF_DIGITS = 3
 #%% Conversion functions
 
 def convert_json_to_csv(input_path,output_path=None,min_confidence=None,
-                        omit_bounding_boxes=False,output_encoding=None
+                        omit_bounding_boxes=False,output_encoding=None,
+                        overwrite=True):
     """
     Convert .json to .csv
 
+    If output_path is None, will convert x.json to x.csv.
+
     TODO: this function should obviously be using Pandas or some other sensible structured
     representation of tabular data. Even a list of dicts. This implementation is quite
     brittle and depends on adding fields to every row in exactly the right order.
@@ -42,6 +45,10 @@ def convert_json_to_csv(input_path,output_path=None,min_confidence=None,
     if output_path is None:
        output_path = os.path.splitext(input_path)[0]+'.csv'
 
+    if os.path.isfile(output_path) and (not overwrite):
+        print('File {} exists, skipping json --> csv conversion'.format(output_path))
+        return
+
     print('Loading json results from {}...'.format(input_path))
     json_output = json.load(open(input_path))
 
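convert_json_to_csv() (and, later in this diff, convert_csv_to_json()) gains an overwrite parameter that defaults to True, so existing callers keep the old behavior; passing overwrite=False makes the call a no-op when the output file already exists. A usage sketch with hypothetical paths; the import path assumes the package layout shipped in these wheels:

from api.batch_processing.postprocessing.convert_output_format import convert_json_to_csv

# Writes results/detections.csv next to the .json file, skipping the work if it already exists
convert_json_to_csv('results/detections.json', overwrite=False)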
@@ -73,7 +80,7 @@ def convert_json_to_csv(input_path,output_path=None,min_confidence=None,
|
|
|
73
80
|
|
|
74
81
|
n_classification_categories = len(classification_category_ids)
|
|
75
82
|
|
|
76
|
-
# There are several fields for which we add columns
|
|
83
|
+
# There are several .json fields for which we add .csv columns; other random bespoke fields
|
|
77
84
|
# will be ignored.
|
|
78
85
|
optional_fields = ['width','height','datetime','exif_metadata']
|
|
79
86
|
optional_fields_present = set()
|
|
@@ -104,7 +111,7 @@ def convert_json_to_csv(input_path,output_path=None,min_confidence=None,
|
|
|
104
111
|
if 'failure' in im and im['failure'] is not None:
|
|
105
112
|
row = [image_id, 'failure', im['failure']]
|
|
106
113
|
rows.append(row)
|
|
107
|
-
print('Skipping failed image {} ({})'.format(im['file'],im['failure']))
|
|
114
|
+
# print('Skipping failed image {} ({})'.format(im['file'],im['failure']))
|
|
108
115
|
continue
|
|
109
116
|
|
|
110
117
|
max_conf = ct_utils.get_max_conf(im)
|
|
@@ -193,12 +200,21 @@ def convert_json_to_csv(input_path,output_path=None,min_confidence=None,
|
|
|
193
200
|
writer.writerow(header)
|
|
194
201
|
writer.writerows(rows)
|
|
195
202
|
|
|
203
|
+
# ...def convert_json_to_csv(...)
|
|
204
|
+
|
|
196
205
|
|
|
197
|
-
def convert_csv_to_json(input_path,output_path=None):
|
|
206
|
+
def convert_csv_to_json(input_path,output_path=None,overwrite=True):
|
|
207
|
+
"""
|
|
208
|
+
Convert .csv to .json. If output_path is None, will convert x.csv to x.json.
|
|
209
|
+
"""
|
|
198
210
|
|
|
199
211
|
if output_path is None:
|
|
200
212
|
output_path = os.path.splitext(input_path)[0]+'.json'
|
|
201
213
|
|
|
214
|
+
if os.path.isfile(output_path) and (not overwrite):
|
|
215
|
+
print('File {} exists, skipping csv --> json conversion'.format(output_path))
|
|
216
|
+
return
|
|
217
|
+
|
|
202
218
|
# Format spec:
|
|
203
219
|
#
|
|
204
220
|
# https://github.com/agentmorris/MegaDetector/tree/master/api/batch_processing
|
|
@@ -259,6 +275,8 @@ def convert_csv_to_json(input_path,output_path=None):
|
|
|
259
275
|
json_out['images'] = images
|
|
260
276
|
|
|
261
277
|
json.dump(json_out,open(output_path,'w'),indent=1)
|
|
278
|
+
|
|
279
|
+
# ...def convert_csv_to_json(...)
|
|
262
280
|
|
|
263
281
|
|
|
264
282
|
#%% Interactive driver
|