megadetector-5.0.5-py3-none-any.whl → megadetector-5.0.7-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of megadetector might be problematic.
- api/batch_processing/data_preparation/manage_local_batch.py +302 -263
- api/batch_processing/data_preparation/manage_video_batch.py +81 -2
- api/batch_processing/postprocessing/add_max_conf.py +1 -0
- api/batch_processing/postprocessing/categorize_detections_by_size.py +50 -19
- api/batch_processing/postprocessing/compare_batch_results.py +110 -60
- api/batch_processing/postprocessing/load_api_results.py +56 -70
- api/batch_processing/postprocessing/md_to_coco.py +1 -1
- api/batch_processing/postprocessing/md_to_labelme.py +2 -1
- api/batch_processing/postprocessing/postprocess_batch_results.py +240 -81
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +625 -0
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +71 -23
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +1 -1
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +227 -75
- api/batch_processing/postprocessing/subset_json_detector_output.py +132 -5
- api/batch_processing/postprocessing/top_folders_to_bottom.py +1 -1
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +2 -2
- classification/prepare_classification_script.py +191 -191
- data_management/coco_to_yolo.py +68 -45
- data_management/databases/integrity_check_json_db.py +7 -5
- data_management/generate_crops_from_cct.py +3 -3
- data_management/get_image_sizes.py +8 -6
- data_management/importers/add_timestamps_to_icct.py +79 -0
- data_management/importers/animl_results_to_md_results.py +160 -0
- data_management/importers/auckland_doc_test_to_json.py +4 -4
- data_management/importers/auckland_doc_to_json.py +1 -1
- data_management/importers/awc_to_json.py +5 -5
- data_management/importers/bellevue_to_json.py +5 -5
- data_management/importers/carrizo_shrubfree_2018.py +5 -5
- data_management/importers/carrizo_trail_cam_2017.py +5 -5
- data_management/importers/cct_field_adjustments.py +2 -3
- data_management/importers/channel_islands_to_cct.py +4 -4
- data_management/importers/ena24_to_json.py +5 -5
- data_management/importers/helena_to_cct.py +10 -10
- data_management/importers/idaho-camera-traps.py +12 -12
- data_management/importers/idfg_iwildcam_lila_prep.py +8 -8
- data_management/importers/jb_csv_to_json.py +4 -4
- data_management/importers/missouri_to_json.py +1 -1
- data_management/importers/noaa_seals_2019.py +1 -1
- data_management/importers/pc_to_json.py +5 -5
- data_management/importers/prepare-noaa-fish-data-for-lila.py +4 -4
- data_management/importers/prepare_zsl_imerit.py +5 -5
- data_management/importers/rspb_to_json.py +4 -4
- data_management/importers/save_the_elephants_survey_A.py +5 -5
- data_management/importers/save_the_elephants_survey_B.py +6 -6
- data_management/importers/snapshot_safari_importer.py +9 -9
- data_management/importers/snapshot_serengeti_lila.py +9 -9
- data_management/importers/timelapse_csv_set_to_json.py +5 -7
- data_management/importers/ubc_to_json.py +4 -4
- data_management/importers/umn_to_json.py +4 -4
- data_management/importers/wellington_to_json.py +1 -1
- data_management/importers/wi_to_json.py +2 -2
- data_management/importers/zamba_results_to_md_results.py +181 -0
- data_management/labelme_to_coco.py +35 -7
- data_management/labelme_to_yolo.py +229 -0
- data_management/lila/add_locations_to_island_camera_traps.py +1 -1
- data_management/lila/add_locations_to_nacti.py +147 -0
- data_management/lila/create_lila_blank_set.py +474 -0
- data_management/lila/create_lila_test_set.py +2 -1
- data_management/lila/create_links_to_md_results_files.py +106 -0
- data_management/lila/download_lila_subset.py +46 -21
- data_management/lila/generate_lila_per_image_labels.py +23 -14
- data_management/lila/get_lila_annotation_counts.py +17 -11
- data_management/lila/lila_common.py +14 -11
- data_management/lila/test_lila_metadata_urls.py +116 -0
- data_management/ocr_tools.py +829 -0
- data_management/resize_coco_dataset.py +13 -11
- data_management/yolo_output_to_md_output.py +84 -12
- data_management/yolo_to_coco.py +38 -20
- detection/process_video.py +36 -14
- detection/pytorch_detector.py +23 -8
- detection/run_detector.py +76 -19
- detection/run_detector_batch.py +178 -63
- detection/run_inference_with_yolov5_val.py +326 -57
- detection/run_tiled_inference.py +153 -43
- detection/video_utils.py +34 -8
- md_utils/ct_utils.py +172 -1
- md_utils/md_tests.py +372 -51
- md_utils/path_utils.py +167 -39
- md_utils/process_utils.py +26 -7
- md_utils/split_locations_into_train_val.py +215 -0
- md_utils/string_utils.py +10 -0
- md_utils/url_utils.py +0 -2
- md_utils/write_html_image_list.py +9 -26
- md_visualization/plot_utils.py +12 -8
- md_visualization/visualization_utils.py +106 -7
- md_visualization/visualize_db.py +16 -8
- md_visualization/visualize_detector_output.py +208 -97
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/METADATA +3 -6
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/RECORD +98 -121
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/WHEEL +1 -1
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +1 -1
- taxonomy_mapping/map_new_lila_datasets.py +43 -39
- taxonomy_mapping/prepare_lila_taxonomy_release.py +5 -2
- taxonomy_mapping/preview_lila_taxonomy.py +27 -27
- taxonomy_mapping/species_lookup.py +33 -13
- taxonomy_mapping/taxonomy_csv_checker.py +7 -5
- api/synchronous/api_core/yolov5/detect.py +0 -252
- api/synchronous/api_core/yolov5/export.py +0 -607
- api/synchronous/api_core/yolov5/hubconf.py +0 -146
- api/synchronous/api_core/yolov5/models/__init__.py +0 -0
- api/synchronous/api_core/yolov5/models/common.py +0 -738
- api/synchronous/api_core/yolov5/models/experimental.py +0 -104
- api/synchronous/api_core/yolov5/models/tf.py +0 -574
- api/synchronous/api_core/yolov5/models/yolo.py +0 -338
- api/synchronous/api_core/yolov5/train.py +0 -670
- api/synchronous/api_core/yolov5/utils/__init__.py +0 -36
- api/synchronous/api_core/yolov5/utils/activations.py +0 -103
- api/synchronous/api_core/yolov5/utils/augmentations.py +0 -284
- api/synchronous/api_core/yolov5/utils/autoanchor.py +0 -170
- api/synchronous/api_core/yolov5/utils/autobatch.py +0 -66
- api/synchronous/api_core/yolov5/utils/aws/__init__.py +0 -0
- api/synchronous/api_core/yolov5/utils/aws/resume.py +0 -40
- api/synchronous/api_core/yolov5/utils/benchmarks.py +0 -148
- api/synchronous/api_core/yolov5/utils/callbacks.py +0 -71
- api/synchronous/api_core/yolov5/utils/dataloaders.py +0 -1087
- api/synchronous/api_core/yolov5/utils/downloads.py +0 -178
- api/synchronous/api_core/yolov5/utils/flask_rest_api/example_request.py +0 -19
- api/synchronous/api_core/yolov5/utils/flask_rest_api/restapi.py +0 -46
- api/synchronous/api_core/yolov5/utils/general.py +0 -1018
- api/synchronous/api_core/yolov5/utils/loggers/__init__.py +0 -187
- api/synchronous/api_core/yolov5/utils/loggers/wandb/__init__.py +0 -0
- api/synchronous/api_core/yolov5/utils/loggers/wandb/log_dataset.py +0 -27
- api/synchronous/api_core/yolov5/utils/loggers/wandb/sweep.py +0 -41
- api/synchronous/api_core/yolov5/utils/loggers/wandb/wandb_utils.py +0 -577
- api/synchronous/api_core/yolov5/utils/loss.py +0 -234
- api/synchronous/api_core/yolov5/utils/metrics.py +0 -355
- api/synchronous/api_core/yolov5/utils/plots.py +0 -489
- api/synchronous/api_core/yolov5/utils/torch_utils.py +0 -314
- api/synchronous/api_core/yolov5/val.py +0 -394
- md_utils/matlab_porting_tools.py +0 -97
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/LICENSE +0 -0
- {megadetector-5.0.5.dist-info → megadetector-5.0.7.dist-info}/top_level.txt +0 -0
detection/run_tiled_inference.py
CHANGED
@@ -29,6 +29,7 @@ from tqdm import tqdm
 
 from detection.run_inference_with_yolov5_val import YoloInferenceOptions,run_inference_with_yolo_val
 from detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
+from detection.run_detector import try_download_known_detector
 
 import torch
 from torchvision import ops
@@ -234,7 +235,7 @@ def in_place_nms(md_results, iou_thres=0.45, verbose=True):
     # i_image = 18; im = md_results['images'][i_image]
     for i_image,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):
 
-        if len(im['detections']) == 0:
+        if (im['detections'] is None) or (len(im['detections']) == 0):
             continue
 
         boxes = []
@@ -282,40 +283,52 @@
 
 def _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,overwrite):
     """
-
+    Private function to extract tiles for a single image.
 
-
-
+    Returns a dict with fields 'patches' (see extract_patch_from_image) and 'image_fn'.
+
+    If there is an error, 'patches' will be None and the 'error' field will contain
+    failure details. In that case, some tiles may still be generated.
     """
 
     fn_abs = os.path.join(image_folder,fn_relative)
+    error = None
+    patches = []
 
     image_name = path_utils.clean_filename(fn_relative,char_limit=None,force_lower=True)
 
-
-
-
+    try:
+
+        # Open the image
+        im = vis_utils.open_image(fn_abs)
+        image_size = [im.width,im.height]
+
+        # Generate patch boundaries (a list of [x,y] starting points)
+        patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)
+
+        # Extract patches
+        #
+        # patch_xy = patch_boundaries[0]
+        for patch_xy in patch_boundaries:
 
-
-
-
-
-
-        patches = []
-
-        for patch_xy in patch_boundaries:
+            patch_info = extract_patch_from_image(im,patch_xy,patch_size,
+                                                  patch_folder=tiling_folder,
+                                                  image_name=image_name,
+                                                  overwrite=overwrite)
+            patch_info['source_fn'] = fn_relative
+            patches.append(patch_info)
 
-
-
-
-
-
-
+    except Exception as e:
+
+        s = 'Patch generation error for {}: \n{}'.format(fn_relative,str(e))
+        print(s)
+        # patches = None
+        error = s
 
     image_patch_info = {}
     image_patch_info['patches'] = patches
     image_patch_info['image_fn'] = fn_relative
+    image_patch_info['error'] = error
 
     return image_patch_info
 
@@ -327,7 +340,8 @@ def run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
                         checkpoint_path=None, checkpoint_frequency=-1, remove_tiles=False,
                         yolo_inference_options=None,
                         n_patch_extraction_workers=default_n_patch_extraction_workers,
-                        overwrite_tiles=True
+                        overwrite_tiles=True,
+                        image_list=None):
     """
     Run inference using [model_file] on the images in [image_folder], fist splitting each image up
     into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
@@ -337,7 +351,8 @@
     [tiling_folder] can be any folder, but this function reserves the right to do whatever it wants
     within that folder, including deleting everything, so it's best if it's a new folder.
     Conceptually this folder is temporary, it's just helpful in this case to not actually
-    use the system temp folder, because the tile cache may be very large,
+    use the system temp folder, because the tile cache may be very large, so the caller may
+    want it to be on a specific drive.
 
     tile_overlap is the fraction of overlap between tiles.
 
@@ -346,25 +361,54 @@
     if yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
     this case the model will be run with run_inference_with_yolov5_val. This is typically used to
     run the model with test-time augmentation.
-    """
+    """
 
     ##%% Validate arguments
 
     assert tile_overlap < 1 and tile_overlap >= 0, \
         'Illegal tile overlap value {}'.format(tile_overlap)
 
+    if tile_size_x == -1:
+        tile_size_x = default_tile_size[0]
+    if tile_size_y == -1:
+        tile_size_y = default_tile_size[1]
+
     patch_size = [tile_size_x,tile_size_y]
     patch_stride = (round(patch_size[0]*(1.0-tile_overlap)),
                     round(patch_size[1]*(1.0-tile_overlap)))
 
     os.makedirs(tiling_folder,exist_ok=True)
 
-
     ##%% List files
 
-
-
-
+    if image_list is None:
+
+        print('Enumerating images in {}'.format(image_folder))
+        image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
+        assert len(image_files_relative) > 0, 'No images found in folder {}'.format(image_folder)
+
+    else:
+
+        print('Loading image list from {}'.format(image_list))
+        with open(image_list,'r') as f:
+            image_files_relative = json.load(f)
+        n_absolute_paths = 0
+        for i_fn,fn in enumerate(image_files_relative):
+            if os.path.isabs(fn):
+                n_absolute_paths += 1
+                try:
+                    fn_relative = os.path.relpath(fn,image_folder)
+                except ValueError:
+                    'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
+                        fn,image_folder)
+                    raise
+                assert not fn_relative.startswith('..'), \
+                    'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
+                        fn,image_folder)
+                image_files_relative[i_fn] = fn_relative
+        if (n_absolute_paths != 0) and (n_absolute_paths != len(image_files_relative)):
+            raise ValueError('Illegal file list: converted {} of {} paths to relative'.format(
+                n_absolute_paths,len(image_files_relative)))
 
     ##%% Generate tiles
 
@@ -414,7 +458,7 @@
                                               image_files_relative),total=len(image_files_relative)))
 
     # ...for each image
-
+
     # Write tile information to file; this is just a debugging convenience
     folder_name = path_utils.clean_filename(image_folder,force_lower=True)
     if folder_name.startswith('_'):
@@ -424,9 +468,16 @@
     with open(tile_cache_file,'w') as f:
         json.dump(all_image_patch_info,f,indent=1)
 
+    # Keep track of patches that failed
+    images_with_patch_errors = {}
+    for patch_info in all_image_patch_info:
+        if patch_info['error'] is not None:
+            images_with_patch_errors[patch_info['image_fn']] = patch_info
+
 
     ##%% Run inference on tiles
 
+    # When running with run_inference_with_yolov5_val, we'll pass the folder
     if yolo_inference_options is not None:
 
         patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
@@ -444,11 +495,16 @@
         run_inference_with_yolo_val(yolo_inference_options)
         with open(patch_level_output_file,'r') as f:
             patch_level_results = json.load(f)
-
+
+    # For standard inference, we'll pass a list of files
     else:
 
         patch_file_names = []
         for im in all_image_patch_info:
+            # If there was a patch generation error, don't run inference
+            if patch_info['error'] is not None:
+                assert im['image_fn'] in images_with_patch_errors
+                continue
             for patch in im['patches']:
                 patch_file_names.append(patch['patch_fn'])
 
@@ -481,18 +537,44 @@
     image_fn_relative_to_patch_info = { x['image_fn']:x for x in all_image_patch_info }
 
     # i_image = 0; image_fn_relative = image_files_relative[i_image]
-    for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),
+    for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),
+                                          total=len(image_files_relative)):
 
         image_fn_abs = os.path.join(image_folder,image_fn_relative)
         assert os.path.isfile(image_fn_abs)
 
         output_im = {}
        output_im['file'] = image_fn_relative
-
+
+        # If we had a patch generation error
+        if image_fn_relative in images_with_patch_errors:
 
-
-
-
+            patch_info = image_fn_relative_to_patch_info[image_fn_relative]
+            assert patch_info['error'] is not None
+
+            output_im['detections'] = None
+            output_im['failure'] = 'Patch generation error'
+            output_im['failure_details'] = patch_info['error']
+            image_level_results['images'].append(output_im)
+            continue
+
+        try:
+            pil_im = vis_utils.open_image(image_fn_abs)
+            image_w = pil_im.size[0]
+            image_h = pil_im.size[1]
+
+        # This would be a very unusual situation; we're reading back an image here that we already
+        # (successfully) read once during patch generation.
+        except Exception as e:
+            print('Warning: image read error after successful patch generation for {}:\n{}'.format(
+                image_fn_relative,str(e)))
+            output_im['detections'] = None
+            output_im['failure'] = 'Patch processing error'
+            output_im['failure_details'] = str(e)
+            image_level_results['images'].append(output_im)
+            continue
+
+        output_im['detections'] = []
 
         image_patch_info = image_fn_relative_to_patch_info[image_fn_relative]
         assert image_patch_info['patches'][0]['source_fn'] == image_fn_relative
@@ -520,6 +602,14 @@
             assert patch_w == patch_size[0]
             assert patch_h == patch_size[1]
 
+            # If there was an inference failure on one patch, report the image
+            # as an inference failure
+            if 'detections' not in patch_results:
+                assert 'failure' in patch_results
+                output_im['detections'] = None
+                output_im['failure'] = patch_results['failure']
+                break
+
             # det = patch_results['detections'][0]
             for det in patch_results['detections']:
 
@@ -703,7 +793,7 @@ def main():
         help='Path to detector model file (.pb or .pt)')
     parser.add_argument(
         'image_folder',
-        help='Folder containing images for inference (always recursive)')
+        help='Folder containing images for inference (always recursive, unless image_list is supplied)')
     parser.add_argument(
         'tiling_folder',
         help='Temporary folder where tiles and intermediate results will be stored')
@@ -729,6 +819,16 @@
         type=float,
        default=default_patch_overlap,
         help=('Overlap between tiles [0,1] (defaults to {})'.format(default_patch_overlap)))
+    parser.add_argument(
+        '--overwrite_handling',
+        type=str,
+        default='skip',
+        help=('behavior when the targt file exists (skip/overwrite/error) (default skip)'))
+    parser.add_argument(
+        '--image_list',
+        type=str,
+        default=None,
+        help=('a .json list of relative filenames (or absolute paths contained within image_folder) to include'))
 
     if len(sys.argv[1:]) == 0:
         parser.print_help()
@@ -736,19 +836,29 @@
 
     args = parser.parse_args()
 
-
+    model_file = try_download_known_detector(args.model_file)
+    assert os.path.exists(model_file), \
         'detector file {} does not exist'.format(args.model_file)
-
+
     if os.path.exists(args.output_file):
-
-        args.output_file))
+        if args.overwrite_handling == 'skip':
+            print('Warning: output file {} exists, skipping'.format(args.output_file))
+            return
+        elif args.overwrite_handling == 'overwrite':
+            print('Warning: output file {} exists, overwriting'.format(args.output_file))
+        elif args.overwrite_handling == 'error':
+            raise ValueError('Output file {} exists'.format(args.output_file))
+        else:
+            raise ValueError('Unknown output handling method {}'.format(args.overwrite_handling))
+
 
     remove_tiles = (not args.no_remove_tiles)
 
-    run_tiled_inference(
+    run_tiled_inference(model_file, args.image_folder, args.tiling_folder, args.output_file,
                         tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
                         tile_overlap=args.tile_overlap,
-                        remove_tiles=remove_tiles
+                        remove_tiles=remove_tiles,
+                        image_list=args.image_list)
 
 if __name__ == '__main__':
    main()
detection/video_utils.py
CHANGED
@@ -24,6 +24,8 @@ from md_utils import path_utils
 
 from md_visualization import visualization_utils as vis_utils
 
+default_fourcc = 'h264'
+
 
 #%% Path utilities
 
@@ -49,24 +51,34 @@ def find_video_strings(strings: Iterable[str]) -> List[str]:
     return [s for s in strings if is_video_file(s.lower())]
 
 
-def find_videos(dirname: str, recursive: bool = False
+def find_videos(dirname: str, recursive: bool = False,
+                convert_slashes: bool=False,
+                return_relative_paths: bool=False) -> List[str]:
     """
     Finds all files in a directory that look like video file names. Returns
-    absolute paths.
+    absolute paths unless return_relative_paths is set. Uses the native
+    path separator unless convert_slashes is set.
     """
 
     if recursive:
-
+        files = glob.glob(os.path.join(dirname, '**', '*.*'), recursive=True)
     else:
-
-
+        files = glob.glob(os.path.join(dirname, '*.*'))
+
+    if return_relative_paths:
+        files = [os.path.relpath(fn,dirname) for fn in files]
+
+    if convert_slashes:
+        files = [fn.replace('\\', '/') for fn in files]
+
+    return find_video_strings(files)
 
 
 #%% Function for rendering frames to video and vice-versa
 
 # http://tsaith.github.io/combine-images-into-a-video-with-python-3-and-opencv-3.html
 
-def frames_to_video(images, Fs, output_file_name, codec_spec=
+def frames_to_video(images, Fs, output_file_name, codec_spec=default_fourcc):
     """
     Given a list of image files and a sample rate, concatenate those images into
     a video and write to [output_file_name].
@@ -222,7 +234,8 @@ def _video_to_frames_for_folder(relative_fn,input_folder,output_folder_base,ever
     """
 
     input_fn_absolute = os.path.join(input_folder,relative_fn)
-    assert os.path.isfile(input_fn_absolute)
+    assert os.path.isfile(input_fn_absolute),\
+        'Could not find file {}'.format(input_fn_absolute)
 
     # Create the target output folder
     output_folder_video = os.path.join(output_folder_base,relative_fn)
@@ -244,6 +257,8 @@ def video_folder_to_frames(input_folder:str, output_folder_base:str,
     """
     For every video file in input_folder, create a folder within output_folder_base, and
     render every frame of the video to .jpg in that folder.
+
+    return frame_filenames_by_video,fs_by_video,input_files_full_paths
     """
 
     # Recursively enumerate video files
@@ -298,6 +313,9 @@ class FrameToVideoOptions:
     # zero-indexed
     nth_highest_confidence = 1
 
+    # 'error' or 'skip_with_warning'
+    non_video_behavior = 'error'
+
 
 def frame_results_to_video_results(input_file,output_file,options:FrameToVideoOptions = None):
     """
@@ -327,7 +345,15 @@ def frame_results_to_video_results(input_file,output_file,options:FrameToVideoOp
 
         fn = im['file']
         video_name = os.path.dirname(fn)
-
+        if not is_video_file(video_name):
+            if options.non_video_behavior == 'error':
+                raise ValueError('{} is not a video file'.format(video_name))
+            elif options.non_video_behavior == 'skip_with_warning':
+                print('Warning: {} is not a video file'.format(video_name))
+                continue
+            else:
+                raise ValueError('Unrecognized non-video handling behavior: {}'.format(
+                    options.non_video_behavior))
         video_to_frames[video_name].append(im)
 
     print('Found {} unique videos in {} frame-level results'.format(
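
This file gains keyword arguments on find_videos(), a default_fourcc constant used as the new frames_to_video() codec default, and a non_video_behavior field on FrameToVideoOptions. A short sketch of the new options, assuming the module layout shown in the file list above; the folder and file names are placeholders.

# Sketch of the new video_utils options; paths are placeholders.
from detection.video_utils import (find_videos, FrameToVideoOptions,
                                   frame_results_to_video_results)

# Relative, forward-slash paths regardless of platform
videos = find_videos('/data/videos', recursive=True,
                     return_relative_paths=True, convert_slashes=True)

# Frame-level results whose parent "folder" isn't a video file name are now
# either an error (the default) or skipped with a warning
options = FrameToVideoOptions()
options.non_video_behavior = 'skip_with_warning'
frame_results_to_video_results('/data/frame_results.json',
                               '/data/video_results.json',
                               options=options)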
md_utils/ct_utils.py
CHANGED
@@ -2,7 +2,7 @@
 #
 # ct_utils.py
 #
-#
+# Numeric/geometry utility functions
 #
 ########
 
@@ -260,3 +260,174 @@ def get_max_conf(im):
     if 'detections' in im and im['detections'] is not None and len(im['detections']) > 0:
         max_conf = _get_max_conf_from_detections(im['detections'])
     return max_conf
+
+
+def point_dist(p1,p2):
+    """
+    Distance between two points, represented as length-two tuples.
+    """
+
+    return math.sqrt( ((p1[0]-p2[0])**2) + ((p1[1]-p2[1])**2) )
+
+
+def rect_distance(r1, r2, format='x0y0x1y1'):
+    """
+    Minimum distance between two axis-aligned rectangles, each represented as
+    (x0,y0,x1,y1) by default.
+
+    Can also specify "format" as x0y0wh for MD-style bbox formatting (x0,y0,w,h).
+    """
+
+    assert format in ('x0y0x1y1','x0y0wh')
+
+    if format == 'x0y0wh':
+        # Convert to x0y0x1y1 without modifying the original rectangles
+        r1 = [r1[0],r1[1],r1[0]+r1[2],r1[1]+r1[3]]
+        r2 = [r2[0],r2[1],r2[0]+r2[2],r2[1]+r2[3]]
+
+    # https://stackoverflow.com/a/26178015
+    x1, y1, x1b, y1b = r1
+    x2, y2, x2b, y2b = r2
+    left = x2b < x1
+    right = x1b < x2
+    bottom = y2b < y1
+    top = y1b < y2
+    if top and left:
+        return point_dist((x1, y1b), (x2b, y2))
+    elif left and bottom:
+        return point_dist((x1, y1), (x2b, y2b))
+    elif bottom and right:
+        return point_dist((x1b, y1), (x2, y2b))
+    elif right and top:
+        return point_dist((x1b, y1b), (x2, y2))
+    elif left:
+        return x1 - x2b
+    elif right:
+        return x2 - x1b
+    elif bottom:
+        return y1 - y2b
+    elif top:
+        return y2 - y1b
+    else:
+        return 0.0
+
+
+def list_is_sorted(l):
+    """
+    Returns True if the list [l] is sorted, else False.
+    """
+
+    return all(l[i] <= l[i+1] for i in range(len(l)-1))
+
+
+def split_list_into_fixed_size_chunks(L,n):
+    """
+    Split the list or tuple L into chunks of size n (allowing chunks of size n-1 if necessary,
+    i.e. len(L) does not have to be a multiple of n.
+    """
+
+    return [L[i * n:(i + 1) * n] for i in range((len(L) + n - 1) // n )]
+
+
+def split_list_into_n_chunks(L, n, chunk_strategy='greedy'):
+    """
+    Splits the list or tuple L into n equally-sized chunks (some chunks may be one
+    element smaller than others, i.e. len(L) does not have to be a multiple of n.
+
+    chunk_strategy can be "greedy" (default, if there are k samples per chunk, the first
+    k go into the first chunk) or "balanced" (alternate between chunks when pulling
+    items from the list).
+    """
+
+    if chunk_strategy == 'greedy':
+        k, m = divmod(len(L), n)
+        return list(L[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
+    elif chunk_strategy == 'balanced':
+        chunks = [ [] for _ in range(n) ]
+        for i_item,item in enumerate(L):
+            i_chunk = i_item % n
+            chunks[i_chunk].append(item)
+        return chunks
+    else:
+        raise ValueError('Invalid chunk strategy: {}'.format(chunk_strategy))
+
+
+def sort_dictionary_by_value(d,sort_values=None,reverse=False):
+    """
+    Sorts the dictionary [d] by value. If sort_values is None, uses d.values(),
+    otherwise uses the dictionary sort_values as the sorting criterion.
+    """
+
+    if sort_values is None:
+        d = {k: v for k, v in sorted(d.items(), key=lambda item: item[1], reverse=reverse)}
+    else:
+        d = {k: v for k, v in sorted(d.items(), key=lambda item: sort_values[item[0]], reverse=reverse)}
+    return d
+
+
+def invert_dictionary(d):
+    """
+    Create a new dictionary that maps d.values() to d.keys(). Does not check
+    uniqueness.
+    """
+
+    return {v: k for k, v in d.items()}
+
+
+def image_file_to_camera_folder(image_fn):
+    """
+    Remove common overflow folders (e.g. RECNX101, RECNX102) from paths, i.e. turn:
+
+    a\b\c\RECNX101\image001.jpg
+
+    ...into:
+
+    a\b\c
+
+    Returns the same thing as os.dirname() (i.e., just the folder name) if no overflow folders are
+    present.
+
+    Always converts backslashes to slashes.
+    """
+
+    import re
+
+    # 100RECNX is the overflow folder style for Reconyx cameras
+    # 100EK113 is (for some reason) the overflow folder style for Bushnell cameras
+    # 100_BTCF is the overflow folder style for Browning cameras
+    # 100MEDIA is the overflow folder style used on a number of consumer-grade cameras
+    patterns = ['\/\d+RECNX\/','\/\d+EK\d+\/','\/\d+_BTCF\/','\/\d+MEDIA\/']
+
+    image_fn = image_fn.replace('\\','/')
+    for pat in patterns:
+        image_fn = re.sub(pat,'/',image_fn)
+    camera_folder = os.path.dirname(image_fn)
+
+    return camera_folder
+
+
+#%% Test drivers
+
+if False:
+
+    pass
+
+    #%% Test image_file_to_camera_folder()
+
+    relative_path = 'a/b/c/d/100EK113/blah.jpg'
+    print(image_file_to_camera_folder(relative_path))
+
+    relative_path = 'a/b/c/d/100RECNX/blah.jpg'
+    print(image_file_to_camera_folder(relative_path))
+
+
+    #%% Test a few rectangle distances
+
+    r1 = [0,0,1,1]; r2 = [0,0,1,1]; assert rect_distance(r1,r2)==0
+    r1 = [0,0,1,1]; r2 = [0,0,1,100]; assert rect_distance(r1,r2)==0
+    r1 = [0,0,1,1]; r2 = [1,1,2,2]; assert rect_distance(r1,r2)==0
+    r1 = [0,0,1,1]; r2 = [1.1,0,0,1.1]; assert abs(rect_distance(r1,r2)-.1) < 0.00001
+
+    r1 = [0.4,0.8,10,22]; r2 = [100, 101, 200, 210.4]; assert abs(rect_distance(r1,r2)-119.753) < 0.001
+    r1 = [0.4,0.8,10,22]; r2 = [101, 101, 200, 210.4]; assert abs(rect_distance(r1,r2)-120.507) < 0.001
+    r1 = [0.4,0.8,10,22]; r2 = [120, 120, 200, 210.4]; assert abs(rect_distance(r1,r2)-147.323) < 0.001