megadetector-5.0.14-py3-none-any.whl → megadetector-5.0.16-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (29)
  1. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +387 -0
  2. megadetector/data_management/lila/generate_lila_per_image_labels.py +3 -3
  3. megadetector/data_management/lila/test_lila_metadata_urls.py +2 -2
  4. megadetector/data_management/remove_exif.py +61 -36
  5. megadetector/data_management/yolo_to_coco.py +25 -6
  6. megadetector/detection/process_video.py +261 -128
  7. megadetector/detection/pytorch_detector.py +13 -11
  8. megadetector/detection/run_detector.py +9 -2
  9. megadetector/detection/run_detector_batch.py +14 -2
  10. megadetector/detection/run_inference_with_yolov5_val.py +58 -10
  11. megadetector/detection/tf_detector.py +8 -2
  12. megadetector/detection/video_utils.py +204 -16
  13. megadetector/postprocessing/md_to_coco.py +31 -9
  14. megadetector/postprocessing/postprocess_batch_results.py +19 -3
  15. megadetector/postprocessing/subset_json_detector_output.py +22 -12
  16. megadetector/taxonomy_mapping/map_new_lila_datasets.py +3 -3
  17. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +2 -1
  18. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +1 -1
  19. megadetector/taxonomy_mapping/simple_image_download.py +5 -0
  20. megadetector/taxonomy_mapping/species_lookup.py +1 -1
  21. megadetector/utils/md_tests.py +362 -100
  22. megadetector/utils/path_utils.py +2 -2
  23. megadetector/utils/url_utils.py +7 -1
  24. megadetector/visualization/visualize_db.py +16 -0
  25. {megadetector-5.0.14.dist-info → megadetector-5.0.16.dist-info}/LICENSE +0 -0
  26. {megadetector-5.0.14.dist-info → megadetector-5.0.16.dist-info}/METADATA +2 -2
  27. {megadetector-5.0.14.dist-info → megadetector-5.0.16.dist-info}/RECORD +29 -28
  28. {megadetector-5.0.14.dist-info → megadetector-5.0.16.dist-info}/WHEEL +1 -1
  29. {megadetector-5.0.14.dist-info → megadetector-5.0.16.dist-info}/top_level.txt +0 -0
megadetector/utils/md_tests.py

@@ -10,6 +10,8 @@ This module should not depend on anything else in this repo outside of the
 tests themselves, even if it means some duplicated code (e.g. for downloading files),
 since much of what it tries to test is, e.g., imports.
 
+"Correctness" is determined by agreement with a file that this script fetches from lila.science.
+
 """
 
 #%% Imports and constants
@@ -27,6 +29,10 @@ import subprocess
 import argparse
 import inspect
 
+#: IoU threshold used to determine whether boxes in two detection files likely correspond
+#: to the same box.
+iou_threshold_for_file_comparison = 0.9
+
 
 #%% Classes
 
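The comparison helpers added later in this diff use this threshold to decide whether two boxes refer to the same detection, via get_iou() from megadetector.utils.ct_utils. As a rough, self-contained sketch (not code from the package), IoU over MD-style boxes, which are [x_min, y_min, width, height] in normalized image coordinates, can be computed like this:

def iou_sketch(box_a, box_b):
    # Convert [x, y, w, h] to corner coordinates
    ax0, ay0, ax1, ay1 = box_a[0], box_a[1], box_a[0] + box_a[2], box_a[1] + box_a[3]
    bx0, by0, bx1, by1 = box_b[0], box_b[1], box_b[0] + box_b[2], box_b[1] + box_b[3]
    # Intersection rectangle (clamped at zero width/height)
    inter = max(0.0, min(ax1, bx1) - max(ax0, bx0)) * max(0.0, min(ay1, by1) - max(ay0, by0))
    # Union = area A + area B - intersection
    union = (ax1 - ax0) * (ay1 - ay0) + (bx1 - bx0) * (by1 - by0) - inter
    return inter / union if union > 0 else 0.0

print(iou_sketch([0.10, 0.10, 0.40, 0.30], [0.10, 0.10, 0.41, 0.30]))  # ~0.976, clears the 0.9 threshold
print(iou_sketch([0.10, 0.10, 0.40, 0.30], [0.20, 0.10, 0.40, 0.30]))  # 0.6, treated as a different box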
@@ -108,7 +114,9 @@ class MDTestOptions:
 
 def get_expected_results_filename(gpu_is_available,
                                   model_string='mdv5a',
-                                  test_type='images'):
+                                  test_type='image',
+                                  augment=False,
+                                  options=None):
     """
     Expected results vary just a little across inference environments, particularly
     between PT 1.x and 2.x, so when making sure things are working acceptably, we
@@ -150,7 +158,21 @@
     except Exception:
         pass
 
-    return '{}-{}-results-{}-{}.json'.format(model_string,test_type,hw_string,pt_string)
+    aug_string = ''
+    if augment:
+        aug_string = 'augment-'
+
+    fn = '{}-{}{}-{}-{}.json'.format(model_string,aug_string,test_type,hw_string,pt_string)
+
+    from megadetector.utils.path_utils import insert_before_extension
+
+    if test_type == 'video':
+        fn = insert_before_extension(fn,'frames')
+
+    if options is not None and options.scratch_dir is not None:
+        fn = os.path.join(options.scratch_dir,fn)
+
+    return fn
 
 
 def download_test_data(options=None):
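With this change, expected-results filenames gain an optional 'augment-' component and an 'image'/'video' test type, and video filenames get a 'frames' suffix before the extension. A minimal illustration of the naming pattern (plain string formatting, not a call into the package; the 'cpu'/'gpu' and 'pt1.10.1'/'pt2.x' values are taken from the expected-results filenames that appear in the scrap section later in this diff):

model_string, test_type, hw_string, pt_string = 'mdv5a', 'image', 'gpu', 'pt2.x'
for augment in (False, True):
    aug_string = 'augment-' if augment else ''
    print('{}-{}{}-{}-{}.json'.format(model_string, aug_string, test_type, hw_string, pt_string))
# mdv5a-image-gpu-pt2.x.json
# mdv5a-augment-image-gpu-pt2.x.json

For test_type == 'video', insert_before_extension() turns the same name into, e.g., mdv5a-video-gpu-pt2.x.frames.json.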
@@ -220,7 +242,12 @@
 
     # ...for each file in the zipfile
 
-    # Warn if file are present that aren't expected
+    try:
+        zipf.close()
+    except Exception as e:
+        print('Warning: error closing zipfile:\n{}'.format(str(e)))
+
+    # Warn if files are present that aren't expected
     test_files = glob.glob(os.path.join(scratch_dir,'**/*'), recursive=True)
     test_files = [os.path.relpath(fn,scratch_dir).replace('\\','/') for fn in test_files]
     test_files_set = set(test_files)
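The explicit close wrapped in try/except keeps the existing extraction loop intact; an alternative pattern (a sketch under the assumption that the archive is only read here, and not the code in this release) is to open the zipfile with a context manager so it is closed even if extraction raises:

import zipfile

zip_fn = 'md-test-package.zip'   # hypothetical local path to the downloaded test package
scratch_dir = 'md-test-package'  # hypothetical extraction target

with zipfile.ZipFile(zip_fn, 'r') as zipf:
    # The with-statement closes the archive cleanly, even if extraction fails
    zipf.extractall(path=scratch_dir)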
@@ -336,6 +363,192 @@
 # ...def output_files_are_identical(...)
 
 
+def compare_detection_lists(detections_a,detections_b,options,bidirectional_comparison=True):
+    """
+    Compare two lists of MD-formatted detections, matching detections across lists using IoU
+    criteria. Generally used to compare detections for the same image when two sets of results
+    are expected to be more or less the same.
+
+    Args:
+        detections_a (list): the first set of detection dicts
+        detections_b (list): the second set of detection dicts
+        options (MDTestOptions): options that determine tolerable differences between files
+        bidirectional_comparison (bool, optional): reverse the arguments and make a recursive
+            call.
+
+    Returns:
+        dict: a dictionary with keys 'max_conf_error' and 'max_coord_error'.
+    """
+    from megadetector.utils.ct_utils import get_iou
+
+    max_conf_error = 0
+    max_coord_error = 0
+
+    # i_det_a = 0
+    for i_det_a in range(0,len(detections_a)):
+
+        det_a = detections_a[i_det_a]
+
+        # Don't process very-low-confidence boxes
+        if det_a['conf'] < options.max_conf_error:
+            continue
+
+        matching_det_b = None
+        highest_iou = -1
+
+        # Find the closest match in the detections_b list
+
+        # i_det_b = 0
+        for i_det_b in range(0,len(detections_b)):
+
+            b_det = detections_b[i_det_b]
+
+            if b_det['category'] != det_a['category']:
+                continue
+
+            iou = get_iou(det_a['bbox'],b_det['bbox'])
+
+            # Is this likely the same detection as det_a?
+            if iou >= iou_threshold_for_file_comparison and iou > highest_iou:
+                matching_det_b = b_det
+                highest_iou = iou
+
+        # If there are no detections in this category in detections_b
+        if matching_det_b is None:
+            if det_a['conf'] > max_conf_error:
+                max_conf_error = det_a['conf']
+                # max_coord_error = 1.0
+            continue
+
+        assert det_a['category'] == matching_det_b['category']
+        conf_err = abs(det_a['conf'] - matching_det_b['conf'])
+        coord_differences = []
+        for i_coord in range(0,4):
+            coord_differences.append(abs(det_a['bbox'][i_coord]-\
+                                         matching_det_b['bbox'][i_coord]))
+        coord_err = max(coord_differences)
+
+        if conf_err >= max_conf_error:
+            max_conf_error = conf_err
+
+        if coord_err >= max_coord_error:
+            max_coord_error = coord_err
+
+    # ...for each detection in detections_a
+
+    if bidirectional_comparison:
+
+        reverse_comparison_results = compare_detection_lists(detections_b,
+                                                             detections_a,
+                                                             options,
+                                                             bidirectional_comparison=False)
+
+        if reverse_comparison_results['max_conf_error'] > max_conf_error:
+            max_conf_error = reverse_comparison_results['max_conf_error']
+        if reverse_comparison_results['max_coord_error'] > max_coord_error:
+            max_coord_error = reverse_comparison_results['max_coord_error']
+
+    list_comparison_results = {}
+    list_comparison_results['max_coord_error'] = max_coord_error
+    list_comparison_results['max_conf_error'] = max_conf_error
+
+    return list_comparison_results
+
+# ...def compare_detection_lists(...)
+
+
+def compare_results(inference_output_file,expected_results_file,options):
+    """
+    Compare two MD-formatted output files that should be nearly identical, allowing small
+    changes (e.g. rounding differences). Generally used to compare a new results file to
+    an expected results file.
+
+    Args:
+        inference_output_file (str): the first results file to compare
+        expected_results_file (str): the second results file to compare
+        options (MDTestOptions): options that determine tolerable differences between files
+
+    Returns:
+        dict: dictionary with keys 'max_coord_error' and 'max_conf_error'
+    """
+
+    # Read results
+    with open(inference_output_file,'r') as f:
+        results_from_file = json.load(f) # noqa
+
+    with open(os.path.join(options.scratch_dir,expected_results_file),'r') as f:
+        expected_results = json.load(f)
+
+    filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
+    filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
+
+    assert len(filename_to_results) == len(filename_to_results_expected), \
+        'Error: expected {} files in results, found {}'.format(
+            len(filename_to_results_expected),
+            len(filename_to_results))
+
+    max_conf_error = 0
+    max_conf_error_file = None
+
+    max_coord_error = 0
+    max_coord_error_file = None
+
+    # fn = next(iter(filename_to_results.keys()))
+    for fn in filename_to_results.keys():
+
+        actual_image_results = filename_to_results[fn]
+        expected_image_results = filename_to_results_expected[fn]
+
+        if 'failure' in actual_image_results:
+            assert 'failure' in expected_image_results and \
+                'detections' not in actual_image_results and \
+                'detections' not in expected_image_results
+            continue
+        assert 'failure' not in expected_image_results
+
+        actual_detections = actual_image_results['detections']
+        expected_detections = expected_image_results['detections']
+
+        comparison_results_this_image = compare_detection_lists(
+            detections_a=actual_detections,
+            detections_b=expected_detections,
+            options=options,
+            bidirectional_comparison=True)
+
+        if comparison_results_this_image['max_conf_error'] > max_conf_error:
+            max_conf_error = comparison_results_this_image['max_conf_error']
+            max_conf_error_file = fn
+
+        if comparison_results_this_image['max_coord_error'] > max_coord_error:
+            max_coord_error = comparison_results_this_image['max_coord_error']
+            max_coord_error_file = fn
+
+    # ...for each image
+
+    if not options.warning_mode:
+
+        assert max_conf_error <= options.max_conf_error, \
+            'Confidence error {} is greater than allowable ({})'.format(
+                max_conf_error,options.max_conf_error)
+
+        assert max_coord_error <= options.max_coord_error, \
+            'Coord error {} is greater than allowable ({})'.format(
+                max_coord_error,options.max_coord_error)
+
+    print('Max conf error: {} (file {})'.format(
+        max_conf_error,max_conf_error_file))
+    print('Max coord error: {} (file {})'.format(
+        max_coord_error,max_coord_error_file))
+
+    comparison_results = {}
+    comparison_results['max_conf_error'] = max_conf_error
+    comparison_results['max_coord_error'] = max_coord_error
+
+    return comparison_results
+
+# ...def compare_results(...)
+
+
 def _args_to_object(args, obj):
     """
     Copies all fields from a Namespace (typically the output from parse_args) to an
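A hypothetical usage sketch for the helper added above (not part of the test suite): comparing two toy MD-format detection lists. It assumes megadetector 5.0.16 is installed and that megadetector.utils.md_tests imports cleanly outside the test environment; compare_detection_lists() only reads max_conf_error from the options object.

from types import SimpleNamespace
from megadetector.utils.md_tests import compare_detection_lists

options = SimpleNamespace(max_conf_error=0.005)

detections_a = [{'category': '1', 'conf': 0.92, 'bbox': [0.10, 0.20, 0.30, 0.40]}]
detections_b = [{'category': '1', 'conf': 0.91, 'bbox': [0.10, 0.20, 0.31, 0.40]}]

# The boxes overlap with IoU ~0.97, so they are treated as the same detection;
# the result should be roughly {'max_coord_error': 0.01, 'max_conf_error': 0.01}
print(compare_detection_lists(detections_a, detections_b, options))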
@@ -440,6 +653,8 @@
 
     ## Run inference on an image
 
+    print('\n** Running MD on a single image **\n')
+
     from megadetector.detection import run_detector
     from megadetector.visualization import visualization_utils as vis_utils
     image_fn = os.path.join(options.scratch_dir,options.test_images[0])
@@ -449,6 +664,8 @@
 
 
     ## Run inference on a folder
+
+    print('\n** Running MD on a folder of images **\n')
 
     from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
     from megadetector.utils import path_utils
@@ -458,95 +675,41 @@
     inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
     image_file_names = path_utils.find_images(image_folder,recursive=True)
     results = load_and_run_detector_batch(options.default_model, image_file_names, quiet=True)
-    _ = write_results_to_file(results,inference_output_file,
-                              relative_path_base=image_folder,detector_file=options.default_model)
+    _ = write_results_to_file(results,
+                              inference_output_file,
+                              relative_path_base=image_folder,
+                              detector_file=options.default_model)
 
-    # Read results
-    with open(inference_output_file,'r') as f:
-        results_from_file = json.load(f) # noqa
 
-
     ## Verify results
-
-    # Read expected results
-    expected_results_filename = get_expected_results_filename(is_gpu_available(verbose=False))
 
-    with open(os.path.join(options.scratch_dir,expected_results_filename),'r') as f:
-        expected_results = json.load(f)
-
-    filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
-    filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
+    expected_results_file = get_expected_results_filename(is_gpu_available(verbose=False),
+                                                          options=options)
+    compare_results(inference_output_file,expected_results_file,options)
+
+    # Make note of this filename, we will use it again later
+    inference_output_file_standard_inference = inference_output_file
 
-    assert len(filename_to_results) == len(filename_to_results_expected), \
-        'Error: expected {} files in results, found {}'.format(
-            len(filename_to_results_expected),
-            len(filename_to_results))
 
-    max_coord_error = 0
-    max_conf_error = 0
+    ## Run and verify again with augmentation enabled
 
-    # fn = next(iter(filename_to_results.keys()))
-    for fn in filename_to_results.keys():
-
-        actual_image_results = filename_to_results[fn]
-        expected_image_results = filename_to_results_expected[fn]
-
-        if 'failure' in actual_image_results:
-            assert 'failure' in expected_image_results and \
-                'detections' not in actual_image_results and \
-                'detections' not in expected_image_results
-            continue
-        assert 'failure' not in expected_image_results
-
-        actual_detections = actual_image_results['detections']
-        expected_detections = expected_image_results['detections']
-
-        s = 'expected {} detections for file {}, found {}'.format(
-            len(expected_detections),fn,len(actual_detections))
-        s += '\nExpected results file: {}\nActual results file: {}'.format(
-            expected_results_filename,inference_output_file)
-
-        if options.warning_mode:
-            if len(actual_detections) != len(expected_detections):
-                print('Warning: {}'.format(s))
-            continue
-        assert len(actual_detections) == len(expected_detections), \
-            'Error: {}'.format(s)
-
-        # i_det = 0
-        for i_det in range(0,len(actual_detections)):
-            actual_det = actual_detections[i_det]
-            expected_det = expected_detections[i_det]
-            assert actual_det['category'] == expected_det['category']
-            conf_err = abs(actual_det['conf'] - expected_det['conf'])
-            coord_differences = []
-            for i_coord in range(0,4):
-                coord_differences.append(abs(actual_det['bbox'][i_coord]-expected_det['bbox'][i_coord]))
-            coord_err = max(coord_differences)
-
-            if conf_err > max_conf_error:
-                max_conf_error = conf_err
-            if coord_err > max_coord_error:
-                max_coord_error = coord_err
-
-        # ...for each detection
-
-    # ...for each image
+    print('\n** Running MD on images with augmentation **\n')
 
-    if not options.warning_mode:
-
-        assert max_conf_error <= options.max_conf_error, \
-            'Confidence error {} is greater than allowable ({})'.format(
-                max_conf_error,options.max_conf_error)
-
-        assert max_coord_error <= options.max_coord_error, \
-            'Coord error {} is greater than allowable ({})'.format(
-                max_coord_error,options.max_coord_error)
-
-        print('Max conf error: {}'.format(max_conf_error))
-        print('Max coord error: {}'.format(max_coord_error))
-
+    from megadetector.utils.path_utils import insert_before_extension
+
+    inference_output_file_augmented = insert_before_extension(inference_output_file,'augmented')
+    results = load_and_run_detector_batch(options.default_model, image_file_names, quiet=True, augment=True)
+    _ = write_results_to_file(results,
+                              inference_output_file_augmented,
+                              relative_path_base=image_folder,
+                              detector_file=options.default_model)
 
+    expected_results_file_augmented = \
+        get_expected_results_filename(is_gpu_available(verbose=False),
+                                      augment=True,options=options)
+    compare_results(inference_output_file_augmented,expected_results_file_augmented,options)
+
+
     ## Postprocess results
 
     from megadetector.postprocessing.postprocess_batch_results import \
@@ -589,6 +752,7 @@
 
     from megadetector.detection.run_inference_with_yolov5_val import \
         YoloInferenceOptions, run_inference_with_yolo_val
+    from megadetector.utils.path_utils import insert_before_extension
 
     inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
 
@@ -599,12 +763,23 @@
     yolo_inference_options.model_filename = options.default_model
     yolo_inference_options.augment = False
     yolo_inference_options.overwrite_handling = 'overwrite'
+    from megadetector.detection.run_detector import DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
+    yolo_inference_options.conf_thres = DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
 
     run_inference_with_yolo_val(yolo_inference_options)
 
+    ## Confirm this matches the standard inference path
+
+    if False:
+        # TODO: compare_results() isn't quite ready for this yet
+        compare_results(inference_output_file=inference_output_file_yolo_val,
+                        expected_results_file=inference_output_file_standard_inference,
+                        options=options)
+
+
+
     # Run again, without symlinks this time
-    from megadetector.utils.path_utils import insert_before_extension
     inference_output_file_yolo_val_no_links = insert_before_extension(inference_output_file_yolo_val,
                                                                       'no-links')
     yolo_inference_options.output_file = inference_output_file_yolo_val_no_links
  yolo_inference_options.output_file = inference_output_file_yolo_val_no_links
@@ -647,7 +822,10 @@
 
         ## Video test (single video)
 
+        print('\n** Running MD on a single video **\n')
+
         from megadetector.detection.process_video import ProcessVideoOptions, process_video
+        from megadetector.utils.path_utils import insert_before_extension
 
         video_options = ProcessVideoOptions()
         video_options.model_file = options.default_model
@@ -668,7 +846,7 @@
         video_options.fourcc = options.video_fourcc
         # video_options.rendering_confidence_threshold = None
         # video_options.json_confidence_threshold = 0.005
-        video_options.frame_sample = 5
+        video_options.frame_sample = 10
         video_options.n_cores = 5
         # video_options.debug_max_frames = -1
         # video_options.class_mapping_filename = None
@@ -683,38 +861,83 @@
 
         ## Video test (folder)
 
+        print('\n** Running MD on a folder of videos **\n')
+
         from megadetector.detection.process_video import ProcessVideoOptions, process_video_folder
+        from megadetector.utils.path_utils import insert_before_extension
 
         video_options = ProcessVideoOptions()
         video_options.model_file = options.default_model
         video_options.input_video_file = os.path.join(options.scratch_dir,
                                                       os.path.dirname(options.test_videos[0]))
         video_options.output_json_file = os.path.join(options.scratch_dir,'video_folder_output.json')
-        # video_options.output_video_file = None
+        video_options.output_video_file = None
         video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
         video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
         video_options.render_output_video = False
-        # video_options.keep_rendered_frames = False
-        # video_options.keep_rendered_frames = False
+        video_options.keep_rendered_frames = False
+        video_options.keep_rendered_frames = False
        video_options.force_extracted_frame_folder_deletion = True
         video_options.force_rendered_frame_folder_deletion = True
-        # video_options.reuse_results_if_available = False
-        # video_options.reuse_frames_if_available = False
+        video_options.reuse_results_if_available = False
+        video_options.reuse_frames_if_available = False
         video_options.recursive = True
         video_options.verbose = True
         video_options.fourcc = options.video_fourcc
         # video_options.rendering_confidence_threshold = None
         # video_options.json_confidence_threshold = 0.005
-        video_options.frame_sample = 5
-        video_options.n_cores = 5
+        video_options.frame_sample = 10
+        video_options.n_cores = 5
+
+        # Force frame extraction to disk, since that's how we generated our expected results file
+        video_options.force_on_disk_frame_extraction = True
         # video_options.debug_max_frames = -1
         # video_options.class_mapping_filename = None
 
+        # Use quality == None, because we can't control whether YOLOv5 has patched cm2.imread,
+        # and therefore can't rely on using the quality parameter
+        video_options.quality = None
+        video_options.max_width = None
+
         _ = process_video_folder(video_options)
 
         assert os.path.isfile(video_options.output_json_file), \
             'Python video test failed to render output .json file'
+
+        frame_output_file = insert_before_extension(video_options.output_json_file,'frames')
+        assert os.path.isfile(frame_output_file)
+
+
+        ## Verify results
+
+        expected_results_file = \
+            get_expected_results_filename(is_gpu_available(verbose=False),test_type='video',options=options)
+        assert os.path.isfile(expected_results_file)
+        compare_results(frame_output_file,expected_results_file,options)
+
+
+        ## Run again, this time in memory, and make sure the results are *almost* the same
+
+        # They won't be quite the same, because the on-disk path goes through a jpeg intermediate
+
+        print('\n** Running MD on a folder of videos (in memory) **\n')
 
+        video_options.output_json_file = insert_before_extension(video_options.output_json_file,'in-memory')
+        video_options.force_on_disk_frame_extraction = False
+        _ = process_video_folder(video_options)
+
+        frame_output_file_in_memory = insert_before_extension(video_options.output_json_file,'frames')
+        assert os.path.isfile(frame_output_file_in_memory)
+
+        from copy import deepcopy
+        options_loose = deepcopy(options)
+        options_loose.max_conf_error = 0.05
+        options_loose.max_coord_error = 0.01
+
+        compare_results(inference_output_file=frame_output_file,
+                        expected_results_file=frame_output_file_in_memory,
+                        options=options_loose)
+
     # ...if we're not skipping video tests
 
     print('\n*** Finished module tests ***\n')
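The frame-level output filename checked above is derived from the video output filename with insert_before_extension(); the '...frames.json' expected-results names in the scrap section below follow the same pattern. A small illustration (assumes megadetector is installed; the commented values are what the pattern implies, not captured output):

from megadetector.utils.path_utils import insert_before_extension

print(insert_before_extension('video_folder_output.json', 'frames'))     # video_folder_output.frames.json
print(insert_before_extension('video_folder_output.json', 'in-memory'))  # video_folder_output.in-memory.json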
@@ -804,7 +1027,8 @@
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file,
-                                      fn2=inference_output_file_checkpoint,verbose=True)
+                                      fn2=inference_output_file_checkpoint,
+                                      verbose=True)
 
 
     ## Run again with the image queue enabled, make sure the results are the same
@@ -816,7 +1040,8 @@
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file,
-                                      fn2=inference_output_file_queue,verbose=True)
+                                      fn2=inference_output_file_queue,
+                                      verbose=True)
 
 
     ## Run again on multiple cores, make sure the results are the same
@@ -854,7 +1079,8 @@
     del os.environ['CUDA_VISIBLE_DEVICES']
 
     assert output_files_are_identical(fn1=inference_output_file_cpu,
-                                      fn2=inference_output_file_cpu_multicore,verbose=True)
+                                      fn2=inference_output_file_cpu_multicore,
+                                      verbose=True)
 
 
     ## Postprocessing
@@ -925,7 +1151,7 @@
         results_from_file = json.load(f) # noqa
 
 
-    ## Run inference on a folder (augmented)
+    ## Run inference on a folder (augmented, w/YOLOv5 val script)
 
     if options.yolo_working_dir is None:
 
@@ -960,7 +1186,8 @@
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file_yolo_val,
-                                      fn2=inference_output_file_yolo_val_checkpoint)
+                                      fn2=inference_output_file_yolo_val_checkpoint,
+                                      verbose=True)
 
     if not options.skip_video_tests:
 
@@ -1080,19 +1307,21 @@ if False:
     options.cpu_execution_is_error = False
     options.skip_video_tests = False
     options.skip_python_tests = False
-    options.skip_cli_tests = False
+    options.skip_cli_tests = True
     options.scratch_dir = None
     options.test_data_url = 'https://lila.science/public/md-test-package.zip'
     options.force_data_download = False
     options.force_data_unzip = False
-    options.warning_mode = True
+    options.warning_mode = False
     options.max_coord_error = 0.001
     options.max_conf_error = 0.005
     options.cli_working_dir = r'c:\git\MegaDetector'
     options.yolo_working_dir = r'c:\git\yolov5-md'
 
-    import os
 
+    #%%
+
+    import os
     if 'PYTHONPATH' not in os.environ or options.yolo_working_dir not in os.environ['PYTHONPATH']:
         os.environ['PYTHONPATH'] += ';' + options.yolo_working_dir
 
@@ -1218,3 +1447,36 @@ python md_tests.py --cli_working_dir "/mnt/c/git/MegaDetector" --yolo_working_di
 
 python -c "import md_tests; print(md_tests.get_expected_results_filename(True))"
 """
+
+
+#%% Scrap
+
+if False:
+
+    pass
+
+    #%%
+
+    import sys; sys.path.append(r'c:\git\yolov5-md')
+
+    #%%
+
+    fn1 = r"G:\temp\md-test-package\mdv5a-video-cpu-pt1.10.1.frames.json"
+    fn2 = r"G:\temp\md-test-package\mdv5a-video-gpu-pt1.10.1.frames.json"
+    fn3 = r"G:\temp\md-test-package\mdv5a-video-cpu-pt2.x.frames.json"
+    fn4 = r"G:\temp\md-test-package\mdv5a-video-gpu-pt2.x.frames.json"
+
+    assert all([os.path.isfile(fn) for fn in [fn1,fn2,fn3,fn4]])
+    print(output_files_are_identical(fn1,fn1,verbose=False))
+    print(output_files_are_identical(fn1,fn2,verbose=False))
+    print(output_files_are_identical(fn1,fn3,verbose=False))
+
+    #%%
+
+    fn1 = r"G:\temp\md-test-package\mdv5a-image-gpu-pt1.10.1.json"
+    fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-gpu-pt1.10.1.json"
+    print(output_files_are_identical(fn1,fn2,verbose=True))
+
+    fn1 = r"G:\temp\md-test-package\mdv5a-image-cpu-pt1.10.1.json"
+    fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-cpu-pt1.10.1.json"
+    print(output_files_are_identical(fn1,fn2,verbose=True))
megadetector/utils/path_utils.py

@@ -924,8 +924,8 @@ def zip_files_into_single_zipfile(input_files, output_fn, arc_name_base,
 
 def zip_folder(input_folder, output_fn=None, overwrite=False, verbose=False, compresslevel=9):
     """
-    Recursively zip everything in [input_folder] into a single zipfile, storing outputs as relative
-    paths.
+    Recursively zip everything in [input_folder] into a single zipfile, storing files as paths
+    relative to [input_folder].
 
     Args:
         input_folder (str): folder to zip