megadetector 5.0.15__py3-none-any.whl → 5.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (34)
  1. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +387 -0
  2. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +28 -16
  3. megadetector/data_management/lila/generate_lila_per_image_labels.py +3 -3
  4. megadetector/data_management/lila/test_lila_metadata_urls.py +2 -2
  5. megadetector/data_management/remove_exif.py +61 -36
  6. megadetector/data_management/yolo_to_coco.py +25 -6
  7. megadetector/detection/process_video.py +270 -127
  8. megadetector/detection/pytorch_detector.py +13 -11
  9. megadetector/detection/run_detector.py +9 -2
  10. megadetector/detection/run_detector_batch.py +8 -1
  11. megadetector/detection/run_inference_with_yolov5_val.py +58 -10
  12. megadetector/detection/tf_detector.py +8 -2
  13. megadetector/detection/video_utils.py +214 -18
  14. megadetector/postprocessing/md_to_coco.py +31 -9
  15. megadetector/postprocessing/postprocess_batch_results.py +23 -7
  16. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +5 -2
  17. megadetector/postprocessing/subset_json_detector_output.py +22 -12
  18. megadetector/taxonomy_mapping/map_new_lila_datasets.py +3 -3
  19. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +2 -1
  20. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +1 -1
  21. megadetector/taxonomy_mapping/simple_image_download.py +5 -0
  22. megadetector/taxonomy_mapping/species_lookup.py +1 -1
  23. megadetector/utils/ct_utils.py +48 -0
  24. megadetector/utils/md_tests.py +231 -56
  25. megadetector/utils/path_utils.py +2 -2
  26. megadetector/utils/torch_test.py +32 -0
  27. megadetector/utils/url_utils.py +101 -4
  28. megadetector/visualization/visualization_utils.py +21 -6
  29. megadetector/visualization/visualize_db.py +16 -0
  30. {megadetector-5.0.15.dist-info → megadetector-5.0.17.dist-info}/LICENSE +0 -0
  31. {megadetector-5.0.15.dist-info → megadetector-5.0.17.dist-info}/METADATA +5 -7
  32. {megadetector-5.0.15.dist-info → megadetector-5.0.17.dist-info}/RECORD +34 -32
  33. {megadetector-5.0.15.dist-info → megadetector-5.0.17.dist-info}/WHEEL +1 -1
  34. {megadetector-5.0.15.dist-info → megadetector-5.0.17.dist-info}/top_level.txt +0 -0
@@ -29,6 +29,10 @@ import subprocess
 import argparse
 import inspect
 
+#: IoU threshold used to determine whether boxes in two detection files likely correspond
+#: to the same box.
+iou_threshold_for_file_comparison = 0.9
+
 
 #%% Classes
 
@@ -359,6 +363,100 @@ def output_files_are_identical(fn1,fn2,verbose=False):
 # ...def output_files_are_identical(...)
 
 
+def compare_detection_lists(detections_a,detections_b,options,bidirectional_comparison=True):
+    """
+    Compare two lists of MD-formatted detections, matching detections across lists using IoU
+    criteria. Generally used to compare detections for the same image when two sets of results
+    are expected to be more or less the same.
+
+    Args:
+        detections_a (list): the first set of detection dicts
+        detections_b (list): the second set of detection dicts
+        options (MDTestOptions): options that determine tolerable differences between files
+        bidirectional_comparison (bool, optional): reverse the arguments and make a recursive
+            call.
+
+    Returns:
+        dict: a dictionary with keys 'max_conf_error' and 'max_coord_error'.
+    """
+    from megadetector.utils.ct_utils import get_iou
+
+    max_conf_error = 0
+    max_coord_error = 0
+
+    # i_det_a = 0
+    for i_det_a in range(0,len(detections_a)):
+
+        det_a = detections_a[i_det_a]
+
+        # Don't process very-low-confidence boxes
+        if det_a['conf'] < options.max_conf_error:
+            continue
+
+        matching_det_b = None
+        highest_iou = -1
+
+        # Find the closest match in the detections_b list
+
+        # i_det_b = 0
+        for i_det_b in range(0,len(detections_b)):
+
+            b_det = detections_b[i_det_b]
+
+            if b_det['category'] != det_a['category']:
+                continue
+
+            iou = get_iou(det_a['bbox'],b_det['bbox'])
+
+            # Is this likely the same detection as det_a?
+            if iou >= iou_threshold_for_file_comparison and iou > highest_iou:
+                matching_det_b = b_det
+                highest_iou = iou
+
+        # If there are no detections in this category in detections_b
+        if matching_det_b is None:
+            if det_a['conf'] > max_conf_error:
+                max_conf_error = det_a['conf']
+                # max_coord_error = 1.0
+            continue
+
+        assert det_a['category'] == matching_det_b['category']
+        conf_err = abs(det_a['conf'] - matching_det_b['conf'])
+        coord_differences = []
+        for i_coord in range(0,4):
+            coord_differences.append(abs(det_a['bbox'][i_coord]-\
+                                         matching_det_b['bbox'][i_coord]))
+        coord_err = max(coord_differences)
+
+        if conf_err >= max_conf_error:
+            max_conf_error = conf_err
+
+        if coord_err >= max_coord_error:
+            max_coord_error = coord_err
+
+    # ...for each detection in detections_a
+
+    if bidirectional_comparison:
+
+        reverse_comparison_results = compare_detection_lists(detections_b,
+                                                             detections_a,
+                                                             options,
+                                                             bidirectional_comparison=False)
+
+        if reverse_comparison_results['max_conf_error'] > max_conf_error:
+            max_conf_error = reverse_comparison_results['max_conf_error']
+        if reverse_comparison_results['max_coord_error'] > max_coord_error:
+            max_coord_error = reverse_comparison_results['max_coord_error']
+
+    list_comparison_results = {}
+    list_comparison_results['max_coord_error'] = max_coord_error
+    list_comparison_results['max_conf_error'] = max_conf_error
+
+    return list_comparison_results
+
+# ...def compare_detection_lists(...)
+
+
 def compare_results(inference_output_file,expected_results_file,options):
     """
     Compare two MD-formatted output files that should be nearly identical, allowing small
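
Note on the new comparison logic: instead of assuming the two files list detections in the same order, compare_detection_lists() pairs boxes of the same category whose IoU exceeds iou_threshold_for_file_comparison (0.9). As a purely illustrative sketch (not part of this diff), IoU for the MD-style [x_min, y_min, width, height] boxes used in these detection dicts can be computed as below; the package's own implementation is megadetector.utils.ct_utils.get_iou, which may differ in details:

    def iou_xywh(box_a, box_b):
        # Illustrative IoU for two [x, y, w, h] boxes (not the library implementation)
        ax0, ay0, aw, ah = box_a
        bx0, by0, bw, bh = box_b
        ix0 = max(ax0, bx0)
        iy0 = max(ay0, by0)
        ix1 = min(ax0 + aw, bx0 + bw)
        iy1 = min(ay0 + ah, by0 + bh)
        intersection = max(0.0, ix1 - ix0) * max(0.0, iy1 - iy0)
        union = aw * ah + bw * bh - intersection
        return intersection / union if union > 0 else 0.0

    # Two near-identical boxes of the same category clear the 0.9 threshold
    # and are treated as the same detection:
    print(iou_xywh([0.10, 0.20, 0.30, 0.40], [0.11, 0.20, 0.30, 0.40]))  # ~0.94
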
@@ -369,6 +467,9 @@ def compare_results(inference_output_file,expected_results_file,options):
         inference_output_file (str): the first results file to compare
         expected_results_file (str): the second results file to compare
         options (MDTestOptions): options that determine tolerable differences between files
+
+    Returns:
+        dict: dictionary with keys 'max_coord_error' and 'max_conf_error'
     """
 
     # Read results
@@ -386,8 +487,11 @@ def compare_results(inference_output_file,expected_results_file,options):
         len(filename_to_results_expected),
         len(filename_to_results))
 
-    max_coord_error = 0
     max_conf_error = 0
+    max_conf_error_file = None
+
+    max_coord_error = 0
+    max_coord_error_file = None
 
     # fn = next(iter(filename_to_results.keys()))
     for fn in filename_to_results.keys():
@@ -405,51 +509,43 @@ def compare_results(inference_output_file,expected_results_file,options):
         actual_detections = actual_image_results['detections']
         expected_detections = expected_image_results['detections']
 
-        s = 'expected {} detections for file {}, found {}'.format(
-            len(expected_detections),fn,len(actual_detections))
-        s += '\nExpected results file: {}\nActual results file: {}'.format(
-            expected_results_file,inference_output_file)
+        comparison_results_this_image = compare_detection_lists(
+            detections_a=actual_detections,
+            detections_b=expected_detections,
+            options=options,
+            bidirectional_comparison=True)
 
-        if options.warning_mode:
-            if len(actual_detections) != len(expected_detections):
-                print('Warning: {}'.format(s))
-            continue
-        assert len(actual_detections) == len(expected_detections), \
-            'Error: {}'.format(s)
-
-        # i_det = 0
-        for i_det in range(0,len(actual_detections)):
-            actual_det = actual_detections[i_det]
-            expected_det = expected_detections[i_det]
-            assert actual_det['category'] == expected_det['category']
-            conf_err = abs(actual_det['conf'] - expected_det['conf'])
-            coord_differences = []
-            for i_coord in range(0,4):
-                coord_differences.append(abs(actual_det['bbox'][i_coord]-expected_det['bbox'][i_coord]))
-            coord_err = max(coord_differences)
+        if comparison_results_this_image['max_conf_error'] > max_conf_error:
+            max_conf_error = comparison_results_this_image['max_conf_error']
+            max_conf_error_file = fn
 
-            if conf_err > max_conf_error:
-                max_conf_error = conf_err
-            if coord_err > max_coord_error:
-                max_coord_error = coord_err
-
-        # ...for each detection
-
+        if comparison_results_this_image['max_coord_error'] > max_coord_error:
+            max_coord_error = comparison_results_this_image['max_coord_error']
+            max_coord_error_file = fn
+
     # ...for each image
 
     if not options.warning_mode:
 
         assert max_conf_error <= options.max_conf_error, \
-            'Confidence error {} is greater than allowable ({})'.format(
-            max_conf_error,options.max_conf_error)
+            'Confidence error {} is greater than allowable ({}), on file:\n{}'.format(
+            max_conf_error,options.max_conf_error,max_conf_error_file)
 
         assert max_coord_error <= options.max_coord_error, \
-            'Coord error {} is greater than allowable ({})'.format(
-            max_coord_error,options.max_coord_error)
+            'Coord error {} is greater than allowable ({}), on file:\n{}'.format(
+            max_coord_error,options.max_coord_error,max_coord_error_file)
 
-    print('Max conf error: {}'.format(max_conf_error))
-    print('Max coord error: {}'.format(max_coord_error))
+    print('Max conf error: {} (file {})'.format(
+        max_conf_error,max_conf_error_file))
+    print('Max coord error: {} (file {})'.format(
+        max_coord_error,max_coord_error_file))
 
+    comparison_results = {}
+    comparison_results['max_conf_error'] = max_conf_error
+    comparison_results['max_coord_error'] = max_coord_error
+
+    return comparison_results
+
 # ...def compare_results(...)
 
 
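
Because compare_results() now returns its error summary rather than only printing it, a caller can inspect the worst-case deltas directly. A hypothetical usage sketch (the file names are placeholders, not taken from this diff; options is an MDTestOptions instance with max_conf_error / max_coord_error / warning_mode set):

    comparison = compare_results('folder_inference_output.json',
                                 'expected_results.json',
                                 options)
    print('Worst confidence delta: {}'.format(comparison['max_conf_error']))
    print('Worst coordinate delta: {}'.format(comparison['max_coord_error']))
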
@@ -557,6 +653,8 @@ def run_python_tests(options):
 
     ## Run inference on an image
 
+    print('\n** Running MD on a single image (module) **\n')
+
     from megadetector.detection import run_detector
     from megadetector.visualization import visualization_utils as vis_utils
     image_fn = os.path.join(options.scratch_dir,options.test_images[0])
@@ -567,7 +665,7 @@ def run_python_tests(options):
 
 
     ## Run inference on a folder
-    print('\n** Running MD on images **\n')
+    print('\n** Running MD on a folder of images (module) **\n')
 
     from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
     from megadetector.utils import path_utils
@@ -584,15 +682,18 @@ def run_python_tests(options):
 
 
     ## Verify results
-    
+
     expected_results_file = get_expected_results_filename(is_gpu_available(verbose=False),
                                                           options=options)
     compare_results(inference_output_file,expected_results_file,options)
 
-
+    # Make note of this filename, we will use it again later
+    inference_output_file_standard_inference = inference_output_file
+
+
     ## Run and verify again with augmentation enabled
 
-    print('\n** Running MD on images with augmentation **\n')
+    print('\n** Running MD on images with augmentation (module) **\n')
 
     from megadetector.utils.path_utils import insert_before_extension
 
@@ -611,6 +712,8 @@ def run_python_tests(options):
 
     ## Postprocess results
 
+    print('\n** Post-processing results (module) **\n')
+
     from megadetector.postprocessing.postprocess_batch_results import \
         PostProcessingOptions,process_batch_results
     postprocessing_options = PostProcessingOptions()
@@ -626,6 +729,8 @@ def run_python_tests(options):
 
     ## Partial RDE test
 
+    print('\n** Testing RDE (module) **\n')
+
     from megadetector.postprocessing.repeat_detection_elimination.repeat_detections_core import \
         RepeatDetectionOptions, find_repeat_detections
 
@@ -648,9 +753,12 @@ def run_python_tests(options):
         print('Skipping YOLO val inference tests, no YOLO folder supplied')
 
     else:
-        
+
+        print('\n** Running YOLO val inference test (module) **\n')
+
         from megadetector.detection.run_inference_with_yolov5_val import \
             YoloInferenceOptions, run_inference_with_yolo_val
+        from megadetector.utils.path_utils import insert_before_extension
 
         inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
 
@@ -661,12 +769,21 @@ def run_python_tests(options):
         yolo_inference_options.model_filename = options.default_model
         yolo_inference_options.augment = False
         yolo_inference_options.overwrite_handling = 'overwrite'
+        from megadetector.detection.run_detector import DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
+        yolo_inference_options.conf_thres = DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
 
         run_inference_with_yolo_val(yolo_inference_options)
 
+        ## Confirm this matches the standard inference path
+
+        if False:
+            # TODO: compare_results() isn't quite ready for this yet
+            compare_results(inference_output_file=inference_output_file_yolo_val,
+                            expected_results_file=inference_output_file_standard_inference,
+                            options=options)
+
         # Run again, without symlinks this time
 
-        from megadetector.utils.path_utils import insert_before_extension
         inference_output_file_yolo_val_no_links = insert_before_extension(inference_output_file_yolo_val,
                                                                           'no-links')
         yolo_inference_options.output_file = inference_output_file_yolo_val_no_links
@@ -709,9 +826,10 @@ def run_python_tests(options):
 
     ## Video test (single video)
 
-    print('\n** Running MD on a single video **\n')
+    print('\n** Running MD on a single video (module) **\n')
 
     from megadetector.detection.process_video import ProcessVideoOptions, process_video
+    from megadetector.utils.path_utils import insert_before_extension
 
     video_options = ProcessVideoOptions()
     video_options.model_file = options.default_model
@@ -747,25 +865,26 @@ def run_python_tests(options):
 
     ## Video test (folder)
 
-    print('\n** Running MD on a folder of videos **\n')
+    print('\n** Running MD on a folder of videos (module) **\n')
 
     from megadetector.detection.process_video import ProcessVideoOptions, process_video_folder
+    from megadetector.utils.path_utils import insert_before_extension
 
     video_options = ProcessVideoOptions()
     video_options.model_file = options.default_model
     video_options.input_video_file = os.path.join(options.scratch_dir,
                                                   os.path.dirname(options.test_videos[0]))
     video_options.output_json_file = os.path.join(options.scratch_dir,'video_folder_output.json')
-    # video_options.output_video_file = None
+    video_options.output_video_file = None
     video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
     video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
     video_options.render_output_video = False
-    # video_options.keep_rendered_frames = False
-    # video_options.keep_rendered_frames = False
+    video_options.keep_rendered_frames = False
+    video_options.keep_rendered_frames = False
     video_options.force_extracted_frame_folder_deletion = True
     video_options.force_rendered_frame_folder_deletion = True
-    # video_options.reuse_results_if_available = False
-    # video_options.reuse_frames_if_available = False
+    video_options.reuse_results_if_available = False
+    video_options.reuse_frames_if_available = False
     video_options.recursive = True
     video_options.verbose = True
     video_options.fourcc = options.video_fourcc
@@ -773,12 +892,16 @@ def run_python_tests(options):
     # video_options.json_confidence_threshold = 0.005
     video_options.frame_sample = 10
     video_options.n_cores = 5
+
+    # Force frame extraction to disk, since that's how we generated our expected results file
+    video_options.force_on_disk_frame_extraction = True
     # video_options.debug_max_frames = -1
     # video_options.class_mapping_filename = None
 
     # Use quality == None, because we can't control whether YOLOv5 has patched cm2.imread,
     # and therefore can't rely on using the quality parameter
-    video_options.quality = None
+    video_options.quality = None
+    video_options.max_width = None
 
     _ = process_video_folder(video_options)
 
@@ -796,6 +919,29 @@ def run_python_tests(options):
     assert os.path.isfile(expected_results_file)
     compare_results(frame_output_file,expected_results_file,options)
 
+
+    ## Run again, this time in memory, and make sure the results are *almost* the same
+
+    # They won't be quite the same, because the on-disk path goes through a jpeg intermediate
+
+    print('\n** Running MD on a folder of videos (in memory) (module) **\n')
+
+    video_options.output_json_file = insert_before_extension(video_options.output_json_file,'in-memory')
+    video_options.force_on_disk_frame_extraction = False
+    _ = process_video_folder(video_options)
+
+    frame_output_file_in_memory = insert_before_extension(video_options.output_json_file,'frames')
+    assert os.path.isfile(frame_output_file_in_memory)
+
+    from copy import deepcopy
+    options_loose = deepcopy(options)
+    options_loose.max_conf_error = 0.05
+    options_loose.max_coord_error = 0.01
+
+    compare_results(inference_output_file=frame_output_file,
+                    expected_results_file=frame_output_file_in_memory,
+                    options=options_loose)
+
     # ...if we're not skipping video tests
 
     print('\n*** Finished module tests ***\n')
@@ -835,6 +981,8 @@ def run_cli_tests(options):
 
     ## Run inference on an image
 
+    print('\n** Running MD on a single image (CLI) **\n')
+
     image_fn = os.path.join(options.scratch_dir,options.test_images[0])
     output_dir = os.path.join(options.scratch_dir,'single_image_test')
     if options.cli_working_dir is None:
@@ -857,6 +1005,7 @@ def run_cli_tests(options):
 
     ## Run inference on a folder
 
+    print('\n** Running MD on a folder (CLI) **\n')
 
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
@@ -876,6 +1025,8 @@ def run_cli_tests(options):
 
     ## Run again with checkpointing enabled, make sure the results are the same
 
+    print('\n** Running MD on a folder (with checkpoints) (CLI) **\n')
+
     from megadetector.utils.path_utils import insert_before_extension
 
     checkpoint_string = ' --checkpoint_frequency 5'
@@ -885,11 +1036,14 @@ def run_cli_tests(options):
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file,
-                                      fn2=inference_output_file_checkpoint,verbose=True)
+                                      fn2=inference_output_file_checkpoint,
+                                      verbose=True)
 
 
     ## Run again with the image queue enabled, make sure the results are the same
 
+    print('\n** Running MD on a folder (with image queue) (CLI) **\n')
+
     cmd = base_cmd + ' --use_image_queue'
     from megadetector.utils.path_utils import insert_before_extension
     inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')
@@ -897,7 +1051,8 @@ def run_cli_tests(options):
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file,
-                                      fn2=inference_output_file_queue,verbose=True)
+                                      fn2=inference_output_file_queue,
+                                      verbose=True)
 
 
     ## Run again on multiple cores, make sure the results are the same
@@ -916,11 +1071,16 @@ def run_cli_tests(options):
     if not gpu_available:
         inference_output_file_cpu = inference_output_file
     else:
+
+        print('\n** Running MD on a folder (single CPU) (CLI) **\n')
+
         inference_output_file_cpu = insert_before_extension(inference_output_file,'cpu')
         cmd = base_cmd
         cmd = cmd.replace(inference_output_file,inference_output_file_cpu)
         cmd_results = execute_and_print(cmd)
 
+    print('\n** Running MD on a folder (multiple CPUs) (CLI) **\n')
+
     cpu_string = ' --ncores 4'
     cmd = base_cmd + cpu_string
     from megadetector.utils.path_utils import insert_before_extension
@@ -935,11 +1095,14 @@ def run_cli_tests(options):
         del os.environ['CUDA_VISIBLE_DEVICES']
 
     assert output_files_are_identical(fn1=inference_output_file_cpu,
-                                      fn2=inference_output_file_cpu_multicore,verbose=True)
+                                      fn2=inference_output_file_cpu_multicore,
+                                      verbose=True)
 
 
     ## Postprocessing
 
+    print('\n** Testing post-processing (CLI) **\n')
+
     postprocessing_output_dir = os.path.join(options.scratch_dir,'postprocessing_output_cli')
 
     if options.cli_working_dir is None:
@@ -954,6 +1117,8 @@ def run_cli_tests(options):
 
     ## RDE
 
+    print('\n** Running RDE (CLI) **\n')
+
     rde_output_dir = os.path.join(options.scratch_dir,'rde_output_cli')
 
     if options.cli_working_dir is None:
@@ -990,6 +1155,8 @@ def run_cli_tests(options):
 
     ## Run inference on a folder (tiled)
 
+    print('\n** Running tiled inference (CLI) **\n')
+
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     tiling_folder = os.path.join(options.scratch_dir,'tiling-folder')
     inference_output_file_tiled = os.path.join(options.scratch_dir,'folder_inference_output_tiled.json')
@@ -1014,6 +1181,8 @@ def run_cli_tests(options):
 
 
     else:
+        print('\n** Running YOLOv5 val tests (CLI) **\n')
+
         image_folder = os.path.join(options.scratch_dir,'md-test-images')
         yolo_results_folder = os.path.join(options.scratch_dir,'yolo-output-folder')
         yolo_symlink_folder = os.path.join(options.scratch_dir,'yolo-symlink_folder')
@@ -1041,12 +1210,15 @@ def run_cli_tests(options):
         cmd_results = execute_and_print(cmd)
 
         assert output_files_are_identical(fn1=inference_output_file_yolo_val,
-                                          fn2=inference_output_file_yolo_val_checkpoint)
+                                          fn2=inference_output_file_yolo_val_checkpoint,
+                                          verbose=True)
 
     if not options.skip_video_tests:
 
         ## Video test
 
+        print('\n** Testing video rendering (CLI) **\n')
+
         video_inference_output_file = os.path.join(options.scratch_dir,'video_inference_output.json')
         output_video_file = os.path.join(options.scratch_dir,'video_scratch/cli_rendered_video.mp4')
         frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder_cli')
@@ -1073,6 +1245,8 @@ def run_cli_tests(options):
 
     ## Run inference on a folder (with MDV5B, so we can do a comparison)
 
+    print('\n** Running MDv5b (CLI) **\n')
+
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     inference_output_file_alt = os.path.join(options.scratch_dir,'folder_inference_output_alt.json')
     if options.cli_working_dir is None:
@@ -1166,14 +1340,16 @@ if False:
     options.test_data_url = 'https://lila.science/public/md-test-package.zip'
     options.force_data_download = False
    options.force_data_unzip = False
-    options.warning_mode = True
+    options.warning_mode = False
     options.max_coord_error = 0.001
     options.max_conf_error = 0.005
     options.cli_working_dir = r'c:\git\MegaDetector'
     options.yolo_working_dir = r'c:\git\yolov5-md'
 
-    import os
 
+    #%%
+
+    import os
     if 'PYTHONPATH' not in os.environ or options.yolo_working_dir not in os.environ['PYTHONPATH']:
         os.environ['PYTHONPATH'] += ';' + options.yolo_working_dir
 
@@ -1332,4 +1508,3 @@ if False:
     fn1 = r"G:\temp\md-test-package\mdv5a-image-cpu-pt1.10.1.json"
     fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-cpu-pt1.10.1.json"
     print(output_files_are_identical(fn1,fn2,verbose=True))
-
@@ -924,8 +924,8 @@ def zip_files_into_single_zipfile(input_files, output_fn, arc_name_base,
 
 def zip_folder(input_folder, output_fn=None, overwrite=False, verbose=False, compresslevel=9):
     """
-    Recursively zip everything in [input_folder] into a single zipfile, storing outputs as relative
-    paths.
+    Recursively zip everything in [input_folder] into a single zipfile, storing files as paths
+    relative to [input_folder].
 
     Args:
         input_folder (str): folder to zip
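
The corrected zip_folder() docstring clarifies that archive members are stored relative to [input_folder]. Judging by the file list above, this hunk appears to come from megadetector/utils/path_utils.py (the only file with a matching +2/-2 change). A hypothetical call, with placeholder paths:

    from megadetector.utils.path_utils import zip_folder

    # Creates /tmp/camera-trap-images.zip; entries are stored as paths relative to
    # the input folder (e.g. 'site-01/IMG_0001.JPG'), not as absolute paths.
    zip_folder('/tmp/camera-trap-images',
               output_fn='/tmp/camera-trap-images.zip',
               overwrite=True,
               verbose=True)
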
@@ -0,0 +1,32 @@
+"""
+
+torch_test.py
+
+Simple script to verify CUDA availability, used to verify a CUDA/PyTorch
+environment.
+
+"""
+
+def torch_test():
+    """
+    Print diagnostic information about Torch/CUDA status, including Torch/CUDA versions
+    and all available CUDA device names.
+    """
+
+    import torch
+
+    print('Torch version: {}'.format(str(torch.__version__)))
+    print('CUDA available: {}'.format(torch.cuda.is_available()))
+
+    device_ids = list(range(torch.cuda.device_count()))
+    print('Found {} CUDA devices:'.format(len(device_ids)))
+    for device_id in device_ids:
+        device_name = 'unknown'
+        try:
+            device_name = torch.cuda.get_device_name(device=device_id)
+        except Exception as e:
+            pass
+        print('{}: {}'.format(device_id,device_name))
+
+if __name__ == '__main__':
+    torch_test()
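
The new torch_test module (megadetector/utils/torch_test.py) gives a quick way to sanity-check a PyTorch/CUDA environment after installing the package. A typical invocation (illustrative):

    from megadetector.utils.torch_test import torch_test
    torch_test()

Because of the __main__ guard, the same check should also be runnable as a script, e.g. "python -m megadetector.utils.torch_test".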