megadetector 5.0.13__py3-none-any.whl → 5.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

@@ -10,6 +10,8 @@ This module should not depend on anything else in this repo outside of the
  tests themselves, even if it means some duplicated code (e.g. for downloading files),
  since much of what it tries to test is, e.g., imports.

+ "Correctness" is determined by agreement with a file that this script fetches from lila.science.
+
  """

  #%% Imports and constants
@@ -75,10 +77,12 @@ class MDTestOptions:
  self.max_coord_error = 0.001

  #: How much deviation from the expected confidence values should we allow before
- #: a disrepancy becomes an error?
+ #: a disrepancy becomes an error?
  self.max_conf_error = 0.005

  #: Current working directory when running CLI tests
+ #:
+ #: If this is None, we won't mess with the inherited working directory.
  self.cli_working_dir = None

  #: YOLOv5 installation, only relevant if we're testing run_inference_with_yolov5_val.
@@ -92,7 +96,7 @@ class MDTestOptions:
  #: Default model to use for testing (filename, URL, or well-known model string)
  self.default_model = 'MDV5A'

- #: For comparison tests, use a model that produces slightly different output
+ #: For comparison tests, use a model that produces slightly different output
  self.alt_model = 'MDV5B'

  #: PYTHONPATH to set for CLI tests; if None, inherits from the parent process. Only
@@ -104,7 +108,11 @@ class MDTestOptions:

  #%% Support functions

- def get_expected_results_filename(gpu_is_available):
+ def get_expected_results_filename(gpu_is_available,
+ model_string='mdv5a',
+ test_type='image',
+ augment=False,
+ options=None):
  """
  Expected results vary just a little across inference environments, particularly
  between PT 1.x and 2.x, so when making sure things are working acceptably, we
@@ -140,13 +148,27 @@ def get_expected_results_filename(gpu_is_available):
  import torch
  m1_inference = torch.backends.mps.is_built and torch.backends.mps.is_available()
  if m1_inference:
- print('I appear to be running on M1/M2 hardware')
+ print('I appear to be running on M1/M2 hardware, using pt1/cpu as the reference results')
  hw_string = 'cpu'
  pt_string = 'pt1.10.1'
  except Exception:
  pass

- return 'md-test-results-{}-{}.json'.format(hw_string,pt_string)
+ aug_string = ''
+ if augment:
+ aug_string = 'augment-'
+
+ fn = '{}-{}{}-{}-{}.json'.format(model_string,aug_string,test_type,hw_string,pt_string)
+
+ from megadetector.utils.path_utils import insert_before_extension
+
+ if test_type == 'video':
+ fn = insert_before_extension(fn,'frames')
+
+ if options is not None and options.scratch_dir is not None:
+ fn = os.path.join(options.scratch_dir,fn)
+
+ return fn


  def download_test_data(options=None):
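
For reference, the filename scheme produced by the expanded get_expected_results_filename() can be sketched in isolation; this is an illustrative reconstruction based only on the format string in the hunk above (the hardware/PyTorch strings shown are example values, and the real function additionally inserts 'frames' before the extension for video tests and prepends the scratch directory when options are supplied):

    # Illustrative sketch of the expected-results naming scheme; not part of the package.
    def sketch_expected_results_filename(model_string='mdv5a', test_type='image',
                                         augment=False, hw_string='cpu', pt_string='pt1.10.1'):
        aug_string = 'augment-' if augment else ''
        return '{}-{}{}-{}-{}.json'.format(model_string, aug_string, test_type,
                                           hw_string, pt_string)

    print(sketch_expected_results_filename())              # mdv5a-image-cpu-pt1.10.1.json
    print(sketch_expected_results_filename(augment=True))  # mdv5a-augment-image-cpu-pt1.10.1.json
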
@@ -216,7 +238,12 @@ def download_test_data(options=None):

  # ...for each file in the zipfile

- # Warn if file are present that aren't expected
+ try:
+ zipf.close()
+ except Exception as e:
+ print('Warning: error closing zipfile:\n{}'.format(str(e)))
+
+ # Warn if files are present that aren't expected
  test_files = glob.glob(os.path.join(scratch_dir,'**/*'), recursive=True)
  test_files = [os.path.relpath(fn,scratch_dir).replace('\\','/') for fn in test_files]
  test_files_set = set(test_files)
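
If the surrounding download code permits it, the explicit close-with-warning added above could also be written with a context manager, which releases the archive handle even when extraction fails. This is a hedged alternative sketch (the function and variable names are illustrative), not the package's actual implementation:

    import zipfile

    # Alternative sketch: a context manager closes the archive automatically,
    # so no explicit zipf.close() / warning block is needed.
    def extract_test_archive(zip_path, scratch_dir):
        with zipfile.ZipFile(zip_path) as zipf:
            zipf.extractall(scratch_dir)
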
@@ -312,18 +339,19 @@ def output_files_are_identical(fn1,fn2,verbose=False):
  len(fn2_results['images']),fn2))
  return False

+ # i_image = 0; fn1_image = fn1_results['images'][i_image]
  for i_image,fn1_image in enumerate(fn1_results['images']):

  fn2_image = fn2_results['images'][i_image]

  if fn1_image['file'] != fn2_image['file']:
  if verbose:
- print('Filename difference: {} vs {} '.format(fn1_image['file'],fn1_image['file']))
+ print('Filename difference at {}: {} vs {} '.format(i_image,fn1_image['file'],fn1_image['file']))
  return False

  if fn1_image != fn2_image:
  if verbose:
- print('Image-level difference in image {}'.format(fn1_image['file']))
+ print('Image-level difference in image {}: {}'.format(i_image,fn1_image['file']))
  return False

  return True
@@ -331,6 +359,100 @@ def output_files_are_identical(fn1,fn2,verbose=False):
  # ...def output_files_are_identical(...)


+ def compare_results(inference_output_file,expected_results_file,options):
+ """
+ Compare two MD-formatted output files that should be nearly identical, allowing small
+ changes (e.g. rounding differences). Generally used to compare a new results file to
+ an expected results file.
+
+ Args:
+ inference_output_file (str): the first results file to compare
+ expected_results_file (str): the second results file to compare
+ options (MDTestOptions): options that determine tolerable differences between files
+ """
+
+ # Read results
+ with open(inference_output_file,'r') as f:
+ results_from_file = json.load(f) # noqa
+
+ with open(os.path.join(options.scratch_dir,expected_results_file),'r') as f:
+ expected_results = json.load(f)
+
+ filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
+ filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
+
+ assert len(filename_to_results) == len(filename_to_results_expected), \
+ 'Error: expected {} files in results, found {}'.format(
+ len(filename_to_results_expected),
+ len(filename_to_results))
+
+ max_coord_error = 0
+ max_conf_error = 0
+
+ # fn = next(iter(filename_to_results.keys()))
+ for fn in filename_to_results.keys():
+
+ actual_image_results = filename_to_results[fn]
+ expected_image_results = filename_to_results_expected[fn]
+
+ if 'failure' in actual_image_results:
+ assert 'failure' in expected_image_results and \
+ 'detections' not in actual_image_results and \
+ 'detections' not in expected_image_results
+ continue
+ assert 'failure' not in expected_image_results
+
+ actual_detections = actual_image_results['detections']
+ expected_detections = expected_image_results['detections']
+
+ s = 'expected {} detections for file {}, found {}'.format(
+ len(expected_detections),fn,len(actual_detections))
+ s += '\nExpected results file: {}\nActual results file: {}'.format(
+ expected_results_file,inference_output_file)
+
+ if options.warning_mode:
+ if len(actual_detections) != len(expected_detections):
+ print('Warning: {}'.format(s))
+ continue
+ assert len(actual_detections) == len(expected_detections), \
+ 'Error: {}'.format(s)
+
+ # i_det = 0
+ for i_det in range(0,len(actual_detections)):
+ actual_det = actual_detections[i_det]
+ expected_det = expected_detections[i_det]
+ assert actual_det['category'] == expected_det['category']
+ conf_err = abs(actual_det['conf'] - expected_det['conf'])
+ coord_differences = []
+ for i_coord in range(0,4):
+ coord_differences.append(abs(actual_det['bbox'][i_coord]-expected_det['bbox'][i_coord]))
+ coord_err = max(coord_differences)
+
+ if conf_err > max_conf_error:
+ max_conf_error = conf_err
+ if coord_err > max_coord_error:
+ max_coord_error = coord_err
+
+ # ...for each detection
+
+ # ...for each image
+
+ if not options.warning_mode:
+
+ assert max_conf_error <= options.max_conf_error, \
+ 'Confidence error {} is greater than allowable ({})'.format(
+ max_conf_error,options.max_conf_error)
+
+ assert max_coord_error <= options.max_coord_error, \
+ 'Coord error {} is greater than allowable ({})'.format(
+ max_coord_error,options.max_coord_error)
+
+ print('Max conf error: {}'.format(max_conf_error))
+ print('Max coord error: {}'.format(max_coord_error))
+
+ # ...def compare_results(...)
+
+
  def _args_to_object(args, obj):
  """
  Copies all fields from a Namespace (typically the output from parse_args) to an
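
The core of the new compare_results() helper is a per-detection tolerance check against max_conf_error and max_coord_error. A self-contained sketch of that logic, using invented detection dicts in the MD output format rather than real inference results, looks roughly like this:

    # Invented example detections in MegaDetector output format (category, conf, [x, y, w, h]).
    actual = [{'category': '1', 'conf': 0.902, 'bbox': [0.10, 0.20, 0.30, 0.40]}]
    expected = [{'category': '1', 'conf': 0.900, 'bbox': [0.10, 0.20, 0.30, 0.41]}]

    max_conf_error = 0.0
    max_coord_error = 0.0
    for a, e in zip(actual, expected):
        assert a['category'] == e['category']
        max_conf_error = max(max_conf_error, abs(a['conf'] - e['conf']))
        max_coord_error = max(max_coord_error,
                              max(abs(ac - ec) for ac, ec in zip(a['bbox'], e['bbox'])))

    # With the defaults above (max_conf_error 0.005, max_coord_error 0.001), this example
    # passes the confidence check (~0.002) but fails the coordinate check (~0.01).
    print(max_conf_error, max_coord_error)
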
@@ -382,7 +504,7 @@ def execute(cmd):
  return return_code


- def execute_and_print(cmd,print_output=True,catch_exceptions=False):
+ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=True):
  """
  Runs [cmd] (a single string) in a shell, capturing (and optionally printing) output.

@@ -395,8 +517,11 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False):
  (the content of stdout)
  """

+ if echo_command:
+ print('Running command:\n{}\n'.format(cmd))
+
  to_return = {'status':'unknown','output':''}
- output=[]
+ output = []
  try:
  for s in execute(cmd):
  output.append(s)
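
For context, execute() is used here as a generator that yields stdout lines while the command runs, and execute_and_print() now echoes the command before collecting that output. A minimal sketch of such a line-streaming runner, assuming a plain subprocess-based implementation (the real one in md_tests.py may differ), is:

    import subprocess

    # Hedged sketch of a line-streaming command runner in the spirit of execute().
    def stream_command(cmd):
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, text=True)
        for line in iter(proc.stdout.readline, ''):
            yield line
        proc.stdout.close()
        if proc.wait() != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd)

    for line in stream_command('echo hello'):
        print(line, end='')
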
@@ -441,6 +566,8 @@ def run_python_tests(options):


  ## Run inference on a folder
+
+ print('\n** Running MD on images **\n')

  from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
  from megadetector.utils import path_utils
@@ -450,95 +577,38 @@ def run_python_tests(options):
  inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
  image_file_names = path_utils.find_images(image_folder,recursive=True)
  results = load_and_run_detector_batch(options.default_model, image_file_names, quiet=True)
- _ = write_results_to_file(results,inference_output_file,
- relative_path_base=image_folder,detector_file=options.default_model)
+ _ = write_results_to_file(results,
+ inference_output_file,
+ relative_path_base=image_folder,
+ detector_file=options.default_model)

- # Read results
- with open(inference_output_file,'r') as f:
- results_from_file = json.load(f) # noqa

-
  ## Verify results

- # Read expected results
- expected_results_filename = get_expected_results_filename(is_gpu_available(verbose=False))
-
- with open(os.path.join(options.scratch_dir,expected_results_filename),'r') as f:
- expected_results = json.load(f)
-
- filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
- filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
+ expected_results_file = get_expected_results_filename(is_gpu_available(verbose=False),
+ options=options)
+ compare_results(inference_output_file,expected_results_file,options)
+
+
+ ## Run and verify again with augmentation enabled

- assert len(filename_to_results) == len(filename_to_results_expected), \
- 'Error: expected {} files in results, found {}'.format(
- len(filename_to_results_expected),
- len(filename_to_results))
+ print('\n** Running MD on images with augmentation **\n')

- max_coord_error = 0
- max_conf_error = 0
+ from megadetector.utils.path_utils import insert_before_extension

- # fn = next(iter(filename_to_results.keys()))
- for fn in filename_to_results.keys():
-
- actual_image_results = filename_to_results[fn]
- expected_image_results = filename_to_results_expected[fn]
-
- if 'failure' in actual_image_results:
- assert 'failure' in expected_image_results and \
- 'detections' not in actual_image_results and \
- 'detections' not in expected_image_results
- continue
- assert 'failure' not in expected_image_results
-
- actual_detections = actual_image_results['detections']
- expected_detections = expected_image_results['detections']
-
- s = 'expected {} detections for file {}, found {}'.format(
- len(expected_detections),fn,len(actual_detections))
- s += '\nExpected results file: {}\nActual results file: {}'.format(
- expected_results_filename,inference_output_file)
-
- if options.warning_mode:
- if len(actual_detections) != len(expected_detections):
- print('Warning: {}'.format(s))
- continue
- assert len(actual_detections) == len(expected_detections), \
- 'Error: {}'.format(s)
-
- # i_det = 0
- for i_det in range(0,len(actual_detections)):
- actual_det = actual_detections[i_det]
- expected_det = expected_detections[i_det]
- assert actual_det['category'] == expected_det['category']
- conf_err = abs(actual_det['conf'] - expected_det['conf'])
- coord_differences = []
- for i_coord in range(0,4):
- coord_differences.append(abs(actual_det['bbox'][i_coord]-expected_det['bbox'][i_coord]))
- coord_err = max(coord_differences)
-
- if conf_err > max_conf_error:
- max_conf_error = conf_err
- if coord_err > max_coord_error:
- max_coord_error = coord_err
-
- # ...for each detection
+ inference_output_file_augmented = insert_before_extension(inference_output_file,'augmented')
+ results = load_and_run_detector_batch(options.default_model, image_file_names, quiet=True, augment=True)
+ _ = write_results_to_file(results,
+ inference_output_file_augmented,
+ relative_path_base=image_folder,
+ detector_file=options.default_model)
+
+ expected_results_file_augmented = \
+ get_expected_results_filename(is_gpu_available(verbose=False),
+ augment=True,options=options)
+ compare_results(inference_output_file_augmented,expected_results_file_augmented,options)

- # ...for each image

- if not options.warning_mode:
-
- assert max_conf_error <= options.max_conf_error, \
- 'Confidence error {} is greater than allowable ({})'.format(
- max_conf_error,options.max_conf_error)
-
- assert max_coord_error <= options.max_coord_error, \
- 'Coord error {} is greater than allowable ({})'.format(
- max_coord_error,options.max_coord_error)
-
- print('Max conf error: {}'.format(max_conf_error))
- print('Max coord error: {}'.format(max_coord_error))
-
-
  ## Postprocess results

  from megadetector.postprocessing.postprocess_batch_results import \
  from megadetector.postprocessing.postprocess_batch_results import \
@@ -639,6 +709,8 @@ def run_python_tests(options):

  ## Video test (single video)

+ print('\n** Running MD on a single video **\n')
+
  from megadetector.detection.process_video import ProcessVideoOptions, process_video
 
  video_options = ProcessVideoOptions()
@@ -660,7 +732,7 @@ def run_python_tests(options):
  video_options.fourcc = options.video_fourcc
  # video_options.rendering_confidence_threshold = None
  # video_options.json_confidence_threshold = 0.005
- video_options.frame_sample = 5
+ video_options.frame_sample = 10
  video_options.n_cores = 5
  # video_options.debug_max_frames = -1
  # video_options.class_mapping_filename = None
@@ -675,6 +747,8 @@ def run_python_tests(options):

  ## Video test (folder)

+ print('\n** Running MD on a folder of videos **\n')
+
  from megadetector.detection.process_video import ProcessVideoOptions, process_video_folder
 
  video_options = ProcessVideoOptions()
@@ -697,15 +771,30 @@ def run_python_tests(options):
  video_options.fourcc = options.video_fourcc
  # video_options.rendering_confidence_threshold = None
  # video_options.json_confidence_threshold = 0.005
- video_options.frame_sample = 5
- video_options.n_cores = 5
+ video_options.frame_sample = 10
+ video_options.n_cores = 5
  # video_options.debug_max_frames = -1
  # video_options.class_mapping_filename = None

+ # Use quality == None, because we can't control whether YOLOv5 has patched cm2.imread,
+ # and therefore can't rely on using the quality parameter
+ video_options.quality = None
+
  _ = process_video_folder(video_options)

  assert os.path.isfile(video_options.output_json_file), \
  'Python video test failed to render output .json file'
+
+ frame_output_file = insert_before_extension(video_options.output_json_file,'frames')
+ assert os.path.isfile(frame_output_file)
+
+
+ ## Verify results
+
+ expected_results_file = \
+ get_expected_results_filename(is_gpu_available(verbose=False),test_type='video',options=options)
+ assert os.path.isfile(expected_results_file)
+ compare_results(frame_output_file,expected_results_file,options)

  # ...if we're not skipping video tests

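
Several of the new filenames above ('frames', 'augmented', '_checkpoint', and so on) are built with megadetector.utils.path_utils.insert_before_extension. Judging by the reference filenames later in this file (e.g. mdv5a-video-cpu-pt1.10.1.frames.json), it inserts a token just before the extension; a rough standalone equivalent, offered only as a sketch of that behavior, would be:

    import os

    # Approximate, illustrative stand-in for path_utils.insert_before_extension();
    # the separator and edge-case handling of the real helper may differ.
    def insert_before_extension_sketch(filename, token):
        base, ext = os.path.splitext(filename)
        return '{}.{}{}'.format(base, token, ext)

    print(insert_before_extension_sketch('mdv5a-video-cpu-pt1.10.1.json', 'frames'))
    # mdv5a-video-cpu-pt1.10.1.frames.json
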
@@ -754,7 +843,6 @@ def run_cli_tests(options):
  cmd = 'python megadetector/detection/run_detector.py'
  cmd += ' "{}" --image_file "{}" --output_dir "{}"'.format(
  options.default_model,image_fn,output_dir)
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  if options.cpu_execution_is_error:
@@ -769,6 +857,7 @@ def run_cli_tests(options):

  ## Run inference on a folder

+
  image_folder = os.path.join(options.scratch_dir,'md-test-images')
  assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
  inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
@@ -780,24 +869,75 @@ def run_cli_tests(options):
  options.default_model,image_folder,inference_output_file)
  cmd += ' --output_relative_filenames --quiet --include_image_size'
  cmd += ' --include_image_timestamp --include_exif_data'
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

+ base_cmd = cmd
+

  ## Run again with checkpointing enabled, make sure the results are the same

- cmd += ' --checkpoint_frequency 5'
  from megadetector.utils.path_utils import insert_before_extension
+
+ checkpoint_string = ' --checkpoint_frequency 5'
+ cmd = base_cmd + checkpoint_string
  inference_output_file_checkpoint = insert_before_extension(inference_output_file,'_checkpoint')
- assert inference_output_file_checkpoint != inference_output_file
  cmd = cmd.replace(inference_output_file,inference_output_file_checkpoint)
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  assert output_files_are_identical(fn1=inference_output_file,
  fn2=inference_output_file_checkpoint,verbose=True)


+ ## Run again with the image queue enabled, make sure the results are the same
+
+ cmd = base_cmd + ' --use_image_queue'
+ from megadetector.utils.path_utils import insert_before_extension
+ inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')
+ cmd = cmd.replace(inference_output_file,inference_output_file_queue)
+ cmd_results = execute_and_print(cmd)
+
+ assert output_files_are_identical(fn1=inference_output_file,
+ fn2=inference_output_file_queue,verbose=True)
+
+
+ ## Run again on multiple cores, make sure the results are the same
+
+ # First run again on the CPU on a single thread if necessary, so we get a file that
+ # *should* be identical to the multicore version.
+
+ gpu_available = is_gpu_available(verbose=False)
+
+ cuda_visible_devices = None
+ if 'CUDA_VISIBLE_DEVICES' in os.environ:
+ cuda_visible_devices = os.environ['CUDA_VISIBLE_DEVICES']
+ os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+ # If we already ran on the CPU, no need to run again
+ if not gpu_available:
+ inference_output_file_cpu = inference_output_file
+ else:
+ inference_output_file_cpu = insert_before_extension(inference_output_file,'cpu')
+ cmd = base_cmd
+ cmd = cmd.replace(inference_output_file,inference_output_file_cpu)
+ cmd_results = execute_and_print(cmd)
+
+ cpu_string = ' --ncores 4'
+ cmd = base_cmd + cpu_string
+ from megadetector.utils.path_utils import insert_before_extension
+ inference_output_file_cpu_multicore = insert_before_extension(inference_output_file,'multicore')
+ cmd = cmd.replace(inference_output_file,inference_output_file_cpu_multicore)
+ cmd_results = execute_and_print(cmd)
+
+ if cuda_visible_devices is not None:
+ print('Restoring CUDA_VISIBLE_DEVICES')
+ os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
+ else:
+ del os.environ['CUDA_VISIBLE_DEVICES']
+
+ assert output_files_are_identical(fn1=inference_output_file_cpu,
+ fn2=inference_output_file_cpu_multicore,verbose=True)
+
+
  ## Postprocessing

  postprocessing_output_dir = os.path.join(options.scratch_dir,'postprocessing_output_cli')
  postprocessing_output_dir = os.path.join(options.scratch_dir,'postprocessing_output_cli')
@@ -809,7 +949,6 @@ def run_cli_tests(options):
  cmd += ' "{}" "{}"'.format(
  inference_output_file,postprocessing_output_dir)
  cmd += ' --image_base_dir "{}"'.format(image_folder)
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)


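
The multi-core CLI comparison added a few hunks above forces the single-threaded reference run onto the CPU by hiding the GPU, then restores the environment afterwards. That save/restore pattern can be summarized in isolation like this (a sketch of the technique only; the surrounding test commands are omitted):

    import os

    # Temporarily hide CUDA devices so inference falls back to the CPU, then restore.
    saved = os.environ.get('CUDA_VISIBLE_DEVICES')
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    try:
        pass  # ...run the CPU-only commands here...
    finally:
        if saved is not None:
            os.environ['CUDA_VISIBLE_DEVICES'] = saved
        else:
            del os.environ['CUDA_VISIBLE_DEVICES']
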
@@ -825,7 +964,6 @@ def run_cli_tests(options):
  cmd += ' --imageBase "{}"'.format(image_folder)
  cmd += ' --outputBase "{}"'.format(rde_output_dir)
  cmd += ' --occurrenceThreshold 1' # Use an absurd number here to make sure we get some suspicious detections
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  # Find the latest filtering folder
@@ -844,7 +982,6 @@ def run_cli_tests(options):
  else:
  cmd = 'python megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py'
  cmd += ' "{}" "{}" "{}"'.format(inference_output_file,filtered_output_file,filtering_output_dir)
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  assert os.path.isfile(filtered_output_file), \
@@ -863,14 +1000,13 @@ def run_cli_tests(options):
  cmd += ' "{}" "{}" "{}" "{}"'.format(
  options.default_model,image_folder,tiling_folder,inference_output_file_tiled)
  cmd += ' --overwrite_handling overwrite'
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  with open(inference_output_file_tiled,'r') as f:
  results_from_file = json.load(f) # noqa


- ## Run inference on a folder (augmented)
+ ## Run inference on a folder (augmented, w/YOLOv5 val script)

  if options.yolo_working_dir is None:

@@ -894,7 +1030,6 @@ def run_cli_tests(options):
  cmd += ' --augment_enabled 1'
  # cmd += ' --no_use_symlinks'
  cmd += ' --overwrite_handling overwrite'
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  # Run again with checkpointing, make sure the output are identical
@@ -931,7 +1066,6 @@ def run_cli_tests(options):
  cmd += ' --render_output_video --fourcc {}'.format(options.video_fourcc)
  cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion --n_cores 5 --frame_sample 3'
  cmd += ' --verbose'
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  # ...if we're not skipping video tests
@@ -949,7 +1083,6 @@ def run_cli_tests(options):
  options.alt_model,image_folder,inference_output_file_alt)
  cmd += ' --output_relative_filenames --quiet --include_image_size'
  cmd += ' --include_image_timestamp --include_exif_data'
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  with open(inference_output_file_alt,'r') as f:
@@ -967,7 +1100,6 @@ def run_cli_tests(options):
  else:
  cmd = 'python megadetector/postprocessing/compare_batch_results.py'
  cmd += ' "{}" "{}" {}'.format(comparison_output_folder,image_folder,results_files_string)
- print('Running: {}'.format(cmd))
  cmd_results = execute_and_print(cmd)

  assert cmd_results['status'] == 0, 'Error generating comparison HTML'
@@ -1040,6 +1172,10 @@ if False:
  options.cli_working_dir = r'c:\git\MegaDetector'
  options.yolo_working_dir = r'c:\git\yolov5-md'

+ import os
+
+ if 'PYTHONPATH' not in os.environ or options.yolo_working_dir not in os.environ['PYTHONPATH']:
+ os.environ['PYTHONPATH'] += ';' + options.yolo_working_dir

  #%%

@@ -1150,15 +1286,50 @@ if __name__ == '__main__':

  #%% Sample invocations

- """
+ r"""
  # Windows
  set PYTHONPATH=c:\git\MegaDetector;c:\git\yolov5-md
+ cd c:\git\MegaDetector\megadetector\utils
  python md_tests.py --cli_working_dir "c:\git\MegaDetector" --yolo_working_dir "c:\git\yolov5-md" --cli_test_pythonpath "c:\git\MegaDetector;c:\git\yolov5-md"

  # Linux
  export PYTHONPATH=/mnt/c/git/MegaDetector:/mnt/c/git/yolov5-md
+ cd /mnt/c/git/MegaDetector/megadetector/utils
  python md_tests.py --cli_working_dir "/mnt/c/git/MegaDetector" --yolo_working_dir "/mnt/c/git/yolov5-md" --cli_test_pythonpath "/mnt/c/git/MegaDetector:/mnt/c/git/yolov5-md"

  python -c "import md_tests; print(md_tests.get_expected_results_filename(True))"
  """

+
+ #%% Scrap
+
+ if False:
+
+ pass
+
+ #%%
+
+ import sys; sys.path.append(r'c:\git\yolov5-md')
+
+ #%%
+
+ fn1 = r"G:\temp\md-test-package\mdv5a-video-cpu-pt1.10.1.frames.json"
+ fn2 = r"G:\temp\md-test-package\mdv5a-video-gpu-pt1.10.1.frames.json"
+ fn3 = r"G:\temp\md-test-package\mdv5a-video-cpu-pt2.x.frames.json"
+ fn4 = r"G:\temp\md-test-package\mdv5a-video-gpu-pt2.x.frames.json"
+
+ assert all([os.path.isfile(fn) for fn in [fn1,fn2,fn3,fn4]])
+ print(output_files_are_identical(fn1,fn1,verbose=False))
+ print(output_files_are_identical(fn1,fn2,verbose=False))
+ print(output_files_are_identical(fn1,fn3,verbose=False))
+
+ #%%
+
+ fn1 = r"G:\temp\md-test-package\mdv5a-image-gpu-pt1.10.1.json"
+ fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-gpu-pt1.10.1.json"
+ print(output_files_are_identical(fn1,fn2,verbose=True))
+
+ fn1 = r"G:\temp\md-test-package\mdv5a-image-cpu-pt1.10.1.json"
+ fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-cpu-pt1.10.1.json"
+ print(output_files_are_identical(fn1,fn2,verbose=True))
+