megadetector 5.0.23__py3-none-any.whl → 5.0.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +2 -3
- megadetector/classification/merge_classification_detection_output.py +2 -2
- megadetector/data_management/coco_to_labelme.py +2 -1
- megadetector/data_management/databases/integrity_check_json_db.py +15 -14
- megadetector/data_management/databases/subset_json_db.py +49 -21
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +73 -69
- megadetector/data_management/lila/add_locations_to_nacti.py +114 -110
- megadetector/data_management/mewc_to_md.py +340 -0
- megadetector/data_management/speciesnet_to_md.py +41 -0
- megadetector/data_management/yolo_output_to_md_output.py +15 -8
- megadetector/detection/process_video.py +24 -7
- megadetector/detection/pytorch_detector.py +841 -160
- megadetector/detection/run_detector.py +341 -146
- megadetector/detection/run_detector_batch.py +307 -70
- megadetector/detection/run_inference_with_yolov5_val.py +61 -4
- megadetector/detection/tf_detector.py +6 -1
- megadetector/postprocessing/{combine_api_outputs.py → combine_batch_outputs.py} +10 -13
- megadetector/postprocessing/compare_batch_results.py +236 -7
- megadetector/postprocessing/create_crop_folder.py +358 -0
- megadetector/postprocessing/md_to_labelme.py +7 -7
- megadetector/postprocessing/md_to_wi.py +40 -0
- megadetector/postprocessing/merge_detections.py +1 -1
- megadetector/postprocessing/postprocess_batch_results.py +12 -5
- megadetector/postprocessing/separate_detections_into_folders.py +32 -4
- megadetector/postprocessing/validate_batch_results.py +9 -4
- megadetector/utils/ct_utils.py +236 -45
- megadetector/utils/directory_listing.py +3 -3
- megadetector/utils/gpu_test.py +125 -0
- megadetector/utils/md_tests.py +455 -116
- megadetector/utils/path_utils.py +43 -2
- megadetector/utils/wi_utils.py +2691 -0
- megadetector/visualization/visualization_utils.py +95 -18
- megadetector/visualization/visualize_db.py +25 -7
- megadetector/visualization/visualize_detector_output.py +60 -13
- {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/METADATA +11 -23
- {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/RECORD +39 -36
- {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/WHEEL +1 -1
- megadetector/detection/detector_training/__init__.py +0 -0
- megadetector/detection/detector_training/model_main_tf2.py +0 -114
- megadetector/utils/torch_test.py +0 -32
- {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/LICENSE +0 -0
- {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/top_level.txt +0 -0
megadetector/utils/md_tests.py
CHANGED
@@ -29,6 +29,8 @@ import subprocess
 import argparse
 import inspect
 
+from copy import copy
+
 
 #%% Classes
 
@@ -50,12 +52,21 @@ class MDTestOptions:
         #: Skip tests related to video processing
         self.skip_video_tests = False
 
+        #: Skip tests related to video rendering
+        self.skip_video_rendering_tests = False
+
         #: Skip tests launched via Python functions (as opposed to CLIs)
         self.skip_python_tests = False
 
         #: Skip CLI tests
         self.skip_cli_tests = False
 
+        #: Skip download tests
+        self.skip_download_tests = False
+
+        #: Skip force-CPU tests
+        self.skip_cpu_tests = False
+
         #: Force a specific folder for temporary input/output
         self.scratch_dir = None
 
@@ -106,7 +117,31 @@ class MDTestOptions:
         #: IoU threshold used to determine whether boxes in two detection files likely correspond
         #: to the same box.
         self.iou_threshold_for_file_comparison = 0.85
-
+
+        #: Detector options passed to PTDetector
+        self.detector_options = {'compatibility_mode':'classic-test'}
+
+        #: Used to drive a series of tests (typically with a low value for
+        #: python_test_depth) over a folder of models.
+        self.model_folder = None
+
+        #: Used as a knob to control the level of Python tests, typically used when
+        #: we want to run a series of simple tests on a small number of models, rather
+        #: than a deep test of tests on a small number of models. The gestalt is that
+        #: this is a range from 0-100.
+        self.python_test_depth = 100
+
+        #: Currently should be 'all' or 'utils-only'
+        self.test_mode = 'all'
+
+        #: Number of cores to use for multi-CPU inference tests
+        self.n_cores_for_multiprocessing_tests = 2
+
+        #: Number of cores to use for multi-CPU video tests
+        self.n_cores_for_video_tests = 2
+
+    # ...def __init__()
+
 # ...class MDTestOptions()
 
 
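The new MDTestOptions fields above are plain attributes that a driver script can set before invoking the harness; a minimal sketch of exercising the new knobs from Python (the scratch path and values are illustrative):

    from megadetector.utils.md_tests import MDTestOptions, run_tests

    options = MDTestOptions()
    options.scratch_dir = '/tmp/md-test-scratch'        # illustrative scratch folder
    options.test_mode = 'utils-only'                     # skip torch-dependent tests
    options.python_test_depth = 1                        # shallowest Python tests only
    options.skip_video_rendering_tests = True
    options.detector_options = {'compatibility_mode': 'classic-test'}

    run_tests(options)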
@@ -171,7 +206,7 @@ def get_expected_results_filename(gpu_is_available,
 
     if options is not None and options.scratch_dir is not None:
         fn = os.path.join(options.scratch_dir,fn)
-
+
     return fn
 
 
@@ -268,7 +303,7 @@ def download_test_data(options=None):
                    os.path.isfile(os.path.join(scratch_dir,fn))]
 
     print('Finished unzipping and enumerating test data')
-
+
     return options
 
 # ...def download_test_data(...)
@@ -276,18 +311,24 @@ def download_test_data(options=None):
 
 def is_gpu_available(verbose=True):
     """
-    Checks whether a GPU (including M1/M2 MPS) is available.
+    Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch. Returns
+    false if PT fails to import.
 
     Args:
-        verbose (bool, optional): enable additional debug console output
+        verbose (bool, optional): enable additional debug console output
 
     Returns:
-        bool: whether a GPU is available
+        bool: whether a GPU is available
     """
 
     # Import torch inside this function, so we have a chance to set CUDA_VISIBLE_DEVICES
     # before checking GPU availability.
-
+    try:
+        import torch
+    except Exception:
+        print('Warning: could not import torch')
+        return False
+
     gpu_available = torch.cuda.is_available()
 
     if gpu_available:
@@ -384,15 +425,21 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
 
     max_conf_error = 0
     max_coord_error = 0
-
+
+    max_conf_error_det_a = None
+    max_conf_error_det_b = None
+
+    max_coord_error_det_a = None
+    max_coord_error_det_b = None
+
     # i_det_a = 0
     for i_det_a in range(0,len(detections_a)):
 
         det_a = detections_a[i_det_a]
 
         # Don't process very-low-confidence boxes
-        if det_a['conf'] < options.max_conf_error:
-
+        # if det_a['conf'] < options.max_conf_error:
+        #     continue
 
         matching_det_b = None
         highest_iou = -1
@@ -402,22 +449,23 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
         # i_det_b = 0
         for i_det_b in range(0,len(detections_b)):
 
-
+            det_b = detections_b[i_det_b]
 
-            if
+            if det_b['category'] != det_a['category']:
                 continue
 
-            iou = get_iou(det_a['bbox'],
+            iou = get_iou(det_a['bbox'],det_b['bbox'])
 
             # Is this likely the same detection as det_a?
             if iou >= options.iou_threshold_for_file_comparison and iou > highest_iou:
-                matching_det_b =
+                matching_det_b = det_b
                 highest_iou = iou
 
         # If there are no detections in this category in detections_b
         if matching_det_b is None:
             if det_a['conf'] > max_conf_error:
                 max_conf_error = det_a['conf']
+                max_conf_error_det_a = det_a
             # max_coord_error = 1.0
             continue
 
@@ -427,13 +475,17 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
         for i_coord in range(0,4):
             coord_differences.append(abs(det_a['bbox'][i_coord]-\
                                          matching_det_b['bbox'][i_coord]))
-        coord_err = max(coord_differences)
+        coord_err = max(coord_differences)
 
         if conf_err >= max_conf_error:
             max_conf_error = conf_err
+            max_conf_error_det_a = det_a
+            max_conf_error_det_b = det_b
 
         if coord_err >= max_coord_error:
-            max_coord_error = coord_err
+            max_coord_error = coord_err
+            max_coord_error_det_a = det_a
+            max_coord_error_det_b = det_b
 
     # ...for each detection in detections_a
 
@@ -446,19 +498,32 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
 
         if reverse_comparison_results['max_conf_error'] > max_conf_error:
             max_conf_error = reverse_comparison_results['max_conf_error']
+            max_conf_error_det_a = reverse_comparison_results['max_conf_error_det_b']
+            max_conf_error_det_b = reverse_comparison_results['max_conf_error_det_a']
         if reverse_comparison_results['max_coord_error'] > max_coord_error:
             max_coord_error = reverse_comparison_results['max_coord_error']
+            max_coord_error_det_a = reverse_comparison_results['max_coord_error_det_b']
+            max_coord_error_det_b = reverse_comparison_results['max_coord_error_det_a']
 
     list_comparison_results = {}
+
     list_comparison_results['max_coord_error'] = max_coord_error
+    list_comparison_results['max_coord_error_det_a'] = max_coord_error_det_a
+    list_comparison_results['max_coord_error_det_b'] = max_coord_error_det_b
+
     list_comparison_results['max_conf_error'] = max_conf_error
+    list_comparison_results['max_conf_error_det_a'] = max_conf_error_det_a
+    list_comparison_results['max_conf_error_det_b'] = max_conf_error_det_b
 
     return list_comparison_results
 
 # ...def compare_detection_lists(...)
 
 
-def compare_results(inference_output_file,expected_results_file,options):
+def compare_results(inference_output_file,
+                    expected_results_file,
+                    options,
+                    expected_results_file_is_absolute=False):
     """
     Compare two MD-formatted output files that should be nearly identical, allowing small
     changes (e.g. rounding differences). Generally used to compare a new results file to
@@ -468,6 +533,9 @@ def compare_results(inference_output_file,expected_results_file,options):
         inference_output_file (str): the first results file to compare
         expected_results_file (str): the second results file to compare
         options (MDTestOptions): options that determine tolerable differences between files
+        expected_results_file_is_absolute (str, optional): by default,
+            expected_results_file is appended to options.scratch_dir; this option
+            specifies that it's an absolute path.
 
     Returns:
         dict: dictionary with keys 'max_coord_error' and 'max_conf_error'
@@ -477,7 +545,10 @@ def compare_results(inference_output_file,expected_results_file,options):
     with open(inference_output_file,'r') as f:
         results_from_file = json.load(f) # noqa
 
-
+    if not expected_results_file_is_absolute:
+        expected_results_file= os.path.join(options.scratch_dir,expected_results_file)
+
+    with open(expected_results_file,'r') as f:
         expected_results = json.load(f)
 
     filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
@@ -488,11 +559,13 @@ def compare_results(inference_output_file,expected_results_file,options):
                len(filename_to_results_expected),
                len(filename_to_results))
 
-    max_conf_error =
+    max_conf_error = -1
     max_conf_error_file = None
+    max_conf_error_comparison_results = None
 
-    max_coord_error =
+    max_coord_error = -1
     max_coord_error_file = None
+    max_coord_error_comparison_results = None
 
     # fn = next(iter(filename_to_results.keys()))
     for fn in filename_to_results.keys():
@@ -518,10 +591,12 @@ def compare_results(inference_output_file,expected_results_file,options):
 
         if comparison_results_this_image['max_conf_error'] > max_conf_error:
             max_conf_error = comparison_results_this_image['max_conf_error']
+            max_conf_error_comparison_results = comparison_results_this_image
             max_conf_error_file = fn
 
         if comparison_results_this_image['max_coord_error'] > max_coord_error:
             max_coord_error = comparison_results_this_image['max_coord_error']
+            max_coord_error_comparison_results = comparison_results_this_image
             max_coord_error_file = fn
 
     # ...for each image
@@ -537,7 +612,7 @@ def compare_results(inference_output_file,expected_results_file,options):
         'Coord error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
         max_coord_error,options.max_coord_error,max_coord_error_file,
         inference_output_file,expected_results_file)
-
+
     print('Max conf error: {} (file {})'.format(
         max_conf_error,max_conf_error_file))
     print('Max coord error: {} (file {})'.format(
@@ -545,7 +620,9 @@ def compare_results(inference_output_file,expected_results_file,options):
 
     comparison_results = {}
     comparison_results['max_conf_error'] = max_conf_error
+    comparison_results['max_conf_error_comparison_results'] = max_conf_error_comparison_results
    comparison_results['max_coord_error'] = max_coord_error
+    comparison_results['max_coord_error_comparison_results'] = max_coord_error_comparison_results
 
     return comparison_results
 
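The comparison logic above pairs boxes across files using get_iou() and the iou_threshold_for_file_comparison option; MD-formatted detections store boxes as normalized [x_min, y_min, width, height], so the IoU it relies on is computed roughly as follows (an illustrative sketch, not the ct_utils implementation):

    def iou_xywh(box_a, box_b):
        # Boxes are [x_min, y_min, width, height] in normalized image coordinates
        ax0, ay0, aw, ah = box_a
        bx0, by0, bw, bh = box_b
        inter_w = max(0.0, min(ax0 + aw, bx0 + bw) - max(ax0, bx0))
        inter_h = max(0.0, min(ay0 + ah, by0 + bh) - max(ay0, by0))
        intersection = inter_w * inter_h
        union = (aw * ah) + (bw * bh) - intersection
        return intersection / union if union > 0 else 0.0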
@@ -580,6 +657,10 @@ def _args_to_object(args, obj):
 
 os.environ["PYTHONUNBUFFERED"] = "1"
 
+# In some circumstances I want to allow CLI tests to "succeed" even when they return
+# specific non-zero output values.
+allowable_process_return_codes = [0]
+
 def execute(cmd):
     """
     Runs [cmd] (a single string) in a shell, yielding each line of output to the caller.
@@ -598,7 +679,7 @@ def execute(cmd):
         yield stdout_line
     popen.stdout.close()
     return_code = popen.wait()
-    if return_code:
+    if return_code not in allowable_process_return_codes:
         raise subprocess.CalledProcessError(return_code, cmd)
     return return_code
 
@@ -628,7 +709,7 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
                 print(s,end='',flush=True)
         to_return['status'] = 0
     except subprocess.CalledProcessError as cpe:
-        if not catch_exceptions:
+        if not catch_exceptions:
            raise
        print('execute_and_print caught error: {}'.format(cpe.output))
        to_return['status'] = cpe.returncode
@@ -639,6 +720,47 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
 
 #%% Python tests
 
+def test_package_imports(package_name,exceptions=None,verbose=True):
+    """
+    Imports all modules in [package_name]
+
+    Args:
+        package_name (str): the package name to test
+        exceptions (list, optional): exclude any modules that contain any of these strings
+        verbose (bool, optional): enable additional debug output
+    """
+    import importlib
+    import pkgutil
+
+    package = importlib.import_module(package_name)
+    package_path = package.__path__
+    imported_modules = []
+
+    if exceptions is None:
+        exceptions = []
+
+    for _, modname, _ in pkgutil.walk_packages(package_path, package_name + '.'):
+
+        skip_module = False
+        for s in exceptions:
+            if s in modname:
+                skip_module = True
+                break
+        if skip_module:
+            continue
+
+        if verbose:
+            print('Testing import: {}'.format(modname))
+
+        try:
+            # Attempt to import each module
+            _ = importlib.import_module(modname)
+            imported_modules.append(modname)
+        except ImportError as e:
+            print(f"Failed to import module {modname}: {e}")
+            raise
+
+#%%
 def run_python_tests(options):
     """
     Runs Python-based (as opposed to CLI-based) package tests.
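The new test_package_imports() helper walks a package with pkgutil.walk_packages() and imports every submodule, re-raising on the first ImportError; the [exceptions] entries are substring matches against the dotted module name. Usage mirrors the calls added to run_python_tests() below:

    # Import every module under megadetector.utils, skipping modules whose names
    # contain any of these substrings (optional dependencies, or this test module itself)
    test_package_imports('megadetector.utils',
                         exceptions=['azure_utils', 'sas_blob_utils', 'md_tests'])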
@@ -648,6 +770,7 @@ def run_python_tests(options):
     """
 
     print('\n*** Starting module tests ***\n')
+
 
     ## Prepare data
 
@@ -662,40 +785,68 @@ def run_python_tests(options):
     ct_utils_test()
 
 
-    ##
+    ## Import tests
+
+    print('\n** Running package import tests **\n')
+    test_package_imports('megadetector.visualization')
+    test_package_imports('megadetector.postprocessing')
+    test_package_imports('megadetector.postprocessing.repeat_detection_elimination')
+    test_package_imports('megadetector.utils',exceptions=['azure_utils','sas_blob_utils','md_tests'])
+    test_package_imports('megadetector.data_management',exceptions=['lila','ocr_tools'])
+
+
+    ## Return early if we're not running torch-related tests
+
+    if options.test_mode == 'utils-only':
+        return
+
+
+    ## Make sure our tests are doing what we think they're doing
+
+    from megadetector.detection import pytorch_detector
+    pytorch_detector.require_non_default_compatibility_mode = True
 
+
+    ## Run inference on an image
+
     print('\n** Running MD on a single image (module) **\n')
 
     from megadetector.detection import run_detector
-    from megadetector.visualization import visualization_utils as vis_utils
+    from megadetector.visualization import visualization_utils as vis_utils # noqa
     image_fn = os.path.join(options.scratch_dir,options.test_images[0])
-    model = run_detector.load_detector(options.default_model
+    model = run_detector.load_detector(options.default_model,
+                                       detector_options=copy(options.detector_options))
     pil_im = vis_utils.load_image(image_fn)
     result = model.generate_detections_one_image(pil_im) # noqa
-
+
+    if options.python_test_depth <= 1:
+        return
+
 
     ## Run inference on a folder
 
     print('\n** Running MD on a folder of images (module) **\n')
 
     from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
-    from megadetector.utils import path_utils
+    from megadetector.utils import path_utils # noqa
 
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
     inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
     image_file_names = path_utils.find_images(image_folder,recursive=True)
-    results = load_and_run_detector_batch(options.default_model,
+    results = load_and_run_detector_batch(options.default_model,
+                                          image_file_names,
+                                          quiet=True,
+                                          detector_options=copy(options.detector_options))
     _ = write_results_to_file(results,
                               inference_output_file,
                               relative_path_base=image_folder,
                               detector_file=options.default_model)
 
-
     ## Verify results
 
     # Verify format correctness
-    from megadetector.postprocessing.validate_batch_results import validate_batch_results
+    from megadetector.postprocessing.validate_batch_results import validate_batch_results #noqa
     validate_batch_results(inference_output_file)
 
     # Verify value correctness
@@ -706,6 +857,9 @@ def run_python_tests(options):
 
     # Make note of this filename, we will use it again later
     inference_output_file_standard_inference = inference_output_file
+
+    if options.python_test_depth <= 2:
+        return
 
 
     ## Run and verify again with augmentation enabled
@@ -715,7 +869,11 @@ def run_python_tests(options):
     from megadetector.utils.path_utils import insert_before_extension
 
     inference_output_file_augmented = insert_before_extension(inference_output_file,'augmented')
-    results = load_and_run_detector_batch(options.default_model,
+    results = load_and_run_detector_batch(options.default_model,
+                                          image_file_names,
+                                          quiet=True,
+                                          augment=True,
+                                          detector_options=copy(options.detector_options))
     _ = write_results_to_file(results,
                               inference_output_file_augmented,
                               relative_path_base=image_folder,
@@ -855,7 +1013,9 @@ def run_python_tests(options):
     video_options.output_video_file = os.path.join(options.scratch_dir,'video_scratch/rendered_video.mp4')
     video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
     video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
-
+
+    video_options.render_output_video = (not options.skip_video_rendering_tests)
+
     # video_options.keep_rendered_frames = False
     # video_options.keep_extracted_frames = False
     video_options.force_extracted_frame_folder_deletion = True
@@ -868,9 +1028,10 @@ def run_python_tests(options):
     # video_options.rendering_confidence_threshold = None
     # video_options.json_confidence_threshold = 0.005
     video_options.frame_sample = 10
-    video_options.n_cores =
+    video_options.n_cores = options.n_cores_for_video_tests
     # video_options.debug_max_frames = -1
     # video_options.class_mapping_filename = None
+    video_options.detector_options = copy(options.detector_options)
 
     _ = process_video(video_options)
 
@@ -908,7 +1069,7 @@ def run_python_tests(options):
     # video_options.rendering_confidence_threshold = None
     # video_options.json_confidence_threshold = 0.005
     video_options.frame_sample = 10
-    video_options.n_cores =
+    video_options.n_cores = options.n_cores_for_video_tests
 
     # Force frame extraction to disk, since that's how we generated our expected results file
     video_options.force_on_disk_frame_extraction = True
@@ -918,13 +1079,15 @@ def run_python_tests(options):
     # Use quality == None, because we can't control whether YOLOv5 has patched cm2.imread,
     # and therefore can't rely on using the quality parameter
     video_options.quality = None
-    video_options.max_width = None
+    video_options.max_width = None
+    video_options.detector_options = copy(options.detector_options)
 
+    video_options.keep_extracted_frames = True
     _ = process_video_folder(video_options)
 
     assert os.path.isfile(video_options.output_json_file), \
         'Python video test failed to render output .json file'
-
+
     frame_output_file = insert_before_extension(video_options.output_json_file,'frames')
     assert os.path.isfile(frame_output_file)
 
@@ -934,6 +1097,7 @@ def run_python_tests(options):
     expected_results_file = \
         get_expected_results_filename(is_gpu_available(verbose=False),test_type='video',options=options)
     assert os.path.isfile(expected_results_file)
+
     compare_results(frame_output_file,expected_results_file,options)
 
 
@@ -978,7 +1142,6 @@ def run_cli_tests(options):
 
     print('\n*** Starting CLI tests ***\n')
 
-
     ## Environment management
 
     if options.cli_test_pythonpath is not None:
@@ -996,6 +1159,23 @@ def run_cli_tests(options):
         download_test_data(options)
 
 
+    ## Utility imports
+
+    from megadetector.utils.ct_utils import dict_to_kvp_list
+    from megadetector.utils.path_utils import insert_before_extension
+
+
+    ## Utility tests
+
+    # TODO: move postprocessing tests up to this point, using pre-generated .json results files
+
+
+    ## Return early if we're not running torch-related tests
+
+    if options.test_mode == 'utils-only':
+        return
+
+
     ## Run inference on an image
 
     print('\n** Running MD on a single image (CLI) **\n')
@@ -1008,6 +1188,7 @@ def run_cli_tests(options):
         cmd = 'python megadetector/detection/run_detector.py'
     cmd += ' "{}" --image_file "{}" --output_dir "{}"'.format(
         options.default_model,image_fn,output_dir)
+    cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
     cmd_results = execute_and_print(cmd)
 
     if options.cpu_execution_is_error:
@@ -1019,6 +1200,13 @@ def run_cli_tests(options):
         if not gpu_available_via_cli:
             raise Exception('GPU execution is required, but not available')
 
+    # Make sure we can also pass an absolute path to a model file, instead of, e.g. "MDV5A"
+
+    from megadetector.detection.run_detector import try_download_known_detector
+    model_file = try_download_known_detector(options.default_model,force_download=False,verbose=False)
+    cmd = cmd.replace(options.default_model,model_file)
+    cmd_results = execute_and_print(cmd)
+
 
     ## Run inference on a folder
 
@@ -1035,6 +1223,7 @@ def run_cli_tests(options):
         options.default_model,image_folder,inference_output_file)
     cmd += ' --output_relative_filenames --quiet --include_image_size'
     cmd += ' --include_image_timestamp --include_exif_data'
+    cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
     cmd_results = execute_and_print(cmd)
 
     base_cmd = cmd
@@ -1043,13 +1232,12 @@ def run_cli_tests(options):
     ## Run again with checkpointing enabled, make sure the results are the same
 
     print('\n** Running MD on a folder (with checkpoints) (CLI) **\n')
-
-    from megadetector.utils.path_utils import insert_before_extension
 
     checkpoint_string = ' --checkpoint_frequency 5'
     cmd = base_cmd + checkpoint_string
     inference_output_file_checkpoint = insert_before_extension(inference_output_file,'_checkpoint')
     cmd = cmd.replace(inference_output_file,inference_output_file_checkpoint)
+    cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file,
@@ -1059,12 +1247,12 @@ def run_cli_tests(options):
 
     ## Run again with the image queue enabled, make sure the results are the same
 
-    print('\n** Running MD on a folder (with image queue) (CLI) **\n')
+    print('\n** Running MD on a folder (with image queue but no preprocessing) (CLI) **\n')
 
     cmd = base_cmd + ' --use_image_queue'
-    from megadetector.utils.path_utils import insert_before_extension
     inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')
     cmd = cmd.replace(inference_output_file,inference_output_file_queue)
+    cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
     cmd_results = execute_and_print(cmd)
 
     assert output_files_are_identical(fn1=inference_output_file,
@@ -1072,48 +1260,66 @@ def run_cli_tests(options):
                                       verbose=True)
 
 
-
+    print('\n** Running MD on a folder (with image queue and preprocessing) (CLI) **\n')
 
-
-
+    cmd = base_cmd + ' --use_image_queue --preprocess_on_image_queue'
+    inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')
+    cmd = cmd.replace(inference_output_file,inference_output_file_queue)
+    cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
+    cmd_results = execute_and_print(cmd)
+
+    assert output_files_are_identical(fn1=inference_output_file,
+                                      fn2=inference_output_file_queue,
+                                      verbose=True)
 
-
+    ## Run again on multiple cores, make sure the results are the same
 
-
-    if 'CUDA_VISIBLE_DEVICES' in os.environ:
-        cuda_visible_devices = os.environ['CUDA_VISIBLE_DEVICES']
-        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+    if not options.skip_cpu_tests:
 
-
-
-
-    else:
+        # First run again on the CPU on a single thread if necessary, so we get a file that
+        # *should* be identical to the multicore version.
+        gpu_available = is_gpu_available(verbose=False)
 
-
-
-
-
-
+        cuda_visible_devices = None
+        if 'CUDA_VISIBLE_DEVICES' in os.environ:
+            cuda_visible_devices = os.environ['CUDA_VISIBLE_DEVICES']
+        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+        # If we already ran on the CPU, no need to run again
+        if not gpu_available:
+
+            inference_output_file_cpu = inference_output_file
+
+        else:
+
+            print('\n** Running MD on a folder (single CPU) (CLI) **\n')
+
+            inference_output_file_cpu = insert_before_extension(inference_output_file,'cpu')
+            cmd = base_cmd
+            cmd = cmd.replace(inference_output_file,inference_output_file_cpu)
+            cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
+            cmd_results = execute_and_print(cmd)
+
+        print('\n** Running MD on a folder (multiple CPUs) (CLI) **\n')
+
+        cpu_string = ' --ncores {}'.format(options.n_cores_for_multiprocessing_tests)
+        cmd = base_cmd + cpu_string
+        inference_output_file_cpu_multicore = insert_before_extension(inference_output_file,'multicore')
+        cmd = cmd.replace(inference_output_file,inference_output_file_cpu_multicore)
+        cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
        cmd_results = execute_and_print(cmd)
 
-
-
-
-
-
-
-
-
-
-    if cuda_visible_devices is not None:
-        print('Restoring CUDA_VISIBLE_DEVICES')
-        os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
-    else:
-        del os.environ['CUDA_VISIBLE_DEVICES']
+        if cuda_visible_devices is not None:
+            print('Restoring CUDA_VISIBLE_DEVICES')
+            os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
+        else:
+            del os.environ['CUDA_VISIBLE_DEVICES']
+
+        assert output_files_are_identical(fn1=inference_output_file_cpu,
+                                          fn2=inference_output_file_cpu_multicore,
+                                          verbose=True)
 
-
-                                      fn2=inference_output_file_cpu_multicore,
-                                      verbose=True)
+    # ...if we're not skipping the force-cpu tests
 
 
     ## Postprocessing
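The force-CPU block above saves and restores CUDA_VISIBLE_DEVICES by hand around the single-CPU and multicore runs; the same save/set/restore pattern written as a context manager, as a sketch only (this helper is not part of the package):

    import os
    from contextlib import contextmanager

    @contextmanager
    def hide_gpus():
        # Temporarily hide GPUs from libraries that honor CUDA_VISIBLE_DEVICES,
        # restoring (or removing) the variable afterwards
        saved = os.environ.get('CUDA_VISIBLE_DEVICES', None)
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
        try:
            yield
        finally:
            if saved is None:
                del os.environ['CUDA_VISIBLE_DEVICES']
            else:
                os.environ['CUDA_VISIBLE_DEVICES'] = saved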
@@ -1172,23 +1378,33 @@ def run_cli_tests(options):
 
     ## Run inference on a folder (tiled)
 
-
+    # This is a rather esoteric code path that I turn off when I'm testing some
+    # features that it doesn't include yet, particularly compatibility mode
+    # control.
+    skip_tiling_tests = True
 
-
-
-
-
-        cmd = 'python -m megadetector.detection.run_tiled_inference'
+    if skip_tiling_tests:
+
+        print('### DEBUG: skipping tiling tests ###')
+
     else:
-
-
-        options.
-
-
-
-
-
+
+        print('\n** Running tiled inference (CLI) **\n')
+
+        image_folder = os.path.join(options.scratch_dir,'md-test-images')
+        tiling_folder = os.path.join(options.scratch_dir,'tiling-folder')
+        inference_output_file_tiled = os.path.join(options.scratch_dir,'folder_inference_output_tiled.json')
+        if options.cli_working_dir is None:
+            cmd = 'python -m megadetector.detection.run_tiled_inference'
+        else:
+            cmd = 'python megadetector/detection/run_tiled_inference.py'
+        cmd += ' "{}" "{}" "{}" "{}"'.format(
+            options.default_model,image_folder,tiling_folder,inference_output_file_tiled)
+        cmd += ' --overwrite_handling overwrite'
+        cmd_results = execute_and_print(cmd)
 
+        with open(inference_output_file_tiled,'r') as f:
+            results_from_file = json.load(f) # noqa
+
 
     ## Run inference on a folder (augmented, w/YOLOv5 val script)
 
@@ -1252,9 +1468,16 @@ def run_cli_tests(options):
         cmd += ' "{}" "{}"'.format(options.default_model,video_fn)
         cmd += ' --frame_folder "{}" --frame_rendering_folder "{}" --output_json_file "{}" --output_video_file "{}"'.format(
             frame_folder,frame_rendering_folder,video_inference_output_file,output_video_file)
-        cmd += ' --
-        cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion
+        cmd += ' --fourcc {}'.format(options.video_fourcc)
+        cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion'
+        cmd += ' --n_cores {}'.format(options.n_cores_for_video_tests)
+        cmd += ' --frame_sample 4'
         cmd += ' --verbose'
+        cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
+
+        if not options.skip_video_rendering_tests:
+            cmd += ' --render_output_video'
+
         cmd_results = execute_and_print(cmd)
 
     # ...if we're not skipping video tests
@@ -1274,6 +1497,7 @@ def run_cli_tests(options):
         options.alt_model,image_folder,inference_output_file_alt)
     cmd += ' --output_relative_filenames --quiet --include_image_size'
     cmd += ' --include_image_timestamp --include_exif_data'
+    cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
     cmd_results = execute_and_print(cmd)
 
     with open(inference_output_file_alt,'r') as f:
@@ -1302,6 +1526,49 @@
 # ...def run_cli_tests(...)
 
 
+def run_download_tests(options):
+    """
+    Args:
+        options (MDTestOptions): see MDTestOptions for details
+    """
+
+    if options.skip_download_tests or options.test_mode == 'utils-only':
+        return
+
+    from megadetector.detection.run_detector import known_models, \
+        try_download_known_detector, \
+        get_detector_version_from_model_file, \
+        model_string_to_model_version
+
+    # Make sure we can download models based on canonical version numbers,
+    # e.g. "v5a.0.0"
+    for model_name in known_models:
+        url = known_models[model_name]['url']
+        if 'localhost' in url:
+            continue
+        print('Testing download for known model {}'.format(model_name))
+        fn = try_download_known_detector(model_name,
+                                         force_download=False,
+                                         verbose=False)
+        version_string = get_detector_version_from_model_file(fn, verbose=False)
+        assert version_string == model_name
+
+    # Make sure we can download models based on short names, e.g. "MDV5A"
+    for model_name in model_string_to_model_version:
+        model_version = model_string_to_model_version[model_name]
+        assert model_version in known_models
+        url = known_models[model_version]['url']
+        if 'localhost' in url:
+            continue
+        print('Testing download for model short name {}'.format(model_name))
+        fn = try_download_known_detector(model_name,
+                                         force_download=False,
+                                         verbose=False)
+        assert fn != model_name
+
+# ...def run_download_tests()
+
+
 #%% Main test wrapper
 
 def run_tests(options):
@@ -1315,6 +1582,9 @@ def run_tests(options):
     # Prepare data folder
     download_test_data(options)
 
+    # Run model download tests if necessary
+    run_download_tests(options)
+
     if options.disable_gpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
 
@@ -1331,8 +1601,32 @@ def run_tests(options):
 
     # Run python tests
     if not options.skip_python_tests:
-
-
+
+        if options.model_folder is not None:
+
+            assert os.path.isdir(options.model_folder), \
+                'Could not find model folder {}'.format(options.model_folder)
+
+            model_files = os.listdir(options.model_folder)
+            model_files = [fn for fn in model_files if fn.endswith('.pt')]
+            model_files = [os.path.join(options.model_folder,fn) for fn in model_files]
+
+            assert len(model_files) > 0, \
+                'Could not find any models in folder {}'.format(options.model_folder)
+
+            original_default_model = options.default_model
+
+            for model_file in model_files:
+                print('Running Python tests for model {}'.format(model_file))
+                options.default_model = model_file
+                run_python_tests(options)
+
+            options.default_model = original_default_model
+
+        else:
+
+            run_python_tests(options)
+
     # Run CLI tests
     if not options.skip_cli_tests:
         run_cli_tests(options)
@@ -1360,16 +1654,26 @@ if False:
     options.warning_mode = False
     options.max_coord_error = 0.01 # 0.001
     options.max_conf_error = 0.01 # 0.005
-
+    options.skip_video_rendering_tests = True
+    # options.iou_threshold_for_file_comparison = 0.7
+
+    options.cli_working_dir = r'c:\git\MegaDetector'
+    # When running in the cameratraps-detector environment
+    # options.cli_test_pythonpath = r'c:\git\MegaDetector;c:\git\yolov5-md'
+
+    # When running in the MegaDetector environment
+    options.cli_test_pythonpath = r'c:\git\MegaDetector'
+
+    # options.cli_working_dir = os.path.expanduser('~')
     # options.yolo_working_dir = r'c:\git\yolov5-md'
-    options.cli_working_dir = os.path.expanduser('~')
     # options.yolo_working_dir = '/mnt/c/git/yolov5-md'
     options = download_test_data(options)
 
     #%%
 
     import os
-    if 'PYTHONPATH' not in os.environ or
+    if ('PYTHONPATH' not in os.environ) or \
+        (options.yolo_working_dir is not None and options.yolo_working_dir not in os.environ['PYTHONPATH']):
         os.environ['PYTHONPATH'] += ';' + options.yolo_working_dir
 
     #%%
@@ -1448,6 +1752,11 @@ def main():
         action='store_true',
         help='Skip tests related to video (which can be slow)')
 
+    parser.add_argument(
+        '--skip_video_rendering_tests',
+        action='store_true',
+        help='Skip tests related to *rendering* video')
+
     parser.add_argument(
         '--skip_python_tests',
         action='store_true',
@@ -1458,6 +1767,16 @@ def main():
         action='store_true',
         help='Skip CLI tests')
 
+    parser.add_argument(
+        '--skip_download_tests',
+        action='store_true',
+        help='Skip model download tests')
+
+    parser.add_argument(
+        '--skip_cpu_tests',
+        action='store_true',
+        help='Skip force-CPU tests')
+
     parser.add_argument(
         '--force_data_download',
         action='store_true',
@@ -1506,13 +1825,50 @@ def main():
         help='PYTHONPATH to set for CLI tests; if None, inherits from the parent process'
         )
 
-
+    parser.add_argument(
+        '--test_mode',
+        type=str,
+        default='utils-only',
+        help='Test mode: "all" or "utils-only"'
+        )
+
+    parser.add_argument(
+        '--python_test_depth',
+        type=int,
+        default=options.python_test_depth,
+        help='Used as a knob to control the level of Python tests (0-100)'
+        )
+
+    parser.add_argument(
+        '--model_folder',
+        type=str,
+        default=None,
+        help='Run Python tests on every model in this folder'
+        )
+
+    parser.add_argument(
+        '--detector_options',
+        nargs='*',
+        metavar='KEY=VALUE',
+        default='',
+        help='Detector-specific options, as a space-separated list of key-value pairs')
+
+    parser.add_argument(
+        '--default_model',
+        type=str,
+        default=options.default_model,
+        help='Default model file or well-known model name (used for most tests)')
+
+    # The following token is used for linting, do not remove.
     #
     # no_arguments_required
-
+
     args = parser.parse_args()
 
+    initial_detector_options = options.detector_options
     _args_to_object(args,options)
+    from megadetector.utils.ct_utils import parse_kvp_list
+    options.detector_options = parse_kvp_list(args.detector_options,d=initial_detector_options)
 
     run_tests(options)
 
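The --detector_options plumbing added in main() assumes that ct_utils.dict_to_kvp_list() and parse_kvp_list() round-trip a flat dict through space-separated key=value tokens, with parse_kvp_list() merging over the defaults passed in [d]; a sketch of that assumed behavior (hypothetical helper names, not the ct_utils signatures):

    def dict_to_kvp_list_sketch(d):
        # {'compatibility_mode': 'classic-test'} -> 'compatibility_mode=classic-test'
        return ' '.join('{}={}'.format(k, v) for k, v in d.items())

    def parse_kvp_list_sketch(kvp_list, d=None):
        # ['compatibility_mode=classic-test'] -> {'compatibility_mode': 'classic-test'},
        # merged over any defaults supplied in [d]
        result = dict(d) if d is not None else {}
        for kvp in (kvp_list or []):
            key, _, value = kvp.partition('=')
            result[key] = value
        return result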
@@ -1520,23 +1876,6 @@ if __name__ == '__main__':
     main()
 
 
-#%% Sample invocations
-
-r"""
-# Windows
-set PYTHONPATH=c:\git\MegaDetector;c:\git\yolov5-md
-cd c:\git\MegaDetector\megadetector\utils
-python md_tests.py --cli_working_dir "c:\git\MegaDetector" --yolo_working_dir "c:\git\yolov5-md" --cli_test_pythonpath "c:\git\MegaDetector;c:\git\yolov5-md"
-
-# Linux
-export PYTHONPATH=/mnt/c/git/MegaDetector:/mnt/c/git/yolov5-md
-cd /mnt/c/git/MegaDetector/megadetector/utils
-python md_tests.py --cli_working_dir "/mnt/c/git/MegaDetector" --yolo_working_dir "/mnt/c/git/yolov5-md" --cli_test_pythonpath "/mnt/c/git/MegaDetector:/mnt/c/git/yolov5-md"
-
-python -c "import md_tests; print(md_tests.get_expected_results_filename(True))"
-"""
-
-
 #%% Scrap
 
 if False: