megadetector 5.0.28__py3-none-any.whl → 10.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/efficientnet/model.py +8 -8
- megadetector/classification/efficientnet/utils.py +6 -5
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +26 -26
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -2
- megadetector/data_management/camtrap_dp_to_coco.py +79 -46
- megadetector/data_management/cct_json_utils.py +103 -103
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +210 -193
- megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
- megadetector/data_management/databases/integrity_check_json_db.py +228 -200
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +88 -39
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +133 -125
- megadetector/data_management/labelme_to_yolo.py +159 -73
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
- megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +73 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
- megadetector/data_management/mewc_to_md.py +344 -340
- megadetector/data_management/ocr_tools.py +262 -255
- megadetector/data_management/read_exif.py +249 -227
- megadetector/data_management/remap_coco_categories.py +90 -28
- megadetector/data_management/remove_exif.py +81 -21
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +588 -120
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +248 -122
- megadetector/data_management/yolo_to_coco.py +333 -191
- megadetector/detection/change_detection.py +832 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +358 -278
- megadetector/detection/run_detector.py +399 -186
- megadetector/detection/run_detector_batch.py +404 -377
- megadetector/detection/run_inference_with_yolov5_val.py +340 -327
- megadetector/detection/run_tiled_inference.py +257 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +332 -295
- megadetector/postprocessing/add_max_conf.py +19 -11
- megadetector/postprocessing/categorize_detections_by_size.py +45 -45
- megadetector/postprocessing/classification_postprocessing.py +468 -433
- megadetector/postprocessing/combine_batch_outputs.py +23 -23
- megadetector/postprocessing/compare_batch_results.py +590 -525
- megadetector/postprocessing/convert_output_format.py +106 -102
- megadetector/postprocessing/create_crop_folder.py +347 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +48 -27
- megadetector/postprocessing/md_to_coco.py +133 -102
- megadetector/postprocessing/md_to_labelme.py +107 -90
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +92 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -301
- megadetector/postprocessing/remap_detection_categories.py +91 -38
- megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +156 -74
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/ct_utils.py +1049 -211
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +632 -529
- megadetector/utils/path_utils.py +1520 -431
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/split_locations_into_train_val.py +62 -62
- megadetector/utils/string_utils.py +148 -27
- megadetector/utils/url_utils.py +489 -176
- megadetector/utils/wi_utils.py +2658 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +34 -30
- megadetector/visualization/render_images_with_thumbnails.py +39 -74
- megadetector/visualization/visualization_utils.py +487 -435
- megadetector/visualization/visualize_db.py +232 -198
- megadetector/visualization/visualize_detector_output.py +82 -76
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
- megadetector-10.0.0.dist-info/RECORD +139 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
- megadetector/api/batch_processing/api_core/server.py +0 -294
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
- megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
- megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
- megadetector/api/batch_processing/api_core/server_utils.py +0 -88
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector/utils/azure_utils.py +0 -178
- megadetector/utils/sas_blob_utils.py +0 -509
- megadetector-5.0.28.dist-info/RECORD +0 -209
- /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
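The largest behavioral changes in this release are concentrated in the test harness, megadetector/utils/md_tests.py, whose diff follows. As a quick orientation, the sketch below shows how the names that appear in that diff fit together; this is a minimal, hypothetical usage sketch based only on the functions and option fields visible in the diff (MDTestOptions, download_test_data, run_python_tests), not an official or documented recipe.

    # Minimal sketch, assuming only the names visible in the md_tests.py diff below.
    from megadetector.utils.md_tests import MDTestOptions, download_test_data, run_python_tests

    options = MDTestOptions()
    options.disable_gpu = True        # force CPU execution (field shown in the diff)
    options.skip_video_tests = True   # skip the video-processing tests
    options.test_mode = 'utils-only'  # run only the import/utility tests

    # download_test_data() fetches and unzips the test package if needed and
    # fills in options.scratch_dir; run_python_tests() also calls it internally.
    options = download_test_data(options)
    run_python_tests(options)

Note that the 10.0.0 version of this module imports pytest at module level, so pytest must be installed to import it.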
megadetector/utils/md_tests.py
CHANGED
|
@@ -6,7 +6,7 @@ A series of tests to validate basic repo functionality and verify either "correc
|
|
|
6
6
|
inference behavior, or - when operating in environments other than the training
|
|
7
7
|
environment - acceptable deviation from the correct results.
|
|
8
8
|
|
|
9
|
-
This module should not depend on anything else in this repo outside of the
|
|
9
|
+
This module should not depend on anything else in this repo outside of the
|
|
10
10
|
tests themselves, even if it means some duplicated code (e.g. for downloading files),
|
|
11
11
|
since much of what it tries to test is, e.g., imports.
|
|
12
12
|
|
|
@@ -28,6 +28,7 @@ import zipfile
|
|
|
28
28
|
import subprocess
|
|
29
29
|
import argparse
|
|
30
30
|
import inspect
|
|
31
|
+
import pytest
|
|
31
32
|
|
|
32
33
|
from copy import copy
|
|
33
34
|
|
|
@@ -38,110 +39,113 @@ class MDTestOptions:
|
|
|
38
39
|
"""
|
|
39
40
|
Options controlling test behavior
|
|
40
41
|
"""
|
|
41
|
-
|
|
42
|
+
|
|
42
43
|
def __init__(self):
|
|
43
|
-
|
|
44
|
+
|
|
44
45
|
## Required ##
|
|
45
|
-
|
|
46
|
+
|
|
46
47
|
#: Force CPU execution
|
|
47
48
|
self.disable_gpu = False
|
|
48
|
-
|
|
49
|
+
|
|
49
50
|
#: If GPU execution is requested, but a GPU is not available, should we error?
|
|
50
51
|
self.cpu_execution_is_error = False
|
|
51
|
-
|
|
52
|
+
|
|
52
53
|
#: Skip tests related to video processing
|
|
53
54
|
self.skip_video_tests = False
|
|
54
|
-
|
|
55
|
+
|
|
55
56
|
#: Skip tests related to video rendering
|
|
56
57
|
self.skip_video_rendering_tests = False
|
|
57
|
-
|
|
58
|
+
|
|
58
59
|
#: Skip tests launched via Python functions (as opposed to CLIs)
|
|
59
60
|
self.skip_python_tests = False
|
|
60
|
-
|
|
61
|
+
|
|
61
62
|
#: Skip CLI tests
|
|
62
63
|
self.skip_cli_tests = False
|
|
63
|
-
|
|
64
|
+
|
|
64
65
|
#: Skip download tests
|
|
65
66
|
self.skip_download_tests = False
|
|
66
|
-
|
|
67
|
+
|
|
68
|
+
#: Skip download tests for local URLs
|
|
69
|
+
self.skip_localhost_downloads = False
|
|
70
|
+
|
|
67
71
|
#: Skip force-CPU tests
|
|
68
72
|
self.skip_cpu_tests = False
|
|
69
|
-
|
|
73
|
+
|
|
70
74
|
#: Force a specific folder for temporary input/output
|
|
71
75
|
self.scratch_dir = None
|
|
72
|
-
|
|
76
|
+
|
|
73
77
|
#: Where does the test data live?
|
|
74
78
|
self.test_data_url = 'https://lila.science/public/md-test-package.zip'
|
|
75
|
-
|
|
79
|
+
|
|
76
80
|
#: Download test data even if it appears to have already been downloaded
|
|
77
81
|
self.force_data_download = False
|
|
78
|
-
|
|
82
|
+
|
|
79
83
|
#: Unzip test data even if it appears to have already been unzipped
|
|
80
84
|
self.force_data_unzip = False
|
|
81
|
-
|
|
85
|
+
|
|
82
86
|
#: By default, any unexpected behavior is an error; this forces most errors to
|
|
83
87
|
#: be treated as warnings.
|
|
84
88
|
self.warning_mode = False
|
|
85
|
-
|
|
89
|
+
|
|
86
90
|
#: How much deviation from the expected detection coordinates should we allow before
|
|
87
91
|
#: a disrepancy becomes an error?
|
|
88
92
|
self.max_coord_error = 0.001
|
|
89
|
-
|
|
93
|
+
|
|
90
94
|
#: How much deviation from the expected confidence values should we allow before
|
|
91
95
|
#: a disrepancy becomes an error?
|
|
92
96
|
self.max_conf_error = 0.005
|
|
93
|
-
|
|
97
|
+
|
|
94
98
|
#: Current working directory when running CLI tests
|
|
95
99
|
#:
|
|
96
100
|
#: If this is None, we won't mess with the inherited working directory.
|
|
97
101
|
self.cli_working_dir = None
|
|
98
|
-
|
|
99
|
-
#: YOLOv5 installation, only relevant if we're testing run_inference_with_yolov5_val.
|
|
102
|
+
|
|
103
|
+
#: YOLOv5 installation, only relevant if we're testing run_inference_with_yolov5_val.
|
|
100
104
|
#:
|
|
101
105
|
#: If this is None, we'll skip that test.
|
|
102
106
|
self.yolo_working_dir = None
|
|
103
|
-
|
|
107
|
+
|
|
104
108
|
#: fourcc code to use for video tests that involve rendering video
|
|
105
|
-
self.video_fourcc = 'mp4v'
|
|
106
|
-
|
|
109
|
+
self.video_fourcc = 'mp4v'
|
|
110
|
+
|
|
107
111
|
#: Default model to use for testing (filename, URL, or well-known model string)
|
|
108
112
|
self.default_model = 'MDV5A'
|
|
109
113
|
|
|
110
114
|
#: For comparison tests, use a model that produces slightly different output
|
|
111
115
|
self.alt_model = 'MDV5B'
|
|
112
|
-
|
|
116
|
+
|
|
113
117
|
#: PYTHONPATH to set for CLI tests; if None, inherits from the parent process. Only
|
|
114
118
|
#: impacts the called functions, not the parent process.
|
|
115
119
|
self.cli_test_pythonpath = None
|
|
116
|
-
|
|
120
|
+
|
|
117
121
|
#: IoU threshold used to determine whether boxes in two detection files likely correspond
|
|
118
122
|
#: to the same box.
|
|
119
123
|
self.iou_threshold_for_file_comparison = 0.85
|
|
120
|
-
|
|
124
|
+
|
|
121
125
|
#: Detector options passed to PTDetector
|
|
122
|
-
self.detector_options = {'compatibility_mode':'classic-test'}
|
|
123
|
-
|
|
124
|
-
#: Used to drive a series of tests (typically with a low value for
|
|
126
|
+
self.detector_options = {'compatibility_mode':'classic-test'}
|
|
127
|
+
|
|
128
|
+
#: Used to drive a series of tests (typically with a low value for
|
|
125
129
|
#: python_test_depth) over a folder of models.
|
|
126
130
|
self.model_folder = None
|
|
127
|
-
|
|
131
|
+
|
|
128
132
|
#: Used as a knob to control the level of Python tests, typically used when
|
|
129
|
-
#: we want to run a series of simple tests on a small number of models, rather
|
|
133
|
+
#: we want to run a series of simple tests on a small number of models, rather
|
|
130
134
|
#: than a deep test of tests on a small number of models. The gestalt is that
|
|
131
135
|
#: this is a range from 0-100.
|
|
132
136
|
self.python_test_depth = 100
|
|
133
|
-
|
|
137
|
+
|
|
134
138
|
#: Currently should be 'all' or 'utils-only'
|
|
135
139
|
self.test_mode = 'all'
|
|
136
|
-
|
|
140
|
+
|
|
137
141
|
#: Number of cores to use for multi-CPU inference tests
|
|
138
142
|
self.n_cores_for_multiprocessing_tests = 2
|
|
139
|
-
|
|
143
|
+
|
|
140
144
|
#: Number of cores to use for multi-CPU video tests
|
|
141
145
|
self.n_cores_for_video_tests = 2
|
|
142
|
-
|
|
146
|
+
|
|
143
147
|
# ...def __init__()
|
|
144
|
-
|
|
148
|
+
|
|
145
149
|
# ...class MDTestOptions()
|
|
146
150
|
|
|
147
151
|
|
|
@@ -154,20 +158,24 @@ def get_expected_results_filename(gpu_is_available,
|
|
|
154
158
|
options=None):
|
|
155
159
|
"""
|
|
156
160
|
Expected results vary just a little across inference environments, particularly
|
|
157
|
-
between PT 1.x and 2.x, so when making sure things are working acceptably, we
|
|
161
|
+
between PT 1.x and 2.x, so when making sure things are working acceptably, we
|
|
158
162
|
compare to a reference file that matches the current environment.
|
|
159
|
-
|
|
163
|
+
|
|
160
164
|
This function gets the correct filename to compare to current results, depending
|
|
161
165
|
on whether a GPU is available.
|
|
162
|
-
|
|
166
|
+
|
|
163
167
|
Args:
|
|
164
168
|
gpu_is_available (bool): whether a GPU is available
|
|
165
|
-
|
|
169
|
+
model_string (str, optional): the model for which we're retrieving expected results
|
|
170
|
+
test_type (str, optional): the test type we're running ("image" or "video")
|
|
171
|
+
augment (bool, optional): whether we're running this test with image augmentation
|
|
172
|
+
options (MDTestOptiosn, optional): additional control flow options
|
|
173
|
+
|
|
166
174
|
Returns:
|
|
167
175
|
str: relative filename of the results file we should use (within the test
|
|
168
176
|
data zipfile)
|
|
169
177
|
"""
|
|
170
|
-
|
|
178
|
+
|
|
171
179
|
if gpu_is_available:
|
|
172
180
|
hw_string = 'gpu'
|
|
173
181
|
else:
|
|
@@ -180,8 +188,8 @@ def get_expected_results_filename(gpu_is_available,
|
|
|
180
188
|
else:
|
|
181
189
|
assert torch_version.startswith('2'), 'Unknown torch version: {}'.format(torch_version)
|
|
182
190
|
pt_string = 'pt2.x'
|
|
183
|
-
|
|
184
|
-
# A hack for now to account for the fact that even with acceleration enabled and PT2
|
|
191
|
+
|
|
192
|
+
# A hack for now to account for the fact that even with acceleration enabled and PT2
|
|
185
193
|
# installed, Apple silicon appears to provide the same results as CPU/PT1 inference
|
|
186
194
|
try:
|
|
187
195
|
import torch
|
|
@@ -192,32 +200,32 @@ def get_expected_results_filename(gpu_is_available,
|
|
|
192
200
|
pt_string = 'pt1.10.1'
|
|
193
201
|
except Exception:
|
|
194
202
|
pass
|
|
195
|
-
|
|
203
|
+
|
|
196
204
|
aug_string = ''
|
|
197
205
|
if augment:
|
|
198
206
|
aug_string = 'augment-'
|
|
199
|
-
|
|
207
|
+
|
|
200
208
|
fn = '{}-{}{}-{}-{}.json'.format(model_string,aug_string,test_type,hw_string,pt_string)
|
|
201
|
-
|
|
209
|
+
|
|
202
210
|
from megadetector.utils.path_utils import insert_before_extension
|
|
203
|
-
|
|
211
|
+
|
|
204
212
|
if test_type == 'video':
|
|
205
213
|
fn = insert_before_extension(fn,'frames')
|
|
206
|
-
|
|
214
|
+
|
|
207
215
|
if options is not None and options.scratch_dir is not None:
|
|
208
216
|
fn = os.path.join(options.scratch_dir,fn)
|
|
209
|
-
|
|
217
|
+
|
|
210
218
|
return fn
|
|
211
|
-
|
|
212
|
-
|
|
219
|
+
|
|
220
|
+
|
|
213
221
|
def download_test_data(options=None):
|
|
214
222
|
"""
|
|
215
|
-
Downloads the test zipfile if necessary, unzips if necessary. Initializes
|
|
223
|
+
Downloads the test zipfile if necessary, unzips if necessary. Initializes
|
|
216
224
|
temporary fields in [options], particularly [options.scratch_dir].
|
|
217
|
-
|
|
225
|
+
|
|
218
226
|
Args:
|
|
219
227
|
options (MDTestOptions, optional): see MDTestOptions for details
|
|
220
|
-
|
|
228
|
+
|
|
221
229
|
Returns:
|
|
222
230
|
MDTestOptions: the same object passed in as input, or the options that
|
|
223
231
|
were used if [options] was supplied as None
|
|
@@ -225,17 +233,17 @@ def download_test_data(options=None):
|
|
|
225
233
|
|
|
226
234
|
if options is None:
|
|
227
235
|
options = MDTestOptions()
|
|
228
|
-
|
|
229
|
-
if options.scratch_dir is None:
|
|
236
|
+
|
|
237
|
+
if options.scratch_dir is None:
|
|
230
238
|
tempdir_base = tempfile.gettempdir()
|
|
231
239
|
scratch_dir = os.path.join(tempdir_base,'md-tests')
|
|
232
240
|
else:
|
|
233
241
|
scratch_dir = options.scratch_dir
|
|
234
|
-
|
|
235
|
-
os.makedirs(scratch_dir,exist_ok=True)
|
|
236
|
-
|
|
242
|
+
|
|
243
|
+
os.makedirs(scratch_dir,exist_ok=True)
|
|
244
|
+
|
|
237
245
|
# See whether we've already downloaded the data zipfile
|
|
238
|
-
download_zipfile = True
|
|
246
|
+
download_zipfile = True
|
|
239
247
|
if not options.force_data_download:
|
|
240
248
|
local_zipfile = os.path.join(scratch_dir,options.test_data_url.split('/')[-1])
|
|
241
249
|
if os.path.isfile(local_zipfile):
|
|
@@ -244,23 +252,23 @@ def download_test_data(options=None):
|
|
|
244
252
|
target_file_size = os.path.getsize(local_zipfile)
|
|
245
253
|
if remote_size == target_file_size:
|
|
246
254
|
download_zipfile = False
|
|
247
|
-
|
|
255
|
+
|
|
248
256
|
if download_zipfile:
|
|
249
257
|
print('Downloading test data zipfile')
|
|
250
258
|
urllib.request.urlretrieve(options.test_data_url, local_zipfile)
|
|
251
259
|
print('Finished download to {}'.format(local_zipfile))
|
|
252
260
|
else:
|
|
253
261
|
print('Bypassing test data zipfile download for {}'.format(local_zipfile))
|
|
254
|
-
|
|
255
|
-
|
|
262
|
+
|
|
263
|
+
|
|
256
264
|
## Unzip data
|
|
257
|
-
|
|
258
|
-
zipf = zipfile.ZipFile(local_zipfile)
|
|
265
|
+
|
|
266
|
+
zipf = zipfile.ZipFile(local_zipfile)
|
|
259
267
|
zip_contents = zipf.filelist
|
|
260
|
-
|
|
268
|
+
|
|
261
269
|
# file_info = zip_contents[1]
|
|
262
270
|
for file_info in zip_contents:
|
|
263
|
-
|
|
271
|
+
|
|
264
272
|
expected_size = file_info.file_size
|
|
265
273
|
if expected_size == 0:
|
|
266
274
|
continue
|
|
@@ -275,14 +283,14 @@ def download_test_data(options=None):
|
|
|
275
283
|
os.makedirs(os.path.dirname(target_file),exist_ok=True)
|
|
276
284
|
with open(target_file,'wb') as f:
|
|
277
285
|
f.write(zipf.read(fn_relative))
|
|
278
|
-
|
|
286
|
+
|
|
279
287
|
# ...for each file in the zipfile
|
|
280
|
-
|
|
288
|
+
|
|
281
289
|
try:
|
|
282
290
|
zipf.close()
|
|
283
291
|
except Exception as e:
|
|
284
292
|
print('Warning: error closing zipfile:\n{}'.format(str(e)))
|
|
285
|
-
|
|
293
|
+
|
|
286
294
|
# Warn if files are present that aren't expected
|
|
287
295
|
test_files = glob.glob(os.path.join(scratch_dir,'**/*'), recursive=True)
|
|
288
296
|
test_files = [os.path.relpath(fn,scratch_dir).replace('\\','/') for fn in test_files]
|
|
@@ -292,18 +300,18 @@ def download_test_data(options=None):
|
|
|
292
300
|
if fn.endswith('/'):
|
|
293
301
|
continue
|
|
294
302
|
assert fn in test_files_set, 'File {} is missing from the test image folder'.format(fn)
|
|
295
|
-
|
|
303
|
+
|
|
296
304
|
# Populate the test options with test data information
|
|
297
305
|
options.scratch_dir = scratch_dir
|
|
298
306
|
options.all_test_files = test_files
|
|
299
307
|
options.test_images = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.jpg','.jpeg','.png')]
|
|
300
|
-
options.test_videos = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.mp4','.avi')]
|
|
308
|
+
options.test_videos = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.mp4','.avi')]
|
|
301
309
|
options.test_videos = [fn for fn in options.test_videos if 'rendered' not in fn]
|
|
302
310
|
options.test_videos = [fn for fn in options.test_videos if \
|
|
303
311
|
os.path.isfile(os.path.join(scratch_dir,fn))]
|
|
304
|
-
|
|
312
|
+
|
|
305
313
|
print('Finished unzipping and enumerating test data')
|
|
306
|
-
|
|
314
|
+
|
|
307
315
|
return options
|
|
308
316
|
|
|
309
317
|
# ...def download_test_data(...)
|
|
@@ -311,16 +319,16 @@ def download_test_data(options=None):
|
|
|
311
319
|
|
|
312
320
|
def is_gpu_available(verbose=True):
|
|
313
321
|
"""
|
|
314
|
-
Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch. Returns
|
|
322
|
+
Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch. Returns
|
|
315
323
|
false if PT fails to import.
|
|
316
|
-
|
|
324
|
+
|
|
317
325
|
Args:
|
|
318
|
-
verbose (bool, optional): enable additional debug console output
|
|
319
|
-
|
|
326
|
+
verbose (bool, optional): enable additional debug console output
|
|
327
|
+
|
|
320
328
|
Returns:
|
|
321
|
-
bool: whether a GPU is available
|
|
329
|
+
bool: whether a GPU is available
|
|
322
330
|
"""
|
|
323
|
-
|
|
331
|
+
|
|
324
332
|
# Import torch inside this function, so we have a chance to set CUDA_VISIBLE_DEVICES
|
|
325
333
|
# before checking GPU availability.
|
|
326
334
|
try:
|
|
@@ -328,9 +336,9 @@ def is_gpu_available(verbose=True):
|
|
|
328
336
|
except Exception:
|
|
329
337
|
print('Warning: could not import torch')
|
|
330
338
|
return False
|
|
331
|
-
|
|
339
|
+
|
|
332
340
|
gpu_available = torch.cuda.is_available()
|
|
333
|
-
|
|
341
|
+
|
|
334
342
|
if gpu_available:
|
|
335
343
|
if verbose:
|
|
336
344
|
print('CUDA available: {}'.format(gpu_available))
|
|
@@ -344,123 +352,124 @@ def is_gpu_available(verbose=True):
|
|
|
344
352
|
pass
|
|
345
353
|
if gpu_available:
|
|
346
354
|
print('Metal performance shaders available')
|
|
347
|
-
|
|
355
|
+
|
|
348
356
|
if not gpu_available:
|
|
349
357
|
print('No GPU available')
|
|
350
|
-
|
|
351
|
-
return gpu_available
|
|
352
358
|
|
|
353
|
-
|
|
359
|
+
return gpu_available
|
|
360
|
+
|
|
361
|
+
# ...def is_gpu_available(...)
|
|
354
362
|
|
|
355
363
|
|
|
356
364
|
def output_files_are_identical(fn1,fn2,verbose=False):
|
|
357
365
|
"""
|
|
358
366
|
Checks whether two MD-formatted output files are identical other than file sorting.
|
|
359
|
-
|
|
367
|
+
|
|
360
368
|
Args:
|
|
361
369
|
fn1 (str): the first filename to compare
|
|
362
370
|
fn2 (str): the second filename to compare
|
|
363
|
-
|
|
371
|
+
verbose (bool, optional): enable additional debug output
|
|
372
|
+
|
|
364
373
|
Returns:
|
|
365
374
|
bool: whether [fn1] and [fn2] are identical other than file sorting.
|
|
366
375
|
"""
|
|
367
|
-
|
|
376
|
+
|
|
368
377
|
if verbose:
|
|
369
378
|
print('Comparing {} to {}'.format(fn1,fn2))
|
|
370
|
-
|
|
379
|
+
|
|
371
380
|
with open(fn1,'r') as f:
|
|
372
381
|
fn1_results = json.load(f)
|
|
373
382
|
fn1_results['images'] = \
|
|
374
383
|
sorted(fn1_results['images'], key=lambda d: d['file'])
|
|
375
|
-
|
|
384
|
+
|
|
376
385
|
with open(fn2,'r') as f:
|
|
377
386
|
fn2_results = json.load(f)
|
|
378
387
|
fn2_results['images'] = \
|
|
379
388
|
sorted(fn2_results['images'], key=lambda d: d['file'])
|
|
380
|
-
|
|
389
|
+
|
|
381
390
|
if len(fn1_results['images']) != len(fn1_results['images']):
|
|
382
391
|
if verbose:
|
|
383
392
|
print('{} images in {}, {} images in {}'.format(
|
|
384
393
|
len(fn1_results['images']),fn1,
|
|
385
394
|
len(fn2_results['images']),fn2))
|
|
386
395
|
return False
|
|
387
|
-
|
|
396
|
+
|
|
388
397
|
# i_image = 0; fn1_image = fn1_results['images'][i_image]
|
|
389
398
|
for i_image,fn1_image in enumerate(fn1_results['images']):
|
|
390
|
-
|
|
399
|
+
|
|
391
400
|
fn2_image = fn2_results['images'][i_image]
|
|
392
|
-
|
|
401
|
+
|
|
393
402
|
if fn1_image['file'] != fn2_image['file']:
|
|
394
403
|
if verbose:
|
|
395
404
|
print('Filename difference at {}: {} vs {} '.format(i_image,fn1_image['file'],fn1_image['file']))
|
|
396
405
|
return False
|
|
397
|
-
|
|
406
|
+
|
|
398
407
|
if fn1_image != fn2_image:
|
|
399
408
|
if verbose:
|
|
400
409
|
print('Image-level difference in image {}: {}'.format(i_image,fn1_image['file']))
|
|
401
410
|
return False
|
|
402
|
-
|
|
411
|
+
|
|
403
412
|
return True
|
|
404
413
|
|
|
405
414
|
# ...def output_files_are_identical(...)
|
|
406
|
-
|
|
415
|
+
|
|
407
416
|
|
|
408
417
|
def compare_detection_lists(detections_a,detections_b,options,bidirectional_comparison=True):
|
|
409
418
|
"""
|
|
410
419
|
Compare two lists of MD-formatted detections, matching detections across lists using IoU
|
|
411
|
-
criteria. Generally used to compare detections for the same image when two sets of results
|
|
420
|
+
criteria. Generally used to compare detections for the same image when two sets of results
|
|
412
421
|
are expected to be more or less the same.
|
|
413
|
-
|
|
422
|
+
|
|
414
423
|
Args:
|
|
415
424
|
detections_a (list): the first set of detection dicts
|
|
416
425
|
detections_b (list): the second set of detection dicts
|
|
417
|
-
options (MDTestOptions): options that determine tolerable differences between files
|
|
426
|
+
options (MDTestOptions): options that determine tolerable differences between files
|
|
418
427
|
bidirectional_comparison (bool, optional): reverse the arguments and make a recursive
|
|
419
428
|
call.
|
|
420
|
-
|
|
429
|
+
|
|
421
430
|
Returns:
|
|
422
431
|
dict: a dictionary with keys 'max_conf_error' and 'max_coord_error'.
|
|
423
432
|
"""
|
|
424
433
|
from megadetector.utils.ct_utils import get_iou
|
|
425
|
-
|
|
434
|
+
|
|
426
435
|
max_conf_error = 0
|
|
427
436
|
max_coord_error = 0
|
|
428
|
-
|
|
437
|
+
|
|
429
438
|
max_conf_error_det_a = None
|
|
430
439
|
max_conf_error_det_b = None
|
|
431
|
-
|
|
440
|
+
|
|
432
441
|
max_coord_error_det_a = None
|
|
433
442
|
max_coord_error_det_b = None
|
|
434
|
-
|
|
443
|
+
|
|
435
444
|
# i_det_a = 0
|
|
436
445
|
for i_det_a in range(0,len(detections_a)):
|
|
437
|
-
|
|
446
|
+
|
|
438
447
|
det_a = detections_a[i_det_a]
|
|
439
|
-
|
|
448
|
+
|
|
440
449
|
# Don't process very-low-confidence boxes
|
|
441
450
|
# if det_a['conf'] < options.max_conf_error:
|
|
442
451
|
# continue
|
|
443
|
-
|
|
452
|
+
|
|
444
453
|
matching_det_b = None
|
|
445
454
|
highest_iou = -1
|
|
446
|
-
|
|
455
|
+
|
|
447
456
|
# Find the closest match in the detections_b list
|
|
448
|
-
|
|
457
|
+
|
|
449
458
|
# i_det_b = 0
|
|
450
459
|
for i_det_b in range(0,len(detections_b)):
|
|
451
|
-
|
|
460
|
+
|
|
452
461
|
det_b = detections_b[i_det_b]
|
|
453
|
-
|
|
462
|
+
|
|
454
463
|
if det_b['category'] != det_a['category']:
|
|
455
464
|
continue
|
|
456
|
-
|
|
465
|
+
|
|
457
466
|
iou = get_iou(det_a['bbox'],det_b['bbox'])
|
|
458
|
-
|
|
467
|
+
|
|
459
468
|
# Is this likely the same detection as det_a?
|
|
460
469
|
if iou >= options.iou_threshold_for_file_comparison and iou > highest_iou:
|
|
461
470
|
matching_det_b = det_b
|
|
462
471
|
highest_iou = iou
|
|
463
|
-
|
|
472
|
+
|
|
464
473
|
# If there are no detections in this category in detections_b
|
|
465
474
|
if matching_det_b is None:
|
|
466
475
|
if det_a['conf'] > max_conf_error:
|
|
@@ -468,34 +477,34 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
|
|
|
468
477
|
max_conf_error_det_a = det_a
|
|
469
478
|
# max_coord_error = 1.0
|
|
470
479
|
continue
|
|
471
|
-
|
|
480
|
+
|
|
472
481
|
assert det_a['category'] == matching_det_b['category']
|
|
473
482
|
conf_err = abs(det_a['conf'] - matching_det_b['conf'])
|
|
474
483
|
coord_differences = []
|
|
475
484
|
for i_coord in range(0,4):
|
|
476
485
|
coord_differences.append(abs(det_a['bbox'][i_coord]-\
|
|
477
486
|
matching_det_b['bbox'][i_coord]))
|
|
478
|
-
coord_err = max(coord_differences)
|
|
479
|
-
|
|
487
|
+
coord_err = max(coord_differences)
|
|
488
|
+
|
|
480
489
|
if conf_err >= max_conf_error:
|
|
481
490
|
max_conf_error = conf_err
|
|
482
491
|
max_conf_error_det_a = det_a
|
|
483
492
|
max_conf_error_det_b = det_b
|
|
484
|
-
|
|
493
|
+
|
|
485
494
|
if coord_err >= max_coord_error:
|
|
486
495
|
max_coord_error = coord_err
|
|
487
496
|
max_coord_error_det_a = det_a
|
|
488
497
|
max_coord_error_det_b = det_b
|
|
489
|
-
|
|
498
|
+
|
|
490
499
|
# ...for each detection in detections_a
|
|
491
|
-
|
|
500
|
+
|
|
492
501
|
if bidirectional_comparison:
|
|
493
|
-
|
|
502
|
+
|
|
494
503
|
reverse_comparison_results = compare_detection_lists(detections_b,
|
|
495
|
-
detections_a,
|
|
496
|
-
options,
|
|
504
|
+
detections_a,
|
|
505
|
+
options,
|
|
497
506
|
bidirectional_comparison=False)
|
|
498
|
-
|
|
507
|
+
|
|
499
508
|
if reverse_comparison_results['max_conf_error'] > max_conf_error:
|
|
500
509
|
max_conf_error = reverse_comparison_results['max_conf_error']
|
|
501
510
|
max_conf_error_det_a = reverse_comparison_results['max_conf_error_det_b']
|
|
@@ -504,17 +513,17 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
|
|
|
504
513
|
max_coord_error = reverse_comparison_results['max_coord_error']
|
|
505
514
|
max_coord_error_det_a = reverse_comparison_results['max_coord_error_det_b']
|
|
506
515
|
max_coord_error_det_b = reverse_comparison_results['max_coord_error_det_a']
|
|
507
|
-
|
|
516
|
+
|
|
508
517
|
list_comparison_results = {}
|
|
509
|
-
|
|
518
|
+
|
|
510
519
|
list_comparison_results['max_coord_error'] = max_coord_error
|
|
511
520
|
list_comparison_results['max_coord_error_det_a'] = max_coord_error_det_a
|
|
512
521
|
list_comparison_results['max_coord_error_det_b'] = max_coord_error_det_b
|
|
513
|
-
|
|
522
|
+
|
|
514
523
|
list_comparison_results['max_conf_error'] = max_conf_error
|
|
515
524
|
list_comparison_results['max_conf_error_det_a'] = max_conf_error_det_a
|
|
516
525
|
list_comparison_results['max_conf_error_det_b'] = max_conf_error_det_b
|
|
517
|
-
|
|
526
|
+
|
|
518
527
|
return list_comparison_results
|
|
519
528
|
|
|
520
529
|
# ...def compare_detection_lists(...)
|
|
@@ -526,98 +535,98 @@ def compare_results(inference_output_file,
|
|
|
526
535
|
expected_results_file_is_absolute=False):
|
|
527
536
|
"""
|
|
528
537
|
Compare two MD-formatted output files that should be nearly identical, allowing small
|
|
529
|
-
changes (e.g. rounding differences). Generally used to compare a new results file to
|
|
538
|
+
changes (e.g. rounding differences). Generally used to compare a new results file to
|
|
530
539
|
an expected results file.
|
|
531
|
-
|
|
540
|
+
|
|
532
541
|
Args:
|
|
533
542
|
inference_output_file (str): the first results file to compare
|
|
534
543
|
expected_results_file (str): the second results file to compare
|
|
535
544
|
options (MDTestOptions): options that determine tolerable differences between files
|
|
536
|
-
expected_results_file_is_absolute (str, optional): by default,
|
|
545
|
+
expected_results_file_is_absolute (str, optional): by default,
|
|
537
546
|
expected_results_file is appended to options.scratch_dir; this option
|
|
538
547
|
specifies that it's an absolute path.
|
|
539
|
-
|
|
548
|
+
|
|
540
549
|
Returns:
|
|
541
550
|
dict: dictionary with keys 'max_coord_error' and 'max_conf_error'
|
|
542
551
|
"""
|
|
543
|
-
|
|
552
|
+
|
|
544
553
|
# Read results
|
|
545
554
|
with open(inference_output_file,'r') as f:
|
|
546
555
|
results_from_file = json.load(f) # noqa
|
|
547
|
-
|
|
556
|
+
|
|
548
557
|
if not expected_results_file_is_absolute:
|
|
549
558
|
expected_results_file= os.path.join(options.scratch_dir,expected_results_file)
|
|
550
|
-
|
|
559
|
+
|
|
551
560
|
with open(expected_results_file,'r') as f:
|
|
552
561
|
expected_results = json.load(f)
|
|
553
|
-
|
|
562
|
+
|
|
554
563
|
filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
|
|
555
564
|
filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
|
|
556
|
-
|
|
565
|
+
|
|
557
566
|
assert len(filename_to_results) == len(filename_to_results_expected), \
|
|
558
567
|
'Error: expected {} files in results, found {}'.format(
|
|
559
568
|
len(filename_to_results_expected),
|
|
560
569
|
len(filename_to_results))
|
|
561
|
-
|
|
570
|
+
|
|
562
571
|
max_conf_error = -1
|
|
563
572
|
max_conf_error_file = None
|
|
564
573
|
max_conf_error_comparison_results = None
|
|
565
|
-
|
|
574
|
+
|
|
566
575
|
max_coord_error = -1
|
|
567
|
-
max_coord_error_file = None
|
|
576
|
+
max_coord_error_file = None
|
|
568
577
|
max_coord_error_comparison_results = None
|
|
569
|
-
|
|
578
|
+
|
|
570
579
|
# fn = next(iter(filename_to_results.keys()))
|
|
571
580
|
for fn in filename_to_results.keys():
|
|
572
|
-
|
|
581
|
+
|
|
573
582
|
actual_image_results = filename_to_results[fn]
|
|
574
583
|
expected_image_results = filename_to_results_expected[fn]
|
|
575
|
-
|
|
584
|
+
|
|
576
585
|
if 'failure' in actual_image_results:
|
|
577
586
|
assert 'failure' in expected_image_results and \
|
|
578
587
|
'detections' not in actual_image_results and \
|
|
579
588
|
'detections' not in expected_image_results
|
|
580
589
|
continue
|
|
581
590
|
assert 'failure' not in expected_image_results
|
|
582
|
-
|
|
591
|
+
|
|
583
592
|
actual_detections = actual_image_results['detections']
|
|
584
593
|
expected_detections = expected_image_results['detections']
|
|
585
|
-
|
|
594
|
+
|
|
586
595
|
comparison_results_this_image = compare_detection_lists(
|
|
587
596
|
detections_a=actual_detections,
|
|
588
597
|
detections_b=expected_detections,
|
|
589
598
|
options=options,
|
|
590
599
|
bidirectional_comparison=True)
|
|
591
|
-
|
|
600
|
+
|
|
592
601
|
if comparison_results_this_image['max_conf_error'] > max_conf_error:
|
|
593
602
|
max_conf_error = comparison_results_this_image['max_conf_error']
|
|
594
603
|
max_conf_error_comparison_results = comparison_results_this_image
|
|
595
604
|
max_conf_error_file = fn
|
|
596
|
-
|
|
605
|
+
|
|
597
606
|
if comparison_results_this_image['max_coord_error'] > max_coord_error:
|
|
598
607
|
max_coord_error = comparison_results_this_image['max_coord_error']
|
|
599
608
|
max_coord_error_comparison_results = comparison_results_this_image
|
|
600
609
|
max_coord_error_file = fn
|
|
601
|
-
|
|
610
|
+
|
|
602
611
|
# ...for each image
|
|
603
|
-
|
|
612
|
+
|
|
604
613
|
if not options.warning_mode:
|
|
605
|
-
|
|
614
|
+
|
|
606
615
|
assert max_conf_error <= options.max_conf_error, \
|
|
607
616
|
'Confidence error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
|
|
608
617
|
max_conf_error,options.max_conf_error,max_conf_error_file,
|
|
609
618
|
inference_output_file,expected_results_file)
|
|
610
|
-
|
|
619
|
+
|
|
611
620
|
assert max_coord_error <= options.max_coord_error, \
|
|
612
621
|
'Coord error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
|
|
613
622
|
max_coord_error,options.max_coord_error,max_coord_error_file,
|
|
614
623
|
inference_output_file,expected_results_file)
|
|
615
|
-
|
|
624
|
+
|
|
616
625
|
print('Max conf error: {} (file {})'.format(
|
|
617
626
|
max_conf_error,max_conf_error_file))
|
|
618
627
|
print('Max coord error: {} (file {})'.format(
|
|
619
628
|
max_coord_error,max_coord_error_file))
|
|
620
|
-
|
|
629
|
+
|
|
621
630
|
comparison_results = {}
|
|
622
631
|
comparison_results['max_conf_error'] = max_conf_error
|
|
623
632
|
comparison_results['max_conf_error_comparison_results'] = max_conf_error_comparison_results
|
|
@@ -638,18 +647,18 @@ def _args_to_object(args, obj):
|
|
|
638
647
|
Args:
|
|
639
648
|
args (argparse.Namespace): the namespace to convert to an object
|
|
640
649
|
obj (object): object whose whose attributes will be updated
|
|
641
|
-
|
|
650
|
+
|
|
642
651
|
Returns:
|
|
643
652
|
object: the modified object (modified in place, but also returned)
|
|
644
653
|
"""
|
|
645
|
-
|
|
654
|
+
|
|
646
655
|
for n, v in inspect.getmembers(args):
|
|
647
656
|
if not n.startswith('_'):
|
|
648
657
|
setattr(obj, n, v)
|
|
649
658
|
|
|
650
659
|
return obj
|
|
651
660
|
|
|
652
|
-
|
|
661
|
+
|
|
653
662
|
#%% CLI functions
|
|
654
663
|
|
|
655
664
|
# These are copied from process_utils.py to avoid imports outside of the test
|
|
@@ -657,21 +666,21 @@ def _args_to_object(args, obj):
|
|
|
657
666
|
|
|
658
667
|
os.environ["PYTHONUNBUFFERED"] = "1"
|
|
659
668
|
|
|
660
|
-
# In some circumstances I want to allow CLI tests to "succeed" even when they return
|
|
669
|
+
# In some circumstances I want to allow CLI tests to "succeed" even when they return
|
|
661
670
|
# specific non-zero output values.
|
|
662
671
|
allowable_process_return_codes = [0]
|
|
663
672
|
|
|
664
673
|
def execute(cmd):
|
|
665
674
|
"""
|
|
666
675
|
Runs [cmd] (a single string) in a shell, yielding each line of output to the caller.
|
|
667
|
-
|
|
676
|
+
|
|
668
677
|
Args:
|
|
669
678
|
cmd (str): command to run
|
|
670
|
-
|
|
679
|
+
|
|
671
680
|
Returns:
|
|
672
681
|
int: the command's return code, always zero, otherwise a CalledProcessError is raised
|
|
673
682
|
"""
|
|
674
|
-
|
|
683
|
+
|
|
675
684
|
# https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
|
|
676
685
|
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
|
677
686
|
shell=True, universal_newlines=True)
|
|
@@ -687,11 +696,14 @@ def execute(cmd):
|
|
|
687
696
|
def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=True):
|
|
688
697
|
"""
|
|
689
698
|
Runs [cmd] (a single string) in a shell, capturing (and optionally printing) output.
|
|
690
|
-
|
|
699
|
+
|
|
691
700
|
Args:
|
|
692
701
|
cmd (str): command to run
|
|
693
702
|
print_output (bool, optional): whether to print output from [cmd]
|
|
694
|
-
|
|
703
|
+
catch_exceptions (bool, optional): whether to catch exceptions, rather than raising
|
|
704
|
+
them
|
|
705
|
+
echo_command (bool, optional): whether to print [cmd] to stdout prior to execution
|
|
706
|
+
|
|
695
707
|
Returns:
|
|
696
708
|
dict: a dictionary with fields "status" (the process return code) and "output"
|
|
697
709
|
(the content of stdout)
|
|
@@ -699,7 +711,7 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
|
|
|
699
711
|
|
|
700
712
|
if echo_command:
|
|
701
713
|
print('Running command:\n{}\n'.format(cmd))
|
|
702
|
-
|
|
714
|
+
|
|
703
715
|
to_return = {'status':'unknown','output':''}
|
|
704
716
|
output = []
|
|
705
717
|
try:
|
|
@@ -709,21 +721,22 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
|
|
|
709
721
|
print(s,end='',flush=True)
|
|
710
722
|
to_return['status'] = 0
|
|
711
723
|
except subprocess.CalledProcessError as cpe:
|
|
712
|
-
if not catch_exceptions:
|
|
724
|
+
if not catch_exceptions:
|
|
713
725
|
raise
|
|
714
726
|
print('execute_and_print caught error: {}'.format(cpe.output))
|
|
715
727
|
to_return['status'] = cpe.returncode
|
|
716
728
|
to_return['output'] = output
|
|
717
|
-
|
|
729
|
+
|
|
718
730
|
return to_return
|
|
719
731
|
|
|
720
732
|
|
|
721
733
|
#%% Python tests
|
|
722
734
|
|
|
735
|
+
@pytest.mark.skip(reason='Called one for each module')
|
|
723
736
|
def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
724
737
|
"""
|
|
725
738
|
Imports all modules in [package_name]
|
|
726
|
-
|
|
739
|
+
|
|
727
740
|
Args:
|
|
728
741
|
package_name (str): the package name to test
|
|
729
742
|
exceptions (list, optional): exclude any modules that contain any of these strings
|
|
@@ -731,16 +744,16 @@ def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
|
731
744
|
"""
|
|
732
745
|
import importlib
|
|
733
746
|
import pkgutil
|
|
734
|
-
|
|
747
|
+
|
|
735
748
|
package = importlib.import_module(package_name)
|
|
736
749
|
package_path = package.__path__
|
|
737
750
|
imported_modules = []
|
|
738
|
-
|
|
751
|
+
|
|
739
752
|
if exceptions is None:
|
|
740
753
|
exceptions = []
|
|
741
|
-
|
|
754
|
+
|
|
742
755
|
for _, modname, _ in pkgutil.walk_packages(package_path, package_name + '.'):
|
|
743
|
-
|
|
756
|
+
|
|
744
757
|
skip_module = False
|
|
745
758
|
for s in exceptions:
|
|
746
759
|
if s in modname:
|
|
@@ -748,69 +761,61 @@ def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
|
748
761
|
break
|
|
749
762
|
if skip_module:
|
|
750
763
|
continue
|
|
751
|
-
|
|
764
|
+
|
|
752
765
|
if verbose:
|
|
753
766
|
print('Testing import: {}'.format(modname))
|
|
754
|
-
|
|
767
|
+
|
|
755
768
|
try:
|
|
756
769
|
# Attempt to import each module
|
|
757
770
|
_ = importlib.import_module(modname)
|
|
758
|
-
imported_modules.append(modname)
|
|
771
|
+
imported_modules.append(modname)
|
|
759
772
|
except ImportError as e:
|
|
760
773
|
print(f"Failed to import module {modname}: {e}")
|
|
761
774
|
raise
|
|
762
775
|
|
|
763
|
-
|
|
776
|
+
|
|
764
777
|
def run_python_tests(options):
|
|
765
778
|
"""
|
|
766
779
|
Runs Python-based (as opposed to CLI-based) package tests.
|
|
767
|
-
|
|
780
|
+
|
|
768
781
|
Args:
|
|
769
782
|
options (MDTestOptions): see MDTestOptions for details
|
|
770
783
|
"""
|
|
771
|
-
|
|
784
|
+
|
|
772
785
|
print('\n*** Starting module tests ***\n')
|
|
773
|
-
|
|
774
|
-
|
|
786
|
+
|
|
787
|
+
|
|
775
788
|
## Prepare data
|
|
776
|
-
|
|
789
|
+
|
|
777
790
|
download_test_data(options)
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
## Miscellaneous utility tests
|
|
781
|
-
|
|
782
|
-
print('\n** Running ct_utils module test **\n')
|
|
783
|
-
|
|
784
|
-
from megadetector.utils.ct_utils import __module_test__ as ct_utils_test
|
|
785
|
-
ct_utils_test()
|
|
786
|
-
|
|
787
|
-
|
|
791
|
+
|
|
792
|
+
|
|
788
793
|
## Import tests
|
|
789
|
-
|
|
794
|
+
|
|
790
795
|
print('\n** Running package import tests **\n')
|
|
791
796
|
test_package_imports('megadetector.visualization')
|
|
792
797
|
test_package_imports('megadetector.postprocessing')
|
|
793
798
|
test_package_imports('megadetector.postprocessing.repeat_detection_elimination')
|
|
794
|
-
test_package_imports('megadetector.utils',exceptions=['
|
|
799
|
+
test_package_imports('megadetector.utils',exceptions=['md_tests'])
|
|
795
800
|
test_package_imports('megadetector.data_management',exceptions=['lila','ocr_tools'])
|
|
796
|
-
|
|
801
|
+
|
|
797
802
|
|
|
798
803
|
## Return early if we're not running torch-related tests
|
|
799
|
-
|
|
804
|
+
|
|
800
805
|
if options.test_mode == 'utils-only':
|
|
801
806
|
return
|
|
802
|
-
|
|
803
|
-
|
|
807
|
+
|
|
808
|
+
|
|
804
809
|
## Make sure our tests are doing what we think they're doing
|
|
805
|
-
|
|
810
|
+
|
|
806
811
|
from megadetector.detection import pytorch_detector
|
|
807
812
|
pytorch_detector.require_non_default_compatibility_mode = True
|
|
808
|
-
|
|
809
|
-
|
|
813
|
+
|
|
814
|
+
|
|
810
815
|
## Run inference on an image
|
|
811
|
-
|
|
816
|
+
|
|
812
817
|
print('\n** Running MD on a single image (module) **\n')
|
|
813
|
-
|
|
818
|
+
|
|
814
819
|
from megadetector.detection import run_detector
|
|
815
820
|
from megadetector.visualization import visualization_utils as vis_utils # noqa
|
|
816
821
|
image_fn = os.path.join(options.scratch_dir,options.test_images[0])
|
|
@@ -818,15 +823,15 @@ def run_python_tests(options):
|
|
|
818
823
|
detector_options=copy(options.detector_options))
|
|
819
824
|
pil_im = vis_utils.load_image(image_fn)
|
|
820
825
|
result = model.generate_detections_one_image(pil_im) # noqa
|
|
821
|
-
|
|
826
|
+
|
|
822
827
|
if options.python_test_depth <= 1:
|
|
823
828
|
return
|
|
824
|
-
|
|
825
|
-
|
|
829
|
+
|
|
830
|
+
|
|
826
831
|
## Run inference on a folder
|
|
827
832
|
|
|
828
833
|
print('\n** Running MD on a folder of images (module) **\n')
|
|
829
|
-
|
|
834
|
+
|
|
830
835
|
from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
|
|
831
836
|
from megadetector.utils import path_utils # noqa
|
|
832
837
|
|
|
@@ -834,7 +839,7 @@ def run_python_tests(options):
|
|
|
834
839
|
assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
|
|
835
840
|
inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
|
|
836
841
|
image_file_names = path_utils.find_images(image_folder,recursive=True)
|
|
837
|
-
results = load_and_run_detector_batch(options.default_model,
|
|
842
|
+
results = load_and_run_detector_batch(options.default_model,
|
|
838
843
|
image_file_names,
|
|
839
844
|
quiet=True,
|
|
840
845
|
detector_options=copy(options.detector_options))
|
|
@@ -844,30 +849,30 @@ def run_python_tests(options):
|
|
|
844
849
|
detector_file=options.default_model)
|
|
845
850
|
|
|
846
851
|
## Verify results
|
|
847
|
-
|
|
852
|
+
|
|
848
853
|
# Verify format correctness
|
|
849
854
|
from megadetector.postprocessing.validate_batch_results import validate_batch_results #noqa
|
|
850
855
|
validate_batch_results(inference_output_file)
|
|
851
|
-
|
|
856
|
+
|
|
852
857
|
# Verify value correctness
|
|
853
858
|
expected_results_file = get_expected_results_filename(is_gpu_available(verbose=False),
|
|
854
859
|
options=options)
|
|
855
860
|
compare_results(inference_output_file,expected_results_file,options)
|
|
856
|
-
|
|
857
|
-
|
|
861
|
+
|
|
862
|
+
|
|
858
863
|
# Make note of this filename, we will use it again later
|
|
859
864
|
inference_output_file_standard_inference = inference_output_file
|
|
860
|
-
|
|
865
|
+
|
|
861
866
|
if options.python_test_depth <= 2:
|
|
862
867
|
return
|
|
863
|
-
|
|
864
|
-
|
|
868
|
+
|
|
869
|
+
|
|
865
870
|
## Run and verify again with augmentation enabled
|
|
866
|
-
|
|
871
|
+
|
|
867
872
|
print('\n** Running MD on images with augmentation (module) **\n')
|
|
868
|
-
|
|
873
|
+
|
|
869
874
|
from megadetector.utils.path_utils import insert_before_extension
|
|
870
|
-
|
|
875
|
+
|
|
871
876
|
inference_output_file_augmented = insert_before_extension(inference_output_file,'augmented')
|
|
872
877
|
results = load_and_run_detector_batch(options.default_model,
|
|
873
878
|
image_file_names,
|
|
@@ -883,32 +888,32 @@ def run_python_tests(options):
|
|
|
883
888
|
get_expected_results_filename(is_gpu_available(verbose=False),
|
|
884
889
|
augment=True,options=options)
|
|
885
890
|
compare_results(inference_output_file_augmented,expected_results_file_augmented,options)
|
|
886
|
-
|
|
887
|
-
|
|
891
|
+
|
|
892
|
+
|
|
888
893
|
## Postprocess results
|
|
889
|
-
|
|
894
|
+
|
|
890
895
|
print('\n** Post-processing results (module) **\n')
|
|
891
|
-
|
|
896
|
+
|
|
892
897
|
from megadetector.postprocessing.postprocess_batch_results import \
|
|
893
898
|
PostProcessingOptions,process_batch_results
|
|
894
899
|
postprocessing_options = PostProcessingOptions()
|
|
895
|
-
|
|
900
|
+
|
|
896
901
|
postprocessing_options.md_results_file = inference_output_file
|
|
897
902
|
postprocessing_options.output_dir = os.path.join(options.scratch_dir,'postprocessing_output')
|
|
898
903
|
postprocessing_options.image_base_dir = image_folder
|
|
899
|
-
|
|
904
|
+
|
|
900
905
|
postprocessing_results = process_batch_results(postprocessing_options)
|
|
901
906
|
assert os.path.isfile(postprocessing_results.output_html_file), \
|
|
902
907
|
'Postprocessing output file {} not found'.format(postprocessing_results.output_html_file)
|
|
903
|
-
|
|
904
|
-
|
|
908
|
+
|
|
909
|
+
|
|
905
910
|
## Partial RDE test
|
|
906
|
-
|
|
911
|
+
|
|
907
912
|
print('\n** Testing RDE (module) **\n')
|
|
908
|
-
|
|
913
|
+
|
|
909
914
|
from megadetector.postprocessing.repeat_detection_elimination.repeat_detections_core import \
|
|
910
915
|
RepeatDetectionOptions, find_repeat_detections
|
|
911
|
-
|
|
916
|
+
|
|
912
917
|
rde_options = RepeatDetectionOptions()
|
|
913
918
|
rde_options.occurrenceThreshold = 2
|
|
914
919
|
rde_options.confidenceMin = 0.001
|
|
@@ -919,24 +924,24 @@ def run_python_tests(options):
|
|
|
919
924
|
rde_results = find_repeat_detections(inference_output_file, rde_output_file, rde_options)
|
|
920
925
|
assert os.path.isfile(rde_results.filterFile),\
|
|
921
926
|
'Could not find RDE output file {}'.format(rde_results.filterFile)
|
|
922
|
-
|
|
923
|
-
|
|
927
|
+
|
|
928
|
+
|
|
924
929
|
## Run inference on a folder (with YOLOv5 val script)
|
|
925
|
-
|
|
930
|
+
|
|
926
931
|
if options.yolo_working_dir is None:
|
|
927
|
-
|
|
932
|
+
|
|
928
933
|
print('Skipping YOLO val inference tests, no YOLO folder supplied')
|
|
929
|
-
|
|
934
|
+
|
|
930
935
|
else:
|
|
931
|
-
|
|
936
|
+
|
|
932
937
|
print('\n** Running YOLO val inference test (module) **\n')
|
|
933
|
-
|
|
938
|
+
|
|
934
939
|
from megadetector.detection.run_inference_with_yolov5_val import \
|
|
935
940
|
YoloInferenceOptions, run_inference_with_yolo_val
|
|
936
941
|
from megadetector.utils.path_utils import insert_before_extension
|
|
937
|
-
|
|
942
|
+
|
|
938
943
|
inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
|
|
939
|
-
|
|
944
|
+
|
|
940
945
|
yolo_inference_options = YoloInferenceOptions()
|
|
941
946
|
yolo_inference_options.input_folder = os.path.join(options.scratch_dir,'md-test-images')
|
|
942
947
|
yolo_inference_options.output_file = inference_output_file_yolo_val
|
|
@@ -946,76 +951,76 @@ def run_python_tests(options):
 yolo_inference_options.overwrite_handling = 'overwrite'
 from megadetector.detection.run_detector import DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
 yolo_inference_options.conf_thres = DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD
-
+
 run_inference_with_yolo_val(yolo_inference_options)
-
+
 ## Confirm this matches the standard inference path
-
+
 if False:
 # TODO: compare_results() isn't quite ready for this yet
-compare_results(inference_output_file=inference_output_file_yolo_val,
-expected_results_file=inference_output_file_standard_inference,
+compare_results(inference_output_file=inference_output_file_yolo_val,
+expected_results_file=inference_output_file_standard_inference,
 options=options)
-
+
 # Run again, without symlinks this time
-
+
 inference_output_file_yolo_val_no_links = insert_before_extension(inference_output_file_yolo_val,
 'no-links')
 yolo_inference_options.output_file = inference_output_file_yolo_val_no_links
 yolo_inference_options.use_symlinks = False
 run_inference_with_yolo_val(yolo_inference_options)
-
+
 # Run again, with chunked inference and symlinks
-
+
 inference_output_file_yolo_val_checkpoints = insert_before_extension(inference_output_file_yolo_val,
 'checkpoints')
 yolo_inference_options.output_file = inference_output_file_yolo_val_checkpoints
 yolo_inference_options.use_symlinks = True
 yolo_inference_options.checkpoint_frequency = 5
 run_inference_with_yolo_val(yolo_inference_options)
-
+
 # Run again, with chunked inference and no symlinks
-
+
 inference_output_file_yolo_val_checkpoints_no_links = \
 insert_before_extension(inference_output_file_yolo_val,'checkpoints-no-links')
 yolo_inference_options.output_file = inference_output_file_yolo_val_checkpoints_no_links
 yolo_inference_options.use_symlinks = False
 yolo_inference_options.checkpoint_frequency = 5
 run_inference_with_yolo_val(yolo_inference_options)
-
+
 fn1 = inference_output_file_yolo_val
-
+
 output_files_to_compare = [
 inference_output_file_yolo_val_no_links,
 inference_output_file_yolo_val_checkpoints,
 inference_output_file_yolo_val_checkpoints_no_links
 ]
-
+
 for fn2 in output_files_to_compare:
 assert output_files_are_identical(fn1, fn2, verbose=True)
-
+
 # ...if we need to run the YOLO val inference tests
-
-
+
+
 if not options.skip_video_tests:
-
+
 ## Video test (single video)
-
+
 print('\n** Running MD on a single video (module) **\n')
-
+
 from megadetector.detection.process_video import ProcessVideoOptions, process_video
 from megadetector.utils.path_utils import insert_before_extension
-
+
 video_options = ProcessVideoOptions()
 video_options.model_file = options.default_model
 video_options.input_video_file = os.path.join(options.scratch_dir,options.test_videos[0])
 video_options.output_json_file = os.path.join(options.scratch_dir,'single_video_output.json')
 video_options.output_video_file = os.path.join(options.scratch_dir,'video_scratch/rendered_video.mp4')
 video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
-video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
-
+video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
+
 video_options.render_output_video = (not options.skip_video_rendering_tests)
-
+
 # video_options.keep_rendered_frames = False
 # video_options.keep_extracted_frames = False
 video_options.force_extracted_frame_folder_deletion = True
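
The "chunked inference" variants in this hunk exercise checkpointing: partial results are flushed every `checkpoint_frequency` images so an interrupted run can resume and still produce output identical to an uninterrupted run. A generic sketch of that pattern, with hypothetical helper names (not the package's internals), assuming each per-image result is a dict with a 'file' key:

```python
import json
import os

def run_with_checkpoints(image_files, run_one_image, checkpoint_file, checkpoint_frequency=5):
    """Process images, saving partial results to [checkpoint_file] every few images."""
    results = []
    if os.path.isfile(checkpoint_file):
        with open(checkpoint_file, 'r') as f:
            results = json.load(f)
    already_done = {r['file'] for r in results}
    for i_image, fn in enumerate(image_files):
        if fn in already_done:
            continue
        results.append(run_one_image(fn))
        if ((i_image + 1) % checkpoint_frequency) == 0:
            with open(checkpoint_file, 'w') as f:
                json.dump(results, f)
    # Final flush
    with open(checkpoint_file, 'w') as f:
        json.dump(results, f)
    return results
```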
@@ -1032,22 +1037,22 @@ def run_python_tests(options):
 # video_options.debug_max_frames = -1
 # video_options.class_mapping_filename = None
 video_options.detector_options = copy(options.detector_options)
-
+
 _ = process_video(video_options)
-
+
 assert os.path.isfile(video_options.output_video_file), \
 'Python video test failed to render output video file'
 assert os.path.isfile(video_options.output_json_file), \
 'Python video test failed to render output .json file'
-
-
+
+
 ## Video test (folder)
-
+
 print('\n** Running MD on a folder of videos (module) **\n')
-
+
 from megadetector.detection.process_video import ProcessVideoOptions, process_video_folder
 from megadetector.utils.path_utils import insert_before_extension
-
+
 video_options = ProcessVideoOptions()
 video_options.model_file = options.default_model
 video_options.input_video_file = os.path.join(options.scratch_dir,
@@ -1055,7 +1060,7 @@ def run_python_tests(options):
 video_options.output_json_file = os.path.join(options.scratch_dir,'video_folder_output.json')
 video_options.output_video_file = None
 video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
-video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
+video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
 video_options.render_output_video = False
 video_options.keep_rendered_frames = False
 video_options.keep_extracted_frames = False
@@ -1068,63 +1073,63 @@ def run_python_tests(options):
 video_options.fourcc = options.video_fourcc
 # video_options.rendering_confidence_threshold = None
 # video_options.json_confidence_threshold = 0.005
-video_options.frame_sample = 10
+video_options.frame_sample = 10
 video_options.n_cores = options.n_cores_for_video_tests
-
+
 # Force frame extraction to disk, since that's how we generated our expected results file
 video_options.force_on_disk_frame_extraction = True
 # video_options.debug_max_frames = -1
 # video_options.class_mapping_filename = None
-
+
 # Use quality == None, because we can't control whether YOLOv5 has patched cm2.imread,
 # and therefore can't rely on using the quality parameter
 video_options.quality = None
-video_options.max_width = None
+video_options.max_width = None
 video_options.detector_options = copy(options.detector_options)
-
+
 video_options.keep_extracted_frames = True
 _ = process_video_folder(video_options)
-
+
 assert os.path.isfile(video_options.output_json_file), \
 'Python video test failed to render output .json file'
-
+
 frame_output_file = insert_before_extension(video_options.output_json_file,'frames')
 assert os.path.isfile(frame_output_file)
-
-
+
+
 ## Verify results
-
+
 expected_results_file = \
 get_expected_results_filename(is_gpu_available(verbose=False),test_type='video',options=options)
 assert os.path.isfile(expected_results_file)
-
+
 compare_results(frame_output_file,expected_results_file,options)
-
-
+
+
 ## Run again, this time in memory, and make sure the results are *almost* the same
-
+
 # They won't be quite the same, because the on-disk path goes through a jpeg intermediate
-
+
 print('\n** Running MD on a folder of videos (in memory) (module) **\n')
-
+
 video_options.output_json_file = insert_before_extension(video_options.output_json_file,'in-memory')
 video_options.force_on_disk_frame_extraction = False
 _ = process_video_folder(video_options)
-
+
 frame_output_file_in_memory = insert_before_extension(video_options.output_json_file,'frames')
 assert os.path.isfile(frame_output_file_in_memory)
-
+
 from copy import deepcopy
 options_loose = deepcopy(options)
 options_loose.max_conf_error = 0.05
 options_loose.max_coord_error = 0.01
-
+
 compare_results(inference_output_file=frame_output_file,
 expected_results_file=frame_output_file_in_memory,
 options=options_loose)
-
+
 # ...if we're not skipping video tests
-
+
 print('\n*** Finished module tests ***\n')

 # ...def run_python_tests(...)
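
Setting `frame_sample = 10` means only every tenth frame is extracted and run through the detector; the folder test then checks both the video-level output and the per-frame ('frames') results file. A rough sketch of that kind of frame sampling with OpenCV, assuming `cv2` is available (illustration only, not the package's video pipeline):

```python
import cv2

def sample_frames(video_path, frame_sample=10):
    """Yield (frame_index, frame) for every [frame_sample]-th frame of a video."""
    cap = cv2.VideoCapture(video_path)
    i_frame = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if (i_frame % frame_sample) == 0:
            yield i_frame, frame
        i_frame += 1
    cap.release()
```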
@@ -1135,52 +1140,52 @@ def run_python_tests(options):
 def run_cli_tests(options):
 """
 Runs CLI (as opposed to Python-based) package tests.
-
+
 Args:
 options (MDTestOptions): see MDTestOptions for details
 """
-
+
 print('\n*** Starting CLI tests ***\n')
-
+
 ## Environment management
-
+
 if options.cli_test_pythonpath is not None:
-os.environ['PYTHONPATH'] = options.cli_test_pythonpath
-
-
+os.environ['PYTHONPATH'] = options.cli_test_pythonpath
+
+
 ## chdir if necessary
-
+
 if options.cli_working_dir is not None:
 os.chdir(options.cli_working_dir)
-
-
+
+
 ## Prepare data
-
+
 download_test_data(options)
-
-
+
+
 ## Utility imports
-
+
 from megadetector.utils.ct_utils import dict_to_kvp_list
 from megadetector.utils.path_utils import insert_before_extension
-
-
+
+
 ## Utility tests
-
+
 # TODO: move postprocessing tests up to this point, using pre-generated .json results files
-
+

 ## Return early if we're not running torch-related tests
-
+
 if options.test_mode == 'utils-only':
 print('utils-only tests finished, returning')
 return

-
+
 ## Run inference on an image
-
+
 print('\n** Running MD on a single image (CLI) **\n')
-
+
 image_fn = os.path.join(options.scratch_dir,options.test_images[0])
 output_dir = os.path.join(options.scratch_dir,'single_image_test')
 if options.cli_working_dir is None:
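
Throughout the CLI tests, `--detector_options` is passed as space-separated key=value pairs; `dict_to_kvp_list` serializes a dict into that form and `parse_kvp_list` (used later in `main()`) converts it back. A minimal sketch of the round trip, shown under assumed separators rather than as the packaged implementations:

```python
def dict_to_kvp_list(d, item_separator=' ', kv_separator='='):
    """Convert {'a': 1, 'b': 2} into 'a=1 b=2' (illustrative sketch only)."""
    return item_separator.join('{}{}{}'.format(k, kv_separator, v) for k, v in d.items())

def parse_kvp_list(kvp_list, kv_separator='=', d=None):
    """Convert ['a=1', 'b=2'] back into a dict, optionally updating an existing dict [d]."""
    to_return = {} if d is None else dict(d)
    for kvp in kvp_list:
        k, v = kvp.split(kv_separator, 1)
        to_return[k] = v
    return to_return

assert parse_kvp_list(dict_to_kvp_list({'compatibility_mode': 'classic'}).split()) == \
    {'compatibility_mode': 'classic'}
```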
@@ -1191,7 +1196,7 @@ def run_cli_tests(options):
 options.default_model,image_fn,output_dir)
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
+
 if options.cpu_execution_is_error:
 gpu_available_via_cli = False
 for s in cmd_results['output']:
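
Each CLI command is run through `execute_and_print`, which (judging from how it is used in this file) returns a dict with at least an 'output' list of captured lines and a 'status' exit code. A hedged sketch of such a wrapper using subprocess; the names and return keys are assumptions based on the usage above:

```python
import subprocess

def execute_and_print(cmd):
    """Run a shell command, echo its output, and return {'status': ..., 'output': [...]}."""
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, text=True)
    output = []
    for line in proc.stdout:
        print(line, end='')
        output.append(line)
    proc.wait()
    return {'status': proc.returncode, 'output': output}
```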
@@ -1202,17 +1207,17 @@ def run_cli_tests(options):
 raise Exception('GPU execution is required, but not available')

 # Make sure we can also pass an absolute path to a model file, instead of, e.g. "MDV5A"
-
+
 from megadetector.detection.run_detector import try_download_known_detector
 model_file = try_download_known_detector(options.default_model,force_download=False,verbose=False)
 cmd = cmd.replace(options.default_model,model_file)
 cmd_results = execute_and_print(cmd)
-
-
+
+
 ## Run inference on a folder
-
+
 print('\n** Running MD on a folder (CLI) **\n')
-
+
 image_folder = os.path.join(options.scratch_dir,'md-test-images')
 assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
 inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
@@ -1226,109 +1231,109 @@ def run_cli_tests(options):
 cmd += ' --include_image_timestamp --include_exif_data'
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
+
 base_cmd = cmd
-
-
+
+
 ## Run again with checkpointing enabled, make sure the results are the same
-
+
 print('\n** Running MD on a folder (with checkpoints) (CLI) **\n')
-
+
 checkpoint_string = ' --checkpoint_frequency 5'
 cmd = base_cmd + checkpoint_string
 inference_output_file_checkpoint = insert_before_extension(inference_output_file,'_checkpoint')
 cmd = cmd.replace(inference_output_file,inference_output_file_checkpoint)
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
-assert output_files_are_identical(fn1=inference_output_file,
+
+assert output_files_are_identical(fn1=inference_output_file,
 fn2=inference_output_file_checkpoint,
 verbose=True)
-
-
+
+
 ## Run again with the image queue enabled, make sure the results are the same
-
+
 print('\n** Running MD on a folder (with image queue but no preprocessing) (CLI) **\n')
-
+
 cmd = base_cmd + ' --use_image_queue'
 inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')
 cmd = cmd.replace(inference_output_file,inference_output_file_queue)
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
-assert output_files_are_identical(fn1=inference_output_file,
+
+assert output_files_are_identical(fn1=inference_output_file,
 fn2=inference_output_file_queue,
 verbose=True)
-
-
+
+
 print('\n** Running MD on a folder (with image queue and preprocessing) (CLI) **\n')
-
+
 cmd = base_cmd + ' --use_image_queue --preprocess_on_image_queue'
 inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')
 cmd = cmd.replace(inference_output_file,inference_output_file_queue)
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
-assert output_files_are_identical(fn1=inference_output_file,
+
+assert output_files_are_identical(fn1=inference_output_file,
 fn2=inference_output_file_queue,
 verbose=True)
-
+
 ## Run again on multiple cores, make sure the results are the same
-
+
 if not options.skip_cpu_tests:
-
-# First run again on the CPU on a single thread if necessary, so we get a file that
-# *should* be identical to the multicore version.
+
+# First run again on the CPU on a single thread if necessary, so we get a file that
+# *should* be identical to the multicore version.
 gpu_available = is_gpu_available(verbose=False)
-
+
 cuda_visible_devices = None
 if 'CUDA_VISIBLE_DEVICES' in os.environ:
 cuda_visible_devices = os.environ['CUDA_VISIBLE_DEVICES']
-os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
 # If we already ran on the CPU, no need to run again
 if not gpu_available:
-
+
 inference_output_file_cpu = inference_output_file
-
+
 else:
-
+
 print('\n** Running MD on a folder (single CPU) (CLI) **\n')
-
-inference_output_file_cpu = insert_before_extension(inference_output_file,'cpu')
+
+inference_output_file_cpu = insert_before_extension(inference_output_file,'cpu')
 cmd = base_cmd
 cmd = cmd.replace(inference_output_file,inference_output_file_cpu)
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
+
 print('\n** Running MD on a folder (multiple CPUs) (CLI) **\n')
-
+
 cpu_string = ' --ncores {}'.format(options.n_cores_for_multiprocessing_tests)
 cmd = base_cmd + cpu_string
 inference_output_file_cpu_multicore = insert_before_extension(inference_output_file,'multicore')
 cmd = cmd.replace(inference_output_file,inference_output_file_cpu_multicore)
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
+
 if cuda_visible_devices is not None:
 print('Restoring CUDA_VISIBLE_DEVICES')
 os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
 else:
 del os.environ['CUDA_VISIBLE_DEVICES']
-
-assert output_files_are_identical(fn1=inference_output_file_cpu,
+
+assert output_files_are_identical(fn1=inference_output_file_cpu,
 fn2=inference_output_file_cpu_multicore,
 verbose=True)
-
+
 # ...if we're not skipping the force-cpu tests
-
-
+
+
 ## Postprocessing
-
+
 print('\n** Testing post-processing (CLI) **\n')
-
+
 postprocessing_output_dir = os.path.join(options.scratch_dir,'postprocessing_output_cli')
-
+
 if options.cli_working_dir is None:
 cmd = 'python -m megadetector.postprocessing.postprocess_batch_results'
 else:
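
The single-CPU/multi-CPU comparison above temporarily sets CUDA_VISIBLE_DEVICES to -1 and then restores (or deletes) the previous value. The same save/restore logic can be expressed as a context manager; this is a small sketch of the idiom, not part of the package:

```python
import os
from contextlib import contextmanager

@contextmanager
def force_cpu():
    """Temporarily hide all CUDA devices, restoring the previous state on exit."""
    previous = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    try:
        yield
    finally:
        if previous is not None:
            os.environ['CUDA_VISIBLE_DEVICES'] = previous
        else:
            del os.environ['CUDA_VISIBLE_DEVICES']
```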
@@ -1337,14 +1342,14 @@ def run_cli_tests(options):
 inference_output_file,postprocessing_output_dir)
 cmd += ' --image_base_dir "{}"'.format(image_folder)
 cmd_results = execute_and_print(cmd)
-
-
+
+
 ## RDE
-
+
 print('\n** Running RDE (CLI) **\n')
-
+
 rde_output_dir = os.path.join(options.scratch_dir,'rde_output_cli')
-
+
 if options.cli_working_dir is None:
 cmd = 'python -m megadetector.postprocessing.repeat_detection_elimination.find_repeat_detections'
 else:
@@ -1353,44 +1358,44 @@ def run_cli_tests(options):
 cmd += ' --imageBase "{}"'.format(image_folder)
 cmd += ' --outputBase "{}"'.format(rde_output_dir)
 cmd += ' --occurrenceThreshold 1' # Use an absurd number here to make sure we get some suspicious detections
-cmd_results = execute_and_print(cmd)
-
+cmd_results = execute_and_print(cmd)
+
 # Find the latest filtering folder
 filtering_output_dir = os.listdir(rde_output_dir)
 filtering_output_dir = [fn for fn in filtering_output_dir if fn.startswith('filtering_')]
 filtering_output_dir = [os.path.join(rde_output_dir,fn) for fn in filtering_output_dir]
 filtering_output_dir = [fn for fn in filtering_output_dir if os.path.isdir(fn)]
 filtering_output_dir = sorted(filtering_output_dir)[-1]
-
+
 print('Using RDE filtering folder {}'.format(filtering_output_dir))
-
+
 filtered_output_file = inference_output_file.replace('.json','_filtered.json')
-
+
 if options.cli_working_dir is None:
 cmd = 'python -m megadetector.postprocessing.repeat_detection_elimination.remove_repeat_detections'
 else:
 cmd = 'python megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py'
 cmd += ' "{}" "{}" "{}"'.format(inference_output_file,filtered_output_file,filtering_output_dir)
 cmd_results = execute_and_print(cmd)
-
+
 assert os.path.isfile(filtered_output_file), \
 'Could not find RDE output file {}'.format(filtered_output_file)
-
-
+
+
 ## Run inference on a folder (tiled)
-
+
 # This is a rather esoteric code path that I turn off when I'm testing some
 # features that it doesn't include yet, particularly compatibility mode
 # control.
 skip_tiling_tests = True
-
+
 if skip_tiling_tests:
-
+
 print('### DEBUG: skipping tiling tests ###')
-
+
 else:
 print('\n** Running tiled inference (CLI) **\n')
-
+
 image_folder = os.path.join(options.scratch_dir,'md-test-images')
 tiling_folder = os.path.join(options.scratch_dir,'tiling-folder')
 inference_output_file_tiled = os.path.join(options.scratch_dir,'folder_inference_output_tiled.json')
@@ -1402,21 +1407,21 @@ def run_cli_tests(options):
 options.default_model,image_folder,tiling_folder,inference_output_file_tiled)
 cmd += ' --overwrite_handling overwrite'
 cmd_results = execute_and_print(cmd)
-
+
 with open(inference_output_file_tiled,'r') as f:
 results_from_file = json.load(f) # noqa
-
-
+
+
 ## Run inference on a folder (augmented, w/YOLOv5 val script)
-
+
 if options.yolo_working_dir is None:
-
+
 print('Bypassing YOLOv5 val tests, no yolo folder supplied')
-
+
 else:
-
+
 print('\n** Running YOLOv5 val tests (CLI) **\n')
-
+
 image_folder = os.path.join(options.scratch_dir,'md-test-images')
 yolo_results_folder = os.path.join(options.scratch_dir,'yolo-output-folder')
 yolo_symlink_folder = os.path.join(options.scratch_dir,'yolo-symlink_folder')
@@ -1434,7 +1439,7 @@ def run_cli_tests(options):
 # cmd += ' --no_use_symlinks'
 cmd += ' --overwrite_handling overwrite'
 cmd_results = execute_and_print(cmd)
-
+
 # Run again with checkpointing, make sure the outputs are identical
 cmd += ' --checkpoint_frequency 5'
 inference_output_file_yolo_val_checkpoint = \
@@ -1442,32 +1447,32 @@ def run_cli_tests(options):
 assert inference_output_file_yolo_val_checkpoint != inference_output_file_yolo_val
 cmd = cmd.replace(inference_output_file_yolo_val,inference_output_file_yolo_val_checkpoint)
 cmd_results = execute_and_print(cmd)
-
+
 assert output_files_are_identical(fn1=inference_output_file_yolo_val,
 fn2=inference_output_file_yolo_val_checkpoint,
 verbose=True)
-
+
 if not options.skip_video_tests:
-
+
 ## Video test
-
+
 print('\n** Testing video rendering (CLI) **\n')
-
+
 video_inference_output_file = os.path.join(options.scratch_dir,'video_inference_output.json')
 output_video_file = os.path.join(options.scratch_dir,'video_scratch/cli_rendered_video.mp4')
 frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder_cli')
-frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder_cli')
-
-video_fn = os.path.join(options.scratch_dir,options.test_videos[-1])
+frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder_cli')
+
+video_fn = os.path.join(options.scratch_dir,options.test_videos[-1])
 assert os.path.isfile(video_fn), 'Could not find video file {}'.format(video_fn)
-
+
 output_dir = os.path.join(options.scratch_dir,'single_video_test_cli')
 if options.cli_working_dir is None:
 cmd = 'python -m megadetector.detection.process_video'
 else:
 cmd = 'python megadetector/detection/process_video.py'
 cmd += ' "{}" "{}"'.format(options.default_model,video_fn)
-cmd += ' --frame_folder "{}" --frame_rendering_folder "{}" --output_json_file "{}" --output_video_file "{}"'.format(
+cmd += ' --frame_folder "{}" --frame_rendering_folder "{}" --output_json_file "{}" --output_video_file "{}"'.format( #noqa
 frame_folder,frame_rendering_folder,video_inference_output_file,output_video_file)
 cmd += ' --fourcc {}'.format(options.video_fourcc)
 cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion'
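
`output_files_are_identical` is the workhorse assertion in these checkpoint and image-queue comparisons: two MD-format results files should describe the same detections regardless of how the images were batched or ordered. A simplified sketch of such a comparison, assuming the standard MD output layout (an 'images' list with 'file' and 'detections' fields); the packaged version also supports fuzzier, IoU-based matching:

```python
import json

def output_files_are_identical(fn1, fn2, verbose=False):
    """Compare the 'images' sections of two MD-format results files, ignoring image order."""
    with open(fn1, 'r') as f:
        results1 = json.load(f)
    with open(fn2, 'r') as f:
        results2 = json.load(f)
    images1 = {im['file']: im.get('detections', None) for im in results1['images']}
    images2 = {im['file']: im.get('detections', None) for im in results2['images']}
    if images1.keys() != images2.keys():
        if verbose:
            print('Image lists differ')
        return False
    for fn in images1:
        if images1[fn] != images2[fn]:
            if verbose:
                print('Detections differ for {}'.format(fn))
            return False
    return True
```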
@@ -1475,19 +1480,19 @@ def run_cli_tests(options):
 cmd += ' --frame_sample 4'
 cmd += ' --verbose'
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
-
+
 if not options.skip_video_rendering_tests:
 cmd += ' --render_output_video'
-
+
 cmd_results = execute_and_print(cmd)

 # ...if we're not skipping video tests
-
-
+
+
 ## Run inference on a folder (with MDV5B, so we can do a comparison)
-
+
 print('\n** Running MDv5b (CLI) **\n')
-
+
 image_folder = os.path.join(options.scratch_dir,'md-test-images')
 inference_output_file_alt = os.path.join(options.scratch_dir,'folder_inference_output_alt.json')
 if options.cli_working_dir is None:
@@ -1500,13 +1505,13 @@ def run_cli_tests(options):
 cmd += ' --include_image_timestamp --include_exif_data'
 cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 cmd_results = execute_and_print(cmd)
-
+
 with open(inference_output_file_alt,'r') as f:
 results_from_file = json.load(f) # noqa
-
-
+
+
 ## Compare the two files
-
+
 comparison_output_folder = os.path.join(options.scratch_dir,'results_comparison')
 image_folder = os.path.join(options.scratch_dir,'md-test-images')
 results_files_string = '"{}" "{}"'.format(
@@ -1517,22 +1522,24 @@ def run_cli_tests(options):
 cmd = 'python megadetector/postprocessing/compare_batch_results.py'
 cmd += ' "{}" "{}" {}'.format(comparison_output_folder,image_folder,results_files_string)
 cmd_results = execute_and_print(cmd)
-
+
 assert cmd_results['status'] == 0, 'Error generating comparison HTML'
 assert os.path.isfile(os.path.join(comparison_output_folder,'index.html')), \
 'Failed to generate comparison HTML'
-
+
 print('\n*** Finished CLI tests ***\n')
-
+
 # ...def run_cli_tests(...)


 def run_download_tests(options):
 """
+Test automatic model downloads.
+
 Args:
-options (MDTestOptions): see MDTestOptions for details
+options (MDTestOptions): see MDTestOptions for details
 """
-
+
 if options.skip_download_tests or options.test_mode == 'utils-only':
 return

@@ -1541,19 +1548,20 @@ def run_download_tests(options):
 get_detector_version_from_model_file, \
 model_string_to_model_version

-# Make sure we can download models based on canonical version numbers,
+# Make sure we can download models based on canonical version numbers,
 # e.g. "v5a.0.0"
 for model_name in known_models:
 url = known_models[model_name]['url']
-if 'localhost' in url:
+if ('localhost' in url) and options.skip_localhost_downloads:
 continue
 print('Testing download for known model {}'.format(model_name))
-fn = try_download_known_detector(model_name,
+fn = try_download_known_detector(model_name,
 force_download=False,
 verbose=False)
 version_string = get_detector_version_from_model_file(fn, verbose=False)
-
-
+# Make sure this is the same version we asked for, modulo the MDv5 re-releases
+assert (version_string.replace('.0.1','.0.0') == model_name.replace('.0.1','.0.0'))
+
 # Make sure we can download models based on short names, e.g. "MDV5A"
 for model_name in model_string_to_model_version:
 model_version = model_string_to_model_version[model_name]
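
The new assertion normalizes '.0.1' to '.0.0' before comparing, so that the MDv5 re-release version strings are treated as equivalent to the originally requested versions. A worked example of that check with hypothetical values:

```python
def versions_match(requested, downloaded):
    """Treat '.0.1' re-release version strings as equivalent to their '.0.0' originals."""
    normalize = lambda s: s.replace('.0.1', '.0.0')
    return normalize(requested) == normalize(downloaded)

assert versions_match('v5a.0.0', 'v5a.0.1')
assert not versions_match('v5a.0.0', 'v5b.0.0')
```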
@@ -1562,11 +1570,58 @@ def run_download_tests(options):
 if 'localhost' in url:
 continue
 print('Testing download for model short name {}'.format(model_name))
-fn = try_download_known_detector(model_name,
+fn = try_download_known_detector(model_name,
 force_download=False,
-verbose=False)
-assert fn != model_name
-
+verbose=False)
+assert fn != model_name
+
+# Test corruption handling for .pt files
+print('Testing corruption handling for MDV5B')
+
+# First ensure MDV5B is downloaded
+mdv5b_file = try_download_known_detector('MDV5B',
+force_download=False,
+verbose=False)
+assert mdv5b_file is not None
+assert os.path.exists(mdv5b_file)
+assert mdv5b_file.endswith('.pt')
+
+# Get the original file size and MD5 hash for comparison
+original_size = os.path.getsize(mdv5b_file)
+from megadetector.utils.path_utils import compute_file_hash
+original_hash = compute_file_hash(mdv5b_file, algorithm='md5')
+
+# Deliberately corrupt the file by overwriting the first few bytes
+print('Corrupting model file: {}'.format(mdv5b_file))
+with open(mdv5b_file, 'r+b') as f:
+f.write(b'CORRUPTED_FILE_DATA_XXXXXX')
+
+# Verify the file is now corrupted (different hash)
+corrupted_hash = compute_file_hash(mdv5b_file, algorithm='md5')
+assert corrupted_hash != original_hash, 'File corruption verification failed'
+
+# Try to download again; this should detect corruption and re-download
+print('Testing corruption detection and re-download')
+mdv5b_file_redownloaded = try_download_known_detector('MDV5B',
+force_download=False,
+verbose=True)
+
+# Verify that the file was re-downloaded and is now valid
+assert mdv5b_file_redownloaded is not None
+assert os.path.exists(mdv5b_file_redownloaded)
+assert mdv5b_file_redownloaded == mdv5b_file
+
+# Verify that the file is back to its original state
+new_size = os.path.getsize(mdv5b_file_redownloaded)
+new_hash = compute_file_hash(mdv5b_file_redownloaded, algorithm='md5')
+
+assert new_size == original_size, \
+'Re-downloaded file size ({}) does not match original ({})'.format(new_size, original_size)
+assert new_hash == original_hash, \
+'Re-downloaded file hash ({}) does not match original ({})'.format(new_hash, original_hash)
+
+print('Corruption handling test passed')
+
 # ...def run_download_tests()


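
The new corruption-handling test depends on `compute_file_hash` to detect that a cached .pt file no longer matches what was originally downloaded. A minimal sketch of an MD5-style file hash built on hashlib, shown as an assumption; the package's own helper may differ in signature:

```python
import hashlib

def compute_file_hash(file_path, algorithm='md5', chunk_size=1024 * 1024):
    """Compute a hex digest of a file, reading it in chunks to bound memory use."""
    h = hashlib.new(algorithm)
    with open(file_path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            h.update(chunk)
    return h.hexdigest()
```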
@@ -1575,74 +1630,105 @@ def run_download_tests(options):
 def run_tests(options):
 """
 Runs Python-based and/or CLI-based package tests.
-
+
 Args:
 options (MDTestOptions): see MDTestOptions for details
 """
-
+
 # Prepare data folder
 download_test_data(options)
-
+
 # Run model download tests if necessary
 run_download_tests(options)
-
+
 if options.disable_gpu:
 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-
+
 # Verify GPU
 gpu_available = is_gpu_available()
-
+
 # If the GPU is required and isn't available, error
 if options.cpu_execution_is_error and (not gpu_available):
 raise ValueError('GPU not available, and cpu_execution_is_error is set')
-
+
 # If the GPU should be disabled, verify that it is
 if options.disable_gpu:
 assert (not gpu_available), 'CPU execution specified, but the GPU appears to be available'
-
+
 # Run python tests
 if not options.skip_python_tests:
-
+
 if options.model_folder is not None:
-
+
 assert os.path.isdir(options.model_folder), \
 'Could not find model folder {}'.format(options.model_folder)
-
+
 model_files = os.listdir(options.model_folder)
 model_files = [fn for fn in model_files if fn.endswith('.pt')]
 model_files = [os.path.join(options.model_folder,fn) for fn in model_files]
-
+
 assert len(model_files) > 0, \
 'Could not find any models in folder {}'.format(options.model_folder)
-
+
 original_default_model = options.default_model
-
+
 for model_file in model_files:
 print('Running Python tests for model {}'.format(model_file))
-options.default_model = model_file
+options.default_model = model_file
 run_python_tests(options)
-
+
 options.default_model = original_default_model
-
+
 else:
-
+
 run_python_tests(options)
-
+
 # Run CLI tests
 if not options.skip_cli_tests:
 run_cli_tests(options)


+#%% Automated test entry point
+
+def test_suite_entry_point():
+"""
+Main entry point for the numerical test suite.
+"""
+
+options = MDTestOptions()
+options.disable_gpu = False
+options.cpu_execution_is_error = False
+options.skip_video_tests = True
+options.skip_python_tests = False
+options.skip_cli_tests = True
+options.scratch_dir = None
+options.test_data_url = 'https://lila.science/public/md-test-package.zip'
+options.force_data_download = False
+options.force_data_unzip = False
+options.warning_mode = False
+options.max_coord_error = 0.01 # 0.001
+options.max_conf_error = 0.01 # 0.005
+options.skip_video_rendering_tests = True
+options.cli_working_dir = None
+options.cli_test_pythonpath = None
+options.skip_download_tests = True
+options.skip_localhost_downloads = True
+
+options = download_test_data(options)
+
+run_tests(options)
+
+
 #%% Interactive driver

 if False:
-
+
 pass

-#%%
-
+#%% Test Prep
+
 options = MDTestOptions()
-
+
 options.disable_gpu = False
 options.cpu_execution_is_error = False
 options.skip_video_tests = False
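
The new `test_suite_entry_point` bundles a conservative default configuration (GPU optional, video/CLI/download tests skipped) so the suite can be launched programmatically, for example from an automated test runner. A hedged usage sketch; the module path is an assumption based on where this file appears to live in the package:

```python
# Hypothetical invocation; the actual module path may differ.
from megadetector.utils.md_tests import test_suite_entry_point

if __name__ == '__main__':
    test_suite_entry_point()
```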
@@ -1656,150 +1742,165 @@ if False:
 options.max_coord_error = 0.01 # 0.001
 options.max_conf_error = 0.01 # 0.005
 options.skip_video_rendering_tests = True
+options.skip_download_tests = False
+options.skip_localhost_downloads = False
+
 # options.iou_threshold_for_file_comparison = 0.7
-
+
 options.cli_working_dir = r'c:\git\MegaDetector'
-# When running in the cameratraps-detector environment
+# When running in the cameratraps-detector environment
 # options.cli_test_pythonpath = r'c:\git\MegaDetector;c:\git\yolov5-md'
-
+
 # When running in the MegaDetector environment
 options.cli_test_pythonpath = r'c:\git\MegaDetector'
-
+
 # options.cli_working_dir = os.path.expanduser('~')
 # options.yolo_working_dir = r'c:\git\yolov5-md'
 # options.yolo_working_dir = '/mnt/c/git/yolov5-md'
 options = download_test_data(options)
-
-
-
+
+
+#%% Environment prep
+
+# Add the YOLO working dir to the PYTHONPATH if necessary
 import os
-if (
-
-
+if (options.yolo_working_dir is not None) and \
+(('PYTHONPATH' not in os.environ) or (options.yolo_working_dir not in os.environ['PYTHONPATH'])):
+if ('PYTHONPATH' not in os.environ):
+os.environ['PYTHONPATH'] = options.yolo_working_dir
+else:
+os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ';' + options.yolo_working_dir
+
+
+#%% Run download tests
+
+run_download_tests(options=options)
+
+
+#%% Run all tests

-#%%
-
 run_tests(options)
-
-
-
-
-
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
+
+
+#%% Run YOLO inference tests
+
+yolo_inference_options_dict = {'input_folder': '/tmp/md-tests/md-test-images',
+'image_filename_list': None,
+'model_filename': 'MDV5A',
+'output_file': '/tmp/md-tests/folder_inference_output_yolo_val.json',
+'yolo_working_folder': '/mnt/c/git/yolov5-md',
+'model_type': 'yolov5',
+'image_size': None,
+'conf_thres': 0.005,
+'batch_size': 1,
+'device_string': '0',
+'augment': False,
+'half_precision_enabled': None,
+'symlink_folder': None,
+'use_symlinks': True,
+'unique_id_strategy': 'links',
+'yolo_results_folder': None,
+'remove_symlink_folder': True,
+'remove_yolo_results_folder': True,
+'yolo_category_id_to_name': {0: 'animal', 1: 'person', 2: 'vehicle'},
+'overwrite_handling': 'overwrite',
+'preview_yolo_command_only': False,
+'treat_copy_failures_as_warnings': False,
+'save_yolo_debug_output': False,
+'recursive': True,
 'checkpoint_frequency': None}
-
-from megadetector.utils.ct_utils import dict_to_object
+
+from megadetector.utils.ct_utils import dict_to_object
 from megadetector.detection.run_inference_with_yolov5_val import \
 YoloInferenceOptions, run_inference_with_yolo_val
-
+
 yolo_inference_options = YoloInferenceOptions()
 yolo_inference_options = dict_to_object(yolo_inference_options_dict, yolo_inference_options)
-
+
 os.makedirs(options.scratch_dir,exist_ok=True)
-
+
 inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
-
+
 run_inference_with_yolo_val(yolo_inference_options)
-
-
+
+
 #%% Command-line driver

-def main():
+def main(): # noqa

 options = MDTestOptions()
-
+
 parser = argparse.ArgumentParser(
 description='MegaDetector test suite')
-
+
 parser.add_argument(
 '--disable_gpu',
 action='store_true',
 help='Disable GPU operation')
-
+
 parser.add_argument(
 '--cpu_execution_is_error',
 action='store_true',
 help='Fail if the GPU appears not to be available')
-
+
 parser.add_argument(
 '--scratch_dir',
 default=None,
 type=str,
 help='Directory for temporary storage (defaults to system temp dir)')
-
+
 parser.add_argument(
 '--skip_video_tests',
 action='store_true',
 help='Skip tests related to video (which can be slow)')
-
+
 parser.add_argument(
 '--skip_video_rendering_tests',
 action='store_true',
 help='Skip tests related to *rendering* video')
-
+
 parser.add_argument(
 '--skip_python_tests',
 action='store_true',
 help='Skip python tests')
-
+
 parser.add_argument(
 '--skip_cli_tests',
 action='store_true',
 help='Skip CLI tests')
-
+
 parser.add_argument(
 '--skip_download_tests',
 action='store_true',
 help='Skip model download tests')
-
+
 parser.add_argument(
 '--skip_cpu_tests',
 action='store_true',
 help='Skip force-CPU tests')
-
+
 parser.add_argument(
 '--force_data_download',
 action='store_true',
 help='Force download of the test data file, even if it\'s already available')
-
+
 parser.add_argument(
 '--force_data_unzip',
 action='store_true',
 help='Force extraction of all files in the test data file, even if they\'re already available')
-
+
 parser.add_argument(
 '--warning_mode',
 action='store_true',
 help='Turns numeric/content errors into warnings')
-
+
 parser.add_argument(
 '--max_conf_error',
 type=float,
 default=options.max_conf_error,
 help='Maximum tolerable confidence value deviation from expected (default {})'.format(
 options.max_conf_error))
-
+
 parser.add_argument(
 '--max_coord_error',
 type=float,
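
In the interactive driver, `dict_to_object` copies the entries of `yolo_inference_options_dict` onto the `YoloInferenceOptions` instance attribute by attribute. A minimal sketch of that idiom (illustration only; the packaged helper may validate keys differently):

```python
def dict_to_object(d, obj):
    """Copy each key/value pair in [d] onto [obj] as an attribute, returning [obj]."""
    for k, v in d.items():
        setattr(obj, k, v)
    return obj

class _Options:
    pass

o = dict_to_object({'batch_size': 1, 'augment': False}, _Options())
assert o.batch_size == 1 and o.augment is False
```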
@@ -1812,7 +1913,7 @@ def main():
 type=str,
 default=None,
 help='Working directory for CLI tests')
-
+
 parser.add_argument(
 '--yolo_working_dir',
 type=str,
@@ -1825,55 +1926,57 @@ def main():
 default=None,
 help='PYTHONPATH to set for CLI tests; if None, inherits from the parent process'
 )
-
+
 parser.add_argument(
 '--test_mode',
 type=str,
 default='all',
 help='Test mode: "all" or "utils-only"'
 )
-
+
 parser.add_argument(
 '--python_test_depth',
 type=int,
 default=options.python_test_depth,
 help='Used as a knob to control the level of Python tests (0-100)'
 )
-
+
 parser.add_argument(
 '--model_folder',
 type=str,
 default=None,
 help='Run Python tests on every model in this folder'
 )
-
+
 parser.add_argument(
 '--detector_options',
 nargs='*',
 metavar='KEY=VALUE',
 default='',
 help='Detector-specific options, as a space-separated list of key-value pairs')
-
+
 parser.add_argument(
 '--default_model',
 type=str,
 default=options.default_model,
 help='Default model file or well-known model name (used for most tests)')
-
+
 # The following token is used for linting, do not remove.
 #
 # no_arguments_required
-
+
 args = parser.parse_args()
-
-initial_detector_options = options.detector_options
+
+initial_detector_options = options.detector_options
 _args_to_object(args,options)
-from megadetector.utils.ct_utils import parse_kvp_list
+from megadetector.utils.ct_utils import parse_kvp_list
 options.detector_options = parse_kvp_list(args.detector_options,d=initial_detector_options)
-
+
 run_tests(options)
-
-
+
+# ...def main()
+
+if __name__ == '__main__':
 main()


@@ -1884,27 +1987,27 @@ if False:
 pass

 #%%
-
+
 import sys; sys.path.append(r'c:\git\yolov5-md')
-
+
 #%%
-
+
 fn1 = r"G:\temp\md-test-package\mdv5a-video-cpu-pt1.10.1.frames.json"
 fn2 = r"G:\temp\md-test-package\mdv5a-video-gpu-pt1.10.1.frames.json"
 fn3 = r"G:\temp\md-test-package\mdv5a-video-cpu-pt2.x.frames.json"
 fn4 = r"G:\temp\md-test-package\mdv5a-video-gpu-pt2.x.frames.json"
-
+
 assert all([os.path.isfile(fn) for fn in [fn1,fn2,fn3,fn4]])
 print(output_files_are_identical(fn1,fn1,verbose=False))
 print(output_files_are_identical(fn1,fn2,verbose=False))
 print(output_files_are_identical(fn1,fn3,verbose=False))
-
+
 #%%
-
+
 fn1 = r"G:\temp\md-test-package\mdv5a-image-gpu-pt1.10.1.json"
 fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-gpu-pt1.10.1.json"
 print(output_files_are_identical(fn1,fn2,verbose=True))
-
+
 fn1 = r"G:\temp\md-test-package\mdv5a-image-cpu-pt1.10.1.json"
 fn2 = r"G:\temp\md-test-package\mdv5a-augment-image-cpu-pt1.10.1.json"
 print(output_files_are_identical(fn1,fn2,verbose=True))
|