megadetector 5.0.28__py3-none-any.whl → 5.0.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of megadetector might be problematic.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +231 -224
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +304 -262
- megadetector/detection/run_detector.py +177 -164
- megadetector/detection/run_detector_batch.py +364 -363
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +256 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +290 -282
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +415 -415
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +219 -146
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +23 -20
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +313 -298
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -66
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1018 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1457 -398
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +61 -61
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +401 -397
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +79 -73
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/METADATA +135 -132
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.28.dist-info/RECORD +0 -209
megadetector/utils/md_tests.py
CHANGED
|
@@ -6,7 +6,7 @@ A series of tests to validate basic repo functionality and verify either "correc
|
|
|
6
6
|
inference behavior, or - when operating in environments other than the training
|
|
7
7
|
environment - acceptable deviation from the correct results.
|
|
8
8
|
|
|
9
|
-
This module should not depend on anything else in this repo outside of the
|
|
9
|
+
This module should not depend on anything else in this repo outside of the
|
|
10
10
|
tests themselves, even if it means some duplicated code (e.g. for downloading files),
|
|
11
11
|
since much of what it tries to test is, e.g., imports.
|
|
12
12
|
|
|
@@ -28,6 +28,7 @@ import zipfile
|
|
|
28
28
|
import subprocess
|
|
29
29
|
import argparse
|
|
30
30
|
import inspect
|
|
31
|
+
import pytest
|
|
31
32
|
|
|
32
33
|
from copy import copy
|
|
33
34
|
|
|
@@ -38,110 +39,110 @@ class MDTestOptions:
|
|
|
38
39
|
"""
|
|
39
40
|
Options controlling test behavior
|
|
40
41
|
"""
|
|
41
|
-
|
|
42
|
+
|
|
42
43
|
def __init__(self):
|
|
43
|
-
|
|
44
|
+
|
|
44
45
|
## Required ##
|
|
45
|
-
|
|
46
|
+
|
|
46
47
|
#: Force CPU execution
|
|
47
48
|
self.disable_gpu = False
|
|
48
|
-
|
|
49
|
+
|
|
49
50
|
#: If GPU execution is requested, but a GPU is not available, should we error?
|
|
50
51
|
self.cpu_execution_is_error = False
|
|
51
|
-
|
|
52
|
+
|
|
52
53
|
#: Skip tests related to video processing
|
|
53
54
|
self.skip_video_tests = False
|
|
54
|
-
|
|
55
|
+
|
|
55
56
|
#: Skip tests related to video rendering
|
|
56
57
|
self.skip_video_rendering_tests = False
|
|
57
|
-
|
|
58
|
+
|
|
58
59
|
#: Skip tests launched via Python functions (as opposed to CLIs)
|
|
59
60
|
self.skip_python_tests = False
|
|
60
|
-
|
|
61
|
+
|
|
61
62
|
#: Skip CLI tests
|
|
62
63
|
self.skip_cli_tests = False
|
|
63
|
-
|
|
64
|
+
|
|
64
65
|
#: Skip download tests
|
|
65
66
|
self.skip_download_tests = False
|
|
66
|
-
|
|
67
|
+
|
|
67
68
|
#: Skip force-CPU tests
|
|
68
69
|
self.skip_cpu_tests = False
|
|
69
|
-
|
|
70
|
+
|
|
70
71
|
#: Force a specific folder for temporary input/output
|
|
71
72
|
self.scratch_dir = None
|
|
72
|
-
|
|
73
|
+
|
|
73
74
|
#: Where does the test data live?
|
|
74
75
|
self.test_data_url = 'https://lila.science/public/md-test-package.zip'
|
|
75
|
-
|
|
76
|
+
|
|
76
77
|
#: Download test data even if it appears to have already been downloaded
|
|
77
78
|
self.force_data_download = False
|
|
78
|
-
|
|
79
|
+
|
|
79
80
|
#: Unzip test data even if it appears to have already been unzipped
|
|
80
81
|
self.force_data_unzip = False
|
|
81
|
-
|
|
82
|
+
|
|
82
83
|
#: By default, any unexpected behavior is an error; this forces most errors to
|
|
83
84
|
#: be treated as warnings.
|
|
84
85
|
self.warning_mode = False
|
|
85
|
-
|
|
86
|
+
|
|
86
87
|
#: How much deviation from the expected detection coordinates should we allow before
|
|
87
88
|
#: a disrepancy becomes an error?
|
|
88
89
|
self.max_coord_error = 0.001
|
|
89
|
-
|
|
90
|
+
|
|
90
91
|
#: How much deviation from the expected confidence values should we allow before
|
|
91
92
|
#: a disrepancy becomes an error?
|
|
92
93
|
self.max_conf_error = 0.005
|
|
93
|
-
|
|
94
|
+
|
|
94
95
|
#: Current working directory when running CLI tests
|
|
95
96
|
#:
|
|
96
97
|
#: If this is None, we won't mess with the inherited working directory.
|
|
97
98
|
self.cli_working_dir = None
|
|
98
|
-
|
|
99
|
-
#: YOLOv5 installation, only relevant if we're testing run_inference_with_yolov5_val.
|
|
99
|
+
|
|
100
|
+
#: YOLOv5 installation, only relevant if we're testing run_inference_with_yolov5_val.
|
|
100
101
|
#:
|
|
101
102
|
#: If this is None, we'll skip that test.
|
|
102
103
|
self.yolo_working_dir = None
|
|
103
|
-
|
|
104
|
+
|
|
104
105
|
#: fourcc code to use for video tests that involve rendering video
|
|
105
|
-
self.video_fourcc = 'mp4v'
|
|
106
|
-
|
|
106
|
+
self.video_fourcc = 'mp4v'
|
|
107
|
+
|
|
107
108
|
#: Default model to use for testing (filename, URL, or well-known model string)
|
|
108
109
|
self.default_model = 'MDV5A'
|
|
109
110
|
|
|
110
111
|
#: For comparison tests, use a model that produces slightly different output
|
|
111
112
|
self.alt_model = 'MDV5B'
|
|
112
|
-
|
|
113
|
+
|
|
113
114
|
#: PYTHONPATH to set for CLI tests; if None, inherits from the parent process. Only
|
|
114
115
|
#: impacts the called functions, not the parent process.
|
|
115
116
|
self.cli_test_pythonpath = None
|
|
116
|
-
|
|
117
|
+
|
|
117
118
|
#: IoU threshold used to determine whether boxes in two detection files likely correspond
|
|
118
119
|
#: to the same box.
|
|
119
120
|
self.iou_threshold_for_file_comparison = 0.85
|
|
120
|
-
|
|
121
|
+
|
|
121
122
|
#: Detector options passed to PTDetector
|
|
122
|
-
self.detector_options = {'compatibility_mode':'classic-test'}
|
|
123
|
-
|
|
124
|
-
#: Used to drive a series of tests (typically with a low value for
|
|
123
|
+
self.detector_options = {'compatibility_mode':'classic-test'}
|
|
124
|
+
|
|
125
|
+
#: Used to drive a series of tests (typically with a low value for
|
|
125
126
|
#: python_test_depth) over a folder of models.
|
|
126
127
|
self.model_folder = None
|
|
127
|
-
|
|
128
|
+
|
|
128
129
|
#: Used as a knob to control the level of Python tests, typically used when
|
|
129
|
-
#: we want to run a series of simple tests on a small number of models, rather
|
|
130
|
+
#: we want to run a series of simple tests on a small number of models, rather
|
|
130
131
|
#: than a deep test of tests on a small number of models. The gestalt is that
|
|
131
132
|
#: this is a range from 0-100.
|
|
132
133
|
self.python_test_depth = 100
|
|
133
|
-
|
|
134
|
+
|
|
134
135
|
#: Currently should be 'all' or 'utils-only'
|
|
135
136
|
self.test_mode = 'all'
|
|
136
|
-
|
|
137
|
+
|
|
137
138
|
#: Number of cores to use for multi-CPU inference tests
|
|
138
139
|
self.n_cores_for_multiprocessing_tests = 2
|
|
139
|
-
|
|
140
|
+
|
|
140
141
|
#: Number of cores to use for multi-CPU video tests
|
|
141
142
|
self.n_cores_for_video_tests = 2
|
|
142
|
-
|
|
143
|
+
|
|
143
144
|
# ...def __init__()
|
|
144
|
-
|
|
145
|
+
|
|
145
146
|
# ...class MDTestOptions()
|
|
146
147
|
|
|
147
148
|
|
|
@@ -154,20 +155,20 @@ def get_expected_results_filename(gpu_is_available,
|
|
|
154
155
|
options=None):
|
|
155
156
|
"""
|
|
156
157
|
Expected results vary just a little across inference environments, particularly
|
|
157
|
-
between PT 1.x and 2.x, so when making sure things are working acceptably, we
|
|
158
|
+
between PT 1.x and 2.x, so when making sure things are working acceptably, we
|
|
158
159
|
compare to a reference file that matches the current environment.
|
|
159
|
-
|
|
160
|
+
|
|
160
161
|
This function gets the correct filename to compare to current results, depending
|
|
161
162
|
on whether a GPU is available.
|
|
162
|
-
|
|
163
|
+
|
|
163
164
|
Args:
|
|
164
165
|
gpu_is_available (bool): whether a GPU is available
|
|
165
|
-
|
|
166
|
+
|
|
166
167
|
Returns:
|
|
167
168
|
str: relative filename of the results file we should use (within the test
|
|
168
169
|
data zipfile)
|
|
169
170
|
"""
|
|
170
|
-
|
|
171
|
+
|
|
171
172
|
if gpu_is_available:
|
|
172
173
|
hw_string = 'gpu'
|
|
173
174
|
else:
|
|
@@ -180,8 +181,8 @@ def get_expected_results_filename(gpu_is_available,
|
|
|
180
181
|
else:
|
|
181
182
|
assert torch_version.startswith('2'), 'Unknown torch version: {}'.format(torch_version)
|
|
182
183
|
pt_string = 'pt2.x'
|
|
183
|
-
|
|
184
|
-
# A hack for now to account for the fact that even with acceleration enabled and PT2
|
|
184
|
+
|
|
185
|
+
# A hack for now to account for the fact that even with acceleration enabled and PT2
|
|
185
186
|
# installed, Apple silicon appears to provide the same results as CPU/PT1 inference
|
|
186
187
|
try:
|
|
187
188
|
import torch
|
|
@@ -192,32 +193,32 @@ def get_expected_results_filename(gpu_is_available,
|
|
|
192
193
|
pt_string = 'pt1.10.1'
|
|
193
194
|
except Exception:
|
|
194
195
|
pass
|
|
195
|
-
|
|
196
|
+
|
|
196
197
|
aug_string = ''
|
|
197
198
|
if augment:
|
|
198
199
|
aug_string = 'augment-'
|
|
199
|
-
|
|
200
|
+
|
|
200
201
|
fn = '{}-{}{}-{}-{}.json'.format(model_string,aug_string,test_type,hw_string,pt_string)
|
|
201
|
-
|
|
202
|
+
|
|
202
203
|
from megadetector.utils.path_utils import insert_before_extension
|
|
203
|
-
|
|
204
|
+
|
|
204
205
|
if test_type == 'video':
|
|
205
206
|
fn = insert_before_extension(fn,'frames')
|
|
206
|
-
|
|
207
|
+
|
|
207
208
|
if options is not None and options.scratch_dir is not None:
|
|
208
209
|
fn = os.path.join(options.scratch_dir,fn)
|
|
209
|
-
|
|
210
|
+
|
|
210
211
|
return fn
|
|
211
|
-
|
|
212
|
-
|
|
212
|
+
|
|
213
|
+
|
|
213
214
|
def download_test_data(options=None):
|
|
214
215
|
"""
|
|
215
|
-
Downloads the test zipfile if necessary, unzips if necessary. Initializes
|
|
216
|
+
Downloads the test zipfile if necessary, unzips if necessary. Initializes
|
|
216
217
|
temporary fields in [options], particularly [options.scratch_dir].
|
|
217
|
-
|
|
218
|
+
|
|
218
219
|
Args:
|
|
219
220
|
options (MDTestOptions, optional): see MDTestOptions for details
|
|
220
|
-
|
|
221
|
+
|
|
221
222
|
Returns:
|
|
222
223
|
MDTestOptions: the same object passed in as input, or the options that
|
|
223
224
|
were used if [options] was supplied as None
|
|
@@ -225,17 +226,17 @@ def download_test_data(options=None):
|
|
|
225
226
|
|
|
226
227
|
if options is None:
|
|
227
228
|
options = MDTestOptions()
|
|
228
|
-
|
|
229
|
-
if options.scratch_dir is None:
|
|
229
|
+
|
|
230
|
+
if options.scratch_dir is None:
|
|
230
231
|
tempdir_base = tempfile.gettempdir()
|
|
231
232
|
scratch_dir = os.path.join(tempdir_base,'md-tests')
|
|
232
233
|
else:
|
|
233
234
|
scratch_dir = options.scratch_dir
|
|
234
|
-
|
|
235
|
-
os.makedirs(scratch_dir,exist_ok=True)
|
|
236
|
-
|
|
235
|
+
|
|
236
|
+
os.makedirs(scratch_dir,exist_ok=True)
|
|
237
|
+
|
|
237
238
|
# See whether we've already downloaded the data zipfile
|
|
238
|
-
download_zipfile = True
|
|
239
|
+
download_zipfile = True
|
|
239
240
|
if not options.force_data_download:
|
|
240
241
|
local_zipfile = os.path.join(scratch_dir,options.test_data_url.split('/')[-1])
|
|
241
242
|
if os.path.isfile(local_zipfile):
|
|
@@ -244,23 +245,23 @@ def download_test_data(options=None):
|
|
|
244
245
|
target_file_size = os.path.getsize(local_zipfile)
|
|
245
246
|
if remote_size == target_file_size:
|
|
246
247
|
download_zipfile = False
|
|
247
|
-
|
|
248
|
+
|
|
248
249
|
if download_zipfile:
|
|
249
250
|
print('Downloading test data zipfile')
|
|
250
251
|
urllib.request.urlretrieve(options.test_data_url, local_zipfile)
|
|
251
252
|
print('Finished download to {}'.format(local_zipfile))
|
|
252
253
|
else:
|
|
253
254
|
print('Bypassing test data zipfile download for {}'.format(local_zipfile))
|
|
254
|
-
|
|
255
|
-
|
|
255
|
+
|
|
256
|
+
|
|
256
257
|
## Unzip data
|
|
257
|
-
|
|
258
|
-
zipf = zipfile.ZipFile(local_zipfile)
|
|
258
|
+
|
|
259
|
+
zipf = zipfile.ZipFile(local_zipfile)
|
|
259
260
|
zip_contents = zipf.filelist
|
|
260
|
-
|
|
261
|
+
|
|
261
262
|
# file_info = zip_contents[1]
|
|
262
263
|
for file_info in zip_contents:
|
|
263
|
-
|
|
264
|
+
|
|
264
265
|
expected_size = file_info.file_size
|
|
265
266
|
if expected_size == 0:
|
|
266
267
|
continue
|
|
@@ -275,14 +276,14 @@ def download_test_data(options=None):
|
|
|
275
276
|
os.makedirs(os.path.dirname(target_file),exist_ok=True)
|
|
276
277
|
with open(target_file,'wb') as f:
|
|
277
278
|
f.write(zipf.read(fn_relative))
|
|
278
|
-
|
|
279
|
+
|
|
279
280
|
# ...for each file in the zipfile
|
|
280
|
-
|
|
281
|
+
|
|
281
282
|
try:
|
|
282
283
|
zipf.close()
|
|
283
284
|
except Exception as e:
|
|
284
285
|
print('Warning: error closing zipfile:\n{}'.format(str(e)))
|
|
285
|
-
|
|
286
|
+
|
|
286
287
|
# Warn if files are present that aren't expected
|
|
287
288
|
test_files = glob.glob(os.path.join(scratch_dir,'**/*'), recursive=True)
|
|
288
289
|
test_files = [os.path.relpath(fn,scratch_dir).replace('\\','/') for fn in test_files]
|
|
@@ -292,18 +293,18 @@ def download_test_data(options=None):
|
|
|
292
293
|
if fn.endswith('/'):
|
|
293
294
|
continue
|
|
294
295
|
assert fn in test_files_set, 'File {} is missing from the test image folder'.format(fn)
|
|
295
|
-
|
|
296
|
+
|
|
296
297
|
# Populate the test options with test data information
|
|
297
298
|
options.scratch_dir = scratch_dir
|
|
298
299
|
options.all_test_files = test_files
|
|
299
300
|
options.test_images = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.jpg','.jpeg','.png')]
|
|
300
|
-
options.test_videos = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.mp4','.avi')]
|
|
301
|
+
options.test_videos = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.mp4','.avi')]
|
|
301
302
|
options.test_videos = [fn for fn in options.test_videos if 'rendered' not in fn]
|
|
302
303
|
options.test_videos = [fn for fn in options.test_videos if \
|
|
303
304
|
os.path.isfile(os.path.join(scratch_dir,fn))]
|
|
304
|
-
|
|
305
|
+
|
|
305
306
|
print('Finished unzipping and enumerating test data')
|
|
306
|
-
|
|
307
|
+
|
|
307
308
|
return options
|
|
308
309
|
|
|
309
310
|
# ...def download_test_data(...)
|
|
@@ -311,16 +312,16 @@ def download_test_data(options=None):
|
|
|
311
312
|
|
|
312
313
|
def is_gpu_available(verbose=True):
|
|
313
314
|
"""
|
|
314
|
-
Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch. Returns
|
|
315
|
+
Checks whether a GPU (including M1/M2 MPS) is available, according to PyTorch. Returns
|
|
315
316
|
false if PT fails to import.
|
|
316
|
-
|
|
317
|
+
|
|
317
318
|
Args:
|
|
318
|
-
verbose (bool, optional): enable additional debug console output
|
|
319
|
-
|
|
319
|
+
verbose (bool, optional): enable additional debug console output
|
|
320
|
+
|
|
320
321
|
Returns:
|
|
321
|
-
bool: whether a GPU is available
|
|
322
|
+
bool: whether a GPU is available
|
|
322
323
|
"""
|
|
323
|
-
|
|
324
|
+
|
|
324
325
|
# Import torch inside this function, so we have a chance to set CUDA_VISIBLE_DEVICES
|
|
325
326
|
# before checking GPU availability.
|
|
326
327
|
try:
|
|
@@ -328,9 +329,9 @@ def is_gpu_available(verbose=True):
|
|
|
328
329
|
except Exception:
|
|
329
330
|
print('Warning: could not import torch')
|
|
330
331
|
return False
|
|
331
|
-
|
|
332
|
+
|
|
332
333
|
gpu_available = torch.cuda.is_available()
|
|
333
|
-
|
|
334
|
+
|
|
334
335
|
if gpu_available:
|
|
335
336
|
if verbose:
|
|
336
337
|
print('CUDA available: {}'.format(gpu_available))
|
|
@@ -344,123 +345,123 @@ def is_gpu_available(verbose=True):
|
|
|
344
345
|
pass
|
|
345
346
|
if gpu_available:
|
|
346
347
|
print('Metal performance shaders available')
|
|
347
|
-
|
|
348
|
+
|
|
348
349
|
if not gpu_available:
|
|
349
350
|
print('No GPU available')
|
|
350
|
-
|
|
351
|
-
return gpu_available
|
|
352
351
|
|
|
353
|
-
|
|
352
|
+
return gpu_available
|
|
353
|
+
|
|
354
|
+
# ...def is_gpu_available(...)
|
|
354
355
|
|
|
355
356
|
|
|
356
357
|
def output_files_are_identical(fn1,fn2,verbose=False):
|
|
357
358
|
"""
|
|
358
359
|
Checks whether two MD-formatted output files are identical other than file sorting.
|
|
359
|
-
|
|
360
|
+
|
|
360
361
|
Args:
|
|
361
362
|
fn1 (str): the first filename to compare
|
|
362
363
|
fn2 (str): the second filename to compare
|
|
363
|
-
|
|
364
|
+
|
|
364
365
|
Returns:
|
|
365
366
|
bool: whether [fn1] and [fn2] are identical other than file sorting.
|
|
366
367
|
"""
|
|
367
|
-
|
|
368
|
+
|
|
368
369
|
if verbose:
|
|
369
370
|
print('Comparing {} to {}'.format(fn1,fn2))
|
|
370
|
-
|
|
371
|
+
|
|
371
372
|
with open(fn1,'r') as f:
|
|
372
373
|
fn1_results = json.load(f)
|
|
373
374
|
fn1_results['images'] = \
|
|
374
375
|
sorted(fn1_results['images'], key=lambda d: d['file'])
|
|
375
|
-
|
|
376
|
+
|
|
376
377
|
with open(fn2,'r') as f:
|
|
377
378
|
fn2_results = json.load(f)
|
|
378
379
|
fn2_results['images'] = \
|
|
379
380
|
sorted(fn2_results['images'], key=lambda d: d['file'])
|
|
380
|
-
|
|
381
|
+
|
|
381
382
|
if len(fn1_results['images']) != len(fn1_results['images']):
|
|
382
383
|
if verbose:
|
|
383
384
|
print('{} images in {}, {} images in {}'.format(
|
|
384
385
|
len(fn1_results['images']),fn1,
|
|
385
386
|
len(fn2_results['images']),fn2))
|
|
386
387
|
return False
|
|
387
|
-
|
|
388
|
+
|
|
388
389
|
# i_image = 0; fn1_image = fn1_results['images'][i_image]
|
|
389
390
|
for i_image,fn1_image in enumerate(fn1_results['images']):
|
|
390
|
-
|
|
391
|
+
|
|
391
392
|
fn2_image = fn2_results['images'][i_image]
|
|
392
|
-
|
|
393
|
+
|
|
393
394
|
if fn1_image['file'] != fn2_image['file']:
|
|
394
395
|
if verbose:
|
|
395
396
|
print('Filename difference at {}: {} vs {} '.format(i_image,fn1_image['file'],fn1_image['file']))
|
|
396
397
|
return False
|
|
397
|
-
|
|
398
|
+
|
|
398
399
|
if fn1_image != fn2_image:
|
|
399
400
|
if verbose:
|
|
400
401
|
print('Image-level difference in image {}: {}'.format(i_image,fn1_image['file']))
|
|
401
402
|
return False
|
|
402
|
-
|
|
403
|
+
|
|
403
404
|
return True
|
|
404
405
|
|
|
405
406
|
# ...def output_files_are_identical(...)
|
|
406
|
-
|
|
407
|
+
|
|
407
408
|
|
|
408
409
|
def compare_detection_lists(detections_a,detections_b,options,bidirectional_comparison=True):
|
|
409
410
|
"""
|
|
410
411
|
Compare two lists of MD-formatted detections, matching detections across lists using IoU
|
|
411
|
-
criteria. Generally used to compare detections for the same image when two sets of results
|
|
412
|
+
criteria. Generally used to compare detections for the same image when two sets of results
|
|
412
413
|
are expected to be more or less the same.
|
|
413
|
-
|
|
414
|
+
|
|
414
415
|
Args:
|
|
415
416
|
detections_a (list): the first set of detection dicts
|
|
416
417
|
detections_b (list): the second set of detection dicts
|
|
417
|
-
options (MDTestOptions): options that determine tolerable differences between files
|
|
418
|
+
options (MDTestOptions): options that determine tolerable differences between files
|
|
418
419
|
bidirectional_comparison (bool, optional): reverse the arguments and make a recursive
|
|
419
420
|
call.
|
|
420
|
-
|
|
421
|
+
|
|
421
422
|
Returns:
|
|
422
423
|
dict: a dictionary with keys 'max_conf_error' and 'max_coord_error'.
|
|
423
424
|
"""
|
|
424
425
|
from megadetector.utils.ct_utils import get_iou
|
|
425
|
-
|
|
426
|
+
|
|
426
427
|
max_conf_error = 0
|
|
427
428
|
max_coord_error = 0
|
|
428
|
-
|
|
429
|
+
|
|
429
430
|
max_conf_error_det_a = None
|
|
430
431
|
max_conf_error_det_b = None
|
|
431
|
-
|
|
432
|
+
|
|
432
433
|
max_coord_error_det_a = None
|
|
433
434
|
max_coord_error_det_b = None
|
|
434
|
-
|
|
435
|
+
|
|
435
436
|
# i_det_a = 0
|
|
436
437
|
for i_det_a in range(0,len(detections_a)):
|
|
437
|
-
|
|
438
|
+
|
|
438
439
|
det_a = detections_a[i_det_a]
|
|
439
|
-
|
|
440
|
+
|
|
440
441
|
# Don't process very-low-confidence boxes
|
|
441
442
|
# if det_a['conf'] < options.max_conf_error:
|
|
442
443
|
# continue
|
|
443
|
-
|
|
444
|
+
|
|
444
445
|
matching_det_b = None
|
|
445
446
|
highest_iou = -1
|
|
446
|
-
|
|
447
|
+
|
|
447
448
|
# Find the closest match in the detections_b list
|
|
448
|
-
|
|
449
|
+
|
|
449
450
|
# i_det_b = 0
|
|
450
451
|
for i_det_b in range(0,len(detections_b)):
|
|
451
|
-
|
|
452
|
+
|
|
452
453
|
det_b = detections_b[i_det_b]
|
|
453
|
-
|
|
454
|
+
|
|
454
455
|
if det_b['category'] != det_a['category']:
|
|
455
456
|
continue
|
|
456
|
-
|
|
457
|
+
|
|
457
458
|
iou = get_iou(det_a['bbox'],det_b['bbox'])
|
|
458
|
-
|
|
459
|
+
|
|
459
460
|
# Is this likely the same detection as det_a?
|
|
460
461
|
if iou >= options.iou_threshold_for_file_comparison and iou > highest_iou:
|
|
461
462
|
matching_det_b = det_b
|
|
462
463
|
highest_iou = iou
|
|
463
|
-
|
|
464
|
+
|
|
464
465
|
# If there are no detections in this category in detections_b
|
|
465
466
|
if matching_det_b is None:
|
|
466
467
|
if det_a['conf'] > max_conf_error:
|
|
@@ -468,34 +469,34 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
|
|
|
468
469
|
max_conf_error_det_a = det_a
|
|
469
470
|
# max_coord_error = 1.0
|
|
470
471
|
continue
|
|
471
|
-
|
|
472
|
+
|
|
472
473
|
assert det_a['category'] == matching_det_b['category']
|
|
473
474
|
conf_err = abs(det_a['conf'] - matching_det_b['conf'])
|
|
474
475
|
coord_differences = []
|
|
475
476
|
for i_coord in range(0,4):
|
|
476
477
|
coord_differences.append(abs(det_a['bbox'][i_coord]-\
|
|
477
478
|
matching_det_b['bbox'][i_coord]))
|
|
478
|
-
coord_err = max(coord_differences)
|
|
479
|
-
|
|
479
|
+
coord_err = max(coord_differences)
|
|
480
|
+
|
|
480
481
|
if conf_err >= max_conf_error:
|
|
481
482
|
max_conf_error = conf_err
|
|
482
483
|
max_conf_error_det_a = det_a
|
|
483
484
|
max_conf_error_det_b = det_b
|
|
484
|
-
|
|
485
|
+
|
|
485
486
|
if coord_err >= max_coord_error:
|
|
486
487
|
max_coord_error = coord_err
|
|
487
488
|
max_coord_error_det_a = det_a
|
|
488
489
|
max_coord_error_det_b = det_b
|
|
489
|
-
|
|
490
|
+
|
|
490
491
|
# ...for each detection in detections_a
|
|
491
|
-
|
|
492
|
+
|
|
492
493
|
if bidirectional_comparison:
|
|
493
|
-
|
|
494
|
+
|
|
494
495
|
reverse_comparison_results = compare_detection_lists(detections_b,
|
|
495
|
-
detections_a,
|
|
496
|
-
options,
|
|
496
|
+
detections_a,
|
|
497
|
+
options,
|
|
497
498
|
bidirectional_comparison=False)
|
|
498
|
-
|
|
499
|
+
|
|
499
500
|
if reverse_comparison_results['max_conf_error'] > max_conf_error:
|
|
500
501
|
max_conf_error = reverse_comparison_results['max_conf_error']
|
|
501
502
|
max_conf_error_det_a = reverse_comparison_results['max_conf_error_det_b']
|
|
@@ -504,17 +505,17 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
|
|
|
504
505
|
max_coord_error = reverse_comparison_results['max_coord_error']
|
|
505
506
|
max_coord_error_det_a = reverse_comparison_results['max_coord_error_det_b']
|
|
506
507
|
max_coord_error_det_b = reverse_comparison_results['max_coord_error_det_a']
|
|
507
|
-
|
|
508
|
+
|
|
508
509
|
list_comparison_results = {}
|
|
509
|
-
|
|
510
|
+
|
|
510
511
|
list_comparison_results['max_coord_error'] = max_coord_error
|
|
511
512
|
list_comparison_results['max_coord_error_det_a'] = max_coord_error_det_a
|
|
512
513
|
list_comparison_results['max_coord_error_det_b'] = max_coord_error_det_b
|
|
513
|
-
|
|
514
|
+
|
|
514
515
|
list_comparison_results['max_conf_error'] = max_conf_error
|
|
515
516
|
list_comparison_results['max_conf_error_det_a'] = max_conf_error_det_a
|
|
516
517
|
list_comparison_results['max_conf_error_det_b'] = max_conf_error_det_b
|
|
517
|
-
|
|
518
|
+
|
|
518
519
|
return list_comparison_results
|
|
519
520
|
|
|
520
521
|
# ...def compare_detection_lists(...)
|
|
@@ -526,98 +527,98 @@ def compare_results(inference_output_file,
|
|
|
526
527
|
expected_results_file_is_absolute=False):
|
|
527
528
|
"""
|
|
528
529
|
Compare two MD-formatted output files that should be nearly identical, allowing small
|
|
529
|
-
changes (e.g. rounding differences). Generally used to compare a new results file to
|
|
530
|
+
changes (e.g. rounding differences). Generally used to compare a new results file to
|
|
530
531
|
an expected results file.
|
|
531
|
-
|
|
532
|
+
|
|
532
533
|
Args:
|
|
533
534
|
inference_output_file (str): the first results file to compare
|
|
534
535
|
expected_results_file (str): the second results file to compare
|
|
535
536
|
options (MDTestOptions): options that determine tolerable differences between files
|
|
536
|
-
expected_results_file_is_absolute (str, optional): by default,
|
|
537
|
+
expected_results_file_is_absolute (str, optional): by default,
|
|
537
538
|
expected_results_file is appended to options.scratch_dir; this option
|
|
538
539
|
specifies that it's an absolute path.
|
|
539
|
-
|
|
540
|
+
|
|
540
541
|
Returns:
|
|
541
542
|
dict: dictionary with keys 'max_coord_error' and 'max_conf_error'
|
|
542
543
|
"""
|
|
543
|
-
|
|
544
|
+
|
|
544
545
|
# Read results
|
|
545
546
|
with open(inference_output_file,'r') as f:
|
|
546
547
|
results_from_file = json.load(f) # noqa
|
|
547
|
-
|
|
548
|
+
|
|
548
549
|
if not expected_results_file_is_absolute:
|
|
549
550
|
expected_results_file= os.path.join(options.scratch_dir,expected_results_file)
|
|
550
|
-
|
|
551
|
+
|
|
551
552
|
with open(expected_results_file,'r') as f:
|
|
552
553
|
expected_results = json.load(f)
|
|
553
|
-
|
|
554
|
+
|
|
554
555
|
filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
|
|
555
556
|
filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
|
|
556
|
-
|
|
557
|
+
|
|
557
558
|
assert len(filename_to_results) == len(filename_to_results_expected), \
|
|
558
559
|
'Error: expected {} files in results, found {}'.format(
|
|
559
560
|
len(filename_to_results_expected),
|
|
560
561
|
len(filename_to_results))
|
|
561
|
-
|
|
562
|
+
|
|
562
563
|
max_conf_error = -1
|
|
563
564
|
max_conf_error_file = None
|
|
564
565
|
max_conf_error_comparison_results = None
|
|
565
|
-
|
|
566
|
+
|
|
566
567
|
max_coord_error = -1
|
|
567
|
-
max_coord_error_file = None
|
|
568
|
+
max_coord_error_file = None
|
|
568
569
|
max_coord_error_comparison_results = None
|
|
569
|
-
|
|
570
|
+
|
|
570
571
|
# fn = next(iter(filename_to_results.keys()))
|
|
571
572
|
for fn in filename_to_results.keys():
|
|
572
|
-
|
|
573
|
+
|
|
573
574
|
actual_image_results = filename_to_results[fn]
|
|
574
575
|
expected_image_results = filename_to_results_expected[fn]
|
|
575
|
-
|
|
576
|
+
|
|
576
577
|
if 'failure' in actual_image_results:
|
|
577
578
|
assert 'failure' in expected_image_results and \
|
|
578
579
|
'detections' not in actual_image_results and \
|
|
579
580
|
'detections' not in expected_image_results
|
|
580
581
|
continue
|
|
581
582
|
assert 'failure' not in expected_image_results
|
|
582
|
-
|
|
583
|
+
|
|
583
584
|
actual_detections = actual_image_results['detections']
|
|
584
585
|
expected_detections = expected_image_results['detections']
|
|
585
|
-
|
|
586
|
+
|
|
586
587
|
comparison_results_this_image = compare_detection_lists(
|
|
587
588
|
detections_a=actual_detections,
|
|
588
589
|
detections_b=expected_detections,
|
|
589
590
|
options=options,
|
|
590
591
|
bidirectional_comparison=True)
|
|
591
|
-
|
|
592
|
+
|
|
592
593
|
if comparison_results_this_image['max_conf_error'] > max_conf_error:
|
|
593
594
|
max_conf_error = comparison_results_this_image['max_conf_error']
|
|
594
595
|
max_conf_error_comparison_results = comparison_results_this_image
|
|
595
596
|
max_conf_error_file = fn
|
|
596
|
-
|
|
597
|
+
|
|
597
598
|
if comparison_results_this_image['max_coord_error'] > max_coord_error:
|
|
598
599
|
max_coord_error = comparison_results_this_image['max_coord_error']
|
|
599
600
|
max_coord_error_comparison_results = comparison_results_this_image
|
|
600
601
|
max_coord_error_file = fn
|
|
601
|
-
|
|
602
|
+
|
|
602
603
|
# ...for each image
|
|
603
|
-
|
|
604
|
+
|
|
604
605
|
if not options.warning_mode:
|
|
605
|
-
|
|
606
|
+
|
|
606
607
|
assert max_conf_error <= options.max_conf_error, \
|
|
607
608
|
'Confidence error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
|
|
608
609
|
max_conf_error,options.max_conf_error,max_conf_error_file,
|
|
609
610
|
inference_output_file,expected_results_file)
|
|
610
|
-
|
|
611
|
+
|
|
611
612
|
assert max_coord_error <= options.max_coord_error, \
|
|
612
613
|
'Coord error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
|
|
613
614
|
max_coord_error,options.max_coord_error,max_coord_error_file,
|
|
614
615
|
inference_output_file,expected_results_file)
|
|
615
|
-
|
|
616
|
+
|
|
616
617
|
print('Max conf error: {} (file {})'.format(
|
|
617
618
|
max_conf_error,max_conf_error_file))
|
|
618
619
|
print('Max coord error: {} (file {})'.format(
|
|
619
620
|
max_coord_error,max_coord_error_file))
|
|
620
|
-
|
|
621
|
+
|
|
621
622
|
comparison_results = {}
|
|
622
623
|
comparison_results['max_conf_error'] = max_conf_error
|
|
623
624
|
comparison_results['max_conf_error_comparison_results'] = max_conf_error_comparison_results
|
|
@@ -638,18 +639,18 @@ def _args_to_object(args, obj):
|
|
|
638
639
|
Args:
|
|
639
640
|
args (argparse.Namespace): the namespace to convert to an object
|
|
640
641
|
obj (object): object whose whose attributes will be updated
|
|
641
|
-
|
|
642
|
+
|
|
642
643
|
Returns:
|
|
643
644
|
object: the modified object (modified in place, but also returned)
|
|
644
645
|
"""
|
|
645
|
-
|
|
646
|
+
|
|
646
647
|
for n, v in inspect.getmembers(args):
|
|
647
648
|
if not n.startswith('_'):
|
|
648
649
|
setattr(obj, n, v)
|
|
649
650
|
|
|
650
651
|
return obj
|
|
651
652
|
|
|
652
|
-
|
|
653
|
+
|
|
653
654
|
#%% CLI functions
|
|
654
655
|
|
|
655
656
|
# These are copied from process_utils.py to avoid imports outside of the test
|
|
@@ -657,21 +658,21 @@ def _args_to_object(args, obj):
|
|
|
657
658
|
|
|
658
659
|
os.environ["PYTHONUNBUFFERED"] = "1"
|
|
659
660
|
|
|
660
|
-
# In some circumstances I want to allow CLI tests to "succeed" even when they return
|
|
661
|
+
# In some circumstances I want to allow CLI tests to "succeed" even when they return
|
|
661
662
|
# specific non-zero output values.
|
|
662
663
|
allowable_process_return_codes = [0]
|
|
663
664
|
|
|
664
665
|
def execute(cmd):
|
|
665
666
|
"""
|
|
666
667
|
Runs [cmd] (a single string) in a shell, yielding each line of output to the caller.
|
|
667
|
-
|
|
668
|
+
|
|
668
669
|
Args:
|
|
669
670
|
cmd (str): command to run
|
|
670
|
-
|
|
671
|
+
|
|
671
672
|
Returns:
|
|
672
673
|
int: the command's return code, always zero, otherwise a CalledProcessError is raised
|
|
673
674
|
"""
|
|
674
|
-
|
|
675
|
+
|
|
675
676
|
# https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
|
|
676
677
|
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
|
677
678
|
shell=True, universal_newlines=True)
|
|
@@ -687,11 +688,11 @@ def execute(cmd):
|
|
|
687
688
|
def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=True):
|
|
688
689
|
"""
|
|
689
690
|
Runs [cmd] (a single string) in a shell, capturing (and optionally printing) output.
|
|
690
|
-
|
|
691
|
+
|
|
691
692
|
Args:
|
|
692
693
|
cmd (str): command to run
|
|
693
694
|
print_output (bool, optional): whether to print output from [cmd]
|
|
694
|
-
|
|
695
|
+
|
|
695
696
|
Returns:
|
|
696
697
|
dict: a dictionary with fields "status" (the process return code) and "output"
|
|
697
698
|
(the content of stdout)
|
|
@@ -699,7 +700,7 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
|
|
|
699
700
|
|
|
700
701
|
if echo_command:
|
|
701
702
|
print('Running command:\n{}\n'.format(cmd))
|
|
702
|
-
|
|
703
|
+
|
|
703
704
|
to_return = {'status':'unknown','output':''}
|
|
704
705
|
output = []
|
|
705
706
|
try:
|
|
@@ -709,21 +710,22 @@ def execute_and_print(cmd,print_output=True,catch_exceptions=False,echo_command=
|
|
|
709
710
|
print(s,end='',flush=True)
|
|
710
711
|
to_return['status'] = 0
|
|
711
712
|
except subprocess.CalledProcessError as cpe:
|
|
712
|
-
if not catch_exceptions:
|
|
713
|
+
if not catch_exceptions:
|
|
713
714
|
raise
|
|
714
715
|
print('execute_and_print caught error: {}'.format(cpe.output))
|
|
715
716
|
to_return['status'] = cpe.returncode
|
|
716
717
|
to_return['output'] = output
|
|
717
|
-
|
|
718
|
+
|
|
718
719
|
return to_return
|
|
719
720
|
|
|
720
721
|
|
|
721
722
|
#%% Python tests
|
|
722
723
|
|
|
724
|
+
@pytest.mark.skip(reason='Called one for each module')
|
|
723
725
|
def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
724
726
|
"""
|
|
725
727
|
Imports all modules in [package_name]
|
|
726
|
-
|
|
728
|
+
|
|
727
729
|
Args:
|
|
728
730
|
package_name (str): the package name to test
|
|
729
731
|
exceptions (list, optional): exclude any modules that contain any of these strings
|
|
@@ -731,16 +733,16 @@ def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
|
731
733
|
"""
|
|
732
734
|
import importlib
|
|
733
735
|
import pkgutil
|
|
734
|
-
|
|
736
|
+
|
|
735
737
|
package = importlib.import_module(package_name)
|
|
736
738
|
package_path = package.__path__
|
|
737
739
|
imported_modules = []
|
|
738
|
-
|
|
740
|
+
|
|
739
741
|
if exceptions is None:
|
|
740
742
|
exceptions = []
|
|
741
|
-
|
|
743
|
+
|
|
742
744
|
for _, modname, _ in pkgutil.walk_packages(package_path, package_name + '.'):
|
|
743
|
-
|
|
745
|
+
|
|
744
746
|
skip_module = False
|
|
745
747
|
for s in exceptions:
|
|
746
748
|
if s in modname:
|
|
@@ -748,14 +750,14 @@ def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
|
748
750
|
break
|
|
749
751
|
if skip_module:
|
|
750
752
|
continue
|
|
751
|
-
|
|
753
|
+
|
|
752
754
|
if verbose:
|
|
753
755
|
print('Testing import: {}'.format(modname))
|
|
754
|
-
|
|
756
|
+
|
|
755
757
|
try:
|
|
756
758
|
# Attempt to import each module
|
|
757
759
|
_ = importlib.import_module(modname)
|
|
758
|
-
imported_modules.append(modname)
|
|
760
|
+
imported_modules.append(modname)
|
|
759
761
|
except ImportError as e:
|
|
760
762
|
print(f"Failed to import module {modname}: {e}")
|
|
761
763
|
raise
|
|
@@ -764,53 +766,45 @@ def test_package_imports(package_name,exceptions=None,verbose=True):
|
|
|
764
766
|
def run_python_tests(options):
|
|
765
767
|
"""
|
|
766
768
|
Runs Python-based (as opposed to CLI-based) package tests.
|
|
767
|
-
|
|
769
|
+
|
|
768
770
|
Args:
|
|
769
771
|
options (MDTestOptions): see MDTestOptions for details
|
|
770
772
|
"""
|
|
771
|
-
|
|
773
|
+
|
|
772
774
|
print('\n*** Starting module tests ***\n')
|
|
773
|
-
|
|
774
|
-
|
|
775
|
+
|
|
776
|
+
|
|
775
777
|
## Prepare data
|
|
776
|
-
|
|
778
|
+
|
|
777
779
|
download_test_data(options)
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
## Miscellaneous utility tests
|
|
781
|
-
|
|
782
|
-
print('\n** Running ct_utils module test **\n')
|
|
783
|
-
|
|
784
|
-
from megadetector.utils.ct_utils import __module_test__ as ct_utils_test
|
|
785
|
-
ct_utils_test()
|
|
786
|
-
|
|
787
|
-
|
|
780
|
+
|
|
781
|
+
|
|
788
782
|
## Import tests
|
|
789
|
-
|
|
783
|
+
|
|
790
784
|
print('\n** Running package import tests **\n')
|
|
791
785
|
test_package_imports('megadetector.visualization')
|
|
792
786
|
test_package_imports('megadetector.postprocessing')
|
|
793
787
|
test_package_imports('megadetector.postprocessing.repeat_detection_elimination')
|
|
794
788
|
test_package_imports('megadetector.utils',exceptions=['azure_utils','sas_blob_utils','md_tests'])
|
|
795
789
|
test_package_imports('megadetector.data_management',exceptions=['lila','ocr_tools'])
|
|
796
|
-
|
|
790
|
+
|
|
797
791
|
|
|
798
792
|
## Return early if we're not running torch-related tests
|
|
799
|
-
|
|
793
|
+
|
|
800
794
|
if options.test_mode == 'utils-only':
|
|
801
795
|
return
|
|
802
|
-
|
|
803
|
-
|
|
796
|
+
|
|
797
|
+
|
|
804
798
|
## Make sure our tests are doing what we think they're doing
|
|
805
|
-
|
|
799
|
+
|
|
806
800
|
from megadetector.detection import pytorch_detector
|
|
807
801
|
pytorch_detector.require_non_default_compatibility_mode = True
|
|
808
|
-
|
|
809
|
-
|
|
802
|
+
|
|
803
|
+
|
|
810
804
|
## Run inference on an image
|
|
811
|
-
|
|
805
|
+
|
|
812
806
|
print('\n** Running MD on a single image (module) **\n')
|
|
813
|
-
|
|
807
|
+
|
|
814
808
|
from megadetector.detection import run_detector
|
|
815
809
|
from megadetector.visualization import visualization_utils as vis_utils # noqa
|
|
816
810
|
image_fn = os.path.join(options.scratch_dir,options.test_images[0])
|
|
@@ -818,15 +812,15 @@ def run_python_tests(options):
|
|
|
818
812
|
detector_options=copy(options.detector_options))
|
|
819
813
|
pil_im = vis_utils.load_image(image_fn)
|
|
820
814
|
result = model.generate_detections_one_image(pil_im) # noqa
|
|
821
|
-
|
|
815
|
+
|
|
822
816
|
if options.python_test_depth <= 1:
|
|
823
817
|
return
|
|
824
|
-
|
|
825
|
-
|
|
818
|
+
|
|
819
|
+
|
|
826
820
|
## Run inference on a folder
|
|
827
821
|
|
|
828
822
|
print('\n** Running MD on a folder of images (module) **\n')
|
|
829
|
-
|
|
823
|
+
|
|
830
824
|
from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
|
|
831
825
|
from megadetector.utils import path_utils # noqa
|
|
832
826
|
|
|
@@ -834,7 +828,7 @@ def run_python_tests(options):
|
|
|
834
828
|
assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
|
|
835
829
|
inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
|
|
836
830
|
image_file_names = path_utils.find_images(image_folder,recursive=True)
|
|
837
|
-
results = load_and_run_detector_batch(options.default_model,
|
|
831
|
+
results = load_and_run_detector_batch(options.default_model,
|
|
838
832
|
image_file_names,
|
|
839
833
|
quiet=True,
|
|
840
834
|
detector_options=copy(options.detector_options))
|
|
@@ -844,30 +838,30 @@ def run_python_tests(options):
|
|
|
844
838
|
detector_file=options.default_model)
|
|
845
839
|
|
|
846
840
|
## Verify results
|
|
847
|
-
|
|
841
|
+
|
|
848
842
|
# Verify format correctness
|
|
849
843
|
from megadetector.postprocessing.validate_batch_results import validate_batch_results #noqa
|
|
850
844
|
validate_batch_results(inference_output_file)
|
|
851
|
-
|
|
845
|
+
|
|
852
846
|
# Verify value correctness
|
|
853
847
|
expected_results_file = get_expected_results_filename(is_gpu_available(verbose=False),
|
|
854
848
|
options=options)
|
|
855
849
|
compare_results(inference_output_file,expected_results_file,options)
|
|
856
|
-
|
|
857
|
-
|
|
850
|
+
|
|
851
|
+
|
|
858
852
|
# Make note of this filename, we will use it again later
|
|
859
853
|
inference_output_file_standard_inference = inference_output_file
|
|
860
|
-
|
|
854
|
+
|
|
861
855
|
if options.python_test_depth <= 2:
|
|
862
856
|
return
|
|
863
|
-
|
|
864
|
-
|
|
857
|
+
|
|
858
|
+
|
|
865
859
|
## Run and verify again with augmentation enabled
|
|
866
|
-
|
|
860
|
+
|
|
867
861
|
print('\n** Running MD on images with augmentation (module) **\n')
|
|
868
|
-
|
|
862
|
+
|
|
869
863
|
from megadetector.utils.path_utils import insert_before_extension
|
|
870
|
-
|
|
864
|
+
|
|
871
865
|
inference_output_file_augmented = insert_before_extension(inference_output_file,'augmented')
|
|
872
866
|
results = load_and_run_detector_batch(options.default_model,
|
|
873
867
|
image_file_names,
|
|
@@ -883,32 +877,32 @@ def run_python_tests(options):
|
|
|
883
877
|
get_expected_results_filename(is_gpu_available(verbose=False),
|
|
884
878
|
augment=True,options=options)
|
|
885
879
|
compare_results(inference_output_file_augmented,expected_results_file_augmented,options)
|
|
886
|
-
|
|
887
|
-
|
|
880
|
+
|
|
881
|
+
|
|
888
882
|
## Postprocess results
|
|
889
|
-
|
|
883
|
+
|
|
890
884
|
print('\n** Post-processing results (module) **\n')
|
|
891
|
-
|
|
885
|
+
|
|
892
886
|
from megadetector.postprocessing.postprocess_batch_results import \
|
|
893
887
|
PostProcessingOptions,process_batch_results
|
|
894
888
|
postprocessing_options = PostProcessingOptions()
|
|
895
|
-
|
|
889
|
+
|
|
896
890
|
postprocessing_options.md_results_file = inference_output_file
|
|
897
891
|
postprocessing_options.output_dir = os.path.join(options.scratch_dir,'postprocessing_output')
|
|
898
892
|
postprocessing_options.image_base_dir = image_folder
|
|
899
|
-
|
|
893
|
+
|
|
900
894
|
postprocessing_results = process_batch_results(postprocessing_options)
|
|
901
895
|
assert os.path.isfile(postprocessing_results.output_html_file), \
|
|
902
896
|
'Postprocessing output file {} not found'.format(postprocessing_results.output_html_file)
|
|
903
|
-
|
|
904
|
-
|
|
897
|
+
|
|
898
|
+
|
|
905
899
|
## Partial RDE test
|
|
906
|
-
|
|
900
|
+
|
|
907
901
|
print('\n** Testing RDE (module) **\n')
|
|
908
|
-
|
|
902
|
+
|
|
909
903
|
from megadetector.postprocessing.repeat_detection_elimination.repeat_detections_core import \
|
|
910
904
|
RepeatDetectionOptions, find_repeat_detections
|
|
911
|
-
|
|
905
|
+
|
|
912
906
|
rde_options = RepeatDetectionOptions()
|
|
913
907
|
rde_options.occurrenceThreshold = 2
|
|
914
908
|
rde_options.confidenceMin = 0.001
|
|
@@ -919,24 +913,24 @@ def run_python_tests(options):
|
|
|
919
913
|
rde_results = find_repeat_detections(inference_output_file, rde_output_file, rde_options)
|
|
920
914
|
assert os.path.isfile(rde_results.filterFile),\
|
|
921
915
|
'Could not find RDE output file {}'.format(rde_results.filterFile)
|
|
922
|
-
|
|
923
|
-
|
|
916
|
+
|
|
917
|
+
|
|
924
918
|
## Run inference on a folder (with YOLOv5 val script)
|
|
925
|
-
|
|
919
|
+
|
|
926
920
|
if options.yolo_working_dir is None:
|
|
927
|
-
|
|
921
|
+
|
|
928
922
|
print('Skipping YOLO val inference tests, no YOLO folder supplied')
|
|
929
|
-
|
|
923
|
+
|
|
930
924
|
else:
|
|
931
|
-
|
|
925
|
+
|
|
932
926
|
print('\n** Running YOLO val inference test (module) **\n')
|
|
933
|
-
|
|
927
|
+
|
|
934
928
|
from megadetector.detection.run_inference_with_yolov5_val import \
|
|
935
929
|
YoloInferenceOptions, run_inference_with_yolo_val
|
|
936
930
|
from megadetector.utils.path_utils import insert_before_extension
|
|
937
|
-
|
|
931
|
+
|
|
938
932
|
inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
|
|
939
|
-
|
|
933
|
+
|
|
940
934
|
yolo_inference_options = YoloInferenceOptions()
|
|
941
935
|
yolo_inference_options.input_folder = os.path.join(options.scratch_dir,'md-test-images')
|
|
942
936
|
yolo_inference_options.output_file = inference_output_file_yolo_val
|
|
@@ -946,76 +940,76 @@ def run_python_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1032,22 +1026,22 @@ def run_python_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1055,7 +1049,7 @@ def run_python_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1068,63 +1062,63 @@ def run_python_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]
@@ -1135,52 +1129,52 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1191,7 +1185,7 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1202,17 +1196,17 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1226,109 +1220,109 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1337,14 +1331,14 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1353,44 +1347,44 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1402,21 +1396,21 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1434,7 +1428,7 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1442,32 +1436,32 @@ def run_cli_tests(options):
[whitespace-only change elsewhere in this hunk: lines removed and re-added with no visible difference]

         cmd += ' "{}" "{}"'.format(options.default_model,video_fn)
-        cmd += ' --frame_folder "{}" --frame_rendering_folder "{}" --output_json_file "{}" --output_video_file "{}"'.format(
+        cmd += ' --frame_folder "{}" --frame_rendering_folder "{}" --output_json_file "{}" --output_video_file "{}"'.format( #noqa
             frame_folder,frame_rendering_folder,video_inference_output_file,output_video_file)

@@ -1475,19 +1469,19 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1500,13 +1494,13 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1517,22 +1511,22 @@ def run_cli_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1541,19 +1535,19 @@ def run_download_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1562,11 +1556,11 @@ def run_download_tests(options):
[whitespace-only change: lines removed and re-added with no visible difference]
@@ -1575,74 +1569,101 @@ def run_download_tests(options):
[whitespace-only change elsewhere in this hunk: lines removed and re-added with no visible difference]

     # Run CLI tests
     if not options.skip_cli_tests:
         run_cli_tests(options)


+#%% Automated test entry point
+
+def test_suite_entry_point():
+
+    options = MDTestOptions()
+    options.disable_gpu = False
+    options.cpu_execution_is_error = False
+    options.skip_video_tests = True
+    options.skip_python_tests = False
+    options.skip_cli_tests = True
+    options.scratch_dir = None
+    options.test_data_url = 'https://lila.science/public/md-test-package.zip'
+    options.force_data_download = False
+    options.force_data_unzip = False
+    options.warning_mode = False
+    options.max_coord_error = 0.01 # 0.001
+    options.max_conf_error = 0.01 # 0.005
+    options.skip_video_rendering_tests = True
+    options.cli_working_dir = None
+    options.cli_test_pythonpath = None
+
+    options.skip_download_tests = True
+
+    options = download_test_data(options)
+    run_tests(options)
+
+
 #%% Interactive driver

 if False:

     pass

     #%%

     options = MDTestOptions()
@@ -1657,149 +1678,149 @@ if False:
[whitespace-only change elsewhere in this hunk: lines removed and re-added with no visible difference]

 #%% Command-line driver

-def main():
+def main(): # noqa

     options = MDTestOptions()
@@ -1812,7 +1833,7 @@ def main():
[whitespace-only change: lines removed and re-added with no visible difference]

@@ -1825,55 +1846,57 @@ def main():
[whitespace-only change elsewhere in this hunk: lines removed and re-added with no visible difference]

     options.detector_options = parse_kvp_list(args.detector_options,d=initial_detector_options)

     run_tests(options)

+# ...def main()
+
+if __name__ == '__main__':
     main()
@@ -1884,27 +1907,27 @@ if False:
[whitespace-only change: lines removed and re-added with no visible difference]
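The one functional addition in this file is the test_suite_entry_point() function shown above, which builds an MDTestOptions instance with video, CLI, and model-download tests disabled, downloads the test data package, and calls run_tests(). A minimal sketch of invoking it from an automated test runner follows; the module path megadetector.utils.md_tests and the test function name are assumptions for illustration, not something stated in this diff.

    # Hypothetical pytest-style wrapper around the new entry point; adjust the
    # import path to wherever this test module lives in the installed package.
    from megadetector.utils.md_tests import test_suite_entry_point

    def test_megadetector_package():
        # Downloads the MD test data package and runs the Python-based tests,
        # with video, CLI, and download tests skipped (the function's defaults).
        test_suite_entry_point()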
|