megadetector 5.0.10-py3-none-any.whl → 5.0.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
- megadetector-5.0.11.dist-info/RECORD +5 -0
- megadetector-5.0.11.dist-info/top_level.txt +1 -0
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -439
- api/batch_processing/api_core/server.py +0 -294
- api/batch_processing/api_core/server_api_config.py +0 -98
- api/batch_processing/api_core/server_app_config.py +0 -55
- api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- api/batch_processing/api_core/server_job_status_table.py +0 -152
- api/batch_processing/api_core/server_orchestration.py +0 -360
- api/batch_processing/api_core/server_utils.py +0 -92
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
- api/batch_processing/data_preparation/manage_video_batch.py +0 -327
- api/batch_processing/integration/digiKam/setup.py +0 -6
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
- api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
- api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +0 -64
- api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
- api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
- api/batch_processing/postprocessing/compare_batch_results.py +0 -958
- api/batch_processing/postprocessing/convert_output_format.py +0 -397
- api/batch_processing/postprocessing/load_api_results.py +0 -195
- api/batch_processing/postprocessing/md_to_coco.py +0 -310
- api/batch_processing/postprocessing/md_to_labelme.py +0 -330
- api/batch_processing/postprocessing/merge_detections.py +0 -401
- api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
- api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
- api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
- api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
- api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
- api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
- api/synchronous/api_core/animal_detection_api/config.py +0 -35
- api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
- api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
- api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
- api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
- api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
- api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
- api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
- api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
- api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
- api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
- api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
- api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
- api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
- api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
- api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
- api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
- api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
- api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
- api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
- api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
- api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +0 -110
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +0 -108
- classification/analyze_failed_images.py +0 -227
- classification/cache_batchapi_outputs.py +0 -198
- classification/create_classification_dataset.py +0 -627
- classification/crop_detections.py +0 -516
- classification/csv_to_json.py +0 -226
- classification/detect_and_crop.py +0 -855
- classification/efficientnet/__init__.py +0 -9
- classification/efficientnet/model.py +0 -415
- classification/efficientnet/utils.py +0 -610
- classification/evaluate_model.py +0 -520
- classification/identify_mislabeled_candidates.py +0 -152
- classification/json_to_azcopy_list.py +0 -63
- classification/json_validator.py +0 -695
- classification/map_classification_categories.py +0 -276
- classification/merge_classification_detection_output.py +0 -506
- classification/prepare_classification_script.py +0 -194
- classification/prepare_classification_script_mc.py +0 -228
- classification/run_classifier.py +0 -286
- classification/save_mislabeled.py +0 -110
- classification/train_classifier.py +0 -825
- classification/train_classifier_tf.py +0 -724
- classification/train_utils.py +0 -322
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +0 -34
- data_management/camtrap_dp_to_coco.py +0 -238
- data_management/cct_json_utils.py +0 -395
- data_management/cct_to_md.py +0 -176
- data_management/cct_to_wi.py +0 -289
- data_management/coco_to_labelme.py +0 -272
- data_management/coco_to_yolo.py +0 -662
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +0 -33
- data_management/databases/combine_coco_camera_traps_files.py +0 -206
- data_management/databases/integrity_check_json_db.py +0 -477
- data_management/databases/subset_json_db.py +0 -115
- data_management/generate_crops_from_cct.py +0 -149
- data_management/get_image_sizes.py +0 -188
- data_management/importers/add_nacti_sizes.py +0 -52
- data_management/importers/add_timestamps_to_icct.py +0 -79
- data_management/importers/animl_results_to_md_results.py +0 -158
- data_management/importers/auckland_doc_test_to_json.py +0 -372
- data_management/importers/auckland_doc_to_json.py +0 -200
- data_management/importers/awc_to_json.py +0 -189
- data_management/importers/bellevue_to_json.py +0 -273
- data_management/importers/cacophony-thermal-importer.py +0 -796
- data_management/importers/carrizo_shrubfree_2018.py +0 -268
- data_management/importers/carrizo_trail_cam_2017.py +0 -287
- data_management/importers/cct_field_adjustments.py +0 -57
- data_management/importers/channel_islands_to_cct.py +0 -913
- data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- data_management/importers/eMammal/eMammal_helpers.py +0 -249
- data_management/importers/eMammal/make_eMammal_json.py +0 -223
- data_management/importers/ena24_to_json.py +0 -275
- data_management/importers/filenames_to_json.py +0 -385
- data_management/importers/helena_to_cct.py +0 -282
- data_management/importers/idaho-camera-traps.py +0 -1407
- data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- data_management/importers/jb_csv_to_json.py +0 -150
- data_management/importers/mcgill_to_json.py +0 -250
- data_management/importers/missouri_to_json.py +0 -489
- data_management/importers/nacti_fieldname_adjustments.py +0 -79
- data_management/importers/noaa_seals_2019.py +0 -181
- data_management/importers/pc_to_json.py +0 -365
- data_management/importers/plot_wni_giraffes.py +0 -123
- data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
- data_management/importers/prepare_zsl_imerit.py +0 -131
- data_management/importers/rspb_to_json.py +0 -356
- data_management/importers/save_the_elephants_survey_A.py +0 -320
- data_management/importers/save_the_elephants_survey_B.py +0 -332
- data_management/importers/snapshot_safari_importer.py +0 -758
- data_management/importers/snapshot_safari_importer_reprise.py +0 -665
- data_management/importers/snapshot_serengeti_lila.py +0 -1067
- data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- data_management/importers/sulross_get_exif.py +0 -65
- data_management/importers/timelapse_csv_set_to_json.py +0 -490
- data_management/importers/ubc_to_json.py +0 -399
- data_management/importers/umn_to_json.py +0 -507
- data_management/importers/wellington_to_json.py +0 -263
- data_management/importers/wi_to_json.py +0 -441
- data_management/importers/zamba_results_to_md_results.py +0 -181
- data_management/labelme_to_coco.py +0 -548
- data_management/labelme_to_yolo.py +0 -272
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +0 -97
- data_management/lila/add_locations_to_nacti.py +0 -147
- data_management/lila/create_lila_blank_set.py +0 -557
- data_management/lila/create_lila_test_set.py +0 -151
- data_management/lila/create_links_to_md_results_files.py +0 -106
- data_management/lila/download_lila_subset.py +0 -177
- data_management/lila/generate_lila_per_image_labels.py +0 -515
- data_management/lila/get_lila_annotation_counts.py +0 -170
- data_management/lila/get_lila_image_counts.py +0 -111
- data_management/lila/lila_common.py +0 -300
- data_management/lila/test_lila_metadata_urls.py +0 -132
- data_management/ocr_tools.py +0 -874
- data_management/read_exif.py +0 -681
- data_management/remap_coco_categories.py +0 -84
- data_management/remove_exif.py +0 -66
- data_management/resize_coco_dataset.py +0 -189
- data_management/wi_download_csv_to_coco.py +0 -246
- data_management/yolo_output_to_md_output.py +0 -441
- data_management/yolo_to_coco.py +0 -676
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/detector_training/model_main_tf2.py +0 -114
- detection/process_video.py +0 -703
- detection/pytorch_detector.py +0 -337
- detection/run_detector.py +0 -779
- detection/run_detector_batch.py +0 -1219
- detection/run_inference_with_yolov5_val.py +0 -917
- detection/run_tiled_inference.py +0 -935
- detection/tf_detector.py +0 -188
- detection/video_utils.py +0 -606
- docs/source/conf.py +0 -43
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +0 -174
- md_utils/ct_utils.py +0 -612
- md_utils/directory_listing.py +0 -246
- md_utils/md_tests.py +0 -968
- md_utils/path_utils.py +0 -1044
- md_utils/process_utils.py +0 -157
- md_utils/sas_blob_utils.py +0 -509
- md_utils/split_locations_into_train_val.py +0 -228
- md_utils/string_utils.py +0 -92
- md_utils/url_utils.py +0 -323
- md_utils/write_html_image_list.py +0 -225
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +0 -293
- md_visualization/render_images_with_thumbnails.py +0 -275
- md_visualization/visualization_utils.py +0 -1537
- md_visualization/visualize_db.py +0 -551
- md_visualization/visualize_detector_output.py +0 -406
- megadetector-5.0.10.dist-info/RECORD +0 -224
- megadetector-5.0.10.dist-info/top_level.txt +0 -8
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
- taxonomy_mapping/map_new_lila_datasets.py +0 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
- taxonomy_mapping/preview_lila_taxonomy.py +0 -591
- taxonomy_mapping/retrieve_sample_image.py +0 -71
- taxonomy_mapping/simple_image_download.py +0 -218
- taxonomy_mapping/species_lookup.py +0 -834
- taxonomy_mapping/taxonomy_csv_checker.py +0 -159
- taxonomy_mapping/taxonomy_graph.py +0 -346
- taxonomy_mapping/validate_lila_category_mappings.py +0 -83
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
md_utils/md_tests.py
DELETED
@@ -1,968 +0,0 @@
-"""
-
-md_tests.py
-
-A series of tests to validate basic repo functionality and verify either "correct"
-inference behavior, or - when operating in environments other than the training
-environment - acceptable deviation from the correct results.
-
-This module should not depend on anything else in this repo outside of the
-tests themselves, even if it means some duplicated code (e.g. for downloading files),
-since much of what it tries to test is, e.g., imports.
-
-"""
-
-#%% Imports and constants
-
-### Only standard imports belong here, not MD-specific imports ###
-
-import os
-import json
-import glob
-import tempfile
-import urllib
-import urllib.request
-import zipfile
-import subprocess
-import argparse
-
-
-#%% Classes
-
-class MDTestOptions:
-    """
-    Options controlling test behavior.
-    """
-
-    ## Required ##
-
-    #: Force CPU execution
-    disable_gpu = False
-
-    #: If GPU execution is requested, but a GPU is not available, should we error?
-    cpu_execution_is_error = False
-
-    #: Skip tests related to video processing
-    skip_video_tests = False
-
-    #: Skip tests launched via Python functions (as opposed to CLIs)
-    skip_python_tests = False
-
-    #: Skip CLI tests
-    skip_cli_tests = False
-
-    #: Force a specific folder for temporary input/output
-    scratch_dir = None
-
-    #: Where does the test data live?
-    test_data_url = 'https://lila.science/public/md-test-package.zip'
-
-    #: Download test data even if it appears to have already been downloaded
-    force_data_download = False
-
-    #: Unzip test data even if it appears to have already been unzipped
-    force_data_unzip = False
-
-    #: By default, any unexpected behavior is an error; this forces most errors to
-    #: be treated as warnings.
-    warning_mode = False
-
-    #: How much deviation from the expected detection coordinates should we allow before
-    #: a discrepancy becomes an error?
-    max_coord_error = 0.001
-
-    #: How much deviation from the expected confidence values should we allow before
-    #: a discrepancy becomes an error?
-    max_conf_error = 0.005
-
-    #: Current working directory when running CLI tests
-    cli_working_dir = None
-
-    #: YOLOv5 installation, only relevant if we're testing run_inference_with_yolov5_val.
-    #:
-    #: If this is None, we'll skip that test.
-    yolo_working_folder = None
-
-# ...class MDTestOptions()
-
-
-#%% Support functions
-
-def get_expected_results_filename(gpu_is_available):
-    """
-    Expected results vary just a little across inference environments, particularly
-    between PT 1.x and 2.x, so when making sure things are working acceptably, we
-    compare to a reference file that matches the current environment.
-
-    This function gets the correct filename to compare to current results, depending
-    on whether a GPU is available.
-
-    Args:
-        gpu_is_available (bool): whether a GPU is available
-
-    Returns:
-        str: relative filename of the results file we should use (within the test
-        data zipfile)
-    """
-
-    if gpu_is_available:
-        hw_string = 'gpu'
-    else:
-        hw_string = 'cpu'
-    import torch
-    torch_version = str(torch.__version__)
-    if torch_version.startswith('1'):
-        assert torch_version == '1.10.1', 'Only tested against PT 1.10.1 and PT 2.x'
-        pt_string = 'pt1.10.1'
-    else:
-        assert torch_version.startswith('2'), 'Unknown torch version: {}'.format(torch_version)
-        pt_string = 'pt2.x'
-
-    # A hack for now to account for the fact that even with acceleration enabled and PT2
-    # installed, Apple silicon appears to provide the same results as CPU/PT1 inference
-    try:
-        import torch
-        m1_inference = torch.backends.mps.is_built and torch.backends.mps.is_available()
-        if m1_inference:
-            hw_string = 'cpu'
-            pt_string = 'pt1.10.1'
-    except Exception:
-        pass
-
-    return 'md-test-results-{}-{}.json'.format(hw_string,pt_string)
-
-
-def download_test_data(options=None):
-    """
-    Downloads the test zipfile if necessary, unzips if necessary.
-
-    Args:
-        options (MDTestOptions, optional): see MDTestOptions for details
-
-    Returns:
-        MDTestOptions: the same object passed in as input, or the options that
-        were used if [options] was supplied as None
-    """
-
-    if options is None:
-        options = MDTestOptions()
-
-    if options.scratch_dir is None:
-        tempdir_base = tempfile.gettempdir()
-        scratch_dir = os.path.join(tempdir_base,'md-tests')
-    else:
-        scratch_dir = options.scratch_dir
-
-    os.makedirs(scratch_dir,exist_ok=True)
-
-    # See whether we've already downloaded the data zipfile
-    download_zipfile = True
-    if not options.force_data_download:
-        local_zipfile = os.path.join(scratch_dir,options.test_data_url.split('/')[-1])
-        if os.path.isfile(local_zipfile):
-            url_info = urllib.request.urlopen(options.test_data_url).info()
-            remote_size = int(url_info['Content-Length'])
-            target_file_size = os.path.getsize(local_zipfile)
-            if remote_size == target_file_size:
-                download_zipfile = False
-
-    if download_zipfile:
-        print('Downloading test data zipfile')
-        urllib.request.urlretrieve(options.test_data_url, local_zipfile)
-        print('Finished download to {}'.format(local_zipfile))
-    else:
-        print('Bypassing test data zipfile download for {}'.format(local_zipfile))
-
-
-    ## Unzip data
-
-    zipf = zipfile.ZipFile(local_zipfile)
-    zip_contents = zipf.filelist
-
-    # file_info = zip_contents[1]
-    for file_info in zip_contents:
-
-        expected_size = file_info.file_size
-        if expected_size == 0:
-            continue
-        fn_relative = file_info.filename
-        target_file = os.path.join(scratch_dir,fn_relative)
-        unzip_file = True
-        if (not options.force_data_unzip) and os.path.isfile(target_file):
-            existing_file_size = os.path.getsize(target_file)
-            if existing_file_size == expected_size:
-                unzip_file = False
-        if unzip_file:
-            os.makedirs(os.path.dirname(target_file),exist_ok=True)
-            with open(target_file,'wb') as f:
-                f.write(zipf.read(fn_relative))
-
-    # ...for each file in the zipfile
-
-    # Warn if files are present that aren't expected
-    test_files = glob.glob(os.path.join(scratch_dir,'**/*'), recursive=True)
-    test_files = [os.path.relpath(fn,scratch_dir).replace('\\','/') for fn in test_files]
-    test_files_set = set(test_files)
-    expected_images_set = set(zipf.namelist())
-    for fn in expected_images_set:
-        if fn.endswith('/'):
-            continue
-        assert fn in test_files_set, 'File {} is missing from the test image folder'.format(fn)
-
-    # Populate the test options with test data information
-    options.scratch_dir = scratch_dir
-    options.all_test_files = test_files
-    options.test_images = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.jpg','.jpeg','.png')]
-    options.test_videos = [fn for fn in test_files if os.path.splitext(fn.lower())[1] in ('.mp4','.avi')]
-    options.test_videos = [fn for fn in options.test_videos if 'rendered' not in fn]
-
-    print('Finished unzipping and enumerating test data')
-
-    return options
-
-# ...def download_test_data(...)
-
-
-def is_gpu_available(verbose=True):
-    """
-    Checks whether a GPU (including M1/M2 MPS) is available.
-
-    Args:
-        verbose (bool, optional): enable additional debug console output
-
-    Returns:
-        bool: whether a GPU is available
-    """
-
-    # Import torch inside this function, so we have a chance to set CUDA_VISIBLE_DEVICES
-    # before checking GPU availability.
-    import torch
-    gpu_available = torch.cuda.is_available()
-
-    if gpu_available:
-        if verbose:
-            print('CUDA available: {}'.format(gpu_available))
-            device_ids = list(range(torch.cuda.device_count()))
-            if len(device_ids) > 1:
-                print('Found multiple devices: {}'.format(str(device_ids)))
-    else:
-        try:
-            gpu_available = torch.backends.mps.is_built and torch.backends.mps.is_available()
-        except AttributeError:
-            pass
-        if gpu_available:
-            print('Metal performance shaders available')
-
-    if not gpu_available:
-        print('No GPU available')
-
-    return gpu_available
-
-
-#%% CLI functions
-
-# These are copied from process_utils.py to avoid imports outside of the test
-# functions.
-
-os.environ["PYTHONUNBUFFERED"] = "1"
-
-def execute(cmd):
-    """
-    Runs [cmd] (a single string) in a shell, yielding each line of output to the caller.
-
-    Args:
-        cmd (str): command to run
-
-    Returns:
-        int: the command's return code, always zero, otherwise a CalledProcessError is raised
-    """
-
-    # https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
-    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
-                             shell=True, universal_newlines=True)
-    for stdout_line in iter(popen.stdout.readline, ""):
-        yield stdout_line
-    popen.stdout.close()
-    return_code = popen.wait()
-    if return_code:
-        raise subprocess.CalledProcessError(return_code, cmd)
-    return return_code
-
-
-def execute_and_print(cmd,print_output=True):
-    """
-    Runs [cmd] (a single string) in a shell, capturing (and optionally printing) output.
-
-    Args:
-        cmd (str): command to run
-        print_output (bool, optional): whether to print output from [cmd]
-
-    Returns:
-        dict: a dictionary with fields "status" (the process return code) and "output"
-        (the content of stdout)
-    """
-
-    to_return = {'status':'unknown','output':''}
-    output=[]
-    try:
-        for s in execute(cmd):
-            output.append(s)
-            if print_output:
-                print(s,end='',flush=True)
-        to_return['status'] = 0
-    except subprocess.CalledProcessError as cpe:
-        print('execute_and_print caught error: {}'.format(cpe.output))
-        to_return['status'] = cpe.returncode
-    to_return['output'] = output
-
-    return to_return
-
-
-#%% Python tests
-
-def run_python_tests(options):
-    """
-    Runs Python-based (as opposed to CLI-based) package tests.
-
-    Args:
-        options (MDTestOptions): see MDTestOptions for details
-    """
-
-    print('\n*** Starting module tests ***\n')
-
-    ## Prepare data
-
-    download_test_data(options)
-
-
-    ## Run inference on an image
-
-    from detection import run_detector
-    from md_visualization import visualization_utils as vis_utils
-    model_file = 'MDV5A'
-    image_fn = os.path.join(options.scratch_dir,options.test_images[0])
-    model = run_detector.load_detector(model_file)
-    pil_im = vis_utils.load_image(image_fn)
-    result = model.generate_detections_one_image(pil_im) # noqa
-
-
-    ## Run inference on a folder
-
-    from detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
-    from md_utils import path_utils
-
-    image_folder = os.path.join(options.scratch_dir,'md-test-images')
-    assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
-    inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
-    image_file_names = path_utils.find_images(image_folder,recursive=True)
-    results = load_and_run_detector_batch('MDV5A', image_file_names, quiet=True)
-    _ = write_results_to_file(results,inference_output_file,
-                              relative_path_base=image_folder,detector_file=model_file)
-
-    # Read results
-    with open(inference_output_file,'r') as f:
-        results_from_file = json.load(f) # noqa
-
-
-    ## Verify results
-
-    # Read expected results
-    expected_results_filename = get_expected_results_filename(is_gpu_available(verbose=False))
-
-    with open(os.path.join(options.scratch_dir,expected_results_filename),'r') as f:
-        expected_results = json.load(f)
-
-    filename_to_results = {im['file'].replace('\\','/'):im for im in results_from_file['images']}
-    filename_to_results_expected = {im['file'].replace('\\','/'):im for im in expected_results['images']}
-
-    assert len(filename_to_results) == len(filename_to_results_expected), \
-        'Error: expected {} files in results, found {}'.format(
-            len(filename_to_results_expected),
-            len(filename_to_results))
-
-    max_coord_error = 0
-    max_conf_error = 0
-
-    # fn = next(iter(filename_to_results.keys()))
-    for fn in filename_to_results.keys():
-
-        actual_image_results = filename_to_results[fn]
-        expected_image_results = filename_to_results_expected[fn]
-
-        if 'failure' in actual_image_results:
-            assert 'failure' in expected_image_results and \
-                'detections' not in actual_image_results and \
-                'detections' not in expected_image_results
-            continue
-        assert 'failure' not in expected_image_results
-
-        actual_detections = actual_image_results['detections']
-        expected_detections = expected_image_results['detections']
-
-        s = 'expected {} detections for file {}, found {}'.format(
-            len(expected_detections),fn,len(actual_detections))
-        s += '\nExpected results file: {}\nActual results file: {}'.format(
-            expected_results_filename,inference_output_file)
-
-        if options.warning_mode:
-            if len(actual_detections) != len(expected_detections):
-                print('Warning: {}'.format(s))
-                continue
-        assert len(actual_detections) == len(expected_detections), \
-            'Error: {}'.format(s)
-
-        # i_det = 0
-        for i_det in range(0,len(actual_detections)):
-            actual_det = actual_detections[i_det]
-            expected_det = expected_detections[i_det]
-            assert actual_det['category'] == expected_det['category']
-            conf_err = abs(actual_det['conf'] - expected_det['conf'])
-            coord_differences = []
-            for i_coord in range(0,4):
-                coord_differences.append(abs(actual_det['bbox'][i_coord]-expected_det['bbox'][i_coord]))
-            coord_err = max(coord_differences)
-
-            if conf_err > max_conf_error:
-                max_conf_error = conf_err
-            if coord_err > max_coord_error:
-                max_coord_error = coord_err
-
-        # ...for each detection
-
-    # ...for each image
-
-    if not options.warning_mode:
-
-        assert max_conf_error <= options.max_conf_error, \
-            'Confidence error {} is greater than allowable ({})'.format(
-                max_conf_error,options.max_conf_error)
-
-        assert max_coord_error <= options.max_coord_error, \
-            'Coord error {} is greater than allowable ({})'.format(
-                max_coord_error,options.max_coord_error)
-
-    print('Max conf error: {}'.format(max_conf_error))
-    print('Max coord error: {}'.format(max_coord_error))
-
-
-    ## Postprocess results
-
-    from api.batch_processing.postprocessing.postprocess_batch_results import \
-        PostProcessingOptions,process_batch_results
-    postprocessing_options = PostProcessingOptions()
-
-    postprocessing_options.api_output_file = inference_output_file
-    postprocessing_options.output_dir = os.path.join(options.scratch_dir,'postprocessing_output')
-    postprocessing_options.image_base_dir = image_folder
-
-    postprocessing_results = process_batch_results(postprocessing_options)
-    assert os.path.isfile(postprocessing_results.output_html_file), \
-        'Postprocessing output file {} not found'.format(postprocessing_results.output_html_file)
-
-
-    ## Partial RDE test
-
-    from api.batch_processing.postprocessing.repeat_detection_elimination.repeat_detections_core import \
-        RepeatDetectionOptions,find_repeat_detections
-
-    rde_options = RepeatDetectionOptions()
-    rde_options.occurrenceThreshold = 2
-    rde_options.confidenceMin = 0.001
-    rde_options.outputBase = os.path.join(options.scratch_dir,'rde_working_dir')
-    rde_options.imageBase = image_folder
-    rde_output_file = inference_output_file.replace('.json','_filtered.json')
-    assert rde_output_file != inference_output_file
-    rde_results = find_repeat_detections(inference_output_file, rde_output_file, rde_options)
-    assert os.path.isfile(rde_results.filterFile),\
-        'Could not find RDE output file {}'.format(rde_results.filterFile)
-
-
-    # TODO: add remove_repeat_detections test here
-    #
-    # It's already tested in the CLI tests, so this is not urgent.
-
-    if not options.skip_video_tests:
-
-        ## Video test (single video)
-
-        from detection.process_video import ProcessVideoOptions, process_video
-
-        video_options = ProcessVideoOptions()
-        video_options.model_file = 'MDV5A'
-        video_options.input_video_file = os.path.join(options.scratch_dir,options.test_videos[0])
-        video_options.output_json_file = os.path.join(options.scratch_dir,'single_video_output.json')
-        video_options.output_video_file = os.path.join(options.scratch_dir,'video_scratch/rendered_video.mp4')
-        video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
-        video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
-        video_options.render_output_video = True
-        # video_options.keep_rendered_frames = False
-        # video_options.keep_rendered_frames = False
-        video_options.force_extracted_frame_folder_deletion = True
-        video_options.force_rendered_frame_folder_deletion = True
-        # video_options.reuse_results_if_available = False
-        # video_options.reuse_frames_if_available = False
-        video_options.recursive = True
-        video_options.verbose = False
-        video_options.fourcc = 'mp4v'
-        # video_options.rendering_confidence_threshold = None
-        # video_options.json_confidence_threshold = 0.005
-        video_options.frame_sample = 5
-        video_options.n_cores = 5
-        # video_options.debug_max_frames = -1
-        # video_options.class_mapping_filename = None
-
-        _ = process_video(video_options)
-
-        assert os.path.isfile(video_options.output_video_file), \
-            'Python video test failed to render output video file'
-        assert os.path.isfile(video_options.output_json_file), \
-            'Python video test failed to render output .json file'
-
-
-        ## Video test (folder)
-
-        from detection.process_video import ProcessVideoOptions, process_video_folder
-
-        video_options = ProcessVideoOptions()
-        video_options.model_file = 'MDV5A'
-        video_options.input_video_file = os.path.join(options.scratch_dir,
-                                                      os.path.dirname(options.test_videos[0]))
-        video_options.output_json_file = os.path.join(options.scratch_dir,'video_folder_output.json')
-        # video_options.output_video_file = None
-        video_options.frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder')
-        video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
-        video_options.render_output_video = False
-        # video_options.keep_rendered_frames = False
-        # video_options.keep_rendered_frames = False
-        video_options.force_extracted_frame_folder_deletion = True
-        video_options.force_rendered_frame_folder_deletion = True
-        # video_options.reuse_results_if_available = False
-        # video_options.reuse_frames_if_available = False
-        video_options.recursive = True
-        video_options.verbose = False
-        # video_options.fourcc = None
-        # video_options.rendering_confidence_threshold = None
-        # video_options.json_confidence_threshold = 0.005
-        video_options.frame_sample = 5
-        video_options.n_cores = 5
-        # video_options.debug_max_frames = -1
-        # video_options.class_mapping_filename = None
-
-        _ = process_video_folder(video_options)
-
-        assert os.path.isfile(video_options.output_json_file), \
-            'Python video test failed to render output .json file'
-
-    # ...if we're not skipping video tests
-
-    print('\n*** Finished module tests ***\n')
-
-# ...def run_python_tests(...)
-
-
-#%% Command-line tests
-
-def run_cli_tests(options):
-    """
-    Runs CLI (as opposed to Python-based) package tests.
-
-    Args:
-        options (MDTestOptions): see MDTestOptions for details
-    """
-
-    print('\n*** Starting CLI tests ***\n')
-
-    ## chdir if necessary
-
-    if options.cli_working_dir is not None:
-        os.chdir(options.cli_working_dir)
-
-
-    ## Prepare data
-
-    download_test_data(options)
-
-
-    ## Run inference on an image
-
-    model_file = 'MDV5A'
-    image_fn = os.path.join(options.scratch_dir,options.test_images[0])
-    output_dir = os.path.join(options.scratch_dir,'single_image_test')
-    if options.cli_working_dir is None:
-        cmd = 'python -m detection.run_detector'
-    else:
-        cmd = 'python detection/run_detector.py'
-    cmd += ' {} --image_file {} --output_dir {}'.format(
-        model_file,image_fn,output_dir)
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    if options.cpu_execution_is_error:
-        gpu_available_via_cli = False
-        for s in cmd_results['output']:
-            if 'GPU available: True' in s:
-                gpu_available_via_cli = True
-                break
-        if not gpu_available_via_cli:
-            raise Exception('GPU execution is required, but not available')
-
-
-    ## Run inference on a folder
-
-    image_folder = os.path.join(options.scratch_dir,'md-test-images')
-    assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)
-    inference_output_file = os.path.join(options.scratch_dir,'folder_inference_output.json')
-    if options.cli_working_dir is None:
-        cmd = 'python -m detection.run_detector_batch'
-    else:
-        cmd = 'python detection/run_detector_batch.py'
-    cmd += ' {} {} {} --recursive'.format(
-        model_file,image_folder,inference_output_file)
-    cmd += ' --output_relative_filenames --quiet --include_image_size'
-    cmd += ' --include_image_timestamp --include_exif_data'
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    # Make sure a coherent file got written out, but don't verify the results, leave that
-    # to the Python tests.
-    with open(inference_output_file,'r') as f:
-        results_from_file = json.load(f) # noqa
-
-
-    ## Postprocessing
-
-    postprocessing_output_dir = os.path.join(options.scratch_dir,'postprocessing_output_cli')
-
-    if options.cli_working_dir is None:
-        cmd = 'python -m api.batch_processing.postprocessing.postprocess_batch_results'
-    else:
-        cmd = 'python api/batch_processing/postprocessing/postprocess_batch_results.py'
-    cmd += ' {} {}'.format(
-        inference_output_file,postprocessing_output_dir)
-    cmd += ' --image_base_dir {}'.format(image_folder)
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-
-    ## RDE
-
-    rde_output_dir = os.path.join(options.scratch_dir,'rde_output_cli')
-
-    if options.cli_working_dir is None:
-        cmd = 'python -m api.batch_processing.postprocessing.repeat_detection_elimination.find_repeat_detections'
-    else:
-        cmd = 'python api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py'
-    cmd += ' {}'.format(inference_output_file)
-    cmd += ' --imageBase {}'.format(image_folder)
-    cmd += ' --outputBase {}'.format(rde_output_dir)
-    cmd += ' --occurrenceThreshold 1' # Use an absurd number here to make sure we get some suspicious detections
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    # Find the latest filtering folder
-    filtering_output_dir = os.listdir(rde_output_dir)
-    filtering_output_dir = [fn for fn in filtering_output_dir if fn.startswith('filtering_')]
-    filtering_output_dir = [os.path.join(rde_output_dir,fn) for fn in filtering_output_dir]
-    filtering_output_dir = [fn for fn in filtering_output_dir if os.path.isdir(fn)]
-    filtering_output_dir = sorted(filtering_output_dir)[-1]
-
-    print('Using RDE filtering folder {}'.format(filtering_output_dir))
-
-    filtered_output_file = inference_output_file.replace('.json','_filtered.json')
-
-    if options.cli_working_dir is None:
-        cmd = 'python -m api.batch_processing.postprocessing.repeat_detection_elimination.remove_repeat_detections'
-    else:
-        cmd = 'python api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py'
-    cmd += ' {} {} {}'.format(inference_output_file,filtered_output_file,filtering_output_dir)
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    assert os.path.isfile(filtered_output_file), \
-        'Could not find RDE output file {}'.format(filtered_output_file)
-
-
-    ## Run inference on a folder (tiled)
-
-    image_folder = os.path.join(options.scratch_dir,'md-test-images')
-    tiling_folder = os.path.join(options.scratch_dir,'tiling-folder')
-    inference_output_file_tiled = os.path.join(options.scratch_dir,'folder_inference_output_tiled.json')
-    if options.cli_working_dir is None:
-        cmd = 'python -m detection.run_tiled_inference'
-    else:
-        cmd = 'python detection/run_tiled_inference.py'
-    cmd += ' {} {} {} {}'.format(
-        model_file,image_folder,tiling_folder,inference_output_file_tiled)
-    cmd += ' --overwrite_handling overwrite'
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    with open(inference_output_file_tiled,'r') as f:
-        results_from_file = json.load(f) # noqa
-
-
-    ## Run inference on a folder (augmented)
-
-    if options.yolo_working_folder is None:
-
-        print('Bypassing YOLOv5 val tests, no yolo folder supplied')
-
-    else:
-
-        image_folder = os.path.join(options.scratch_dir,'md-test-images')
-        yolo_results_folder = os.path.join(options.scratch_dir,'yolo-output-folder')
-        yolo_symlink_folder = os.path.join(options.scratch_dir,'yolo-symlink_folder')
-        inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
-        if options.cli_working_dir is None:
-            cmd = 'python -m detection.run_inference_with_yolov5_val'
-        else:
-            cmd = 'python detection/run_inference_with_yolov5_val.py'
-        cmd += ' {} {} {}'.format(
-            model_file,image_folder,inference_output_file_yolo_val)
-        cmd += ' --yolo_working_folder {}'.format(options.yolo_working_folder)
-        cmd += ' --yolo_results_folder {}'.format(yolo_results_folder)
-        cmd += ' --symlink_folder {}'.format(yolo_symlink_folder)
-        cmd += ' --augment_enabled 1'
-        # cmd += ' --no_use_symlinks'
-        cmd += ' --overwrite_handling overwrite'
-        print('Running: {}'.format(cmd))
-        cmd_results = execute_and_print(cmd)
-
-        with open(inference_output_file_yolo_val,'r') as f:
-            results_from_file = json.load(f) # noqa
-
-
-    if not options.skip_video_tests:
-
-        ## Video test
-
-        model_file = 'MDV5A'
-        video_inference_output_file = os.path.join(options.scratch_dir,'video_inference_output.json')
-        output_video_file = os.path.join(options.scratch_dir,'video_scratch/cli_rendered_video.mp4')
-        frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder_cli')
-        frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder_cli')
-
-        video_fn = os.path.join(options.scratch_dir,options.test_videos[-1])
-        output_dir = os.path.join(options.scratch_dir,'single_video_test_cli')
-        if options.cli_working_dir is None:
-            cmd = 'python -m detection.process_video'
-        else:
-            cmd = 'python detection/process_video.py'
-        cmd += ' {} {}'.format(model_file,video_fn)
-        cmd += ' --frame_folder {} --frame_rendering_folder {} --output_json_file {} --output_video_file {}'.format(
-            frame_folder,frame_rendering_folder,video_inference_output_file,output_video_file)
-        cmd += ' --render_output_video --fourcc mp4v'
-        cmd += ' --force_extracted_frame_folder_deletion --force_rendered_frame_folder_deletion --n_cores 5 --frame_sample 3'
-        print('Running: {}'.format(cmd))
-        cmd_results = execute_and_print(cmd)
-
-    # ...if we're not skipping video tests
-
-
-    ## Run inference on a folder (again, so we can do a comparison)
-
-    image_folder = os.path.join(options.scratch_dir,'md-test-images')
-    model_file = 'MDV5B'
-    inference_output_file_alt = os.path.join(options.scratch_dir,'folder_inference_output_alt.json')
-    if options.cli_working_dir is None:
-        cmd = 'python -m detection.run_detector_batch'
-    else:
-        cmd = 'python detection/run_detector_batch.py'
-    cmd += ' {} {} {} --recursive'.format(
-        model_file,image_folder,inference_output_file_alt)
-    cmd += ' --output_relative_filenames --quiet --include_image_size'
-    cmd += ' --include_image_timestamp --include_exif_data'
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    with open(inference_output_file_alt,'r') as f:
-        results_from_file = json.load(f) # noqa
-
-
-    ## Compare the two files
-
-    comparison_output_folder = os.path.join(options.scratch_dir,'results_comparison')
-    image_folder = os.path.join(options.scratch_dir,'md-test-images')
-    results_files_string = '"{}" "{}"'.format(
-        inference_output_file,inference_output_file_alt)
-    if options.cli_working_dir is None:
-        cmd = 'python -m api.batch_processing.postprocessing.compare_batch_results'
-    else:
-        cmd = 'python api/batch_processing/postprocessing/compare_batch_results.py'
-    cmd += ' {} {} {}'.format(comparison_output_folder,image_folder,results_files_string)
-    print('Running: {}'.format(cmd))
-    cmd_results = execute_and_print(cmd)
-
-    assert cmd_results['status'] == 0, 'Error generating comparison HTML'
-    assert os.path.isfile(os.path.join(comparison_output_folder,'index.html')), \
-        'Failed to generate comparison HTML'
-
-    print('\n*** Finished CLI tests ***\n')
-
-# ...def run_cli_tests(...)
-
-
-#%% Main test wrapper
-
-def run_tests(options):
-    """
-    Runs Python-based and/or CLI-based package tests.
-
-    Args:
-        options (MDTestOptions): see MDTestOptions for details
-    """
-
-    # Prepare data folder
-    download_test_data(options)
-
-    if options.disable_gpu:
-        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-
-    # Verify GPU
-    gpu_available = is_gpu_available()
-
-    # If the GPU is required and isn't available, error
-    if options.cpu_execution_is_error and (not gpu_available):
-        raise ValueError('GPU not available, and cpu_execution_is_error is set')
-
-    # If the GPU should be disabled, verify that it is
-    if options.disable_gpu:
-        assert (not gpu_available), 'CPU execution specified, but the GPU appears to be available'
-
-    # Run python tests
-    if not options.skip_python_tests:
-        run_python_tests(options)
-
-    # Run CLI tests
-    if not options.skip_cli_tests:
-        run_cli_tests(options)
-
-
-#%% Interactive driver
-
-if False:
-
-    pass
-
-    #%%
-
-    options = MDTestOptions()
-
-    options.disable_gpu = False
-    options.cpu_execution_is_error = False
-    options.skip_video_tests = False
-    options.skip_python_tests = False
-    options.skip_cli_tests = False
-    options.scratch_dir = None
-    options.test_data_url = 'https://lila.science/public/md-test-package.zip'
-    options.force_data_download = False
-    options.force_data_unzip = False
-    options.warning_mode = True
-    options.max_coord_error = 0.001
-    options.max_conf_error = 0.005
-    options.cli_working_dir = r'c:\git\MegaDetector'
-    options.yolo_working_folder = r'c:\git\yolov5'
-
-
-    #%%
-
-    run_tests(options)
-
-
-#%% Command-line driver
-
-def main():
-
-    options = MDTestOptions()
-
-    parser = argparse.ArgumentParser(
-        description='MegaDetector test suite')
-
-    parser.add_argument(
-        '--disable_gpu',
-        action='store_true',
-        help='Disable GPU operation')
-
-    parser.add_argument(
-        '--cpu_execution_is_error',
-        action='store_true',
-        help='Fail if the GPU appears not to be available')
-
-    parser.add_argument(
-        '--scratch_dir',
-        default=None,
-        type=str,
-        help='Directory for temporary storage (defaults to system temp dir)')
-
-    parser.add_argument(
-        '--skip_video_tests',
-        action='store_true',
-        help='Skip tests related to video (which can be slow)')
-
-    parser.add_argument(
-        '--skip_python_tests',
-        action='store_true',
-        help='Skip python tests')
-
-    parser.add_argument(
-        '--skip_cli_tests',
-        action='store_true',
-        help='Skip CLI tests')
-
-    parser.add_argument(
-        '--force_data_download',
-        action='store_true',
-        help='Force download of the test data file, even if it\'s already available')
-
-    parser.add_argument(
-        '--force_data_unzip',
-        action='store_true',
-        help='Force extraction of all files in the test data file, even if they\'re already available')
-
-    parser.add_argument(
-        '--warning_mode',
-        action='store_true',
-        help='Turns numeric/content errors into warnings')
-
-    parser.add_argument(
-        '--max_conf_error',
-        type=float,
-        default=options.max_conf_error,
-        help='Maximum tolerable confidence value deviation from expected (default {})'.format(
-            options.max_conf_error))
-
-    parser.add_argument(
-        '--max_coord_error',
-        type=float,
-        default=options.max_coord_error,
-        help='Maximum tolerable coordinate value deviation from expected (default {})'.format(
-            options.max_coord_error))
-
-    parser.add_argument(
-        '--cli_working_dir',
-        type=str,
-        default=None,
-        help='Working directory for CLI tests')
-
-    # token used for linting
-    #
-    # no_arguments_required
-
-    args = parser.parse_args()
-
-    options.disable_gpu = args.disable_gpu
-    options.cpu_execution_is_error = args.cpu_execution_is_error
-    options.skip_video_tests = args.skip_video_tests
-    options.skip_python_tests = args.skip_python_tests
-    options.skip_cli_tests = args.skip_cli_tests
-    options.scratch_dir = args.scratch_dir
-    options.warning_mode = args.warning_mode
-    options.force_data_download = args.force_data_download
-    options.max_conf_error = args.max_conf_error
-    options.max_coord_error = args.max_coord_error
-    options.cli_working_dir = args.cli_working_dir
-
-    run_tests(options)
-
-if __name__ == '__main__':
-    main()
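
For reference, the deleted module above was self-contained and could be driven directly from Python in the 5.0.10 layout (where md_utils was a top-level package). A minimal sketch, using only the MDTestOptions and run_tests definitions shown in the diff; the specific option values chosen here are illustrative, not part of the package:

    # Assumes megadetector 5.0.10, where md_utils/md_tests.py still exists
    from md_utils.md_tests import MDTestOptions, run_tests

    options = MDTestOptions()
    options.disable_gpu = True       # force CPU inference
    options.skip_video_tests = True  # skip the slower video round-trips
    run_tests(options)               # downloads the test package, then runs Python and CLI tests

In 5.0.11 this file is no longer shipped under the top-level md_utils package, so the import above no longer resolves against the new wheel.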