megadetector-10.0.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic.
- megadetector/__init__.py +0 -0
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +626 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +853 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +608 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +696 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +509 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/animl_to_md.py +161 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +33 -0
- megadetector/data_management/camtrap_dp_to_coco.py +270 -0
- megadetector/data_management/cct_json_utils.py +566 -0
- megadetector/data_management/cct_to_md.py +184 -0
- megadetector/data_management/cct_to_wi.py +293 -0
- megadetector/data_management/coco_to_labelme.py +284 -0
- megadetector/data_management/coco_to_yolo.py +702 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
- megadetector/data_management/databases/integrity_check_json_db.py +528 -0
- megadetector/data_management/databases/subset_json_db.py +195 -0
- megadetector/data_management/generate_crops_from_cct.py +200 -0
- megadetector/data_management/get_image_sizes.py +164 -0
- megadetector/data_management/labelme_to_coco.py +559 -0
- megadetector/data_management/labelme_to_yolo.py +349 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/create_lila_blank_set.py +556 -0
- megadetector/data_management/lila/create_lila_test_set.py +187 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +182 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +319 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
- megadetector/data_management/mewc_to_md.py +344 -0
- megadetector/data_management/ocr_tools.py +873 -0
- megadetector/data_management/read_exif.py +964 -0
- megadetector/data_management/remap_coco_categories.py +195 -0
- megadetector/data_management/remove_exif.py +156 -0
- megadetector/data_management/rename_images.py +194 -0
- megadetector/data_management/resize_coco_dataset.py +663 -0
- megadetector/data_management/speciesnet_to_md.py +41 -0
- megadetector/data_management/wi_download_csv_to_coco.py +247 -0
- megadetector/data_management/yolo_output_to_md_output.py +594 -0
- megadetector/data_management/yolo_to_coco.py +876 -0
- megadetector/data_management/zamba_to_md.py +188 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/change_detection.py +840 -0
- megadetector/detection/process_video.py +479 -0
- megadetector/detection/pytorch_detector.py +1451 -0
- megadetector/detection/run_detector.py +1267 -0
- megadetector/detection/run_detector_batch.py +2159 -0
- megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
- megadetector/detection/run_md_and_speciesnet.py +1494 -0
- megadetector/detection/run_tiled_inference.py +1038 -0
- megadetector/detection/tf_detector.py +209 -0
- megadetector/detection/video_utils.py +1379 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +72 -0
- megadetector/postprocessing/categorize_detections_by_size.py +166 -0
- megadetector/postprocessing/classification_postprocessing.py +1752 -0
- megadetector/postprocessing/combine_batch_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +2110 -0
- megadetector/postprocessing/convert_output_format.py +403 -0
- megadetector/postprocessing/create_crop_folder.py +629 -0
- megadetector/postprocessing/detector_calibration.py +570 -0
- megadetector/postprocessing/generate_csv_report.py +522 -0
- megadetector/postprocessing/load_api_results.py +223 -0
- megadetector/postprocessing/md_to_coco.py +428 -0
- megadetector/postprocessing/md_to_labelme.py +351 -0
- megadetector/postprocessing/md_to_wi.py +41 -0
- megadetector/postprocessing/merge_detections.py +392 -0
- megadetector/postprocessing/postprocess_batch_results.py +2077 -0
- megadetector/postprocessing/remap_detection_categories.py +226 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
- megadetector/postprocessing/separate_detections_into_folders.py +795 -0
- megadetector/postprocessing/subset_json_detector_output.py +964 -0
- megadetector/postprocessing/top_folders_to_bottom.py +238 -0
- megadetector/postprocessing/validate_batch_results.py +332 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +213 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +224 -0
- megadetector/taxonomy_mapping/species_lookup.py +1008 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/tests/__init__.py +0 -0
- megadetector/tests/test_nms_synthetic.py +335 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/ct_utils.py +1857 -0
- megadetector/utils/directory_listing.py +199 -0
- megadetector/utils/extract_frames_from_video.py +307 -0
- megadetector/utils/gpu_test.py +125 -0
- megadetector/utils/md_tests.py +2072 -0
- megadetector/utils/path_utils.py +2832 -0
- megadetector/utils/process_utils.py +172 -0
- megadetector/utils/split_locations_into_train_val.py +237 -0
- megadetector/utils/string_utils.py +234 -0
- megadetector/utils/url_utils.py +825 -0
- megadetector/utils/wi_platform_utils.py +968 -0
- megadetector/utils/wi_taxonomy_utils.py +1759 -0
- megadetector/utils/write_html_image_list.py +239 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +309 -0
- megadetector/visualization/render_images_with_thumbnails.py +243 -0
- megadetector/visualization/visualization_utils.py +1940 -0
- megadetector/visualization/visualize_db.py +630 -0
- megadetector/visualization/visualize_detector_output.py +479 -0
- megadetector/visualization/visualize_video_output.py +705 -0
- megadetector-10.0.13.dist-info/METADATA +134 -0
- megadetector-10.0.13.dist-info/RECORD +147 -0
- megadetector-10.0.13.dist-info/WHEEL +5 -0
- megadetector-10.0.13.dist-info/licenses/LICENSE +19 -0
- megadetector-10.0.13.dist-info/top_level.txt +1 -0
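The modules listed above are importable from the installed wheel under the 'megadetector' top-level package. Below is a minimal sketch, assuming the wheel is installed (for example via 'pip install megadetector'); the printed paths will vary by environment:

import megadetector
from megadetector.detection import run_detector_batch            # batch inference
from megadetector.postprocessing import combine_batch_outputs    # results-file merging (diffed below)

# Confirm where the installed modules live
print(megadetector.__file__)
print(combine_batch_outputs.__file__)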
megadetector/postprocessing/combine_batch_outputs.py

@@ -0,0 +1,249 @@

"""

combine_batch_outputs.py

Merges two or more .json files in MD output format, optionally
writing the results to another .json file.

* Concatenates image lists, erroring if images are not unique.
* Errors if class lists are conflicting; errors on unrecognized fields.
* Checks compatibility in info structs, within reason.

File format:

https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#batch-processing-api-output-format

Command-line use:

    combine_batch_outputs input1.json input2.json ... inputN.json output.json

This does no checking for redundancy; if you are looking to ensemble
the results of multiple model versions, see merge_detections.py.

"""

#%% Constants and imports

import argparse
import sys
import json

from megadetector.utils import ct_utils


#%% Merge functions

def combine_batch_output_files(input_files,
                               output_file=None,
                               require_uniqueness=True,
                               verbose=True):
    """
    Merges the list of MD results files [input_files] into a single
    dictionary, optionally writing the result to [output_file].

    Always overwrites [output_file] if it exists.

    Args:
        input_files (list of str): paths to JSON detection files
        output_file (str, optional): path to write merged JSON
        require_uniqueness (bool, optional): whether to require that the images in
            each list of images be unique
        verbose (bool, optional): enable additional debug output

    Returns:
        dict: merged dictionaries loaded from [input_files], identical to what's
            written to [output_file] if [output_file] is not None
    """

    def print_if_verbose(s):
        if verbose:
            print(s)

    input_dicts = []
    for fn in input_files:
        print_if_verbose('Loading results from {}'.format(fn))
        with open(fn, 'r', encoding='utf-8') as f:
            input_dicts.append(json.load(f))

    print_if_verbose('Merging results')
    merged_dict = combine_batch_output_dictionaries(
        input_dicts, require_uniqueness=require_uniqueness)

    print_if_verbose('Writing output to {}'.format(output_file))
    if output_file is not None:
        ct_utils.write_json(output_file, merged_dict)

    return merged_dict


def combine_batch_output_dictionaries(input_dicts, require_uniqueness=True):
    """
    Merges the list of MD results dictionaries [input_dicts] into a single dict.
    See module header comment for details on merge rules.

    Args:
        input_dicts (list of dicts): list of dicts in which each dict represents the
            contents of a MD output file
        require_uniqueness (bool, optional): whether to require that the images in
            each input dict be unique; if this is True and image filenames are
            not unique, an error is raised.

    Returns:
        dict: merged MD results
    """

    # Map image filenames to detections, we'll convert to a list later
    images = {}
    info = {}
    detection_categories = {}
    classification_categories = {}
    n_redundant_images = 0
    n_images = 0

    known_fields = ['info', 'detection_categories', 'classification_categories',
                    'images']

    for input_dict in input_dicts:

        for k in input_dict:
            if k not in known_fields:
                print(f'Warning: unrecognized batch output field: {k}')

        # Check compatibility of detection categories
        for cat_id in input_dict['detection_categories']:
            cat_name = input_dict['detection_categories'][cat_id]
            if cat_id in detection_categories:
                assert detection_categories[cat_id] == cat_name, (
                    'Detection category mismatch')
            else:
                detection_categories[cat_id] = cat_name

        # Check compatibility of classification categories
        if 'classification_categories' in input_dict:
            for cat_id in input_dict['classification_categories']:
                cat_name = input_dict['classification_categories'][cat_id]
                if cat_id in classification_categories:
                    assert classification_categories[cat_id] == cat_name, (
                        'Classification category mismatch')
                else:
                    classification_categories[cat_id] = cat_name

        # Merge image lists, checking uniqueness
        for im in input_dict['images']:
            # Normalize path separators so we don't treat images as different if they
            # were processed on different OS's
            im['file'] = im['file'].replace('\\','/')
            im_file = im['file']
            if require_uniqueness:
                assert im_file not in images, f'Duplicate image: {im_file}'
                images[im_file] = im
                n_images += 1
            else:
                if im_file in images:
                    n_redundant_images += 1
                    previous_im = images[im_file]
                    # Replace a previous failure with a success
                    if ('detections' in im) and ('detections' not in previous_im):
                        images[im_file] = im
                        print(f'Replacing previous failure for image: {im_file}')
                else:
                    images[im_file] = im
                    n_images += 1

        # Merge info dicts, don't check completion time fields
        if len(info) == 0:
            info = input_dict['info']
        else:
            info_compare = input_dict['info']
            assert info_compare['detector'] == info['detector'], (
                'Incompatible detection versions in merging')
            assert info_compare['format_version'] == info['format_version'], (
                'Incompatible batch output versions in merging')
            if 'classifier' in info_compare:
                if 'classifier' in info:
                    assert info['classifier'] == info_compare['classifier']
                else:
                    info['classifier'] = info_compare['classifier']

    # ...for each dictionary

    if n_redundant_images > 0:
        print(f'Warning: found {n_redundant_images} redundant images '
              f'(out of {n_images} total) during merge')

    # Convert merged image dictionaries to a sorted list
    sorted_images = sorted(images.values(), key=lambda im: im['file'])

    merged_dict = {'info': info,
                   'detection_categories': detection_categories,
                   'classification_categories': classification_categories,
                   'images': sorted_images}
    return merged_dict

# ...combine_batch_output_dictionaries()


def combine_api_shard_files(input_files, output_file=None):
    """
    Merges the list of .json-formatted API shard files [input_files] into a single
    list of dictionaries, optionally writing the result to [output_file].

    This operates on mostly-deprecated API shard files, not MegaDetector results files.
    If you don't know what an API shard file is, you don't want this function.

    Args:
        input_files (list of str): files to merge
        output_file (str, optional): file to which we should write merged results

    Returns:
        dict: merged results

    :meta private:
    """

    input_lists = []
    print('Loading input files')
    for fn in input_files:
        with open(fn,'r') as f:
            input_lists.append(json.load(f))

    detections = []
    # detection_list = input_lists[0]
    for detection_list in input_lists:
        assert isinstance(detection_list, list)
        # d = detection_list[0]
        for d in detection_list:
            assert 'file' in d
            assert 'max_detection_conf' in d
            assert 'detections' in d
            detections.append(d)

    print('Writing output')
    if output_file is not None:
        ct_utils.write_json(output_file, detections)

    return detections

# ...combine_api_shard_files()


#%% Command-line driver

def main(): # noqa

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'input_paths', nargs='+',
        help='List of input .json files')
    parser.add_argument(
        'output_path',
        help='Output .json file')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()
    combine_batch_output_files(args.input_paths, args.output_path)

if __name__ == '__main__':
    main()
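For reference, a minimal usage sketch of the merge entry point defined above; the input and output filenames here are hypothetical placeholders, not files shipped with the package:

from megadetector.postprocessing.combine_batch_outputs import combine_batch_output_files

# Hypothetical MD results files produced by separate batch-inference runs
input_files = ['camera_a_results.json', 'camera_b_results.json']

# Merge them into a single results dictionary and write it to disk;
# with require_uniqueness=True, a duplicate image filename raises an error.
merged = combine_batch_output_files(input_files,
                                    output_file='combined_results.json',
                                    require_uniqueness=True,
                                    verbose=True)

print('Merged results contain {} images'.format(len(merged['images'])))

The same merge is available from the command line via the 'combine_batch_outputs input1.json input2.json ... inputN.json output.json' form documented in the module docstring.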