megadetector 10.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. megadetector/__init__.py +0 -0
  2. megadetector/api/__init__.py +0 -0
  3. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
  7. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  8. megadetector/classification/__init__.py +0 -0
  9. megadetector/classification/aggregate_classifier_probs.py +108 -0
  10. megadetector/classification/analyze_failed_images.py +227 -0
  11. megadetector/classification/cache_batchapi_outputs.py +198 -0
  12. megadetector/classification/create_classification_dataset.py +626 -0
  13. megadetector/classification/crop_detections.py +516 -0
  14. megadetector/classification/csv_to_json.py +226 -0
  15. megadetector/classification/detect_and_crop.py +853 -0
  16. megadetector/classification/efficientnet/__init__.py +9 -0
  17. megadetector/classification/efficientnet/model.py +415 -0
  18. megadetector/classification/efficientnet/utils.py +608 -0
  19. megadetector/classification/evaluate_model.py +520 -0
  20. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  21. megadetector/classification/json_to_azcopy_list.py +63 -0
  22. megadetector/classification/json_validator.py +696 -0
  23. megadetector/classification/map_classification_categories.py +276 -0
  24. megadetector/classification/merge_classification_detection_output.py +509 -0
  25. megadetector/classification/prepare_classification_script.py +194 -0
  26. megadetector/classification/prepare_classification_script_mc.py +228 -0
  27. megadetector/classification/run_classifier.py +287 -0
  28. megadetector/classification/save_mislabeled.py +110 -0
  29. megadetector/classification/train_classifier.py +827 -0
  30. megadetector/classification/train_classifier_tf.py +725 -0
  31. megadetector/classification/train_utils.py +323 -0
  32. megadetector/data_management/__init__.py +0 -0
  33. megadetector/data_management/animl_to_md.py +161 -0
  34. megadetector/data_management/annotations/__init__.py +0 -0
  35. megadetector/data_management/annotations/annotation_constants.py +33 -0
  36. megadetector/data_management/camtrap_dp_to_coco.py +270 -0
  37. megadetector/data_management/cct_json_utils.py +566 -0
  38. megadetector/data_management/cct_to_md.py +184 -0
  39. megadetector/data_management/cct_to_wi.py +293 -0
  40. megadetector/data_management/coco_to_labelme.py +284 -0
  41. megadetector/data_management/coco_to_yolo.py +701 -0
  42. megadetector/data_management/databases/__init__.py +0 -0
  43. megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
  44. megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
  45. megadetector/data_management/databases/integrity_check_json_db.py +563 -0
  46. megadetector/data_management/databases/subset_json_db.py +195 -0
  47. megadetector/data_management/generate_crops_from_cct.py +200 -0
  48. megadetector/data_management/get_image_sizes.py +164 -0
  49. megadetector/data_management/labelme_to_coco.py +559 -0
  50. megadetector/data_management/labelme_to_yolo.py +349 -0
  51. megadetector/data_management/lila/__init__.py +0 -0
  52. megadetector/data_management/lila/create_lila_blank_set.py +556 -0
  53. megadetector/data_management/lila/create_lila_test_set.py +192 -0
  54. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  55. megadetector/data_management/lila/download_lila_subset.py +182 -0
  56. megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
  57. megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
  58. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  59. megadetector/data_management/lila/lila_common.py +319 -0
  60. megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
  61. megadetector/data_management/mewc_to_md.py +344 -0
  62. megadetector/data_management/ocr_tools.py +873 -0
  63. megadetector/data_management/read_exif.py +964 -0
  64. megadetector/data_management/remap_coco_categories.py +195 -0
  65. megadetector/data_management/remove_exif.py +156 -0
  66. megadetector/data_management/rename_images.py +194 -0
  67. megadetector/data_management/resize_coco_dataset.py +665 -0
  68. megadetector/data_management/speciesnet_to_md.py +41 -0
  69. megadetector/data_management/wi_download_csv_to_coco.py +247 -0
  70. megadetector/data_management/yolo_output_to_md_output.py +594 -0
  71. megadetector/data_management/yolo_to_coco.py +984 -0
  72. megadetector/data_management/zamba_to_md.py +188 -0
  73. megadetector/detection/__init__.py +0 -0
  74. megadetector/detection/change_detection.py +840 -0
  75. megadetector/detection/process_video.py +479 -0
  76. megadetector/detection/pytorch_detector.py +1451 -0
  77. megadetector/detection/run_detector.py +1267 -0
  78. megadetector/detection/run_detector_batch.py +2172 -0
  79. megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
  80. megadetector/detection/run_md_and_speciesnet.py +1604 -0
  81. megadetector/detection/run_tiled_inference.py +1044 -0
  82. megadetector/detection/tf_detector.py +209 -0
  83. megadetector/detection/video_utils.py +1379 -0
  84. megadetector/postprocessing/__init__.py +0 -0
  85. megadetector/postprocessing/add_max_conf.py +72 -0
  86. megadetector/postprocessing/categorize_detections_by_size.py +166 -0
  87. megadetector/postprocessing/classification_postprocessing.py +1943 -0
  88. megadetector/postprocessing/combine_batch_outputs.py +249 -0
  89. megadetector/postprocessing/compare_batch_results.py +2110 -0
  90. megadetector/postprocessing/convert_output_format.py +403 -0
  91. megadetector/postprocessing/create_crop_folder.py +629 -0
  92. megadetector/postprocessing/detector_calibration.py +570 -0
  93. megadetector/postprocessing/generate_csv_report.py +522 -0
  94. megadetector/postprocessing/load_api_results.py +223 -0
  95. megadetector/postprocessing/md_to_coco.py +428 -0
  96. megadetector/postprocessing/md_to_labelme.py +351 -0
  97. megadetector/postprocessing/md_to_wi.py +41 -0
  98. megadetector/postprocessing/merge_detections.py +392 -0
  99. megadetector/postprocessing/postprocess_batch_results.py +2140 -0
  100. megadetector/postprocessing/remap_detection_categories.py +226 -0
  101. megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
  102. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
  103. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
  104. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
  105. megadetector/postprocessing/separate_detections_into_folders.py +795 -0
  106. megadetector/postprocessing/subset_json_detector_output.py +964 -0
  107. megadetector/postprocessing/top_folders_to_bottom.py +238 -0
  108. megadetector/postprocessing/validate_batch_results.py +332 -0
  109. megadetector/taxonomy_mapping/__init__.py +0 -0
  110. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  111. megadetector/taxonomy_mapping/map_new_lila_datasets.py +211 -0
  112. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
  113. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
  114. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  115. megadetector/taxonomy_mapping/simple_image_download.py +231 -0
  116. megadetector/taxonomy_mapping/species_lookup.py +1008 -0
  117. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  118. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  119. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  120. megadetector/tests/__init__.py +0 -0
  121. megadetector/tests/test_nms_synthetic.py +335 -0
  122. megadetector/utils/__init__.py +0 -0
  123. megadetector/utils/ct_utils.py +1857 -0
  124. megadetector/utils/directory_listing.py +199 -0
  125. megadetector/utils/extract_frames_from_video.py +307 -0
  126. megadetector/utils/gpu_test.py +125 -0
  127. megadetector/utils/md_tests.py +2072 -0
  128. megadetector/utils/path_utils.py +2872 -0
  129. megadetector/utils/process_utils.py +172 -0
  130. megadetector/utils/split_locations_into_train_val.py +237 -0
  131. megadetector/utils/string_utils.py +234 -0
  132. megadetector/utils/url_utils.py +825 -0
  133. megadetector/utils/wi_platform_utils.py +968 -0
  134. megadetector/utils/wi_taxonomy_utils.py +1766 -0
  135. megadetector/utils/write_html_image_list.py +239 -0
  136. megadetector/visualization/__init__.py +0 -0
  137. megadetector/visualization/plot_utils.py +309 -0
  138. megadetector/visualization/render_images_with_thumbnails.py +243 -0
  139. megadetector/visualization/visualization_utils.py +1973 -0
  140. megadetector/visualization/visualize_db.py +630 -0
  141. megadetector/visualization/visualize_detector_output.py +498 -0
  142. megadetector/visualization/visualize_video_output.py +705 -0
  143. megadetector-10.0.15.dist-info/METADATA +115 -0
  144. megadetector-10.0.15.dist-info/RECORD +147 -0
  145. megadetector-10.0.15.dist-info/WHEEL +5 -0
  146. megadetector-10.0.15.dist-info/licenses/LICENSE +19 -0
  147. megadetector-10.0.15.dist-info/top_level.txt +1 -0
megadetector/postprocessing/load_api_results.py
@@ -0,0 +1,223 @@
"""

load_api_results.py

DEPRECATED

As of 2023.12, this module is still used in postprocessing and RDE, but it's not recommended
for new code.

Loads the output of the batch processing API (json) into a Pandas dataframe.

Includes functions to read/write the (very very old) .csv results format.

"""

#%% Imports

import json
import os

from typing import Optional
from collections.abc import Mapping

import pandas as pd

from megadetector.utils.ct_utils import get_max_conf
from megadetector.utils.ct_utils import write_json
from megadetector.utils.wi_taxonomy_utils import load_md_or_speciesnet_file


#%% Functions for loading .json results into a Pandas DataFrame, and writing back to .json

def load_api_results(api_output_path: str, normalize_paths: bool = True,
                     filename_replacements: Optional[Mapping[str, str]] = None,
                     force_forward_slashes: bool = True
                     ) -> tuple[pd.DataFrame, dict]:
    r"""
    Loads json-formatted MegaDetector results to a Pandas DataFrame.

    Args:
        api_output_path (str): path to the output json file
        normalize_paths (bool, optional): whether to apply os.path.normpath to the 'file'
            field in each image entry in the output file
        filename_replacements (dict, optional): replace some path tokens to match local paths
            to the original file structure
        force_forward_slashes (bool, optional): whether to convert backslashes to forward
            slashes in filenames

    Returns:
        detection_results: pd.DataFrame, contains at least the columns ['file', 'detections', 'failure']
        other_fields: a dict containing fields in the results other than 'images'
    """

    print('Loading results from {}'.format(api_output_path))

    detection_results = load_md_or_speciesnet_file(api_output_path)

    # Validate that this is really a detector output file
    for s in ['info', 'detection_categories', 'images']:
        assert s in detection_results, 'Missing field {} in detection results'.format(s)

    # Fields in the output json other than 'images'
    other_fields = {}
    for k, v in detection_results.items():
        if k != 'images':
            other_fields[k] = v

    if normalize_paths:
        for image in detection_results['images']:
            image['file'] = os.path.normpath(image['file'])

    if force_forward_slashes:
        for image in detection_results['images']:
            image['file'] = image['file'].replace('\\','/')

    # Replace some path tokens to match local paths to original blob structure
    if filename_replacements is not None:
        for string_to_replace in filename_replacements.keys():
            replacement_string = filename_replacements[string_to_replace]
            for im in detection_results['images']:
                im['file'] = im['file'].replace(string_to_replace,replacement_string)

    print('Converting results to dataframe')

    # If this is a newer file that doesn't include maximum detection confidence values,
    # add them, because our unofficial internal dataframe format includes this.
    for im in detection_results['images']:
        if 'max_detection_conf' not in im:
            im['max_detection_conf'] = get_max_conf(im)

    # Pack the json output into a Pandas DataFrame
    detection_results = pd.DataFrame(detection_results['images'])

    print('Finished loading MegaDetector results for {} images from {}'.format(
        len(detection_results),api_output_path))

    return detection_results, other_fields


def write_api_results(detection_results_table, other_fields, out_path):
    """
    Writes a Pandas DataFrame to the MegaDetector .json format.

    Args:
        detection_results_table (DataFrame): data to write
        other_fields (dict): additional fields to include in the output .json
        out_path (str): output .json filename
    """

    print('Writing detection results to {}'.format(out_path))

    fields = other_fields

    images = detection_results_table.to_json(orient='records',
                                             double_precision=3)
    images = json.loads(images)
    for im in images:
        if 'failure' in im and im['failure'] is None:
            del im['failure']
    fields['images'] = images

    # Convert the 'version' field back to a string as per format convention
    try:
        version = other_fields['info']['format_version']
        if not isinstance(version,str):
            other_fields['info']['format_version'] = str(version)
    except Exception:
        print('Warning: error determining format version')
        pass

    # Remove 'max_detection_conf' as per newer file convention (format >= v1.3)
    try:
        version = other_fields['info']['format_version']
        version = float(version)
        if version >= 1.3:
            for im in images:
                if 'max_detection_conf' in im:
                    del im['max_detection_conf']
    except Exception:
        print('Warning: error removing max_detection_conf from output')
        pass

    write_json(out_path,fields)

    print('Finished writing detection results to {}'.format(out_path))


def load_api_results_csv(filename, normalize_paths=True, filename_replacements=None, nrows=None):
    """
    [DEPRECATED]

    Loads .csv-formatted MegaDetector results to a pandas table

    Args:
        filename (str): path to the csv file to read
        normalize_paths (bool, optional): whether to apply os.path.normpath to the 'file'
            field in each image entry in the output file
        filename_replacements (dict, optional): replace some path tokens to match local paths
            to the original file structure
        nrows (int, optional): read only the first N rows of [filename]
    """

    if filename_replacements is None:
        filename_replacements = {}

    print('Loading MegaDetector results from {}'.format(filename))

    detection_results = pd.read_csv(filename,nrows=nrows)

    print('De-serializing MegaDetector results from {}'.format(filename))

    # Confirm that this is really a detector output file
    for s in ['image_path','max_confidence','detections']:
        assert s in detection_results.columns

    # Normalize paths to simplify comparisons later
    if normalize_paths:
        detection_results['image_path'] = detection_results['image_path'].apply(os.path.normpath)

    # De-serialize detections
    detection_results['detections'] = detection_results['detections'].apply(json.loads)

    # Optionally replace some path tokens to match local paths to the original blob structure
    # string_to_replace = list(options.detector_output_filename_replacements.keys())[0]
    for string_to_replace in filename_replacements:

        replacement_string = filename_replacements[string_to_replace]

        # i_row = 0
        for i_row in range(0,len(detection_results)):
            row = detection_results.iloc[i_row]
            fn = row['image_path']
            fn = fn.replace(string_to_replace,replacement_string)
            detection_results.at[i_row,'image_path'] = fn

    print('Finished loading and de-serializing MD results for {} images from {}'.format(
        len(detection_results),filename))

    return detection_results


def write_api_results_csv(detection_results, filename):
    """
    [DEPRECATED]

    Writes a Pandas table to csv in a way that's compatible with the .csv output
    format. Currently just a wrapper around to_csv that forces output writing
    to go through a common code path.

    Args:
        detection_results (DataFrame): dataframe to write to [filename]
        filename (str): .csv filename to write
    """

    print('Writing detection results to {}'.format(filename))

    output_dir = os.path.dirname(filename)
    if len(output_dir) > 0:
        os.makedirs(output_dir, exist_ok=True)

    detection_results.to_csv(filename, index=False)

    print('Finished writing detection results to {}'.format(filename))
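
For context, here is a minimal usage sketch showing how the two JSON helpers above are typically combined: load the results into a DataFrame, filter it, and write it back out in the MD format. The input path 'md_results.json', the output path, and the 0.2 threshold are illustrative assumptions, not values taken from the package, and this block is not part of the packaged module.

# Minimal usage sketch (not part of the packaged module); paths and the 0.2
# threshold are hypothetical.
from megadetector.postprocessing.load_api_results import load_api_results, write_api_results

# Load MD-formatted results into a DataFrame, plus a dict of the non-'images' fields
detection_results, other_fields = load_api_results('md_results.json')

# Keep only images whose highest-confidence detection clears a threshold; the
# 'max_detection_conf' column is added by load_api_results() if it's missing
filtered = detection_results[detection_results['max_detection_conf'] >= 0.2]

# Write the filtered table back out in the MD .json format
write_api_results(filtered, other_fields, 'md_results_filtered.json')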
megadetector/postprocessing/md_to_coco.py
@@ -0,0 +1,428 @@
"""

md_to_coco.py

"Converts" MegaDetector output files to COCO format. "Converts" is in quotes because
this is an opinionated transformation that requires a confidence threshold for most
applications.

Does not currently handle classification information.

"""

#%% Constants and imports

import os
import json
import uuid
import sys
import argparse

from tqdm import tqdm

from megadetector.visualization import visualization_utils as vis_utils
from megadetector.utils.path_utils import insert_before_extension
from megadetector.utils.ct_utils import write_json

default_confidence_threshold = 0.15


#%% Functions

def md_to_coco(md_results_file,
               coco_output_file=None,
               image_folder=None,
               confidence_threshold=default_confidence_threshold,
               validate_image_sizes=False,
               info=None,
               preserve_nonstandard_metadata=True,
               include_failed_images=True,
               include_annotations_without_bounding_boxes=True,
               empty_category_id='0',
               overwrite_behavior='skip',
               verbose=True,
               image_filename_to_size=None,
               unrecognized_category_handling='error'):
    """
    "Converts" MegaDetector output files to COCO format. "Converts" is in quotes because
    this is an opinionated transformation that typically requires a confidence threshold.

    The default confidence threshold is not 0; the assumption is that by default, you are
    going to treat the resulting COCO file as a set of labels. If you are using the resulting COCO
    file to *evaluate* a detector, rather than as a set of labels, you likely want a
    confidence threshold of 0. Confidence values will be written to the semi-standard "score"
    field for each annotation (regardless of the threshold) if preserve_nonstandard_metadata is True.

    A folder of images is required if width and height information are not available
    in the MD results file.

    Args:
        md_results_file (str): MD results .json file to convert to COCO format
        coco_output_file (str, optional): COCO .json file to write; if this is None, we'll return
            a COCO-formatted dict, but won't write it to disk. If this is 'auto', we'll write to
            [md_results_file_without_extension].coco.json.
        image_folder (str, optional): folder of images, required if 'width' and 'height' are not
            present in the MD results file (they are not required by the format)
        confidence_threshold (float, optional): boxes below this confidence threshold will not be
            included in the output data
        validate_image_sizes (bool, optional): if this is True, we'll check the image sizes
            regardless of whether "width" and "height" are present in the MD results file.
        info (dict, optional): arbitrary metadata to include in an "info" field in the COCO-formatted
            output
        preserve_nonstandard_metadata (bool, optional): if this is True, confidence will be preserved in a
            non-standard "score" field in each annotation, and any random fields present in each image's
            data (e.g. EXIF metadata) will be propagated to COCO output
        include_failed_images (bool, optional): if this is True, failed images will be propagated to COCO output
            with a non-empty "failure" field and no other fields, otherwise failed images will be skipped.
        include_annotations_without_bounding_boxes (bool, optional): the only time we end up with
            annotations without bounding boxes is when a detection has the category [empty_category_id];
            this determines whether those annotations are included in the output.
        empty_category_id (str, optional): category ID reserved for the 'empty' class, should not be
            attached to any bounding boxes
        overwrite_behavior (str, optional): determines behavior if the output file exists ('skip' to skip conversion,
            'overwrite' to overwrite the existing file, 'error' to raise an error, 'skip_if_valid' to skip conversion
            if the .json file appears to be intact (does not verify COCO formatting, just intact-.json-ness))
        verbose (bool, optional): enable debug output, including the progress bar
        image_filename_to_size (dict, optional): dictionary mapping relative image paths to (w,h) tuples. Reading
            image sizes is the slowest step, so if you need to convert many results files at once for the same
            set of images, things will be gobs faster if you read the image sizes in advance and pass them in
            via this argument. The format used here is the same format output by parallel_get_image_sizes().
        unrecognized_category_handling (str or float, optional): specifies what to do when encountering category
            IDs not in the category mapping. Can be "error", "ignore", or "warning". Can also be a float,
            in which case an error is thrown if an unrecognized category has a confidence value higher than
            this value.

    Returns:
        dict: the COCO data dict, identical to what's written to [coco_output_file] if [coco_output_file]
        is not None.
    """

    assert isinstance(md_results_file,str)
    assert os.path.isfile(md_results_file), \
        'MD results file {} does not exist'.format(md_results_file)
    assert (isinstance(unrecognized_category_handling,float)) or \
        (unrecognized_category_handling in ('error','warning','ignore')), \
        'Invalid category handling behavior {}'.format(unrecognized_category_handling)

    if coco_output_file == 'auto':
        coco_output_file = insert_before_extension(md_results_file,'coco')

    if coco_output_file is not None:
        if os.path.isfile(coco_output_file):
            if overwrite_behavior == 'skip':
                print('Skipping conversion of {}, output file {} exists'.format(
                    md_results_file,coco_output_file))
                return None
            elif overwrite_behavior == 'skip_if_valid':
                output_file_is_valid = True
                try:
                    with open(coco_output_file,'r') as f:
                        _ = json.load(f)
                except Exception:
                    print('COCO file {} is invalid, proceeding with conversion'.format(
                        coco_output_file))
                    output_file_is_valid = False
                if output_file_is_valid:
                    print('Skipping conversion of {}, output file {} exists and is valid'.format(
                        md_results_file,coco_output_file))
                    return None
            elif overwrite_behavior == 'overwrite':
                pass
            elif overwrite_behavior == 'error':
                raise ValueError('Output file {} exists'.format(coco_output_file))

    with open(md_results_file,'r') as f:
        md_results = json.load(f)

    coco_images = []
    coco_annotations = []

    if verbose:
        print('Converting MD results file {} to COCO file {}...'.format(
            md_results_file, coco_output_file))

    # im = md_results['images'][0]
    for im in tqdm(md_results['images'],disable=(not verbose)):

        coco_im = {}
        coco_im['id'] = im['file']
        coco_im['file_name'] = im['file']

        # There is no concept of this in the COCO standard
        if 'failure' in im and im['failure'] is not None:
            if include_failed_images:
                coco_im['failure'] = im['failure']
                coco_images.append(coco_im)
            continue

        # Read/validate image size
        w = None
        h = None

        if ('width' not in im) or ('height' not in im) or validate_image_sizes:

            if (image_folder is None) and (image_filename_to_size is None):
                raise ValueError('Must provide an image folder or a size mapping when ' + \
                                 'height/width need to be read from images')

            w = None; h = None

            if image_filename_to_size is not None:

                if im['file'] not in image_filename_to_size:
                    print('Warning: file {} not in image size mapping dict, reading from file'.format(
                        im['file']))
                else:
                    image_size = image_filename_to_size[im['file']]
                    if image_size is not None:
                        assert len(image_size) == 2
                        w = image_size[0]
                        h = image_size[1]

            if w is None:

                image_file_abs = os.path.join(image_folder,im['file'])
                pil_im = vis_utils.open_image(image_file_abs)
                w = pil_im.width
                h = pil_im.height

            if validate_image_sizes:
                if 'width' in im:
                    assert im['width'] == w, 'Width mismatch for image {}'.format(im['file'])
                if 'height' in im:
                    assert im['height'] == h, 'Height mismatch for image {}'.format(im['file'])

        else:

            w = im['width']
            h = im['height']

        coco_im['width'] = w
        coco_im['height'] = h

        # Add other, non-standard fields to the output dict
        if preserve_nonstandard_metadata:
            for k in im.keys():
                if k not in ('file','detections','width','height'):
                    coco_im[k] = im[k]

        coco_images.append(coco_im)

        # detection = im['detections'][0]
        for detection in im['detections']:

            # Skip below-threshold detections
            if confidence_threshold is not None and detection['conf'] < confidence_threshold:
                continue

            # Create an annotation
            ann = {}
            ann['id'] = str(uuid.uuid1())
            ann['image_id'] = coco_im['id']

            md_category_id = detection['category']

            if md_category_id not in md_results['detection_categories']:

                s = 'unrecognized category ID {} occurred with confidence {} in file {}'.format(
                    md_category_id,detection['conf'],im['file'])
                if isinstance(unrecognized_category_handling,float):
                    if detection['conf'] > unrecognized_category_handling:
                        raise ValueError(s)
                    else:
                        continue
                elif unrecognized_category_handling == 'warning':
                    print('Warning: {}'.format(s))
                    continue
                elif unrecognized_category_handling == 'ignore':
                    continue
                else:
                    raise ValueError(s)

            coco_category_id = int(md_category_id)
            ann['category_id'] = coco_category_id

            if md_category_id != empty_category_id:

                assert 'bbox' in detection,\
                    'Oops: non-empty category with no bbox in {}'.format(im['file'])

                ann['bbox'] = detection['bbox']

                # MegaDetector: [x,y,width,height] (normalized, origin upper-left)
                # COCO: [x,y,width,height] (absolute, origin upper-left)
                ann['bbox'][0] = ann['bbox'][0] * coco_im['width']
                ann['bbox'][1] = ann['bbox'][1] * coco_im['height']
                ann['bbox'][2] = ann['bbox'][2] * coco_im['width']
                ann['bbox'][3] = ann['bbox'][3] * coco_im['height']

            else:

                # In very esoteric cases, we use the empty category (0) in MD-formatted output files
                print('Warning: empty category ({}) used for annotation for image {}'.format(
                    empty_category_id,im['file']))
                pass

            if preserve_nonstandard_metadata:
                # "Score" is a semi-standard string here, recognized by at least pycocotools
                # ann['conf'] = detection['conf']
                ann['score'] = detection['conf']

            if 'bbox' in ann or include_annotations_without_bounding_boxes:
                coco_annotations.append(ann)

        # ...for each detection

    # ...for each image

    output_dict = {}

    if info is not None:
        output_dict['info'] = info
    else:
        output_dict['info'] = {'description':'Converted from MD results file {}'.format(md_results_file)}
        output_dict['info']['confidence_threshold'] = confidence_threshold

    output_dict['images'] = coco_images
    output_dict['annotations'] = coco_annotations

    output_dict['categories'] = []

    for md_category_id in md_results['detection_categories'].keys():

        coco_category_id = int(md_category_id)
        coco_category = {'id':coco_category_id,
                         'name':md_results['detection_categories'][md_category_id]}
        output_dict['categories'].append(coco_category)

    # Only write to disk if an output file was supplied (per the docstring, a None
    # output file means "just return the dict")
    if coco_output_file is not None:

        if verbose:
            print('Writing COCO output file...')

        write_json(coco_output_file,output_dict)

    return output_dict

# ...def md_to_coco(...)


#%% Interactive driver

if False:

    pass

    #%% Configure options

    md_results_file = os.path.expanduser('~/data/md-test.json')
    coco_output_file = os.path.expanduser('~/data/md-test-coco.json')
    image_folder = os.path.expanduser('~/data/md-test')
    validate_image_sizes = True
    confidence_threshold = 0.2
    info = None
    preserve_nonstandard_metadata = True
    include_failed_images = False


    #%% Programmatic execution

    output_dict = md_to_coco(md_results_file,
                             coco_output_file=coco_output_file,
                             image_folder=image_folder,
                             confidence_threshold=confidence_threshold,
                             validate_image_sizes=validate_image_sizes,
                             info=info,
                             preserve_nonstandard_metadata=preserve_nonstandard_metadata,
                             include_failed_images=include_failed_images)


    #%% Command-line example

    s = f'python md_to_coco.py {md_results_file} {coco_output_file} {confidence_threshold} '
    if image_folder is not None:
        s += f' --image_folder {image_folder}'
    if preserve_nonstandard_metadata:
        s += ' --preserve_nonstandard_metadata'
    if include_failed_images:
        s += ' --include_failed_images'

    print(s); import clipboard; clipboard.copy(s)


    #%% Preview the resulting file

    from megadetector.visualization import visualize_db
    options = visualize_db.DbVizOptions()
    options.parallelize_rendering = True
    options.viz_size = (900, -1)
    options.num_to_visualize = 5000

    html_file,_ = visualize_db.visualize_db(coco_output_file,
                                            os.path.expanduser('~/tmp/md_to_coco_preview'),
                                            image_folder,options)

    from megadetector.utils import path_utils # noqa
    path_utils.open_file(html_file)


#%% Command-line driver

def main(): # noqa

    parser = argparse.ArgumentParser(
        description='"Convert" MD output to COCO format, in quotes because this is an opinionated ' + \
            'transformation that requires a confidence threshold')

    parser.add_argument(
        'md_results_file',
        type=str,
        help='Path to MD results file (.json)')

    parser.add_argument(
        'coco_output_file',
        type=str,
        help='Output filename (.json)')

    parser.add_argument(
        'confidence_threshold',
        type=float,
        default=default_confidence_threshold,
        help='Confidence threshold (default {})'.format(default_confidence_threshold)
        )

    parser.add_argument(
        '--image_folder',
        type=str,
        default=None,
        help='Image folder, only required if we will need to access image sizes'
        )

    parser.add_argument(
        '--preserve_nonstandard_metadata',
        action='store_true',
        help='Preserve metadata that isn\'t normally included in ' +
             'COCO-formatted data (e.g. EXIF metadata, confidence values)'
        )

    parser.add_argument(
        '--include_failed_images',
        action='store_true',
        help='Keep a record of corrupted images in the output; may not be completely COCO-compliant'
        )

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    md_to_coco(args.md_results_file,
               args.coco_output_file,
               args.image_folder,
               args.confidence_threshold,
               validate_image_sizes=False,
               info=None,
               preserve_nonstandard_metadata=args.preserve_nonstandard_metadata,
               include_failed_images=args.include_failed_images)

if __name__ == '__main__':
    main()
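
For readers comparing the two box conventions, the following standalone sketch reproduces the conversion md_to_coco() applies to each detection: MegaDetector boxes are [x, y, width, height] normalized to the image size with the origin at the upper-left, while COCO uses the same layout in absolute pixels. The helper name md_bbox_to_coco and the example numbers are made up for illustration and are not part of the packaged module.

# Standalone sketch (not part of the packaged module) of the per-detection box
# conversion performed inside md_to_coco(); names and values are hypothetical.
def md_bbox_to_coco(bbox, image_width, image_height):
    """Convert a normalized MD [x, y, w, h] box to absolute COCO pixels."""
    x, y, w, h = bbox
    return [x * image_width, y * image_height, w * image_width, h * image_height]

# A detection covering the left quarter of a 2000x1500 image
print(md_bbox_to_coco([0.1, 0.2, 0.25, 0.5], 2000, 1500))
# -> [200.0, 300.0, 500.0, 750.0]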