megadetector 10.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/__init__.py +0 -0
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +626 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +853 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +608 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +696 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +509 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/animl_to_md.py +161 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +33 -0
- megadetector/data_management/camtrap_dp_to_coco.py +270 -0
- megadetector/data_management/cct_json_utils.py +566 -0
- megadetector/data_management/cct_to_md.py +184 -0
- megadetector/data_management/cct_to_wi.py +293 -0
- megadetector/data_management/coco_to_labelme.py +284 -0
- megadetector/data_management/coco_to_yolo.py +701 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
- megadetector/data_management/databases/integrity_check_json_db.py +563 -0
- megadetector/data_management/databases/subset_json_db.py +195 -0
- megadetector/data_management/generate_crops_from_cct.py +200 -0
- megadetector/data_management/get_image_sizes.py +164 -0
- megadetector/data_management/labelme_to_coco.py +559 -0
- megadetector/data_management/labelme_to_yolo.py +349 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/create_lila_blank_set.py +556 -0
- megadetector/data_management/lila/create_lila_test_set.py +192 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +182 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +319 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
- megadetector/data_management/mewc_to_md.py +344 -0
- megadetector/data_management/ocr_tools.py +873 -0
- megadetector/data_management/read_exif.py +964 -0
- megadetector/data_management/remap_coco_categories.py +195 -0
- megadetector/data_management/remove_exif.py +156 -0
- megadetector/data_management/rename_images.py +194 -0
- megadetector/data_management/resize_coco_dataset.py +665 -0
- megadetector/data_management/speciesnet_to_md.py +41 -0
- megadetector/data_management/wi_download_csv_to_coco.py +247 -0
- megadetector/data_management/yolo_output_to_md_output.py +594 -0
- megadetector/data_management/yolo_to_coco.py +984 -0
- megadetector/data_management/zamba_to_md.py +188 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/change_detection.py +840 -0
- megadetector/detection/process_video.py +479 -0
- megadetector/detection/pytorch_detector.py +1451 -0
- megadetector/detection/run_detector.py +1267 -0
- megadetector/detection/run_detector_batch.py +2172 -0
- megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
- megadetector/detection/run_md_and_speciesnet.py +1604 -0
- megadetector/detection/run_tiled_inference.py +1044 -0
- megadetector/detection/tf_detector.py +209 -0
- megadetector/detection/video_utils.py +1379 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +72 -0
- megadetector/postprocessing/categorize_detections_by_size.py +166 -0
- megadetector/postprocessing/classification_postprocessing.py +1943 -0
- megadetector/postprocessing/combine_batch_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +2110 -0
- megadetector/postprocessing/convert_output_format.py +403 -0
- megadetector/postprocessing/create_crop_folder.py +629 -0
- megadetector/postprocessing/detector_calibration.py +570 -0
- megadetector/postprocessing/generate_csv_report.py +522 -0
- megadetector/postprocessing/load_api_results.py +223 -0
- megadetector/postprocessing/md_to_coco.py +428 -0
- megadetector/postprocessing/md_to_labelme.py +351 -0
- megadetector/postprocessing/md_to_wi.py +41 -0
- megadetector/postprocessing/merge_detections.py +392 -0
- megadetector/postprocessing/postprocess_batch_results.py +2140 -0
- megadetector/postprocessing/remap_detection_categories.py +226 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
- megadetector/postprocessing/separate_detections_into_folders.py +795 -0
- megadetector/postprocessing/subset_json_detector_output.py +964 -0
- megadetector/postprocessing/top_folders_to_bottom.py +238 -0
- megadetector/postprocessing/validate_batch_results.py +332 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +211 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +231 -0
- megadetector/taxonomy_mapping/species_lookup.py +1008 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/tests/__init__.py +0 -0
- megadetector/tests/test_nms_synthetic.py +335 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/ct_utils.py +1857 -0
- megadetector/utils/directory_listing.py +199 -0
- megadetector/utils/extract_frames_from_video.py +307 -0
- megadetector/utils/gpu_test.py +125 -0
- megadetector/utils/md_tests.py +2072 -0
- megadetector/utils/path_utils.py +2872 -0
- megadetector/utils/process_utils.py +172 -0
- megadetector/utils/split_locations_into_train_val.py +237 -0
- megadetector/utils/string_utils.py +234 -0
- megadetector/utils/url_utils.py +825 -0
- megadetector/utils/wi_platform_utils.py +968 -0
- megadetector/utils/wi_taxonomy_utils.py +1766 -0
- megadetector/utils/write_html_image_list.py +239 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +309 -0
- megadetector/visualization/render_images_with_thumbnails.py +243 -0
- megadetector/visualization/visualization_utils.py +1973 -0
- megadetector/visualization/visualize_db.py +630 -0
- megadetector/visualization/visualize_detector_output.py +498 -0
- megadetector/visualization/visualize_video_output.py +705 -0
- megadetector-10.0.15.dist-info/METADATA +115 -0
- megadetector-10.0.15.dist-info/RECORD +147 -0
- megadetector-10.0.15.dist-info/WHEEL +5 -0
- megadetector-10.0.15.dist-info/licenses/LICENSE +19 -0
- megadetector-10.0.15.dist-info/top_level.txt +1 -0

megadetector/postprocessing/md_to_labelme.py
@@ -0,0 +1,351 @@
"""

md_to_labelme.py

"Converts" a MegaDetector output .json file to labelme format (one .json per image
file). "Convert" is in quotes because this is an opinionated transformation that
requires a confidence threshold.

TODO: # noqa

* support variable confidence thresholds across classes
* support classification data

"""

#%% Imports and constants

import os
import json
import sys
import argparse

from tqdm import tqdm

from multiprocessing.pool import Pool
from multiprocessing.pool import ThreadPool
from functools import partial

from megadetector.visualization.visualization_utils import open_image
from megadetector.utils.ct_utils import round_float
from megadetector.utils.ct_utils import write_json
from megadetector.detection.run_detector import DEFAULT_DETECTOR_LABEL_MAP, FAILURE_IMAGE_OPEN

output_precision = 3
default_confidence_threshold = 0.15


#%% Functions

def get_labelme_dict_for_image(im,
                               image_base_name=None,
                               category_id_to_name=None,
                               info=None,
                               confidence_threshold=None):
    """
    For the given image struct in MD results format, reformat the detections into
    labelme format.

    Args:
        im (dict): MegaDetector-formatted results dict, must include 'height' and 'width' fields
        image_base_name (str, optional): written directly to the 'imagePath' field in the output;
            defaults to os.path.basename(im['file']).
        category_id_to_name (dict, optional): maps string-int category IDs to category names, defaults
            to the standard MD categories
        info (dict, optional): arbitrary metadata to write to the "detector_info" field in the output
            dict
        confidence_threshold (float, optional): only detections at or above this confidence threshold
            will be included in the output dict

    Return:
        dict: labelme-formatted dictionary, suitable for writing directly to a labelme-formatted .json file
    """

    if image_base_name is None:
        image_base_name = os.path.basename(im['file'])

    if category_id_to_name is None:
        category_id_to_name = DEFAULT_DETECTOR_LABEL_MAP

    if confidence_threshold is None:
        confidence_threshold = -1.0

    output_dict = {}
    if info is not None:
        output_dict['detector_info'] = info
    output_dict['version'] = '5.3.0a0'
    output_dict['flags'] = {}
    output_dict['shapes'] = []
    output_dict['imagePath'] = image_base_name
    output_dict['imageHeight'] = im['height']
    output_dict['imageWidth'] = im['width']
    output_dict['imageData'] = None
    output_dict['detections'] = im['detections']

    # det = im['detections'][1]
    for det in im['detections']:

        if det['conf'] < confidence_threshold:
            continue

        shape = {}
        shape['conf'] = det['conf']
        shape['label'] = category_id_to_name[det['category']]
        shape['shape_type'] = 'rectangle'
        shape['description'] = ''
        shape['group_id'] = None

        # MD boxes are [x_min, y_min, width_of_box, height_of_box] (relative)
        #
        # labelme boxes are [[x0,y0],[x1,y1]] (absolute)
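        #
        # For example (illustrative values only): for a 2048x1536 image, an MD box of
        # [0.1, 0.2, 0.3, 0.4] becomes
        # [[0.1*2048, 0.2*1536], [0.1*2048 + 0.3*2048, 0.2*1536 + 0.4*1536]],
        # i.e. [[204.8, 307.2], [819.2, 921.6]], rounded to [output_precision] decimal places.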
        x0 = round_float(det['bbox'][0] * im['width'],output_precision)
        y0 = round_float(det['bbox'][1] * im['height'],output_precision)
        x1 = round_float(x0 + det['bbox'][2] * im['width'],output_precision)
        y1 = round_float(y0 + det['bbox'][3] * im['height'],output_precision)
        shape['points'] = [[x0,y0],[x1,y1]]
        output_dict['shapes'].append(shape)

    # ...for each detection

    return output_dict

# ...def get_labelme_dict_for_image()


def _write_output_for_image(im,
                            image_base,
                            extension_prefix,
                            info,
                            confidence_threshold,
                            category_id_to_name,
                            overwrite,
                            verbose=False):

    if 'failure' in im and im['failure'] is not None:
        assert 'detections' not in im or im['detections'] is None
        if verbose:
            print('Skipping labelme file generation for failed image {}'.format(
                im['file']))
        return

    im_full_path = os.path.join(image_base,im['file'])
    json_path = os.path.splitext(im_full_path)[0] + extension_prefix + '.json'

    if (not overwrite) and (os.path.isfile(json_path)):
        if verbose:
            print('Skipping existing file {}'.format(json_path))
        return

    output_dict = get_labelme_dict_for_image(im,
                                             image_base_name=os.path.basename(im_full_path),
                                             category_id_to_name=category_id_to_name,
                                             info=info,
                                             confidence_threshold=confidence_threshold)

    write_json(json_path,output_dict)

# ...def write_output_for_image(...)



def md_to_labelme(results_file,
                  image_base,
                  confidence_threshold=None,
                  overwrite=False,
                  extension_prefix='',
                  n_workers=1,
                  use_threads=False,
                  bypass_image_size_read=False,
                  verbose=False):
    """
    For all the images in [results_file], write a .json file in labelme format alongside the
    corresponding relative path within image_base.

    Args:
        results_file (str): MD results .json file to convert to Labelme format
        image_base (str): folder of images; filenames in [results_file] should be relative to
            this folder
        confidence_threshold (float, optional): only detections at or above this confidence threshold
            will be included in the output dict. If None, no threshold will be applied.
        overwrite (bool, optional): whether to overwrite existing output files; if this is False
            and the output file for an image exists, we'll skip that image
        extension_prefix (str, optional): if non-empty, "extension_prefix" will be inserted before the .json
            extension (typically used to generate multiple copies of labelme files representing different
            MD thresholds)
        n_workers (int, optional): enables multiprocessing if > 1
        use_threads (bool, optional): if [n_workers] > 1, determines whether we parallelize via threads (True)
            or processes (False)
        bypass_image_size_read (bool, optional): if True, skips reading image sizes and trusts whatever is in
            the MD results file (don't set this to "True" if your MD results file doesn't contain image sizes)
        verbose (bool, optional): enables additional debug output
    """

    if extension_prefix is None:
        extension_prefix = ''

    # Load MD results if necessary
    if isinstance(results_file,dict):
        md_results = results_file
    else:
        print('Loading MD results...')
        with open(results_file,'r') as f:
            md_results = json.load(f)

    # Read image sizes if necessary
    if bypass_image_size_read:

        print('Bypassing image size read')

    else:

        # TODO: parallelize this loop

        print('Reading image sizes...')

        # im = md_results['images'][0]
        for im in tqdm(md_results['images']):

            # Make sure this file exists
            im_full_path = os.path.join(image_base,im['file'])
            assert os.path.isfile(im_full_path), 'Image file {} does not exist'.format(im_full_path)

            json_path = os.path.splitext(im_full_path)[0] + extension_prefix + '.json'

            # Don't even bother reading sizes for files we're not going to generate
            if (not overwrite) and (os.path.isfile(json_path)):
                continue

            # Load w/h information if necessary
            if 'height' not in im or 'width' not in im:

                try:
                    pil_im = open_image(im_full_path)
                    im['width'] = pil_im.width
                    im['height'] = pil_im.height
                except Exception:
                    print('Warning: cannot open image {}, treating as a failure during inference'.format(
                        im_full_path))
                    if 'failure' not in im:
                        im['failure'] = FAILURE_IMAGE_OPEN

            # ...if we need to read w/h information

        # ...for each image

    # ...if we're not bypassing image size read

    print('\nGenerating labelme files...')

    # Write output
    if n_workers <= 1:
        for im in tqdm(md_results['images']):
            _write_output_for_image(im,image_base,extension_prefix,md_results['info'],confidence_threshold,
                                    md_results['detection_categories'],overwrite,verbose)
    else:
        pool = None
        try:
            if use_threads:
                print('Starting parallel thread pool with {} workers'.format(n_workers))
                pool = ThreadPool(n_workers)
            else:
                print('Starting parallel process pool with {} workers'.format(n_workers))
                pool = Pool(n_workers)
            _ = list(tqdm(pool.imap(
                partial(_write_output_for_image,
                        image_base=image_base,extension_prefix=extension_prefix,
                        info=md_results['info'],confidence_threshold=confidence_threshold,
                        category_id_to_name=md_results['detection_categories'],
                        overwrite=overwrite,verbose=verbose),
                md_results['images']),
                total=len(md_results['images'])))
        finally:
            if pool is not None:
                pool.close()
                pool.join()
                print('Pool closed and joined for labelme file writes')

    # ...for each image

# ...def md_to_labelme()


#%% Interactive driver

if False:

    pass

    #%% Configure options

    md_results_file = os.path.expanduser('~/data/md-test.json')
    coco_output_file = os.path.expanduser('~/data/md-test-coco.json')
    image_folder = os.path.expanduser('~/data/md-test')
    confidence_threshold = 0.2
    overwrite = True


    #%% Programmatic execution

    md_to_labelme(results_file=md_results_file,
                  image_base=image_folder,
                  confidence_threshold=confidence_threshold,
                  overwrite=overwrite)


    #%% Command-line execution

    s = 'python md_to_labelme.py {} {} --confidence_threshold {}'.format(md_results_file,
                                                                         image_folder,
                                                                         confidence_threshold)
    if overwrite:
        s += ' --overwrite'

    print(s)
    import clipboard; clipboard.copy(s)


    #%% Opening labelme

    s = 'python labelme {}'.format(image_folder)
    print(s)
    import clipboard; clipboard.copy(s)


#%% Command-line driver

def main(): # noqa

    parser = argparse.ArgumentParser(
        description='Convert MD output to labelme annotation format')
    parser.add_argument(
        'results_file',
        type=str,
        help='Path to MD results file (.json)')

    parser.add_argument(
        'image_base',
        type=str,
        help='Path to images (also the output folder)')

    parser.add_argument(
        '--confidence_threshold',
        type=float,
        default=default_confidence_threshold,
        help='Confidence threshold (default {})'.format(default_confidence_threshold)
        )

    parser.add_argument(
        '--overwrite',
        action='store_true',
        help='Overwrite existing labelme .json files')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    md_to_labelme(args.results_file,args.image_base,args.confidence_threshold,args.overwrite)

if __name__ == '__main__':
    main()
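
For reference, a minimal sketch (not part of the package) of what get_labelme_dict_for_image() produces for a single hypothetical MD image entry; the field names follow the code above, while the file name, image size, box values, and category map are illustrative assumptions:

from megadetector.postprocessing.md_to_labelme import get_labelme_dict_for_image

# A hypothetical single-image entry in MD results format
im = {
    'file': 'camera01/IMG_0001.JPG',
    'width': 2048,
    'height': 1536,
    'detections': [
        {'category': '1', 'conf': 0.92, 'bbox': [0.1, 0.2, 0.3, 0.4]}
    ]
}

labelme_dict = get_labelme_dict_for_image(im,
                                          category_id_to_name={'1': 'animal'},
                                          confidence_threshold=0.15)

# labelme_dict now contains 'version', 'flags', 'imagePath', 'imageHeight',
# 'imageWidth', 'imageData', the original 'detections', and one entry in
# 'shapes' whose 'label' is 'animal' and whose 'points' are the absolute
# pixel corners of the box; write_json(path, labelme_dict) would produce
# the per-image labelme .json file.
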
megadetector/postprocessing/md_to_wi.py
@@ -0,0 +1,41 @@
"""

md_to_wi.py

Converts the MD .json format to the WI predictions.json format.

"""

#%% Imports and constants

import sys
import argparse

from megadetector.utils.wi_taxonomy_utils import generate_predictions_json_from_md_results


#%% Command-line driver

def main(): # noqa

    parser = argparse.ArgumentParser()
    parser.add_argument('md_results_file', action='store', type=str,
                        help='output file in MD format to convert')
    parser.add_argument('predictions_json_file', action='store', type=str,
                        help='.json file to write in predictions.json format')
    parser.add_argument('--base_folder', action='store', type=str, default=None,
                        help='folder name to prepend to each path in md_results_file, ' + \
                             'to convert relative paths to absolute paths.')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    generate_predictions_json_from_md_results(args.md_results_file,
                                              args.predictions_json_file,
                                              base_folder=args.base_folder)

if __name__ == '__main__':
    main()
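
And a minimal sketch of driving the same conversion from Python rather than the command line, mirroring the call in main() above; the file paths are hypothetical:

from megadetector.utils.wi_taxonomy_utils import generate_predictions_json_from_md_results

# Hypothetical input/output paths; base_folder is only needed when the MD
# results file uses relative paths that should appear as absolute paths in
# the predictions.json output.
generate_predictions_json_from_md_results('md-results.json',
                                          'predictions.json',
                                          base_folder='/data/camera-trap-images')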