megadetector-10.0.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of megadetector might be problematic.
- megadetector/__init__.py +0 -0
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +626 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +853 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +608 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +696 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +509 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/animl_to_md.py +161 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +33 -0
- megadetector/data_management/camtrap_dp_to_coco.py +270 -0
- megadetector/data_management/cct_json_utils.py +566 -0
- megadetector/data_management/cct_to_md.py +184 -0
- megadetector/data_management/cct_to_wi.py +293 -0
- megadetector/data_management/coco_to_labelme.py +284 -0
- megadetector/data_management/coco_to_yolo.py +702 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
- megadetector/data_management/databases/integrity_check_json_db.py +528 -0
- megadetector/data_management/databases/subset_json_db.py +195 -0
- megadetector/data_management/generate_crops_from_cct.py +200 -0
- megadetector/data_management/get_image_sizes.py +164 -0
- megadetector/data_management/labelme_to_coco.py +559 -0
- megadetector/data_management/labelme_to_yolo.py +349 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/create_lila_blank_set.py +556 -0
- megadetector/data_management/lila/create_lila_test_set.py +187 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +182 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +319 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
- megadetector/data_management/mewc_to_md.py +344 -0
- megadetector/data_management/ocr_tools.py +873 -0
- megadetector/data_management/read_exif.py +964 -0
- megadetector/data_management/remap_coco_categories.py +195 -0
- megadetector/data_management/remove_exif.py +156 -0
- megadetector/data_management/rename_images.py +194 -0
- megadetector/data_management/resize_coco_dataset.py +663 -0
- megadetector/data_management/speciesnet_to_md.py +41 -0
- megadetector/data_management/wi_download_csv_to_coco.py +247 -0
- megadetector/data_management/yolo_output_to_md_output.py +594 -0
- megadetector/data_management/yolo_to_coco.py +876 -0
- megadetector/data_management/zamba_to_md.py +188 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/change_detection.py +840 -0
- megadetector/detection/process_video.py +479 -0
- megadetector/detection/pytorch_detector.py +1451 -0
- megadetector/detection/run_detector.py +1267 -0
- megadetector/detection/run_detector_batch.py +2159 -0
- megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
- megadetector/detection/run_md_and_speciesnet.py +1494 -0
- megadetector/detection/run_tiled_inference.py +1038 -0
- megadetector/detection/tf_detector.py +209 -0
- megadetector/detection/video_utils.py +1379 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +72 -0
- megadetector/postprocessing/categorize_detections_by_size.py +166 -0
- megadetector/postprocessing/classification_postprocessing.py +1752 -0
- megadetector/postprocessing/combine_batch_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +2110 -0
- megadetector/postprocessing/convert_output_format.py +403 -0
- megadetector/postprocessing/create_crop_folder.py +629 -0
- megadetector/postprocessing/detector_calibration.py +570 -0
- megadetector/postprocessing/generate_csv_report.py +522 -0
- megadetector/postprocessing/load_api_results.py +223 -0
- megadetector/postprocessing/md_to_coco.py +428 -0
- megadetector/postprocessing/md_to_labelme.py +351 -0
- megadetector/postprocessing/md_to_wi.py +41 -0
- megadetector/postprocessing/merge_detections.py +392 -0
- megadetector/postprocessing/postprocess_batch_results.py +2077 -0
- megadetector/postprocessing/remap_detection_categories.py +226 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
- megadetector/postprocessing/separate_detections_into_folders.py +795 -0
- megadetector/postprocessing/subset_json_detector_output.py +964 -0
- megadetector/postprocessing/top_folders_to_bottom.py +238 -0
- megadetector/postprocessing/validate_batch_results.py +332 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +213 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +224 -0
- megadetector/taxonomy_mapping/species_lookup.py +1008 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/tests/__init__.py +0 -0
- megadetector/tests/test_nms_synthetic.py +335 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/ct_utils.py +1857 -0
- megadetector/utils/directory_listing.py +199 -0
- megadetector/utils/extract_frames_from_video.py +307 -0
- megadetector/utils/gpu_test.py +125 -0
- megadetector/utils/md_tests.py +2072 -0
- megadetector/utils/path_utils.py +2832 -0
- megadetector/utils/process_utils.py +172 -0
- megadetector/utils/split_locations_into_train_val.py +237 -0
- megadetector/utils/string_utils.py +234 -0
- megadetector/utils/url_utils.py +825 -0
- megadetector/utils/wi_platform_utils.py +968 -0
- megadetector/utils/wi_taxonomy_utils.py +1759 -0
- megadetector/utils/write_html_image_list.py +239 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +309 -0
- megadetector/visualization/render_images_with_thumbnails.py +243 -0
- megadetector/visualization/visualization_utils.py +1940 -0
- megadetector/visualization/visualize_db.py +630 -0
- megadetector/visualization/visualize_detector_output.py +479 -0
- megadetector/visualization/visualize_video_output.py +705 -0
- megadetector-10.0.13.dist-info/METADATA +134 -0
- megadetector-10.0.13.dist-info/RECORD +147 -0
- megadetector-10.0.13.dist-info/WHEEL +5 -0
- megadetector-10.0.13.dist-info/licenses/LICENSE +19 -0
- megadetector-10.0.13.dist-info/top_level.txt +1 -0
megadetector/data_management/resize_coco_dataset.py
@@ -0,0 +1,663 @@
"""

resize_coco_dataset.py

Given a COCO-formatted dataset, resizes all the images to a target size,
scaling bounding boxes accordingly.

"""

#%% Imports and constants

import os
import json
import shutil
import argparse
import sys

from collections import defaultdict
from multiprocessing.pool import Pool, ThreadPool
from functools import partial

from PIL import Image
from tqdm import tqdm

from megadetector.utils.path_utils import insert_before_extension
from megadetector.visualization.visualization_utils import \
    open_image, resize_image, exif_preserving_save
from megadetector.utils.ct_utils import make_test_folder
from megadetector.utils.ct_utils import write_json


#%% Functions

def _process_single_image_for_resize(image_data,
                                     input_folder,
                                     output_folder,
                                     target_size,
                                     correct_size_image_handling,
                                     unavailable_image_handling,
                                     no_enlarge_width,
                                     verbose):
    """
    Processes a single image: loads, resizes/copies, updates metadata, and scales annotations.

    [image_data] is a tuple of [im,annotations]
    """

    assert unavailable_image_handling in ('error','omit'), \
        f'Illegal unavailable_image_handling {unavailable_image_handling}'

    assert isinstance(image_data,tuple) and len(image_data) == 2
    assert isinstance(image_data[0],dict)
    assert isinstance(image_data[1],list)
    im = image_data[0].copy()
    annotations_this_image = [ann.copy() for ann in image_data[1]]

    input_fn_relative = im['file_name']
    input_fn_abs = os.path.join(input_folder, input_fn_relative)

    if not os.path.isfile(input_fn_abs):
        if unavailable_image_handling == 'error':
            raise FileNotFoundError('Could not find file {}'.format(input_fn_abs))
        else:
            print("Can't find image {}, skipping".format(input_fn_relative))
            return None, None

    output_fn_abs = os.path.join(output_folder, input_fn_relative)
    output_dir = os.path.dirname(output_fn_abs)
    if len(output_dir) > 0:
        os.makedirs(output_dir, exist_ok=True)

    if verbose:
        print('Resizing {} to {}'.format(input_fn_abs,output_fn_abs))

    try:
        pil_im = open_image(input_fn_abs)
        input_w = pil_im.width
        input_h = pil_im.height
    except Exception as e:
        if unavailable_image_handling == 'error':
            raise Exception('Could not open image {}: {}'.format(
                input_fn_relative, str(e)))
        else:
            print("Can't open image {}, skipping".format(input_fn_relative))
            return None, None

    image_is_already_target_size = \
        (input_w == target_size[0]) and (input_h == target_size[1])
    if no_enlarge_width and (input_w < target_size[0]):
        image_is_already_target_size = True
    preserve_original_size = \
        (target_size[0] == -1) and (target_size[1] == -1)

    # Do we need to resize, or can we try to get away with a copy?
    if image_is_already_target_size or preserve_original_size:
        output_w = input_w
        output_h = input_h
        if correct_size_image_handling == 'copy':
            if input_fn_abs != output_fn_abs: # only copy if src and dst are different
                shutil.copyfile(input_fn_abs, output_fn_abs)
        elif correct_size_image_handling == 'rewrite':
            exif_preserving_save(pil_im, output_fn_abs)
        else:
            raise ValueError(
                f'Unrecognized value {correct_size_image_handling} for correct_size_image_handling')
    else:
        try:
            pil_im = resize_image(pil_im, target_size[0], target_size[1],
                                  no_enlarge_width=no_enlarge_width)
            output_w = pil_im.width
            output_h = pil_im.height
            exif_preserving_save(pil_im, output_fn_abs)
        except Exception as e:
            if unavailable_image_handling == 'error':
                raise Exception('Could not resize image {}: {}'.format(
                    input_fn_relative, str(e)))
            else:
                print("Can't resize image {}, skipping".format(input_fn_relative))
                return None,None

    im['width'] = output_w
    im['height'] = output_h

    for ann in annotations_this_image:

        if 'bbox' in ann:
            bbox = ann['bbox']
            if (output_w != input_w) or (output_h != input_h):
                width_scale = output_w / input_w
                height_scale = output_h / input_h
                bbox = [
                    bbox[0] * width_scale,
                    bbox[1] * height_scale,
                    bbox[2] * width_scale,
                    bbox[3] * height_scale
                ]
                ann['bbox'] = bbox

    # ...for each annotation associated with this image

    return im, annotations_this_image

# ...def _process_single_image_for_resize(...)
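
Editor's note: the bbox update above relies on COCO's absolute-pixel [x, y, width, height] box convention, so each coordinate scales linearly along its own axis. A minimal sketch of that arithmetic (not part of the package), assuming a 1280x960 image resized to 640x480:

# Sketch of the bbox scaling performed by _process_single_image_for_resize.
# COCO boxes are [x, y, width, height] in absolute pixels: x and width
# scale with the horizontal ratio, y and height with the vertical ratio.
input_w, input_h = 1280, 960
output_w, output_h = 640, 480
width_scale = output_w / input_w     # 0.5
height_scale = output_h / input_h    # 0.5
bbox = [100, 200, 300, 400]          # x, y, w, h in the original image
scaled_bbox = [bbox[0] * width_scale, bbox[1] * height_scale,
               bbox[2] * width_scale, bbox[3] * height_scale]
assert scaled_bbox == [50.0, 100.0, 150.0, 200.0]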


def resize_coco_dataset(input_folder,
                        input_filename,
                        output_folder,
                        output_filename=None,
                        target_size=(-1,-1),
                        correct_size_image_handling='copy',
                        unavailable_image_handling='error',
                        n_workers=1,
                        pool_type='thread',
                        no_enlarge_width=True,
                        verbose=False):
    """
    Given a COCO-formatted dataset (images in input_folder, data in input_filename), resizes
    all the images to a target size (in output_folder) and scales bounding boxes accordingly.

    Args:
        input_folder (str): the folder where images live; filenames in [input_filename] should
            be relative to [input_folder]
        input_filename (str): the (input) COCO-formatted .json file containing annotations
        output_folder (str): the folder to which we should write resized images; can be the
            same as [input_folder], in which case images are over-written
        output_filename (str, optional): the COCO-formatted .json file we should generate that refers
            to the resized images
        target_size (list or tuple of ints, optional): this should be tuple/list of ints, with length 2 (w,h).
            If either dimension is -1, aspect ratio will be preserved. If both dimensions are -1, this means
            "keep the original size". If both dimensions are -1 and correct_size_image_handling is copy, this
            function is basically a no-op.
        correct_size_image_handling (str, optional): what to do in the case where the original size
            already matches the target size. Can be 'copy' (in which case the original image is just copied
            to the output folder) or 'rewrite' (in which case the image is opened via PIL and re-written,
            attempting to preserve the same quality). The only reason to use 'rewrite' is the case where
            you're superstitious about biases coming from images in a training set being written by different
            image encoders.
        unavailable_image_handling (str, optional): what to do when a file can't be opened. Can be
            'error' or 'omit'.
        n_workers (int, optional): number of workers to use for parallel processing.
            Defaults to 1 (no parallelization). If <= 1, processing is sequential.
        pool_type (str, optional): type of multiprocessing pool to use ('thread' or 'process').
            Defaults to 'thread'. Only used if n_workers > 1.
        no_enlarge_width (bool, optional): if [no_enlarge_width] is True, and
            [target width] is larger than the original image width, does not modify the image,
            but still writes it
        verbose (bool, optional): enable additional debug output

    Returns:
        dict: the COCO database with resized images, identical to the content of [output_filename]
    """

    # Validate arguments

    assert unavailable_image_handling in ('error','omit'), \
        f'Illegal unavailable_image_handling {unavailable_image_handling}'

    # Read input data
    with open(input_filename,'r') as f:
        d = json.load(f)

    # Map image IDs to annotations
    image_id_to_annotations = defaultdict(list)
    for ann in d['annotations']:
        image_id_to_annotations[ann['image_id']].append(ann)

    original_images = d['images']

    # Our worker function will take tuples of images and their
    # associated annotations
    image_annotation_tuples = []
    for im in original_images:
        if im['id'] not in image_id_to_annotations:
            annotations_this_image = []
        else:
            annotations_this_image = image_id_to_annotations[im['id']]
        image_annotation_tuple = (im,annotations_this_image)
        image_annotation_tuples.append(image_annotation_tuple)

    processed_results = []

    if n_workers <= 1:

        for image_annotation_tuple in tqdm(image_annotation_tuples,
                                           desc="Resizing images sequentially"):
            result = _process_single_image_for_resize(
                image_data=image_annotation_tuple,
                input_folder=input_folder,
                output_folder=output_folder,
                target_size=target_size,
                correct_size_image_handling=correct_size_image_handling,
                unavailable_image_handling=unavailable_image_handling,
                no_enlarge_width=no_enlarge_width,
                verbose=verbose
            )
            processed_results.append(result)

    else:

        pool = None

        try:

            assert pool_type in ('process', 'thread'), f'Illegal pool type {pool_type}'
            selected_pool = ThreadPool if (pool_type == 'thread') else Pool

            print(f'Starting a {pool_type} pool of {n_workers} workers for image resizing')
            pool = selected_pool(n_workers)

            p_process_image = partial(_process_single_image_for_resize,
                                      input_folder=input_folder,
                                      output_folder=output_folder,
                                      target_size=target_size,
                                      correct_size_image_handling=correct_size_image_handling,
                                      unavailable_image_handling=unavailable_image_handling,
                                      no_enlarge_width=no_enlarge_width,
                                      verbose=verbose)

            processed_results = list(tqdm(pool.imap(p_process_image, image_annotation_tuples),
                                          total=len(image_annotation_tuples),
                                          desc=f"Resizing images with {pool_type} pool"))

        finally:
            if pool is not None:
                pool.close()
                pool.join()
                print('Pool closed and joined for COCO dataset resizing')

    new_images_list = []
    new_annotations_list = []
    for res_im_data, res_annotations in processed_results:
        if res_im_data is None or res_annotations is None:
            assert res_annotations is None and res_im_data is None
            assert unavailable_image_handling == 'omit'
            continue
        new_images_list.append(res_im_data)
        new_annotations_list.extend(res_annotations)

    d['images'] = new_images_list
    d['annotations'] = new_annotations_list

    if output_filename is not None:
        write_json(output_filename,d)

    return d

# ...def resize_coco_dataset(...)
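
Editor's note: a minimal usage sketch for resize_coco_dataset (the paths here are hypothetical, not from the package). Width is pinned to 640 and height to -1, which preserves aspect ratio per the docstring above; unavailable images are omitted rather than raising.

# Usage sketch with hypothetical paths
from megadetector.data_management.resize_coco_dataset import resize_coco_dataset

resized_db = resize_coco_dataset(input_folder='/data/camera_traps',
                                 input_filename='/data/camera_traps.json',
                                 output_folder='/data/camera_traps_640',
                                 output_filename='/data/camera_traps_640.json',
                                 target_size=(640, -1),
                                 unavailable_image_handling='omit',
                                 n_workers=4,
                                 pool_type='thread')
print('Resized {} images'.format(len(resized_db['images'])))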


#%% Interactive driver

if False:

    pass

    #%% Test resizing

    input_folder = 'i:/data/lila/ena24'
    # input_filename = 'i:/data/lila/ena24.json'
    input_filename = 'i:/data/lila/ena24-mini.json'

    output_folder = 'i:/data/lila/ena24-resized'
    output_filename = insert_before_extension(input_filename,'resized')

    target_size = (640,-1)

    correct_size_image_handling = 'rewrite'

    _ = resize_coco_dataset(input_folder=input_folder,
                            input_filename=input_filename,
                            output_folder=output_folder,
                            output_filename=output_filename,
                            target_size=target_size,
                            correct_size_image_handling=correct_size_image_handling,
                            unavailable_image_handling='omit',
                            n_workers=10,
                            pool_type='process')


    #%% Preview

    from megadetector.visualization import visualize_db
    options = visualize_db.DbVizOptions()
    options.parallelize_rendering = True
    options.viz_size = (640, -1)
    options.num_to_visualize = 100

    preview_folder = 'i:/data/lila/ena24-resized-preview'
    html_file,_ = visualize_db.visualize_db(output_filename,
                                            preview_folder,
                                            output_folder,options)


    from megadetector.utils import path_utils # noqa
    path_utils.open_file(html_file)


#%% Command-line driver

def main():
    """
    Command-line driver for resize_coco_dataset
    """

    parser = argparse.ArgumentParser(
        description='Resize images in a COCO dataset and scale annotations'
    )
    parser.add_argument(
        'input_folder',
        type=str,
        help='Path to the folder containing original images'
    )
    parser.add_argument(
        'input_filename',
        type=str,
        help='Path to the input COCO .json file'
    )
    parser.add_argument(
        'output_folder',
        type=str,
        help='Path to the folder where resized images will be saved'
    )
    parser.add_argument(
        'output_filename',
        type=str,
        help='Path to the output COCO .json file for resized data'
    )
    parser.add_argument(
        '--target_size',
        type=str,
        default='-1,-1',
        help='Target size as "width,height". Use -1 to preserve aspect ratio for a dimension. ' + \
             'E.g., "800,600" or "1024,-1".'
    )
    parser.add_argument(
        '--correct_size_image_handling',
        type=str,
        default='copy',
        choices=['copy', 'rewrite'],
        help='How to handle images already at target size'
    )
    parser.add_argument(
        '--n_workers',
        type=int,
        default=1,
        help='Number of workers for parallel processing. <=1 for sequential'
    )
    parser.add_argument(
        '--pool_type',
        type=str,
        default='thread',
        choices=['thread', 'process'],
        help='Type of multiprocessing pool if n_workers > 1'
    )

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    try:
        target_size_parts = args.target_size.split(',')
        if len(target_size_parts) != 2:
            raise ValueError("target_size must have two comma-separated parts (width,height).")
        parsed_target_size = (int(target_size_parts[0]), int(target_size_parts[1]))
    except ValueError as e:
        print(f"Error parsing target_size: {e}")
        parser.print_help()
        parser.exit()

    resize_coco_dataset(
        args.input_folder,
        args.input_filename,
        args.output_folder,
        args.output_filename,
        target_size=parsed_target_size,
        correct_size_image_handling=args.correct_size_image_handling,
        n_workers=args.n_workers,
        pool_type=args.pool_type
    )
    print("Dataset resizing complete")

if __name__ == '__main__':
    main()
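
Editor's note: the module is runnable via python -m thanks to the __main__ guard above; a sketch of the equivalent command-line invocation, again with hypothetical paths (flags correspond to the argparse options defined in main()):

python -m megadetector.data_management.resize_coco_dataset \
    /data/camera_traps /data/camera_traps.json \
    /data/camera_traps_640 /data/camera_traps_640.json \
    --target_size "640,-1" --n_workers 4 --pool_type thread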


#%% Tests

class TestResizeCocoDataset:
    """
    Test class for the resize_coco_dataset function.
    """

    def set_up(self): # noqa
        self.test_dir = make_test_folder(subfolder='resize_coco_tests')

        self.input_images_dir_seq = os.path.join(self.test_dir, 'input_images_seq')
        os.makedirs(self.input_images_dir_seq, exist_ok=True)

        self.input_images_dir_par = os.path.join(self.test_dir, 'input_images_par')
        os.makedirs(self.input_images_dir_par, exist_ok=True)

        self.output_images_dir_seq = os.path.join(self.test_dir, 'output_images_seq')
        os.makedirs(self.output_images_dir_seq, exist_ok=True)

        self.output_images_dir_par = os.path.join(self.test_dir, 'output_images_par')
        os.makedirs(self.output_images_dir_par, exist_ok=True)

    def tear_down(self): # noqa

        # Ensure shutil is imported if not already globally in the file
        # (it is, under '#%% Imports and constants')
        if hasattr(self, 'test_dir') and os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)

    def _create_dummy_image_and_coco_json(self,
                                          image_dir,
                                          json_filename_base="input_coco.json",
                                          num_images=2,
                                          original_size=(100, 100),
                                          num_annotations_per_image=2):
        coco_data = {
            "images": [],
            "annotations": [],
            "categories": [{"id": 1, "name": "test_category"}]
        }

        annotation_id_counter = 1

        for i in range(num_images):
            image_name = f"image_{i}.png"
            image_path = os.path.join(image_dir, image_name)

            # Create a dummy image
            try:
                img = Image.new('RGB', original_size, color='red')
                img.save(image_path)
            except Exception as e:
                # In some environments, font loading for default PIL text might fail.
                # For a simple color image, this shouldn't be an issue.
                # If it is, consider a simpler save or pre-creating a tiny PNG.
                print(f"Warning: Could not create dummy image {image_path}: {e}")
                # Fallback: create an empty file, though this will fail later steps
                # open(image_path, 'a').close()

            image_entry = {
                "id": i + 1,
                "file_name": image_name, # Filename only, not path
                "width": original_size[0],
                "height": original_size[1]
            }
            coco_data["images"].append(image_entry)

            for j in range(num_annotations_per_image):
                annotation_entry = {
                    "id": annotation_id_counter,
                    "image_id": image_entry["id"],
                    "category_id": 1, # Corresponds to "test_category"
                    # Simple, non-overlapping bbox for testing scaling
                    "bbox": [10 + j*30, 10 + j*5, 20, 15]
                }
                coco_data["annotations"].append(annotation_entry)
                annotation_id_counter += 1

        json_file_path = os.path.join(self.test_dir, json_filename_base)
        with open(json_file_path, 'w') as f:
            json.dump(coco_data, f, indent=1)

        return json_file_path, coco_data

    def test_resize_sequential_vs_parallel(self):
        """
        Test driver for sequential vs. parallel COCO dataset resizing.
        """

        self.set_up()

        try:
            num_images_to_test = 3
            original_w, original_h = 120, 80
            target_w, target_h = 60, 40
            target_size_test = (target_w, target_h)

            # Sequential run
            input_json_path_seq, _ = self._create_dummy_image_and_coco_json(
                image_dir=self.input_images_dir_seq,
                json_filename_base="input_coco_seq.json",
                num_images=num_images_to_test,
                original_size=(original_w, original_h)
            )
            output_json_path_seq = os.path.join(self.test_dir, 'output_coco_seq.json')

            print("Test: starting sequential resize (1 worker)...")
            resize_coco_dataset(
                input_folder=self.input_images_dir_seq,
                input_filename=input_json_path_seq,
                output_folder=self.output_images_dir_seq,
                output_filename=output_json_path_seq,
                target_size=target_size_test,
                n_workers=1
            )
            print(f"Test: Sequential resize complete. Output: {output_json_path_seq}")

            # Parallel run
            # For the parallel run, we use different input/output directories but can reuse the same logic
            # for creating the dummy dataset structure. The image files will be new.
            input_json_path_par, _ = self._create_dummy_image_and_coco_json(
                image_dir=self.input_images_dir_par,
                json_filename_base="input_coco_par.json",
                num_images=num_images_to_test,
                original_size=(original_w, original_h)
            )
            output_json_path_par = os.path.join(self.test_dir, 'output_coco_par.json')

            print("Test: Starting parallel resize (2 workers, thread pool)...")
            resize_coco_dataset(
                input_folder=self.input_images_dir_par,
                input_filename=input_json_path_par,
                output_folder=self.output_images_dir_par,
                output_filename=output_json_path_par,
                target_size=target_size_test,
                n_workers=2, # Using 2 workers for testing parallelism
                pool_type='thread'
            )
            print(f"Test: Parallel resize complete. Output: {output_json_path_par}")

            # Load results
            with open(output_json_path_seq, 'r') as f:
                data_seq = json.load(f)
            with open(output_json_path_par, 'r') as f:
                data_par = json.load(f)

            # Compare COCO JSON data
            # Compare images
            assert len(data_seq['images']) == num_images_to_test
            assert len(data_seq['images']) == len(data_par['images']), "Number of images differs"

            sorted_images_seq = sorted(data_seq['images'], key=lambda x: x['id'])
            sorted_images_par = sorted(data_par['images'], key=lambda x: x['id'])

            for img_s, img_p in zip(sorted_images_seq, sorted_images_par, strict=True):
                assert img_s['id'] == img_p['id'], \
                    f"Image IDs differ: {img_s['id']} vs {img_p['id']}"
                # Filenames are generated independently, so we only check structure, not exact name matching
                # across seq/par runs' inputs, but output structure should be consistent if input
                # names were e.g. image_0, image_1
                assert img_s['file_name'] == img_p['file_name']
                assert img_s['width'] == target_w, \
                    f"Seq image {img_s['id']} width incorrect"
                assert img_s['height'] == target_h, \
                    f"Seq image {img_s['id']} height incorrect"
                assert img_p['width'] == target_w, \
                    f"Par image {img_p['id']} width incorrect"
                assert img_p['height'] == target_h, \
                    f"Par image {img_p['id']} height incorrect"

            # Compare annotations
            assert len(data_seq['annotations']) == len(data_par['annotations']), \
                "Number of annotations differs"
            # Assuming _create_dummy_image_and_coco_json creates the same number of annotations for each test run

            sorted_anns_seq = sorted(data_seq['annotations'], key=lambda x: x['id'])
            sorted_anns_par = sorted(data_par['annotations'], key=lambda x: x['id'])

            for ann_s, ann_p in zip(sorted_anns_seq, sorted_anns_par, strict=True):
                assert ann_s['id'] == ann_p['id'], \
                    f"Annotation IDs differ: {ann_s['id']} vs {ann_p['id']}"
                assert ann_s['image_id'] == ann_p['image_id'], \
                    f"Annotation image_ids differ for ann_id {ann_s['id']}"
                assert ann_s['category_id'] == ann_p['category_id'], \
                    f"Annotation category_ids differ for ann_id {ann_s['id']}"

                # Check bbox scaling (example: original width 120, target 60 -> scale 0.5)
                # Original bbox: [10, 10, 20, 15] -> Scaled: [5, 5, 10, 7.5] (Floats possible)
                # Need to compare with tolerance or ensure rounding is handled if expecting ints
                # For this test, let's assume direct comparison works due to simple scaling.
                # If PIL's resize causes slight pixel shifts affecting precise sub-pixel bbox calculations,
                # then a tolerance (pytest.approx) would be better.
                # Given the current resize_coco_dataset logic, it's direct multiplication.
                for i in range(4):
                    assert abs(ann_s['bbox'][i] - ann_p['bbox'][i]) < 1e-5, \
                        f"Bbox element {i} differs for ann_id {ann_s['id']}: {ann_s['bbox']} vs {ann_p['bbox']}"

            # Compare actual image files
            seq_files = sorted(os.listdir(self.output_images_dir_seq))
            par_files = sorted(os.listdir(self.output_images_dir_par))

            assert len(seq_files) == num_images_to_test, "Incorrect number of output images (sequential)"
            assert len(seq_files) == len(par_files), "Number of output image files differs"

            for fname_s, fname_p in zip(seq_files, par_files, strict=True):
                assert fname_s == fname_p, "Output image filenames differ between seq and par runs"
                img_s_path = os.path.join(self.output_images_dir_seq, fname_s)
                img_p_path = os.path.join(self.output_images_dir_par, fname_p)

                with Image.open(img_s_path) as img_s_pil:
                    assert img_s_pil.size == target_size_test, \
                        f"Image {fname_s} (seq) has wrong dimensions: {img_s_pil.size}"
                with Image.open(img_p_path) as img_p_pil:
                    assert img_p_pil.size == target_size_test, \
                        f"Image {fname_p} (par) has wrong dimensions: {img_p_pil.size}"

            print("Test test_resize_sequential_vs_parallel PASSED")

        finally:
            self.tear_down()

    # ...def test_resize_sequential_vs_parallel(...)

# ...class TestResizeCocoDataset


def test_resize_coco_dataset_main():
    """
    Driver for the TestResizeCocoDataset() class.
    """

    print("Starting TestResizeCocoDataset main runner...")
    test_runner = TestResizeCocoDataset()
    test_runner.test_resize_sequential_vs_parallel()
    print("TestResizeCocoDataset main runner finished.")
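
Editor's note: the test class above manages its own set_up()/tear_down() and uses bare asserts rather than unittest fixtures, so it can be driven directly without a test framework; a minimal sketch:

# Runs the test driver defined above; temp folders are created via
# make_test_folder() in set_up() and removed in tear_down()
from megadetector.data_management.resize_coco_dataset import test_resize_coco_dataset_main

test_resize_coco_dataset_main()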