megadetector-10.0.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/__init__.py +0 -0
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +626 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +853 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +608 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +696 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +509 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/animl_to_md.py +161 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +33 -0
- megadetector/data_management/camtrap_dp_to_coco.py +270 -0
- megadetector/data_management/cct_json_utils.py +566 -0
- megadetector/data_management/cct_to_md.py +184 -0
- megadetector/data_management/cct_to_wi.py +293 -0
- megadetector/data_management/coco_to_labelme.py +284 -0
- megadetector/data_management/coco_to_yolo.py +701 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
- megadetector/data_management/databases/integrity_check_json_db.py +563 -0
- megadetector/data_management/databases/subset_json_db.py +195 -0
- megadetector/data_management/generate_crops_from_cct.py +200 -0
- megadetector/data_management/get_image_sizes.py +164 -0
- megadetector/data_management/labelme_to_coco.py +559 -0
- megadetector/data_management/labelme_to_yolo.py +349 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/create_lila_blank_set.py +556 -0
- megadetector/data_management/lila/create_lila_test_set.py +192 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +182 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +319 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
- megadetector/data_management/mewc_to_md.py +344 -0
- megadetector/data_management/ocr_tools.py +873 -0
- megadetector/data_management/read_exif.py +964 -0
- megadetector/data_management/remap_coco_categories.py +195 -0
- megadetector/data_management/remove_exif.py +156 -0
- megadetector/data_management/rename_images.py +194 -0
- megadetector/data_management/resize_coco_dataset.py +665 -0
- megadetector/data_management/speciesnet_to_md.py +41 -0
- megadetector/data_management/wi_download_csv_to_coco.py +247 -0
- megadetector/data_management/yolo_output_to_md_output.py +594 -0
- megadetector/data_management/yolo_to_coco.py +984 -0
- megadetector/data_management/zamba_to_md.py +188 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/change_detection.py +840 -0
- megadetector/detection/process_video.py +479 -0
- megadetector/detection/pytorch_detector.py +1451 -0
- megadetector/detection/run_detector.py +1267 -0
- megadetector/detection/run_detector_batch.py +2172 -0
- megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
- megadetector/detection/run_md_and_speciesnet.py +1604 -0
- megadetector/detection/run_tiled_inference.py +1044 -0
- megadetector/detection/tf_detector.py +209 -0
- megadetector/detection/video_utils.py +1379 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +72 -0
- megadetector/postprocessing/categorize_detections_by_size.py +166 -0
- megadetector/postprocessing/classification_postprocessing.py +1943 -0
- megadetector/postprocessing/combine_batch_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +2110 -0
- megadetector/postprocessing/convert_output_format.py +403 -0
- megadetector/postprocessing/create_crop_folder.py +629 -0
- megadetector/postprocessing/detector_calibration.py +570 -0
- megadetector/postprocessing/generate_csv_report.py +522 -0
- megadetector/postprocessing/load_api_results.py +223 -0
- megadetector/postprocessing/md_to_coco.py +428 -0
- megadetector/postprocessing/md_to_labelme.py +351 -0
- megadetector/postprocessing/md_to_wi.py +41 -0
- megadetector/postprocessing/merge_detections.py +392 -0
- megadetector/postprocessing/postprocess_batch_results.py +2140 -0
- megadetector/postprocessing/remap_detection_categories.py +226 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
- megadetector/postprocessing/separate_detections_into_folders.py +795 -0
- megadetector/postprocessing/subset_json_detector_output.py +964 -0
- megadetector/postprocessing/top_folders_to_bottom.py +238 -0
- megadetector/postprocessing/validate_batch_results.py +332 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +211 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +231 -0
- megadetector/taxonomy_mapping/species_lookup.py +1008 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/tests/__init__.py +0 -0
- megadetector/tests/test_nms_synthetic.py +335 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/ct_utils.py +1857 -0
- megadetector/utils/directory_listing.py +199 -0
- megadetector/utils/extract_frames_from_video.py +307 -0
- megadetector/utils/gpu_test.py +125 -0
- megadetector/utils/md_tests.py +2072 -0
- megadetector/utils/path_utils.py +2872 -0
- megadetector/utils/process_utils.py +172 -0
- megadetector/utils/split_locations_into_train_val.py +237 -0
- megadetector/utils/string_utils.py +234 -0
- megadetector/utils/url_utils.py +825 -0
- megadetector/utils/wi_platform_utils.py +968 -0
- megadetector/utils/wi_taxonomy_utils.py +1766 -0
- megadetector/utils/write_html_image_list.py +239 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +309 -0
- megadetector/visualization/render_images_with_thumbnails.py +243 -0
- megadetector/visualization/visualization_utils.py +1973 -0
- megadetector/visualization/visualize_db.py +630 -0
- megadetector/visualization/visualize_detector_output.py +498 -0
- megadetector/visualization/visualize_video_output.py +705 -0
- megadetector-10.0.15.dist-info/METADATA +115 -0
- megadetector-10.0.15.dist-info/RECORD +147 -0
- megadetector-10.0.15.dist-info/WHEEL +5 -0
- megadetector-10.0.15.dist-info/licenses/LICENSE +19 -0
- megadetector-10.0.15.dist-info/top_level.txt +1 -0
@@ -0,0 +1,498 @@
+"""
+
+visualize_detector_output.py
+
+Render images with bounding boxes annotated on them to a folder, based on a
+detector output result file (.json), optionally writing an HTML index file.
+
+"""
+
+#%% Imports
+
+import argparse
+import os
+import random
+import sys
+
+from multiprocessing.pool import ThreadPool
+from multiprocessing.pool import Pool
+from functools import partial
+from tqdm import tqdm
+
+from megadetector.data_management.annotations.annotation_constants import detector_bbox_category_id_to_name
+from megadetector.detection.run_detector import get_typical_confidence_threshold_from_results
+from megadetector.utils.ct_utils import get_max_conf
+from megadetector.utils import write_html_image_list
+from megadetector.utils.path_utils import path_is_abs
+from megadetector.utils.path_utils import open_file
+from megadetector.utils.wi_taxonomy_utils import load_md_or_speciesnet_file
+from megadetector.visualization import visualization_utils as vis_utils
+from megadetector.visualization.visualization_utils import blur_detections
+
+default_box_sort_order = 'confidence'
+
+
+#%% Constants
+
+# This will only be used if a category mapping is not available in the results file.
+DEFAULT_DETECTOR_LABEL_MAP = {
+    str(k): v for k, v in detector_bbox_category_id_to_name.items()
+}
+
+
+#%% Support functions
+
+def _render_image(entry,
+                  detector_label_map,
+                  classification_label_map,
+                  confidence_threshold,
+                  classification_confidence_threshold,
+                  render_detections_only,
+                  preserve_path_structure,
+                  out_dir,
+                  images_dir,
+                  output_image_width,
+                  box_sort_order=default_box_sort_order,
+                  category_names_to_blur=None):
+    """
+    Internal function for rendering a single image.
+    """
+
+    rendering_result = {'failed_image':False,
+                        'missing_image':False,
+                        'skipped_image':False,
+                        'annotated_image_path':None,
+                        'max_conf':None,
+                        'image_filename_in_abs':None,
+                        'file':entry['file']}
+
+    image_id = entry['file']
+
+    if 'failure' in entry and entry['failure'] is not None:
+        rendering_result['failed_image'] = True
+        return rendering_result
+
+    assert 'detections' in entry and entry['detections'] is not None
+
+    max_conf = get_max_conf(entry)
+    rendering_result['max_conf'] = max_conf
+
+    if (max_conf < confidence_threshold) and render_detections_only:
+        rendering_result['skipped_image'] = True
+        return rendering_result
+
+    if images_dir is None:
+        image_filename_in_abs = image_id
+        assert path_is_abs(image_filename_in_abs), \
+            'Absolute paths are required when no image base dir is supplied'
+    else:
+        assert not path_is_abs(image_id), \
+            'Relative paths are required when an image base dir is supplied'
+        image_filename_in_abs = os.path.join(images_dir, image_id)
+    if not os.path.exists(image_filename_in_abs):
+        print(f'Image {image_id} not found')
+        rendering_result['missing_image'] = True
+        return rendering_result
+
+    rendering_result['image_filename_in_abs'] = image_filename_in_abs
+
+    # Load the image
+    image = vis_utils.open_image(image_filename_in_abs)
+
+    # Find categories we're supposed to blur
+    category_ids_to_blur = []
+    if category_names_to_blur is not None:
+        if isinstance(category_names_to_blur,str):
+            category_names_to_blur = [category_names_to_blur]
+        for category_id in detector_label_map:
+            if detector_label_map[category_id] in category_names_to_blur:
+                category_ids_to_blur.append(category_id)
+
+    detections_to_blur = []
+    for d in entry['detections']:
+        if d['conf'] >= confidence_threshold and d['category'] in category_ids_to_blur:
+            detections_to_blur.append(d)
+    if len(detections_to_blur) > 0:
+        blur_detections(image,detections_to_blur)
+
+    # Resize if necessary
+    #
+    # If output_image_width is -1 or None, this will just return the original image
+    image = vis_utils.resize_image(image, output_image_width)
+
+    vis_utils.render_detection_bounding_boxes(
+        entry['detections'], image,
+        label_map=detector_label_map,
+        classification_label_map=classification_label_map,
+        confidence_threshold=confidence_threshold,
+        classification_confidence_threshold=classification_confidence_threshold,
+        box_sort_order=box_sort_order)
+
+    if not preserve_path_structure:
+        for char in ['/', '\\', ':']:
+            image_id = image_id.replace(char, '~')
+        annotated_img_path = os.path.join(out_dir, f'anno_{image_id}')
+    else:
+        assert not os.path.isabs(image_id), "Can't preserve paths when operating on absolute paths"
+        annotated_img_path = os.path.join(out_dir, image_id)
+        os.makedirs(os.path.dirname(annotated_img_path),exist_ok=True)
+
+    image.save(annotated_img_path)
+    rendering_result['annotated_image_path'] = annotated_img_path
+
+    return rendering_result
+
+# ...def _render_image(...)
+
+
+#%% Main function
+
+def visualize_detector_output(detector_output_path,
+                              out_dir,
+                              images_dir=None,
+                              confidence_threshold=0.15,
+                              sample=-1,
+                              output_image_width=700,
+                              random_seed=None,
+                              render_detections_only=False,
+                              classification_confidence_threshold=0.1,
+                              html_output_file=None,
+                              html_output_options=None,
+                              preserve_path_structure=False,
+                              parallelize_rendering=False,
+                              parallelize_rendering_n_cores=10,
+                              parallelize_rendering_with_threads=True,
+                              box_sort_order=default_box_sort_order,
+                              category_names_to_blur=None,
+                              link_images_to_originals=False):
+    """
+    Draws bounding boxes on images given the output of a detector.
+
+    Args:
+        detector_output_path (str): path to detector output .json file
+        out_dir (str): path to directory for saving annotated images
+        images_dir (str, optional): folder where the images live; filenames in
+            [detector_output_path] should be relative to [image_dir]. Can be None if paths are
+            absolute.
+        confidence_threshold (float, optional): threshold above which detections will be rendered
+        sample (int, optional): maximum number of images to render, -1 for all
+        output_image_width (int, optional): width in pixels to resize images for display,
+            preserving aspect ration; set to -1 to use original image width
+        random_seed (int, optional): seed to use for choosing images when sample != -1
+        render_detections_only (bool, optional): only render images with above-threshold detections.
+            Empty images are discarded after sampling, so if you want to see, e.g., 1000 non-empty
+            images, you can set [render_detections_only], but you need to sample more than 1000 images.
+        classification_confidence_threshold (float, optional): only show classifications
+            above this threshold; does not impact whether images are rendered, only whether
+            classification labels (not detection categories) are displayed
+        html_output_file (str, optional): output path for an HTML index file (not written
+            if None)
+        html_output_options (dict, optional): HTML formatting options; see write_html_image_list
+            for details. The most common option you may want to supply here is
+            'maxFiguresPerHtmlFile'.
+        preserve_path_structure (bool, optional): if False (default), writes images to unique
+            names in a flat structure in the output folder; if True, preserves relative paths
+            within the output folder
+        parallelize_rendering (bool, optional): whether to use concurrent workers for rendering
+        parallelize_rendering_n_cores (int, optional): number of concurrent workers to use
+            (ignored if parallelize_rendering is False)
+        parallelize_rendering_with_threads (bool, optional): determines whether we use
+            threads (True) or processes (False) for parallelization (ignored if parallelize_rendering
+            is False)
+        box_sort_order (str, optional): sorting scheme for detection boxes, can be None, "confidence", or
+            "reverse_confidence"
+        category_names_to_blur (list of str, optional): category names for which we should blur detections,
+            most commonly ['person']
+        link_images_to_originals (bool, optional): include a link from every rendered image back to
+            the corresponding original image
+
+    Returns:
+        list: list of paths to annotated images
+    """
+
+    assert os.path.exists(detector_output_path), \
+        'Detector output file does not exist at {}'.format(detector_output_path)
+
+    if images_dir is not None:
+        assert os.path.isdir(images_dir), \
+            'Image folder {} is not available'.format(images_dir)
+
+    os.makedirs(out_dir, exist_ok=True)
+
+
+    ##%% Load detector output
+
+    detector_output = load_md_or_speciesnet_file(detector_output_path)
+
+    images = detector_output['images']
+
+    if confidence_threshold is None:
+        confidence_threshold = get_typical_confidence_threshold_from_results(detector_output)
+
+    assert confidence_threshold >= 0 and confidence_threshold <= 1, \
+        f'Confidence threshold {confidence_threshold} is invalid, must be in (0, 1).'
+
+    if 'detection_categories' in detector_output:
+        detector_label_map = detector_output['detection_categories']
+    else:
+        detector_label_map = DEFAULT_DETECTOR_LABEL_MAP
+
+    num_images = len(images)
+    print(f'Detector output file contains {num_images} entries.')
+
+    if (sample is not None) and (sample > 0) and (num_images > sample):
+
+        if random_seed is not None:
+            images = sorted(images, key=lambda x: x['file'])
+            random.seed(random_seed)
+
+        random.shuffle(images)
+        images = sorted(images[:sample], key=lambda x: x['file'])
+        print(f'Sampled {len(images)} entries from the detector output file.')
+
+
+    ##%% Load images, annotate them and save
+
+    print('Rendering detections above a confidence threshold of {}'.format(
+        confidence_threshold))
+
+    classification_label_map = None
+
+    if 'classification_categories' in detector_output:
+        classification_label_map = detector_output['classification_categories']
+
+    rendering_results = []
+
+    if parallelize_rendering:
+
+        if parallelize_rendering_with_threads:
+            worker_string = 'threads'
+        else:
+            worker_string = 'processes'
+
+        pool = None
+        try:
+            if parallelize_rendering_n_cores is None:
+                if parallelize_rendering_with_threads:
+                    pool = ThreadPool()
+                else:
+                    pool = Pool()
+            else:
+                if parallelize_rendering_with_threads:
+                    pool = ThreadPool(parallelize_rendering_n_cores)
+                else:
+                    pool = Pool(parallelize_rendering_n_cores)
+                print('Rendering images with {} {}'.format(parallelize_rendering_n_cores,
+                                                           worker_string))
+            rendering_results = list(tqdm(pool.imap(
+                partial(_render_image,detector_label_map=detector_label_map,
+                        classification_label_map=classification_label_map,
+                        confidence_threshold=confidence_threshold,
+                        classification_confidence_threshold=classification_confidence_threshold,
+                        render_detections_only=render_detections_only,
+                        preserve_path_structure=preserve_path_structure,
+                        out_dir=out_dir,
+                        images_dir=images_dir,
+                        output_image_width=output_image_width,
+                        box_sort_order=box_sort_order,
+                        category_names_to_blur=category_names_to_blur),
+                images), total=len(images)))
+        finally:
+            if pool is not None:
+                pool.close()
+                pool.join()
+                print('Pool closed and joined for detector output visualization')
+
+    else:
+
+        for entry in tqdm(images):
+
+            rendering_result = _render_image(entry,
+                                             detector_label_map,
+                                             classification_label_map,
+                                             confidence_threshold,
+                                             classification_confidence_threshold,
+                                             render_detections_only,
+                                             preserve_path_structure,
+                                             out_dir,
+                                             images_dir,
+                                             output_image_width,
+                                             box_sort_order,
+                                             category_names_to_blur=category_names_to_blur)
+            rendering_results.append(rendering_result)
+
+        # ...for each image
+
+    failed_images = [r for r in rendering_results if r['failed_image']]
+    missing_images = [r for r in rendering_results if r['missing_image']]
+    skipped_images = [r for r in rendering_results if r['skipped_image']]
+
+    print('Skipped {} failed images (of {})'.format(len(failed_images),len(images)))
+    print('Skipped {} missing images (of {})'.format(len(missing_images),len(images)))
+    print('Skipped {} below-threshold images (of {})'.format(len(skipped_images),len(images)))
+
+    print(f'Rendered detection results to {out_dir}')
+
+    annotated_image_paths = [r['annotated_image_path'] for r in rendering_results if \
+                             r['annotated_image_path'] is not None]
+
+    if html_output_file is not None:
+
+        html_dir = os.path.dirname(html_output_file)
+
+        html_image_info = []
+
+        for r in rendering_results:
+            d = {}
+            if r['annotated_image_path'] is None:
+                assert r['failed_image'] or r['missing_image'] or r['skipped_image']
+                continue
+            annotated_image_path_relative = os.path.relpath(r['annotated_image_path'],html_dir)
+            d['filename'] = annotated_image_path_relative
+            d['textStyle'] = \
+                'font-family:verdana,arial,calibri;font-size:80%;' + \
+                'text-align:left;margin-top:20;margin-bottom:5'
+            d['title'] = '{} (max conf: {})'.format(r['file'],r['max_conf'])
+            if link_images_to_originals:
+                d['linkTarget'] = r['image_filename_in_abs']
+            html_image_info.append(d)
+
+        _ = write_html_image_list.write_html_image_list(html_output_file,
+                                                        html_image_info,
+                                                        options=html_output_options)
+
+    return annotated_image_paths
+
+# ...def visualize_detector_output(...)
+
+
+#%% Command-line driver
+
+def main(): # noqa
+
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        description='Annotate the bounding boxes predicted by a detector above '
+                    'some confidence threshold, and save the annotated images.')
+    parser.add_argument(
+        'detector_output_path', type=str,
+        help='Path to json output file of the detector')
+    parser.add_argument(
+        'out_dir', type=str,
+        help='Path to directory where the annotated images will be saved. '
+             'The directory will be created if it does not exist.')
+    parser.add_argument(
+        '--confidence', type=float, default=0.15,
+        help='Value between 0 and 1, indicating the confidence threshold '
+             'above which to visualize bounding boxes')
+    parser.add_argument(
+        '--images_dir', type=str, default=None,
+        help='Path to a local directory where images are stored. This '
+             'serves as the root directory for image paths in '
+             'detector_output_path. Omit if image paths are absolute.')
+    parser.add_argument(
+        '--sample', type=int, default=-1,
+        help='Number of images to be annotated and rendered. Set to -1 '
+             '(default) to annotate all images in the detector output file. '
+             'There may be fewer images if some are not found in images_dir.')
+    parser.add_argument(
+        '--output_image_width', type=int, default=700,
+        help='Integer, desired width in pixels of the output annotated images. '
+             'Use -1 to not resize. Default: 700.')
+    parser.add_argument(
+        '--random_seed', type=int, default=None,
+        help='Integer, for deterministic order of image sampling')
+    parser.add_argument(
+        '--html_output_file', type=str, default=None,
+        help='Filename to which we should write an HTML image index (off by default)')
+    parser.add_argument(
+        '--open_html_output_file', action='store_true',
+        help='Open the .html output file when done')
+    parser.add_argument(
+        '--detections_only', action='store_true',
+        help='Only render images with above-threshold detections (by default, '
+             'both empty and non-empty images are rendered).')
+    parser.add_argument(
+        '--preserve_path_structure', action='store_true',
+        help='Preserve relative image paths (otherwise flattens and assigns unique file names)')
+    parser.add_argument(
+        '--category_names_to_blur', default=None, type=str,
+        help='Comma-separated list of category names to blur (or a single category name, typically "person")')
+    parser.add_argument(
+        '--classification_confidence', type=float, default=0.3,
+        help='If classification results are present, render results above this threshold')
+
+    if len(sys.argv[1:]) == 0:
+        parser.print_help()
+        parser.exit()
+
+    args = parser.parse_args()
+
+    category_names_to_blur = args.category_names_to_blur
+    if category_names_to_blur is not None:
+        category_names_to_blur = category_names_to_blur.split(',')
+
+    visualize_detector_output(
+        detector_output_path=args.detector_output_path,
+        out_dir=args.out_dir,
+        confidence_threshold=args.confidence,
+        images_dir=args.images_dir,
+        sample=args.sample,
+        output_image_width=args.output_image_width,
+        random_seed=args.random_seed,
+        render_detections_only=args.detections_only,
+        classification_confidence_threshold=args.classification_confidence,
+        preserve_path_structure=args.preserve_path_structure,
+        html_output_file=args.html_output_file,
+        category_names_to_blur=category_names_to_blur)
+
+    if (args.html_output_file is not None) and args.open_html_output_file:
+        print('Opening output file {}'.format(args.html_output_file))
+        open_file(args.html_output_file)
+
+if __name__ == '__main__':
+    main()
+
+
+#%% Interactive driver
+
+if False:
+
+    pass
+
+    #%%
+
+    detector_output_path = os.path.expanduser('detections.json')
+    out_dir = r'g:\temp\preview'
+    images_dir = r'g:\camera_traps\camera_trap_images'
+    confidence_threshold = 0.15
+    sample = 50
+    output_image_width = 700
+    random_seed = 1
+    render_detections_only = True
+    classification_confidence_threshold = 0.1
+    html_output_file = os.path.join(out_dir,'index.html')
+    html_output_options = None
+    preserve_path_structure = False
+    parallelize_rendering = True
+    parallelize_rendering_n_cores = 10
+    parallelize_rendering_with_threads = False
+
+    _ = visualize_detector_output(detector_output_path,
+                                  out_dir,
+                                  images_dir,
+                                  confidence_threshold,
+                                  sample,
+                                  output_image_width,
+                                  random_seed,
+                                  render_detections_only,
+                                  classification_confidence_threshold,
+                                  html_output_file,
+                                  html_output_options,
+                                  preserve_path_structure,
+                                  parallelize_rendering,
+                                  parallelize_rendering_n_cores,
+                                  parallelize_rendering_with_threads)
+
+    from megadetector.utils.path_utils import open_file
+    open_file(html_output_file)