megadetector-10.0.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/__init__.py +0 -0
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +626 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +853 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +608 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +696 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +509 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/animl_to_md.py +161 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +33 -0
- megadetector/data_management/camtrap_dp_to_coco.py +270 -0
- megadetector/data_management/cct_json_utils.py +566 -0
- megadetector/data_management/cct_to_md.py +184 -0
- megadetector/data_management/cct_to_wi.py +293 -0
- megadetector/data_management/coco_to_labelme.py +284 -0
- megadetector/data_management/coco_to_yolo.py +701 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
- megadetector/data_management/databases/integrity_check_json_db.py +563 -0
- megadetector/data_management/databases/subset_json_db.py +195 -0
- megadetector/data_management/generate_crops_from_cct.py +200 -0
- megadetector/data_management/get_image_sizes.py +164 -0
- megadetector/data_management/labelme_to_coco.py +559 -0
- megadetector/data_management/labelme_to_yolo.py +349 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/create_lila_blank_set.py +556 -0
- megadetector/data_management/lila/create_lila_test_set.py +192 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +182 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +319 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
- megadetector/data_management/mewc_to_md.py +344 -0
- megadetector/data_management/ocr_tools.py +873 -0
- megadetector/data_management/read_exif.py +964 -0
- megadetector/data_management/remap_coco_categories.py +195 -0
- megadetector/data_management/remove_exif.py +156 -0
- megadetector/data_management/rename_images.py +194 -0
- megadetector/data_management/resize_coco_dataset.py +665 -0
- megadetector/data_management/speciesnet_to_md.py +41 -0
- megadetector/data_management/wi_download_csv_to_coco.py +247 -0
- megadetector/data_management/yolo_output_to_md_output.py +594 -0
- megadetector/data_management/yolo_to_coco.py +984 -0
- megadetector/data_management/zamba_to_md.py +188 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/change_detection.py +840 -0
- megadetector/detection/process_video.py +479 -0
- megadetector/detection/pytorch_detector.py +1451 -0
- megadetector/detection/run_detector.py +1267 -0
- megadetector/detection/run_detector_batch.py +2172 -0
- megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
- megadetector/detection/run_md_and_speciesnet.py +1604 -0
- megadetector/detection/run_tiled_inference.py +1044 -0
- megadetector/detection/tf_detector.py +209 -0
- megadetector/detection/video_utils.py +1379 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +72 -0
- megadetector/postprocessing/categorize_detections_by_size.py +166 -0
- megadetector/postprocessing/classification_postprocessing.py +1943 -0
- megadetector/postprocessing/combine_batch_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +2110 -0
- megadetector/postprocessing/convert_output_format.py +403 -0
- megadetector/postprocessing/create_crop_folder.py +629 -0
- megadetector/postprocessing/detector_calibration.py +570 -0
- megadetector/postprocessing/generate_csv_report.py +522 -0
- megadetector/postprocessing/load_api_results.py +223 -0
- megadetector/postprocessing/md_to_coco.py +428 -0
- megadetector/postprocessing/md_to_labelme.py +351 -0
- megadetector/postprocessing/md_to_wi.py +41 -0
- megadetector/postprocessing/merge_detections.py +392 -0
- megadetector/postprocessing/postprocess_batch_results.py +2140 -0
- megadetector/postprocessing/remap_detection_categories.py +226 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
- megadetector/postprocessing/separate_detections_into_folders.py +795 -0
- megadetector/postprocessing/subset_json_detector_output.py +964 -0
- megadetector/postprocessing/top_folders_to_bottom.py +238 -0
- megadetector/postprocessing/validate_batch_results.py +332 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +211 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +231 -0
- megadetector/taxonomy_mapping/species_lookup.py +1008 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/tests/__init__.py +0 -0
- megadetector/tests/test_nms_synthetic.py +335 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/ct_utils.py +1857 -0
- megadetector/utils/directory_listing.py +199 -0
- megadetector/utils/extract_frames_from_video.py +307 -0
- megadetector/utils/gpu_test.py +125 -0
- megadetector/utils/md_tests.py +2072 -0
- megadetector/utils/path_utils.py +2872 -0
- megadetector/utils/process_utils.py +172 -0
- megadetector/utils/split_locations_into_train_val.py +237 -0
- megadetector/utils/string_utils.py +234 -0
- megadetector/utils/url_utils.py +825 -0
- megadetector/utils/wi_platform_utils.py +968 -0
- megadetector/utils/wi_taxonomy_utils.py +1766 -0
- megadetector/utils/write_html_image_list.py +239 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +309 -0
- megadetector/visualization/render_images_with_thumbnails.py +243 -0
- megadetector/visualization/visualization_utils.py +1973 -0
- megadetector/visualization/visualize_db.py +630 -0
- megadetector/visualization/visualize_detector_output.py +498 -0
- megadetector/visualization/visualize_video_output.py +705 -0
- megadetector-10.0.15.dist-info/METADATA +115 -0
- megadetector-10.0.15.dist-info/RECORD +147 -0
- megadetector-10.0.15.dist-info/WHEEL +5 -0
- megadetector-10.0.15.dist-info/licenses/LICENSE +19 -0
- megadetector-10.0.15.dist-info/top_level.txt +1 -0
megadetector/postprocessing/compare_batch_results.py
@@ -0,0 +1,2110 @@
"""

compare_batch_results.py

Compare sets of batch results; typically used to compare:

* Results from different MegaDetector versions
* Results before/after RDE
* Results with/without augmentation

Makes pairwise comparisons between sets of results, but can take lists of results files
(will perform all pairwise comparisons). Results are written to an HTML page that shows the
number and nature of disagreements (in the sense of each image being a detection or non-detection),
with sample images for each category.

Operates in one of three modes, depending on whether ground truth labels/boxes are available:

* The most common mode assumes no ground truth, just finds agreement/disagreement between
  results files, or class discrepancies.

* If image-level ground truth is available, finds image-level agreements on TPs/TNs/FPs/FNs, but also
  finds image-level TPs/TNs/FPs/FNs that are unique to each set of results (at the specified confidence
  threshold).

* If box-level ground truth is available, finds box-level agreements on TPs/TNs/FPs/FNs, but also finds
  image-level TPs/TNs/FPs/FNs that are unique to each set of results (at the specified confidence
  threshold).

"""

#%% Imports

import json
import os
import re
import random
import copy
import urllib
import itertools
import sys
import argparse
import textwrap

import numpy as np

from tqdm import tqdm
from functools import partial
from collections import defaultdict

from PIL import ImageFont, ImageDraw

from multiprocessing.pool import ThreadPool
from multiprocessing.pool import Pool

from megadetector.visualization import visualization_utils
from megadetector.utils.write_html_image_list import write_html_image_list
from megadetector.utils.ct_utils import invert_dictionary, get_iou
from megadetector.utils import path_utils
from megadetector.visualization.visualization_utils import get_text_size

def _maxempty(L): # noqa
    """
    Return the maximum value in a list, or 0 if the list is empty
    """

    if len(L) == 0:
        return 0
    else:
        return max(L)


#%% Constants and support classes

class PairwiseBatchComparisonOptions:
    """
    Defines the options used for a single pairwise comparison; a list of these
    pairwise options sets is stored in the BatchComparisonOptions class.
    """

    def __init__(self):

        #: First filename to compare
        self.results_filename_a = None

        #: Second filename to compare
        self.results_filename_b = None

        #: Description to use in the output HTML for filename A
        self.results_description_a = None

        #: Description to use in the output HTML for filename B
        self.results_description_b = None

        #: Per-class detection thresholds to use for filename A (including a 'default' threshold)
        self.detection_thresholds_a = {'animal':0.15,'person':0.15,'vehicle':0.15,'default':0.15}

        #: Per-class detection thresholds to use for filename B (including a 'default' threshold)
        self.detection_thresholds_b = {'animal':0.15,'person':0.15,'vehicle':0.15,'default':0.15}

        #: Rendering threshold to use for all categories for filename A
        self.rendering_confidence_threshold_a = 0.1

        #: Rendering threshold to use for all categories for filename B
        self.rendering_confidence_threshold_b = 0.1

# ...class PairwiseBatchComparisonOptions

class BatchComparisonOptions:
    """
    Defines the options for a set of (possibly many) pairwise comparisons.
    """

    def __init__(self):

        #: Folder to which we should write HTML output
        self.output_folder = None

        #: Base folder for images (which are specified as relative files)
        self.image_folder = None

        #: Job name to use in the HTML output file
        self.job_name = ''

        #: Maximum number of images to render for each category, where a "category" here is
        #: "detections_a_only", "detections_b_only", etc., or None to render all images.
        self.max_images_per_category = 1000

        #: Maximum number of images per HTML page (paginates if a category page goes beyond this),
        #: or None to disable pagination.
        self.max_images_per_page = None

        #: Colormap to use for detections in file A (maps detection categories to colors)
        self.colormap_a = ['Red']

        #: Colormap to use for detections in file B (maps detection categories to colors)
        self.colormap_b = ['RoyalBlue']

        #: Whether to render images with threads (True) or processes (False)
        self.parallelize_rendering_with_threads = True

        #: List of filenames to include in the comparison, or None to use all files
        self.filenames_to_include = None

        #: List of category names to include in the comparison, or None to use all categories
        self.category_names_to_include = None

        #: Compare only detections/non-detections, ignore categories (still renders categories)
        self.class_agnostic_comparison = False

        #: Width of images to render in the output HTML
        self.target_width = 800

        #: Number of workers to use for rendering, or <=1 to disable parallelization
        self.n_rendering_workers = 10

        #: Random seed for image sampling (not used if max_images_per_category is None)
        self.random_seed = 0

        #: Whether to sort results by confidence; if this is False, sorts by filename
        self.sort_by_confidence = False

        #: The expectation is that all results sets being compared will refer to the same images; if this
        #: is True (default), we'll error if that's not the case, otherwise non-matching lists will just be
        #: a warning.
        self.error_on_non_matching_lists = True

        #: Ground truth .json file in COCO Camera Traps format, or an already-loaded COCO dictionary
        self.ground_truth_file = None

        #: IoU threshold to use when comparing to ground truth with boxes
        self.gt_iou_threshold = 0.5

        #: Category names that refer to empty images when image-level ground truth is provided
        self.gt_empty_categories = ['empty','blank','misfire']

        #: Should we show image-level labels as text on each image when boxes are not available?
        self.show_labels_for_image_level_gt = True

        #: Should we show category names (instead of numbers) on GT boxes?
        self.show_category_names_on_gt_boxes = True

        #: Should we show category names (instead of numbers) on detected boxes?
        self.show_category_names_on_detected_boxes = True

        #: List of PairwiseBatchComparisonOptions that defines the comparisons we'll render
        self.pairwise_options = []

        #: Only process images whose file names contain this token
        #:
        #: This can also be a pointer to a function that takes a string (filename)
        #: and returns a bool (if the function returns True, the image will be
        #: included in the comparison).
        self.required_token = None

        #: Enable additional debug output
        self.verbose = False

        #: Separate out the "clean TP" and "clean TN" categories, only relevant when GT is
        #: available
        self.include_clean_categories = True

        #: When rendering to the output table, optionally write alternative strings
        #: to describe images
        self.fn_to_display_fn = None

        #: Should we run urllib.parse.quote() on paths before using them as links in the
        #: output page?
        self.parse_link_paths = True

        #: Should we include a TOC?  The TOC is always omitted if <=2 comparisons are performed.
        self.include_toc = True

        #: Should we return the mapping from categories (e.g. "common detections") to image
        #: pairs?  Makes the return dict much larger, but allows post-hoc exploration.
        self.return_images_by_category = False

# ...class BatchComparisonOptions

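# Editorial sketch (not part of the original module): one way to wire the two
# options classes above together for a single two-file comparison, using the
# compare_batch_results() entry point referenced later in this module.  The
# exact signature and return value of compare_batch_results() are assumed
# here, and all filenames/folders are hypothetical placeholders.

def _example_two_file_comparison():
    """
    Minimal sketch: compare two MD results files over the same image folder.
    """

    pairwise_opts = PairwiseBatchComparisonOptions()
    pairwise_opts.results_filename_a = 'results-md-v5a.json'  # hypothetical path
    pairwise_opts.results_filename_b = 'results-md-v5b.json'  # hypothetical path

    options = BatchComparisonOptions()
    options.image_folder = '/data/camera-traps'     # hypothetical path
    options.output_folder = '/tmp/md-comparison'    # hypothetical path
    options.job_name = 'example comparison'
    options.pairwise_options = [pairwise_opts]

    # Assumption: compare_batch_results() takes the options object and returns
    # a BatchComparisonResults
    results = compare_batch_results(options)
    print('Wrote comparison to {}'.format(results.html_output_file))
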
class PairwiseBatchComparisonResults:
    """
    The results from a single pairwise comparison.
    """

    def __init__(self):

        #: String of HTML content suitable for rendering to an HTML file
        self.html_content = None

        #: Possibly-modified version of the PairwiseBatchComparisonOptions supplied as input
        self.pairwise_options = None

        #: A dictionary with keys representing category names; in the no-ground-truth case, for example,
        #: category names are:
        #:
        #: common_detections
        #: common_non_detections
        #: detections_a_only
        #: detections_b_only
        #: class_transitions
        #:
        #: Values are dicts with fields 'im_a', 'im_b', 'sort_conf', and 'im_gt'
        self.categories_to_image_pairs = None

        #: Short identifier for this comparison
        self.comparison_short_name = None

        #: Friendly identifier for this comparison
        self.comparison_friendly_name = None

# ...class PairwiseBatchComparisonResults

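# Editorial sketch (not part of the original module): when
# BatchComparisonOptions.return_images_by_category is True, the
# categories_to_image_pairs mapping documented above supports post-hoc
# exploration of individual disagreements.  The category name and the
# 'sort_conf' field are taken from the documentation above; the structure of
# the per-category dict (filename -> image-pair dict) is an assumption based
# on how the comparison loop below populates it.

def _example_explore_disagreements(pairwise_results):
    """
    Minimal sketch: list the five highest-confidence images that were
    detections in results set A only.
    """

    image_pairs = pairwise_results.categories_to_image_pairs['detections_a_only']

    # Sort filenames by the confidence value used for ordering in the HTML output
    filenames = sorted(image_pairs.keys(),
                       key=lambda fn: image_pairs[fn]['sort_conf'],
                       reverse=True)

    for fn in filenames[0:5]:
        print('{} (sort conf {:.3f})'.format(fn, image_pairs[fn]['sort_conf']))
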
class BatchComparisonResults:
    """
    The results from a set of pairwise comparisons.
    """

    def __init__(self):

        #: Filename containing HTML output
        self.html_output_file = None

        #: A list of PairwiseBatchComparisonResults
        self.pairwise_results = None

# ...class BatchComparisonResults

main_page_style_header = """<head><title>Results comparison</title>
<style type="text/css">
a { text-decoration: none; }
body { font-family: segoe ui, calibri, "trebuchet ms", verdana, arial, sans-serif; }
div.contentdiv { margin-left: 20px; }
</style>
</head>"""

main_page_header = '<html>\n{}\n<body>\n'.format(main_page_style_header)
main_page_footer = '<br/><br/><br/></body></html>\n'


#%% Comparison functions

def _render_image_pair(fn,image_pairs,category_folder,options,pairwise_options):
    """
    Render two sets of results (i.e., a comparison) for a single image.

    Args:
        fn (str): image filename
        image_pairs (dict): dict mapping filenames to pairs of image dicts
        category_folder (str): folder to which to render this image, typically
            "detections_a_only", "detections_b_only", etc.
        options (BatchComparisonOptions): job options
        pairwise_options (PairwiseBatchComparisonOptions): pairwise comparison options

    Returns:
        str: rendered image filename
    """

    input_image_path = os.path.join(options.image_folder,fn)
    assert os.path.isfile(input_image_path), \
        'Image {} does not exist'.format(input_image_path)

    im = visualization_utils.open_image(input_image_path)
    image_pair = image_pairs[fn]
    detections_a = image_pair['im_a']['detections']
    detections_b = image_pair['im_b']['detections']

    custom_strings_a = [''] * len(detections_a)
    custom_strings_b = [''] * len(detections_b)

    # This function is often used to compare results before/after various merging
    # steps, so we have some special-case formatting based on the "transferred_from"
    # field generated in merge_detections.py.
    for i_det,det in enumerate(detections_a):
        if 'transferred_from' in det:
            custom_strings_a[i_det] = '({})'.format(
                det['transferred_from'].split('.')[0])

    for i_det,det in enumerate(detections_b):
        if 'transferred_from' in det:
            custom_strings_b[i_det] = '({})'.format(
                det['transferred_from'].split('.')[0])

    if options.target_width is not None:
        im = visualization_utils.resize_image(im, options.target_width)

    label_map = None
    if options.show_category_names_on_detected_boxes:
        label_map = options.detection_category_id_to_name

    visualization_utils.render_detection_bounding_boxes(detections_a,im,
        confidence_threshold=pairwise_options.rendering_confidence_threshold_a,
        thickness=4,expansion=0,
        label_map=label_map,
        colormap=options.colormap_a,
        textalign=visualization_utils.TEXTALIGN_LEFT,
        vtextalign=visualization_utils.VTEXTALIGN_TOP,
        custom_strings=custom_strings_a)
    visualization_utils.render_detection_bounding_boxes(detections_b,im,
        confidence_threshold=pairwise_options.rendering_confidence_threshold_b,
        thickness=2,expansion=0,
        label_map=label_map,
        colormap=options.colormap_b,
        textalign=visualization_utils.TEXTALIGN_LEFT,
        vtextalign=visualization_utils.VTEXTALIGN_BOTTOM,
        custom_strings=custom_strings_b)

    # Do we also need to render ground truth?
    if 'im_gt' in image_pair and image_pair['im_gt'] is not None:

        im_gt = image_pair['im_gt']
        annotations_gt = image_pair['annotations_gt']
        gt_boxes = []
        gt_categories = []
        for ann in annotations_gt:
            if 'bbox' in ann:
                gt_boxes.append(ann['bbox'])
                gt_categories.append(ann['category_id'])

        if len(gt_boxes) > 0:

            label_map = None
            if options.show_category_names_on_gt_boxes:
                label_map = options.gt_category_id_to_name

            assert len(gt_boxes) == len(gt_categories)
            gt_colormap = ['yellow']*(max(gt_categories)+1)
            visualization_utils.render_db_bounding_boxes(boxes=gt_boxes,
                                                         classes=gt_categories,
                                                         image=im,
                                                         original_size=(im_gt['width'],im_gt['height']),
                                                         label_map=label_map,
                                                         thickness=1,
                                                         expansion=0,
                                                         textalign=visualization_utils.TEXTALIGN_RIGHT,
                                                         vtextalign=visualization_utils.VTEXTALIGN_TOP,
                                                         text_rotation=-90,
                                                         colormap=gt_colormap)

        else:

            if options.show_labels_for_image_level_gt:

                gt_categories_set = set([ann['category_id'] for ann in annotations_gt])
                gt_category_names = [options.gt_category_id_to_name[category_id] for
                                     category_id in gt_categories_set]
                category_string = ','.join(gt_category_names)
                category_string = '(' + category_string + ')'

                try:
                    font = ImageFont.truetype('arial.ttf', 25)
                except OSError:
                    font = ImageFont.load_default()

                draw = ImageDraw.Draw(im)

                text_width, text_height = get_text_size(font,category_string)

                text_left = 10
                text_bottom = text_height + 10
                margin = np.ceil(0.05 * text_height)

                draw.text(
                    (text_left + margin, text_bottom - text_height - margin),
                    category_string,
                    fill='white',
                    font=font)

        # ...if we have boxes in the GT

    # ...if we need to render ground truth

    output_image_fn = path_utils.flatten_path(fn)
    output_image_path = os.path.join(category_folder,output_image_fn)
    im.save(output_image_path)
    return output_image_path

# ...def _render_image_pair()

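# Editorial sketch (not part of the original module): rendering a single
# comparison image outside the main pipeline.  The detection dicts follow the
# standard MD results format; all paths here are hypothetical, and
# options.detection_category_id_to_name is normally attached by
# _pairwise_compare_batch_results() rather than set by hand.

def _example_render_one_pair():
    """
    Minimal sketch: render one image with boxes from two results sets.
    """

    im_a = {'file':'site01/img0001.jpg',
            'detections':[{'category':'1','conf':0.92,'bbox':[0.1,0.2,0.3,0.4]}]}
    im_b = {'file':'site01/img0001.jpg',
            'detections':[]}
    image_pairs = {'site01/img0001.jpg':{'im_a':im_a,'im_b':im_b,'im_gt':None}}

    options = BatchComparisonOptions()
    options.image_folder = '/data/camera-traps'  # hypothetical path
    options.detection_category_id_to_name = {'1':'animal','2':'person','3':'vehicle'}

    pairwise_options = PairwiseBatchComparisonOptions()

    # Per the rendering calls above, boxes from set A draw with thick lines
    # (red by default) and top-aligned text, boxes from set B with thin lines
    # (blue by default) and bottom-aligned text
    _render_image_pair('site01/img0001.jpg',image_pairs,
                       '/tmp/md-comparison/detections_a_only',  # hypothetical path
                       options,pairwise_options)
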
def _result_types_to_comparison_category(result_types_present_a,
                                         result_types_present_b,
                                         ground_truth_type,
                                         options):
    """
    Given the set of result types (tp,tn,fp,fn) present in each of two sets of results
    for an image, determine the category to which we want to assign this image.
    """

    # The "common_tp" category is for the case where both models have *only* TPs
    if ('tp' in result_types_present_a) and ('tp' in result_types_present_b) and \
       (len(result_types_present_a) == 1) and (len(result_types_present_b) == 1):
        return 'common_tp'

    # The "common_tn" category is for the case where both models have *only* TNs
    if ('tn' in result_types_present_a) and ('tn' in result_types_present_b) and \
       (len(result_types_present_a) == 1) and (len(result_types_present_b) == 1):
        return 'common_tn'

    """
    # The "common_fp" category is for the case where both models have *only* FPs
    if ('fp' in result_types_present_a) and ('fp' in result_types_present_b) and \
       (len(result_types_present_a) == 1) and (len(result_types_present_b) == 1):
        return 'common_fp'
    """

    # The "common_fp" category is for the case where both models have at least one FP,
    # and no FNs.
    if ('fp' in result_types_present_a) and ('fp' in result_types_present_b) and \
       ('fn' not in result_types_present_a) and ('fn' not in result_types_present_b):
        return 'common_fp'

    """
    # The "common_fn" category is for the case where both models have *only* FNs
    if ('fn' in result_types_present_a) and ('fn' in result_types_present_b) and \
       (len(result_types_present_a) == 1) and (len(result_types_present_b) == 1):
        return 'common_fn'
    """

    # The "common_fn" category is for the case where both models have at least one FN,
    # and no FPs
    if ('fn' in result_types_present_a) and ('fn' in result_types_present_b) and \
       ('fp' not in result_types_present_a) and ('fp' not in result_types_present_b):
        return 'common_fn'

    ## The tp-only categories are for the case where one model has *only* TPs

    if ('tp' in result_types_present_a) and (len(result_types_present_a) == 1):
        # Clean TPs are cases where the other model has only FNs, no FPs
        if options.include_clean_categories:
            if ('fn' in result_types_present_b) and \
               ('fp' not in result_types_present_b) and \
               ('tp' not in result_types_present_b):
                return 'clean_tp_a_only'
        # Otherwise, TPs are cases where one model has only TPs, and the other model
        # has any mistakes
        if ('fn' in result_types_present_b) or ('fp' in result_types_present_b):
            return 'tp_a_only'

    if ('tp' in result_types_present_b) and (len(result_types_present_b) == 1):
        # Clean TPs are cases where the other model has only FNs, no FPs
        if options.include_clean_categories:
            if ('fn' in result_types_present_a) and \
               ('fp' not in result_types_present_a) and \
               ('tp' not in result_types_present_a):
                return 'clean_tp_b_only'
        # Otherwise, TPs are cases where one model has only TPs, and the other model
        # has any mistakes
        if ('fn' in result_types_present_a) or ('fp' in result_types_present_a):
            return 'tp_b_only'

    # The tn-only categories are for the case where one model has a TN and the
    # other has at least one FP
    if 'tn' in result_types_present_a and 'fp' in result_types_present_b:
        assert len(result_types_present_a) == 1
        assert len(result_types_present_b) == 1
        return 'tn_a_only'
    if 'tn' in result_types_present_b and 'fp' in result_types_present_a:
        assert len(result_types_present_a) == 1
        assert len(result_types_present_b) == 1
        return 'tn_b_only'

    # The 'fpfn' category is for everything else
    return 'fpfn'

# ...def _result_types_to_comparison_category(...)

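# Editorial sketch (not part of the original module): a few worked examples of
# the categorization above, written as assertions.  'bbox_gt' is passed for the
# ground_truth_type parameter, which the logic above does not branch on.

def _example_comparison_categories():
    """
    Minimal sketch: spot-check _result_types_to_comparison_category().
    """

    options = BatchComparisonOptions()

    # Both models got everything right
    assert _result_types_to_comparison_category({'tp'},{'tp'},'bbox_gt',options) == 'common_tp'

    # A was perfect on this image; B missed every box and added nothing spurious
    assert _result_types_to_comparison_category({'tp'},{'fn'},'bbox_gt',options) == 'clean_tp_a_only'

    # A was perfect; B found something, but also made a mistake
    assert _result_types_to_comparison_category({'tp'},{'tp','fp'},'bbox_gt',options) == 'tp_a_only'

    # A correctly reported an empty image; B reported a false detection
    assert _result_types_to_comparison_category({'tn'},{'fp'},'bbox_gt',options) == 'tn_a_only'

    # Mixed mistakes on both sides land in the catch-all category
    assert _result_types_to_comparison_category({'fp'},{'fn'},'bbox_gt',options) == 'fpfn'
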
def _subset_md_results(results,options):
    """
    Subset a set of MegaDetector results according to the rules defined in the
    BatchComparisonOptions object [options]. Typically used to filter for files
    containing a particular string. Modifies [results] in place, also returns.

    Args:
        results (dict): MD results
        options (BatchComparisonOptions): job options containing filtering rules
    """

    if options.required_token is None:
        return results

    images_to_keep = []
    for im in results['images']:
        # Is [required_token] a string?
        if isinstance(options.required_token,str):
            if options.required_token in im['file']:
                images_to_keep.append(im)
        # Otherwise [required_token] is a function
        else:
            assert callable(options.required_token), 'Illegal value for required_token'
            if options.required_token(im['file']):
                images_to_keep.append(im)

    if options.verbose:
        print('Keeping {} of {} images in MD results'.format(
            len(images_to_keep),len(results['images'])))

    results['images'] = images_to_keep
    return results

# ...def _subset_md_results(...)

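# Editorial sketch (not part of the original module): required_token can be a
# substring or a callable, per the BatchComparisonOptions documentation above.
# The folder-naming convention used here is hypothetical.

def _example_required_token_filter():
    """
    Minimal sketch: restrict a comparison to daytime images from two sites.
    """

    options = BatchComparisonOptions()

    # Simple case: keep only files whose path contains a token
    options.required_token = 'site01/'

    # More flexible case: any function mapping a filename to a bool works too
    def _keep_file(fn):
        return ('site01/' in fn or 'site02/' in fn) and ('night' not in fn)

    options.required_token = _keep_file

    results = {'images':[{'file':'site01/day/img01.jpg'},
                         {'file':'site03/day/img02.jpg'}]}
    results = _subset_md_results(results,options)
    assert len(results['images']) == 1
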
def _subset_ground_truth(gt_data,options):
    """
    Subset a set of COCO annotations according to the rules defined in the
    BatchComparisonOptions object [options]. Typically used to filter for files
    containing a particular string. Modifies [gt_data] in place, also returns.

    Args:
        gt_data (dict): COCO-formatted annotations
        options (BatchComparisonOptions): job options containing filtering rules
    """

    if options.required_token is None:
        return gt_data

    images_to_keep = []
    for im in gt_data['images']:
        if isinstance(options.required_token,str):
            if options.required_token in im['file_name']:
                images_to_keep.append(im)
        else:
            if options.required_token(im['file_name']):
                images_to_keep.append(im)

    image_ids_to_keep_set = set([im['id'] for im in images_to_keep])

    annotations_to_keep = []
    for ann in gt_data['annotations']:
        if ann['image_id'] in image_ids_to_keep_set:
            annotations_to_keep.append(ann)

    if options.verbose:
        print('Keeping {} of {} images, {} of {} annotations in GT data'.format(
            len(images_to_keep),len(gt_data['images']),
            len(annotations_to_keep),len(gt_data['annotations'])))

    gt_data['images'] = images_to_keep
    gt_data['annotations'] = annotations_to_keep

    return gt_data

# ...def _subset_ground_truth(...)

def _pairwise_compare_batch_results(options,output_index,pairwise_options):
    """
    The main entry point for this module is compare_batch_results(), which calls
    this function for each pair of comparisons the caller has requested. Generates an
    HTML page for this comparison. Returns a PairwiseBatchComparisonResults object.

    Args:
        options (BatchComparisonOptions): overall job options for this comparison group
        output_index (int): a numeric index used for generating HTML titles
        pairwise_options (PairwiseBatchComparisonOptions): job options for this comparison

    Returns:
        PairwiseBatchComparisonResults: the results of this pairwise comparison
    """

    # pairwise_options is passed as a parameter here, and should not be specified
    # in the options object.
    assert options.pairwise_options is None

    if options.random_seed is not None:
        random.seed(options.random_seed)

    # Warn the user if some "detections" might not get rendered
    max_classification_threshold_a = max(list(pairwise_options.detection_thresholds_a.values()))
    max_classification_threshold_b = max(list(pairwise_options.detection_thresholds_b.values()))

    if pairwise_options.rendering_confidence_threshold_a > max_classification_threshold_a:
        print('*** Warning: rendering threshold A ({}) is higher than max confidence threshold A ({}) ***'.format(
            pairwise_options.rendering_confidence_threshold_a,max_classification_threshold_a))

    if pairwise_options.rendering_confidence_threshold_b > max_classification_threshold_b:
        print('*** Warning: rendering threshold B ({}) is higher than max confidence threshold B ({}) ***'.format(
            pairwise_options.rendering_confidence_threshold_b,max_classification_threshold_b))


    ##%% Validate inputs

    assert os.path.isfile(pairwise_options.results_filename_a), \
        "Can't find results file {}".format(pairwise_options.results_filename_a)
    assert os.path.isfile(pairwise_options.results_filename_b), \
        "Can't find results file {}".format(pairwise_options.results_filename_b)
    assert os.path.isdir(options.image_folder), \
        "Can't find image folder {}".format(options.image_folder)
    os.makedirs(options.output_folder,exist_ok=True)

    # Just in case the user provided a single category instead of a list
    # for category_names_to_include
    if options.category_names_to_include is not None:
        if isinstance(options.category_names_to_include,str):
            options.category_names_to_include = [options.category_names_to_include]


    ##%% Load both result sets

    if options.verbose:
        print('Loading {}'.format(pairwise_options.results_filename_a))
    with open(pairwise_options.results_filename_a,'r') as f:
        results_a = json.load(f)

    if options.verbose:
        print('Loading {}'.format(pairwise_options.results_filename_b))
    with open(pairwise_options.results_filename_b,'r') as f:
        results_b = json.load(f)

    # Don't let path separators confuse things
    for im in results_a['images']:
        if 'file' in im:
            im['file'] = im['file'].replace('\\','/')
    for im in results_b['images']:
        if 'file' in im:
            im['file'] = im['file'].replace('\\','/')

    if not options.class_agnostic_comparison:
        assert results_a['detection_categories'] == results_b['detection_categories'], \
            'Cannot perform a class-sensitive comparison across results with different categories'

    detection_categories_a = results_a['detection_categories']
    detection_categories_b = results_b['detection_categories']
    detection_category_id_to_name = detection_categories_a
    detection_category_name_to_id = invert_dictionary(detection_categories_a)
    options.detection_category_id_to_name = detection_category_id_to_name

    category_name_to_id_a = invert_dictionary(detection_categories_a)
    category_name_to_id_b = invert_dictionary(detection_categories_b)
    category_ids_to_include_a = []
    category_ids_to_include_b = []

    # If we're supposed to be including all categories, we don't actually need to
    # populate category_ids_to_include_a/b, but we're doing this for future-proofing.
    if options.category_names_to_include is None:
        category_ids_to_include_a = sorted(list(category_name_to_id_a.values()))
        category_ids_to_include_b = sorted(list(category_name_to_id_b.values()))
    else:
        for category_name in options.category_names_to_include:
            if category_name in category_name_to_id_a:
                category_ids_to_include_a.append(category_name_to_id_a[category_name])
            if category_name in category_name_to_id_b:
                category_ids_to_include_b.append(category_name_to_id_b[category_name])

    if pairwise_options.results_description_a is None:
        if 'detector' not in results_a['info']:
            print('No model metadata supplied for results-A, assuming MDv4')
            pairwise_options.results_description_a = 'MDv4 (assumed)'
        else:
            pairwise_options.results_description_a = results_a['info']['detector']

    if pairwise_options.results_description_b is None:
        if 'detector' not in results_b['info']:
            print('No model metadata supplied for results-B, assuming MDv4')
            pairwise_options.results_description_b = 'MDv4 (assumed)'
        else:
            pairwise_options.results_description_b = results_b['info']['detector']

    # Restrict this comparison to specific files if requested
    results_a = _subset_md_results(results_a, options)
    results_b = _subset_md_results(results_b, options)

    images_a = results_a['images']
    images_b = results_b['images']

    filename_to_image_a = {im['file']:im for im in images_a}
    filename_to_image_b = {im['file']:im for im in images_b}


    ##%% Make sure the two result sets represent the same set of images

    filenames_a = [im['file'] for im in images_a]
    filenames_b_set = set([im['file'] for im in images_b])

    if len(images_a) != len(images_b):
        s = 'set A has {} images, set B has {}'.format(len(images_a),len(images_b))
        if options.error_on_non_matching_lists:
            raise ValueError(s)
        else:
            print('Warning: ' + s)
    else:
        if options.error_on_non_matching_lists:
            for fn in filenames_a:
                assert fn in filenames_b_set

    assert len(filenames_a) == len(images_a)
    assert len(filenames_b_set) == len(images_b)

    if options.filenames_to_include is None:
        filenames_to_compare = filenames_a
    else:
        filenames_to_compare = options.filenames_to_include


    ##%% Determine whether ground truth is available

    # ...and determine what type of GT is available, boxes or image-level labels

    gt_data = None
    gt_category_id_to_detection_category_id = None

    if options.ground_truth_file is None:

        ground_truth_type = 'no_gt'

    else:

        # Read ground truth data if necessary
        if isinstance(options.ground_truth_file,dict):
            gt_data = options.ground_truth_file
        else:
            assert isinstance(options.ground_truth_file,str)
            with open(options.ground_truth_file,'r') as f:
                gt_data = json.load(f)

        # Restrict this comparison to specific files if requested
        gt_data = _subset_ground_truth(gt_data, options)

        # Do we have box-level ground truth or image-level ground truth?
        found_box = False

        for ann in gt_data['annotations']:
            if 'bbox' in ann:
                found_box = True
                break

        if found_box:
            ground_truth_type = 'bbox_gt'
        else:
            ground_truth_type = 'image_level_gt'

        gt_category_name_to_id = {c['name']:c['id'] for c in gt_data['categories']}
        gt_category_id_to_name = invert_dictionary(gt_category_name_to_id)
        options.gt_category_id_to_name = gt_category_id_to_name

        if ground_truth_type == 'bbox_gt':

            if not options.class_agnostic_comparison:
                assert set(gt_category_name_to_id.keys()) == set(detection_category_name_to_id.keys()), \
                    'Cannot compare detections to GT with different categories when class_agnostic_comparison is False'
                gt_category_id_to_detection_category_id = {}
                for category_name in gt_category_name_to_id:
                    gt_category_id = gt_category_name_to_id[category_name]
                    detection_category_id = detection_category_name_to_id[category_name]
                    gt_category_id_to_detection_category_id[gt_category_id] = detection_category_id

        elif ground_truth_type == 'image_level_gt':

            if not options.class_agnostic_comparison:
                for detection_category_name in detection_category_name_to_id:
                    if detection_category_name not in gt_category_name_to_id:
                        raise ValueError('Detection category {} not available in GT category list'.format(
                            detection_category_name))
                for gt_category_name in gt_category_name_to_id:
                    if gt_category_name in options.gt_empty_categories:
                        continue
                    if (gt_category_name not in detection_category_name_to_id):
                        raise ValueError('GT category {} not available in detection category list'.format(
                            gt_category_name))

    assert ground_truth_type in ('no_gt','bbox_gt','image_level_gt')

    # Make sure ground truth data refers to at least *some* of the same files that are in our
    # results files
    if gt_data is not None:

        filenames_to_compare_set = set(filenames_to_compare)
        gt_filenames = [im['file_name'] for im in gt_data['images']]
        gt_filenames_set = set(gt_filenames)

        common_filenames = filenames_to_compare_set.intersection(gt_filenames_set)
        assert len(common_filenames) > 0, 'MD results files and ground truth file have no images in common'

        filenames_only_in_gt = gt_filenames_set.difference(filenames_to_compare_set)
        if len(filenames_only_in_gt) > 0:
            print('Warning: {} files are only available in the ground truth (not in MD results)'.format(
                len(filenames_only_in_gt)))

        filenames_only_in_results = filenames_to_compare_set.difference(gt_filenames_set)
        if len(filenames_only_in_results) > 0:
            print('Warning: {} files are only available in the MD results (not in ground truth)'.format(
                len(filenames_only_in_results)))

        if options.error_on_non_matching_lists:
            if len(filenames_only_in_gt) > 0 or len(filenames_only_in_results) > 0:
                raise ValueError('GT image set is not identical to result image sets')

        filenames_to_compare = sorted(list(common_filenames))

        # Map filenames to ground truth images and annotations
        filename_to_image_gt = {im['file_name']:im for im in gt_data['images']}
        gt_image_id_to_image = {}
        for im in gt_data['images']:
            gt_image_id_to_image[im['id']] = im
        gt_image_id_to_annotations = defaultdict(list)
        for ann in gt_data['annotations']:
            gt_image_id_to_annotations[ann['image_id']].append(ann)

        # Convert annotations to relative (MD) coordinates

        # ann = gt_data['annotations'][0]
        for ann in gt_data['annotations']:
            gt_image = gt_image_id_to_image[ann['image_id']]
            if 'bbox' not in ann:
                continue
            # COCO format: [x,y,width,height]
            # Normalized format: [x_min, y_min, width_of_box, height_of_box]
            normalized_bbox = [ann['bbox'][0]/gt_image['width'],ann['bbox'][1]/gt_image['height'],
                               ann['bbox'][2]/gt_image['width'],ann['bbox'][3]/gt_image['height']]
            ann['normalized_bbox'] = normalized_bbox

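    # Editorial note (not part of the original source): a worked example of the
    # conversion above.  For a 1000x750 image with COCO box [100, 75, 200, 150]
    # (x, y, width, height, in pixels), the normalized box is:
    #
    #   [100/1000, 75/750, 200/1000, 150/750] = [0.1, 0.1, 0.2, 0.2]
    #
    # ...i.e., the same [x, y, width, height] layout, expressed as fractions of
    # image width and height, matching the coordinate convention MD uses for
    # detection boxes.
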
    ##%% Find differences

    # See PairwiseBatchComparisonResults for a description
    categories_to_image_pairs = {}

    # This will map category names that can be used in filenames (e.g. "common_non_detections" or
    # "false_positives_a_only") to friendly names (e.g. "Common non-detections")
    categories_to_page_titles = None

    if ground_truth_type == 'no_gt':

        categories_to_image_pairs['common_detections'] = {}
        categories_to_image_pairs['common_non_detections'] = {}
        categories_to_image_pairs['detections_a_only'] = {}
        categories_to_image_pairs['detections_b_only'] = {}
        categories_to_image_pairs['class_transitions'] = {}

        categories_to_page_titles = {
            'common_detections':'Detections common to both models',
            'common_non_detections':'Non-detections common to both models',
            'detections_a_only':'Detections reported by model A only',
            'detections_b_only':'Detections reported by model B only',
            'class_transitions':'Detections reported as different classes by models A and B'
        }

    elif (ground_truth_type == 'bbox_gt') or (ground_truth_type == 'image_level_gt'):

        categories_to_image_pairs['common_tp'] = {}
        categories_to_image_pairs['common_tn'] = {}
        categories_to_image_pairs['common_fp'] = {}
        categories_to_image_pairs['common_fn'] = {}

        categories_to_image_pairs['tp_a_only'] = {}
        categories_to_image_pairs['tp_b_only'] = {}
        categories_to_image_pairs['tn_a_only'] = {}
        categories_to_image_pairs['tn_b_only'] = {}

        categories_to_image_pairs['fpfn'] = {}

        categories_to_page_titles = {
            'common_tp':'Common true positives',
            'common_tn':'Common true negatives',
            'common_fp':'Common false positives',
            'common_fn':'Common false negatives',
            'tp_a_only':'TP (A only)',
            'tp_b_only':'TP (B only)',
            'tn_a_only':'TN (A only)',
            'tn_b_only':'TN (B only)',
            'fpfn':'More complicated discrepancies'
        }

        if options.include_clean_categories:

            categories_to_image_pairs['clean_tp_a_only'] = {}
            categories_to_image_pairs['clean_tp_b_only'] = {}
            # categories_to_image_pairs['clean_tn_a_only'] = {}
            # categories_to_image_pairs['clean_tn_b_only'] = {}

            categories_to_page_titles['clean_tp_a_only'] = 'Clean TP wins for A'
            categories_to_page_titles['clean_tp_b_only'] = 'Clean TP wins for B'
            # categories_to_page_titles['clean_tn_a_only'] = 'Clean TN wins for A'
            # categories_to_page_titles['clean_tn_b_only'] = 'Clean TN wins for B'

    else:

        raise Exception('Unknown ground truth type: {}'.format(ground_truth_type))

    # Map category IDs to thresholds
    category_id_to_threshold_a = {}
    category_id_to_threshold_b = {}

    for category_id in detection_categories_a:
        category_name = detection_categories_a[category_id]
        if category_name in pairwise_options.detection_thresholds_a:
            category_id_to_threshold_a[category_id] = \
                pairwise_options.detection_thresholds_a[category_name]
        else:
            category_id_to_threshold_a[category_id] = \
                pairwise_options.detection_thresholds_a['default']

    for category_id in detection_categories_b:
        category_name = detection_categories_b[category_id]
        if category_name in pairwise_options.detection_thresholds_b:
            category_id_to_threshold_b[category_id] = \
                pairwise_options.detection_thresholds_b[category_name]
        else:
            category_id_to_threshold_b[category_id] = \
                pairwise_options.detection_thresholds_b['default']

    # fn = filenames_to_compare[0]
    for i_file,fn in tqdm(enumerate(filenames_to_compare),
                          total=len(filenames_to_compare)):

        if fn not in filename_to_image_b:

            # We shouldn't have gotten this far if error_on_non_matching_lists is set
            assert not options.error_on_non_matching_lists

            print('Skipping filename {}, not in image set B'.format(fn))
            continue

        im_a = filename_to_image_a[fn]
        im_b = filename_to_image_b[fn]

        im_pair = {}
        im_pair['im_a'] = im_a
        im_pair['im_b'] = im_b
        im_pair['im_gt'] = None
        im_pair['annotations_gt'] = None

        if gt_data is not None:

            if fn not in filename_to_image_gt:

                # We shouldn't have gotten this far if error_on_non_matching_lists is set
                assert not options.error_on_non_matching_lists

                print('Skipping filename {}, not in ground truth'.format(fn))
                continue

            im_gt = filename_to_image_gt[fn]
            annotations_gt = gt_image_id_to_annotations[im_gt['id']]
            im_pair['im_gt'] = im_gt
            im_pair['annotations_gt'] = annotations_gt

        comparison_category = None

        # Compare image A to image B, without ground truth
        if ground_truth_type == 'no_gt':

            categories_above_threshold_a = set()

            if 'detections' not in im_a or im_a['detections'] is None:
                assert 'failure' in im_a and im_a['failure'] is not None
                continue

            if 'detections' not in im_b or im_b['detections'] is None:
                assert 'failure' in im_b and im_b['failure'] is not None
                continue

            invalid_category_error = False

            # det = im_a['detections'][0]
            for det in im_a['detections']:

                category_id = det['category']

                if category_id not in category_id_to_threshold_a:
                    print('Warning: unexpected category {} for model A on file {}'.format(category_id,fn))
                    invalid_category_error = True
                    break

                conf = det['conf']
                conf_thresh = category_id_to_threshold_a[category_id]
                if conf >= conf_thresh:
                    categories_above_threshold_a.add(category_id)

            if invalid_category_error:
                continue

            categories_above_threshold_b = set()

            for det in im_b['detections']:

                category_id = det['category']

                if category_id not in category_id_to_threshold_b:
                    print('Warning: unexpected category {} for model B on file {}'.format(category_id,fn))
                    invalid_category_error = True
                    break

                conf = det['conf']
                conf_thresh = category_id_to_threshold_b[category_id]
                if conf >= conf_thresh:
                    categories_above_threshold_b.add(category_id)

            if invalid_category_error:
                continue

            # Should we be restricting the comparison to only certain categories?
            if options.category_names_to_include is not None:

                # Restrict the categories we treat as above-threshold to the set we're supposed
                # to be using
                categories_above_threshold_a = [category_id for category_id in categories_above_threshold_a if \
                                                category_id in category_ids_to_include_a]
                categories_above_threshold_b = [category_id for category_id in categories_above_threshold_b if \
                                                category_id in category_ids_to_include_b]

            detection_a = (len(categories_above_threshold_a) > 0)
            detection_b = (len(categories_above_threshold_b) > 0)

            if detection_a and detection_b:
                if (categories_above_threshold_a == categories_above_threshold_b) or \
                   options.class_agnostic_comparison:
                    comparison_category = 'common_detections'
                else:
                    comparison_category = 'class_transitions'
            elif (not detection_a) and (not detection_b):
                comparison_category = 'common_non_detections'
            elif detection_a and (not detection_b):
                comparison_category = 'detections_a_only'
            else:
                assert detection_b and (not detection_a)
                comparison_category = 'detections_b_only'

            max_conf_a = _maxempty([det['conf'] for det in im_a['detections']])
            max_conf_b = _maxempty([det['conf'] for det in im_b['detections']])

            # Only used if sort_by_confidence is True
            if comparison_category == 'common_detections':
                sort_conf = max(max_conf_a,max_conf_b)
            elif comparison_category == 'common_non_detections':
                sort_conf = max(max_conf_a,max_conf_b)
            elif comparison_category == 'detections_a_only':
                sort_conf = max_conf_a
            elif comparison_category == 'detections_b_only':
                sort_conf = max_conf_b
            elif comparison_category == 'class_transitions':
                sort_conf = max(max_conf_a,max_conf_b)
            else:
                print('Warning: unknown comparison category {}'.format(comparison_category))
                sort_conf = max(max_conf_a,max_conf_b)

        elif ground_truth_type == 'bbox_gt':

def _boxes_match(det,gt_ann):
|
|
1087
|
+
|
|
1088
|
+
# if we're doing class-sensitive comparisons, only match same-category classes
|
|
1089
|
+
if not options.class_agnostic_comparison:
|
|
1090
|
+
detection_category_id = det['category']
|
|
1091
|
+
gt_category_id = gt_ann['category_id']
|
|
1092
|
+
if detection_category_id != \
|
|
1093
|
+
gt_category_id_to_detection_category_id[gt_category_id]:
|
|
1094
|
+
return False
|
|
1095
|
+
|
|
1096
|
+
if 'bbox' not in gt_ann:
|
|
1097
|
+
return False
|
|
1098
|
+
|
|
1099
|
+
assert 'normalized_bbox' in gt_ann
|
|
1100
|
+
iou = get_iou(det['bbox'],gt_ann['normalized_bbox'])
|
|
1101
|
+
|
|
1102
|
+
return iou >= options.gt_iou_threshold
|
|
1103
|
+
|
|
1104
|
+
# ...def _boxes_match(...)
|
|
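
            # Illustrative example (not in the original source), assuming get_iou
            # computes standard intersection-over-union on [x_min, y_min, width, height]
            # boxes in normalized coordinates. A worked instance of the matching rule
            # above:
            #
            #   det_box = [0.00, 0.00, 0.5, 0.5]    # area 0.25
            #   gt_box  = [0.25, 0.25, 0.5, 0.5]    # area 0.25
            #
            #   # intersection = 0.25 * 0.25 = 0.0625
            #   # union = 0.25 + 0.25 - 0.0625 = 0.4375
            #   # IoU = 0.0625 / 0.4375 ~= 0.143
            #
            # ...so with a gt_iou_threshold of 0.5 (a common choice), these two boxes
            # would not match.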

            # Categorize each model into TP/TN/FP/FN
            def _categorize_image_with_box_gt(im_detection,im_gt,annotations_gt,category_id_to_threshold):

                annotations_gt = [ann for ann in annotations_gt if 'bbox' in ann]

                assert im_detection['file'] == im_gt['file_name']

                # Set of result types - tn, tp, fp, fn - present in this image. tn is
                # mutually exclusive with the others.
                result_types_present = set()

                # Find detections above threshold
                detections_above_threshold = []

                # det = im_detection['detections'][0]
                for det in im_detection['detections']:
                    category_id = det['category']
                    threshold = category_id_to_threshold[category_id]
                    if det['conf'] > threshold:
                        detections_above_threshold.append(det)

                if len(detections_above_threshold) == 0 and len(annotations_gt) == 0:
                    result_types_present.add('tn')
                    return result_types_present

                # Look for a match for each detection
                #
                # det = detections_above_threshold[0]
                for det in detections_above_threshold:

                    det_matches_annotation = False

                    # gt_ann = annotations_gt[0]
                    for gt_ann in annotations_gt:
                        if _boxes_match(det, gt_ann):
                            det_matches_annotation = True
                            break

                    if det_matches_annotation:
                        result_types_present.add('tp')
                    else:
                        result_types_present.add('fp')

                # Look for a match for each GT bbox
                #
                # gt_ann = annotations_gt[0]
                for gt_ann in annotations_gt:

                    annotation_matches_det = False

                    for det in detections_above_threshold:

                        if _boxes_match(det, gt_ann):
                            annotation_matches_det = True
                            break

                    if annotation_matches_det:
                        # We should have found this when we looped over detections
                        assert 'tp' in result_types_present
                    else:
                        result_types_present.add('fn')

                # ...for each ground truth annotation

                return result_types_present

            # ...def _categorize_image_with_box_gt(...)
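
            # Illustrative example (not in the original source) of how result types
            # combine for one image: given two ground truth boxes, one above-threshold
            # detection that matches the first box per _boxes_match(), and one spurious
            # detection matching nothing, _categorize_image_with_box_gt() returns
            # {'tp','fp','fn'}: the matched detection contributes 'tp', the spurious
            # detection 'fp', and the unmatched second ground truth box 'fn'. An image
            # with no ground truth boxes and no above-threshold detections returns
            # {'tn'}, which never co-occurs with the other types.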

            # im_detection = im_a; category_id_to_threshold = category_id_to_threshold_a
            result_types_present_a = \
                _categorize_image_with_box_gt(im_a,im_gt,annotations_gt,category_id_to_threshold_a)
            result_types_present_b = \
                _categorize_image_with_box_gt(im_b,im_gt,annotations_gt,category_id_to_threshold_b)


            ## Some combinations are nonsense

            # TNs are mutually exclusive with other categories
            if 'tn' in result_types_present_a or 'tn' in result_types_present_b:
                assert len(result_types_present_a) == 1
                assert len(result_types_present_b) == 1

            # If either model has a TP or FN, the other has to have a TP or FN, since
            # there was something in the GT
            if ('tp' in result_types_present_a) or ('fn' in result_types_present_a):
                assert 'tp' in result_types_present_b or 'fn' in result_types_present_b
            if ('tp' in result_types_present_b) or ('fn' in result_types_present_b):
                assert 'tp' in result_types_present_a or 'fn' in result_types_present_a


            ## Choose a comparison category based on result types

            comparison_category = _result_types_to_comparison_category(
                result_types_present_a,result_types_present_b,ground_truth_type,options)

            # TODO: this may or may not be the right way to interpret sorting
            # by confidence in this case, e.g., we may want to sort by confidence
            # of correct or incorrect matches. But this isn't *wrong*.
            max_conf_a = _maxempty([det['conf'] for det in im_a['detections']])
            max_conf_b = _maxempty([det['conf'] for det in im_b['detections']])
            sort_conf = max(max_conf_a,max_conf_b)

        else:

            # Categorize each model into TP/TN/FP/FN
            def _categorize_image_with_image_level_gt(im_detection,im_gt,annotations_gt,
                                                      category_id_to_threshold):

                assert im_detection['file'] == im_gt['file_name']

                # Set of result types - tn, tp, fp, fn - present in this image.
                result_types_present = set()

                # Find detections above threshold
                category_names_detected = set()

                # det = im_detection['detections'][0]
                for det in im_detection['detections']:
                    category_id = det['category']
                    threshold = category_id_to_threshold[category_id]
                    if det['conf'] > threshold:
                        category_name = detection_category_id_to_name[det['category']]
                        category_names_detected.add(category_name)

                category_names_in_gt = set()

                # ann = annotations_gt[0]
                for ann in annotations_gt:
                    category_name = gt_category_id_to_name[ann['category_id']]
                    category_names_in_gt.add(category_name)

                for category_name in category_names_detected:

                    if category_name in category_names_in_gt:
                        result_types_present.add('tp')
                    else:
                        result_types_present.add('fp')

                for category_name in category_names_in_gt:

                    # Is this an empty image?
                    if category_name in options.gt_empty_categories:

                        assert all([cn in options.gt_empty_categories for cn in category_names_in_gt]), \
                            'Image {} has both empty and non-empty ground truth labels'.format(
                                im_detection['file'])
                        if len(category_names_detected) > 0:
                            result_types_present.add('fp')
                            # If there is a false positive present in an empty image, there can't
                            # be any other result types present
                            assert len(result_types_present) == 1
                        else:
                            result_types_present.add('tn')

                    elif category_name in category_names_detected:

                        assert 'tp' in result_types_present

                    else:

                        result_types_present.add('fn')

                return result_types_present

            # ...def _categorize_image_with_image_level_gt(...)
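
            # Illustrative example (not in the original source): image-level comparison
            # ignores boxes entirely and compares category names. If a model's
            # above-threshold detections map to {'animal'} and the ground truth labels
            # map to {'animal','person'}, _categorize_image_with_image_level_gt()
            # returns {'tp','fn'}: 'animal' was correctly flagged, 'person' was missed.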

            # im_detection = im_a; category_id_to_threshold = category_id_to_threshold_a
            result_types_present_a = \
                _categorize_image_with_image_level_gt(im_a,im_gt,annotations_gt,category_id_to_threshold_a)
            result_types_present_b = \
                _categorize_image_with_image_level_gt(im_b,im_gt,annotations_gt,category_id_to_threshold_b)


            ## Some combinations are nonsense

            # If either model has a TP or FN, the other has to have a TP or FN, since
            # there was something in the GT
            if ('tp' in result_types_present_a) or ('fn' in result_types_present_a):
                assert 'tp' in result_types_present_b or 'fn' in result_types_present_b
            if ('tp' in result_types_present_b) or ('fn' in result_types_present_b):
                assert 'tp' in result_types_present_a or 'fn' in result_types_present_a


            ## Choose a comparison category based on result types

            comparison_category = _result_types_to_comparison_category(
                result_types_present_a,result_types_present_b,ground_truth_type,options)

            # TODO: this may or may not be the right way to interpret sorting
            # by confidence in this case, e.g., we may want to sort by confidence
            # of correct or incorrect matches. But this isn't *wrong*.
            max_conf_a = _maxempty([det['conf'] for det in im_a['detections']])
            max_conf_b = _maxempty([det['conf'] for det in im_b['detections']])
            sort_conf = max(max_conf_a,max_conf_b)

        # ...what kind of ground truth (if any) do we have?

        assert comparison_category is not None
        categories_to_image_pairs[comparison_category][fn] = im_pair
        im_pair['sort_conf'] = sort_conf

    # ...for each filename


    ##%% Sample and plot differences

    pool = None

    if options.n_rendering_workers > 1:
        worker_type = 'processes'
        if options.parallelize_rendering_with_threads:
            worker_type = 'threads'
        print('Rendering images with {} {}'.format(options.n_rendering_workers,worker_type))
        if options.parallelize_rendering_with_threads:
            pool = ThreadPool(options.n_rendering_workers)
        else:
            pool = Pool(options.n_rendering_workers)

    local_output_folder = os.path.join(options.output_folder,'cmp_' +
                                       str(output_index).zfill(3))

    def _render_detection_comparisons(category,image_pairs,image_filenames):
        """
        Render all the detection result pairs for the sampled images in a
        particular category (e.g. all the "common detections").
        """

        print('Rendering detections for category {}'.format(category))

        category_folder = os.path.join(local_output_folder,category)
        os.makedirs(category_folder,exist_ok=True)

        # fn = image_filenames[0]
        if options.n_rendering_workers <= 1:
            output_image_paths = []
            for fn in tqdm(image_filenames):
                output_image_paths.append(_render_image_pair(fn,image_pairs,category_folder,
                                                             options,pairwise_options))
        else:
            output_image_paths = list(tqdm(pool.imap(
                partial(_render_image_pair, image_pairs=image_pairs,
                        category_folder=category_folder,options=options,
                        pairwise_options=pairwise_options),
                image_filenames),
                total=len(image_filenames)))

        return output_image_paths

    # ...def _render_detection_comparisons()
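
    # The parallel branch above uses the standard functools.partial + Pool.imap
    # pattern: every argument of _render_image_pair except the filename is bound in
    # advance, then filenames are streamed through the pool. A minimal self-contained
    # sketch of the same pattern (illustrative names, not part of this module):
    #
    #   from functools import partial
    #   from multiprocessing.pool import ThreadPool
    #
    #   def work(fn, suffix):
    #       return fn + suffix
    #
    #   with ThreadPool(4) as p:
    #       results = list(p.imap(partial(work, suffix='_done'), ['a', 'b', 'c']))
    #
    #   # results == ['a_done', 'b_done', 'c_done']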

    if len(options.colormap_a) > 1:
        color_string_a = str(options.colormap_a)
    else:
        color_string_a = options.colormap_a[0]

    if len(options.colormap_b) > 1:
        color_string_b = str(options.colormap_b)
    else:
        color_string_b = options.colormap_b[0]


    # For each category, generate comparison images and the
    # comparison HTML page.
    #
    # category = 'common_detections'
    for category in categories_to_image_pairs.keys():

        # Choose detection pairs we're going to render for this category
        image_pairs = categories_to_image_pairs[category]
        image_filenames = list(image_pairs.keys())

        if options.max_images_per_category is not None and options.max_images_per_category > 0:
            if len(image_filenames) > options.max_images_per_category:
                print('Sampling {} of {} image pairs for category {}'.format(
                    options.max_images_per_category,
                    len(image_filenames),
                    category))
                image_filenames = random.sample(image_filenames,
                                                options.max_images_per_category)
            assert len(image_filenames) <= options.max_images_per_category

        input_image_absolute_paths = [os.path.join(options.image_folder,fn) for fn in image_filenames]

        category_image_output_paths = _render_detection_comparisons(category,
                                                                    image_pairs,image_filenames)

        category_html_filename = os.path.join(local_output_folder,
                                              category + '.html')
        category_image_output_paths_relative = [os.path.relpath(s,local_output_folder)
                                                for s in category_image_output_paths]

        image_info = []

        assert len(category_image_output_paths_relative) == len(input_image_absolute_paths)

        for i_fn,fn in enumerate(category_image_output_paths_relative):

            input_path_relative = image_filenames[i_fn]
            image_pair = image_pairs[input_path_relative]
            image_a = image_pair['im_a']
            image_b = image_pair['im_b']

            if options.fn_to_display_fn is not None:
                assert input_path_relative in options.fn_to_display_fn, \
                    'fn_to_display_fn provided, but {} is not mapped'.format(input_path_relative)
                display_path = options.fn_to_display_fn[input_path_relative]
            else:
                display_path = input_path_relative

            sort_conf = image_pair['sort_conf']

            max_conf_a = _maxempty([det['conf'] for det in image_a['detections']])
            max_conf_b = _maxempty([det['conf'] for det in image_b['detections']])

            title = display_path + ' (max conf {:.2f},{:.2f})'.format(max_conf_a,max_conf_b)

            if options.parse_link_paths:
                link_target_string = urllib.parse.quote(input_image_absolute_paths[i_fn])
            else:
                link_target_string = input_image_absolute_paths[i_fn]

            info = {
                'filename': fn,
                'title': title,
                'textStyle': 'font-family:verdana,arial,calibri;font-size:' +
                             '80%;text-align:left;margin-top:20;margin-bottom:5',
                'linkTarget': link_target_string,
                'sort_conf': sort_conf
            }

            image_info.append(info)

        # ...for each image

        category_page_header_string = '<h1>{}</h1>\n'.format(categories_to_page_titles[category])
        category_page_header_string += '<p style="font-weight:bold;">\n'
        category_page_header_string += 'Model A: {} ({})<br/>\n'.format(
            pairwise_options.results_description_a,color_string_a)
        category_page_header_string += 'Model B: {} ({})'.format(
            pairwise_options.results_description_b,color_string_b)
        category_page_header_string += '</p>\n'

        category_page_header_string += '<p>\n'
        category_page_header_string += 'Detection thresholds for A ({}):\n{}<br/>'.format(
            pairwise_options.results_description_a,str(pairwise_options.detection_thresholds_a))
        category_page_header_string += 'Detection thresholds for B ({}):\n{}<br/>'.format(
            pairwise_options.results_description_b,str(pairwise_options.detection_thresholds_b))
        category_page_header_string += 'Rendering threshold for A ({}):\n{}<br/>'.format(
            pairwise_options.results_description_a,
            str(pairwise_options.rendering_confidence_threshold_a))
        category_page_header_string += 'Rendering threshold for B ({}):\n{}<br/>'.format(
            pairwise_options.results_description_b,
            str(pairwise_options.rendering_confidence_threshold_b))
        category_page_header_string += '</p>\n'

        subpage_header_string = '\n'.join(category_page_header_string.split('\n')[1:])

        # Default to sorting by filename
        if options.sort_by_confidence:
            image_info = sorted(image_info, key=lambda d: d['sort_conf'], reverse=True)
        else:
            image_info = sorted(image_info, key=lambda d: d['filename'])

        write_html_image_list(
            category_html_filename,
            images=image_info,
            options={
                'headerHtml': category_page_header_string,
                'subPageHeaderHtml': subpage_header_string,
                'maxFiguresPerHtmlFile': options.max_images_per_page
            })

    # ...for each category

    if pool is not None:
        try:
            pool.close()
            pool.join()
            print('Pool closed and joined for comparison rendering')
        except Exception:
            pass


    ##%% Write the top-level HTML file content

    html_output_string = ''

    def _sanitize_id_name(s, lower=True):
        """
        Remove characters in [s] that are not allowed in HTML id attributes
        """

        s = re.sub(r'[^a-zA-Z0-9_-]', '', s)
        s = re.sub(r'^[^a-zA-Z]*', '', s)
        if lower:
            s = s.lower()
        return s
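
    # Illustrative examples (not in the original source) of the sanitizer above:
    # disallowed characters are dropped first, then any leading non-letters, then
    # (by default) the result is lowercased:
    #
    #   _sanitize_id_name('MDv5a (2022.01)')   # -> 'mdv5a202201'
    #   _sanitize_id_name('4-way comparison')  # -> 'waycomparison'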

    comparison_short_name = '{}_vs_{}'.format(
        _sanitize_id_name(pairwise_options.results_description_a),
        _sanitize_id_name(pairwise_options.results_description_b))

    comparison_friendly_name = '{} vs {}'.format(
        pairwise_options.results_description_a,
        pairwise_options.results_description_b
    )

    html_output_string += '<p id="{}">Comparing <b>{}</b> (A, {}) to <b>{}</b> (B, {})</p>'.format(
        comparison_short_name,
        pairwise_options.results_description_a,color_string_a.lower(),
        pairwise_options.results_description_b,color_string_b.lower())
    html_output_string += '<div class="contentdiv">\n'
    html_output_string += 'Detection thresholds for {}:\n{}<br/>'.format(
        pairwise_options.results_description_a,
        str(pairwise_options.detection_thresholds_a))
    html_output_string += 'Detection thresholds for {}:\n{}<br/>'.format(
        pairwise_options.results_description_b,
        str(pairwise_options.detection_thresholds_b))
    html_output_string += 'Rendering threshold for {}:\n{}<br/>'.format(
        pairwise_options.results_description_a,
        str(pairwise_options.rendering_confidence_threshold_a))
    html_output_string += 'Rendering threshold for {}:\n{}<br/>'.format(
        pairwise_options.results_description_b,
        str(pairwise_options.rendering_confidence_threshold_b))

    html_output_string += '<br/>'

    html_output_string += 'Rendering a maximum of {} images per category<br/>'.format(
        options.max_images_per_category)

    html_output_string += '<br/>'

    category_summary = ''
    for i_category,category_name in enumerate(categories_to_image_pairs):
        if i_category > 0:
            category_summary += '<br/>'
        category_summary += '{} {}'.format(
            len(categories_to_image_pairs[category_name]),
            category_name.replace('_',' '))

    category_summary = \
        'Of {} total files:<br/><br/><div style="margin-left:15px;">{}</div><br/>'.format(
            len(filenames_to_compare),category_summary)

    html_output_string += category_summary

    html_output_string += 'Comparison pages:<br/><br/>\n'
    html_output_string += '<div style="margin-left:15px;">\n'

    comparison_path_relative = os.path.relpath(local_output_folder,options.output_folder)
    for category in categories_to_image_pairs.keys():
        category_html_filename = os.path.join(comparison_path_relative,category + '.html')
        html_output_string += '<a href="{}">{}</a><br/>\n'.format(
            category_html_filename,category)

    html_output_string += '</div>\n'
    html_output_string += '</div>\n'

    pairwise_results = PairwiseBatchComparisonResults()

    pairwise_results.comparison_short_name = comparison_short_name
    pairwise_results.comparison_friendly_name = comparison_friendly_name
    pairwise_results.html_content = html_output_string
    pairwise_results.pairwise_options = pairwise_options
    pairwise_results.categories_to_image_pairs = categories_to_image_pairs

    return pairwise_results

# ...def _pairwise_compare_batch_results()


def compare_batch_results(options):
    """
    The main entry point for this module. Runs one or more batch results comparisons,
    writing results to an HTML page. Most of the work is deferred to
    _pairwise_compare_batch_results().

    Args:
        options (BatchComparisonOptions): job options to use for this comparison task,
            including the list of specific pairwise comparisons to make (in the
            pairwise_options field)

    Returns:
        BatchComparisonResults: the results of this comparison task
    """

    assert options.output_folder is not None
    assert options.image_folder is not None
    assert options.pairwise_options is not None

    options = copy.deepcopy(options)

    if not isinstance(options.pairwise_options,list):
        options.pairwise_options = [options.pairwise_options]

    pairwise_options_list = options.pairwise_options
    n_comparisons = len(pairwise_options_list)

    options.pairwise_options = None

    html_content = ''
    all_pairwise_results = []

    # i_comparison = 0; pairwise_options = pairwise_options_list[i_comparison]
    for i_comparison,pairwise_options in enumerate(pairwise_options_list):

        print('Running comparison {} of {}'.format(i_comparison+1,n_comparisons))
        pairwise_options.verbose = options.verbose
        pairwise_results = \
            _pairwise_compare_batch_results(options,i_comparison,pairwise_options)
        if not options.return_images_by_category:
            pairwise_results.categories_to_image_pairs = None
        html_content += pairwise_results.html_content
        all_pairwise_results.append(pairwise_results)

    # ...for each pairwise comparison

    html_output_string = main_page_header
    job_name_string = ''
    if len(options.job_name) > 0:
        job_name_string = ' for {}'.format(options.job_name)
    html_output_string += '<h2>Comparison of results{}</h2>\n'.format(
        job_name_string)

    if options.include_toc and (len(pairwise_options_list) > 2):
        toc_string = '<p><b>Contents</b></p>\n'
        toc_string += '<div class="contentdiv">\n'
        for r in all_pairwise_results:
            toc_string += '<a href="#{}">{}</a><br/>'.format(r.comparison_short_name,
                                                             r.comparison_friendly_name)
        toc_string += '</div>\n'
        html_output_string += toc_string

    html_output_string += html_content
    html_output_string += main_page_footer

    html_output_file = os.path.join(options.output_folder,'index.html')
    with open(html_output_file,'w') as f:
        f.write(html_output_string)

    results = BatchComparisonResults()
    results.html_output_file = html_output_file
    results.pairwise_results = all_pairwise_results
    return results

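# Illustrative sketch (not part of the original module) of driving
# compare_batch_results() directly with a single explicitly-constructed pairwise
# comparison. The file paths are hypothetical, and this assumes the unset
# PairwiseBatchComparisonOptions fields (e.g. results_description_a/b and the
# rendering thresholds) have usable defaults.

def _example_single_pairwise_comparison():

    options = BatchComparisonOptions()
    options.job_name = 'example-comparison'
    options.output_folder = '/tmp/comparison-example'
    options.image_folder = '/data/images'

    pairwise_options = PairwiseBatchComparisonOptions()
    pairwise_options.results_filename_a = '/data/results-mdv5a.json'
    pairwise_options.results_filename_b = '/data/results-mdv5b.json'
    pairwise_options.detection_thresholds_a = {'default': 0.15}
    pairwise_options.detection_thresholds_b = {'default': 0.15}

    # compare_batch_results() accepts a single PairwiseBatchComparisonOptions or a
    # list of them
    options.pairwise_options = pairwise_options

    results = compare_batch_results(options)
    return results.html_output_file
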
def n_way_comparison(filenames,
                     options,
                     detection_thresholds=None,
                     rendering_thresholds=None,
                     model_names=None):
    """
    Performs N pairwise comparisons for the list of results files in [filenames], by
    generating sets of pairwise options and calling compare_batch_results.

    Args:
        filenames (list): list of MD results filenames to compare
        options (BatchComparisonOptions): task options set in which pairwise_options is
            still empty; that will get populated from [filenames]
        detection_thresholds (list, optional): list of detection thresholds with the
            same length as [filenames], or None to use sensible defaults
        rendering_thresholds (list, optional): list of rendering thresholds with the
            same length as [filenames], or None to use sensible defaults
        model_names (list, optional): list of model names to use in the output HTML
            file, with the same length as [filenames], or None to use sensible defaults

    Returns:
        BatchComparisonResults: the results of this comparison task
    """

    if detection_thresholds is None:
        detection_thresholds = [0.15] * len(filenames)
    assert len(detection_thresholds) == len(filenames), \
        '[detection_thresholds] should be the same length as [filenames]'

    if rendering_thresholds is not None:
        assert len(rendering_thresholds) == len(filenames), \
            '[rendering_thresholds] should be the same length as [filenames]'
    else:
        rendering_thresholds = [(x*0.6666) for x in detection_thresholds]

    if model_names is not None:
        assert len(model_names) == len(filenames), \
            '[model_names] should be the same length as [filenames]'

    options.pairwise_options = []

    # Choose all pairwise combinations of the files in [filenames]
    for i, j in itertools.combinations(list(range(0,len(filenames))),2):

        pairwise_options = PairwiseBatchComparisonOptions()

        pairwise_options.results_filename_a = filenames[i]
        pairwise_options.results_filename_b = filenames[j]

        pairwise_options.rendering_confidence_threshold_a = rendering_thresholds[i]
        pairwise_options.rendering_confidence_threshold_b = rendering_thresholds[j]

        pairwise_options.detection_thresholds_a = {'default':detection_thresholds[i]}
        pairwise_options.detection_thresholds_b = {'default':detection_thresholds[j]}

        if model_names is not None:
            pairwise_options.results_description_a = model_names[i]
            pairwise_options.results_description_b = model_names[j]

        options.pairwise_options.append(pairwise_options)

    return compare_batch_results(options)

# ...def n_way_comparison(...)

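# Illustrative note (not part of the original module): n_way_comparison() compares
# every unordered pair, so N files produce N*(N-1)/2 comparisons. A quick sketch of
# the index-pair generation used above, relying on the same module-level itertools
# import; the filenames are hypothetical:

def _example_pairwise_combinations():

    filenames = ['mdv4.json','mdv5a.json','mdv5b.json']
    index_pairs = list(itertools.combinations(range(len(filenames)),2))

    # Three files yield three comparisons
    assert index_pairs == [(0, 1), (0, 2), (1, 2)]
    return index_pairs
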
def find_image_level_detections_above_threshold(results,threshold=0.2,category_names=None):
    """
    Returns images in the set of MD results [results] with detections above
    a threshold confidence level, optionally only counting certain categories.

    Args:
        results (str or dict): the set of results, either a .json filename or a results
            dict
        threshold (float, optional): the confidence threshold a detection must meet or
            exceed for its image to be included
        category_names (list or str, optional): the list of category names to consider
            (defaults to using all categories), or the name of a single category.

    Returns:
        list: the images with above-threshold detections
    """

    if isinstance(results,str):
        with open(results,'r') as f:
            results = json.load(f)

    category_ids_to_consider = None

    if category_names is not None:

        if isinstance(category_names,str):
            category_names = [category_names]

        category_id_to_name = results['detection_categories']
        category_name_to_id = invert_dictionary(category_id_to_name)

        category_ids_to_consider = []

        # category_name = category_names[0]
        for category_name in category_names:
            category_id = category_name_to_id[category_name]
            category_ids_to_consider.append(category_id)

        assert len(category_ids_to_consider) > 0, \
            'Category name list did not map to any category IDs'

    images_above_threshold = []

    for im in results['images']:

        if ('detections' in im) and (im['detections'] is not None) and (len(im['detections']) > 0):
            confidence_values_this_image = [0]
            for det in im['detections']:
                if category_ids_to_consider is not None:
                    if det['category'] not in category_ids_to_consider:
                        continue
                confidence_values_this_image.append(det['conf'])
            if max(confidence_values_this_image) >= threshold:
                images_above_threshold.append(im)

    # ...for each image

    return images_above_threshold

# ...def find_image_level_detections_above_threshold(...)

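# Illustrative usage sketch (not part of the original module) for
# find_image_level_detections_above_threshold(), using a toy in-memory results dict
# in MD output format rather than a .json file; the category IDs and file names are
# made up.

def _example_find_detections_above_threshold():

    toy_results = {
        'detection_categories': {'1': 'animal', '2': 'person'},
        'images': [
            {'file': 'a.jpg',
             'detections': [{'category': '1', 'conf': 0.90, 'bbox': [0, 0, 0.1, 0.1]}]},
            {'file': 'b.jpg',
             'detections': [{'category': '2', 'conf': 0.95, 'bbox': [0, 0, 0.1, 0.1]}]},
            {'file': 'c.jpg', 'detections': []}
        ]
    }

    # Only a.jpg has an above-threshold detection in the 'animal' category; b.jpg's
    # detection is in a category we're not considering, and c.jpg has no detections
    hits = find_image_level_detections_above_threshold(toy_results,
                                                       threshold=0.2,
                                                       category_names='animal')
    assert [im['file'] for im in hits] == ['a.jpg']
    return hits
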
def find_equivalent_threshold(results_a,
                              results_b,
                              threshold_a=0.2,
                              category_names=None,
                              verbose=False):
    """
    Given two sets of detector results, finds the confidence threshold for results_b
    that produces the same fraction of *images* with detections as threshold_a does for
    results_a. Uses all categories unless [category_names] is provided.

    Args:
        results_a (str or dict): the first set of results, either a .json filename or a
            results dict
        results_b (str or dict): the second set of results, either a .json filename or a
            results dict
        threshold_a (float, optional): the threshold used to determine the target number
            of detections in results_a
        category_names (list or str, optional): the list of category names to consider
            (defaults to using all categories), or the name of a single category.
        verbose (bool, optional): enable additional debug output

    Returns:
        float: the threshold that - when applied to results_b - produces the same
            fraction of image-level detections that results from applying threshold_a
            to results_a
    """

    if isinstance(results_a,str):
        if verbose:
            print('Loading results from {}'.format(results_a))
        with open(results_a,'r') as f:
            results_a = json.load(f)

    if isinstance(results_b,str):
        if verbose:
            print('Loading results from {}'.format(results_b))
        with open(results_b,'r') as f:
            results_b = json.load(f)

    category_ids_to_consider_a = None
    category_ids_to_consider_b = None

    if category_names is not None:

        if isinstance(category_names,str):
            category_names = [category_names]

        categories_a = results_a['detection_categories']
        categories_b = results_b['detection_categories']
        category_name_to_id_a = invert_dictionary(categories_a)
        category_name_to_id_b = invert_dictionary(categories_b)

        category_ids_to_consider_a = []
        category_ids_to_consider_b = []

        # category_name = category_names[0]
        for category_name in category_names:
            category_id_a = category_name_to_id_a[category_name]
            category_id_b = category_name_to_id_b[category_name]
            category_ids_to_consider_a.append(category_id_a)
            category_ids_to_consider_b.append(category_id_b)

        assert len(category_ids_to_consider_a) > 0 and len(category_ids_to_consider_b) > 0, \
            'Category name list did not map to any category IDs in one or both detection sets'

    def _get_confidence_values_for_results(images,category_ids_to_consider,threshold):
        """
        Return a list of the maximum confidence value for each image in [images].
        Returns zero confidence for images with no detections (or no detections
        in the specified categories). Does not return anything for invalid images.
        """

        confidence_values = []
        images_above_threshold = []

        for im in images:
            if 'detections' in im and im['detections'] is not None:
                if len(im['detections']) == 0:
                    confidence_values.append(0)
                else:
                    confidence_values_this_image = []
                    for det in im['detections']:
                        if category_ids_to_consider is not None:
                            if det['category'] not in category_ids_to_consider:
                                continue
                        confidence_values_this_image.append(det['conf'])
                    if len(confidence_values_this_image) == 0:
                        confidence_values.append(0)
                    else:
                        max_conf_value = max(confidence_values_this_image)

                        if threshold is not None and max_conf_value >= threshold:
                            images_above_threshold.append(im)
                        confidence_values.append(max_conf_value)
        # ...for each image

        return confidence_values, images_above_threshold

    # ...def _get_confidence_values_for_results(...)

    confidence_values_a,images_above_threshold_a = \
        _get_confidence_values_for_results(results_a['images'],
                                           category_ids_to_consider_a,
                                           threshold_a)

    # Not necessary, but facilitates debugging
    confidence_values_a = sorted(confidence_values_a)

    if verbose:
        print('For result set A, considering {} of {} images'.format(
            len(confidence_values_a),len(results_a['images'])))

    # Needed below regardless of verbosity
    confidence_values_a_above_threshold = [c for c in confidence_values_a if c >= threshold_a]

    confidence_values_b,_ = _get_confidence_values_for_results(results_b['images'],
                                                               category_ids_to_consider_b,
                                                               threshold=None)
    if verbose:
        print('For result set B, considering {} of {} images'.format(
            len(confidence_values_b),len(results_b['images'])))

    confidence_values_b = sorted(confidence_values_b)

    # Find the threshold that produces the same fraction of detections for results_b
    target_detection_fraction = len(confidence_values_a_above_threshold) / len(confidence_values_a)

    # How many detections do we want in results_b?
    target_number_of_detections = round(len(confidence_values_b) * target_detection_fraction)

    # How many non-detections do we want in results_b?
    target_number_of_non_detections = len(confidence_values_b) - target_number_of_detections

    # Clamp to a valid index; if the target detection count is zero, the cutoff would
    # otherwise point one past the end of the list
    detection_cutoff_index = min(max(target_number_of_non_detections,0),
                                 len(confidence_values_b)-1)
    threshold_b = confidence_values_b[detection_cutoff_index]

    confidence_values_b_above_threshold = [c for c in confidence_values_b if c >= threshold_b]
    confidence_values_b_above_reference_threshold = [c for c in confidence_values_b if c >= threshold_a]

    # Special case: if the number of detections above the selected threshold is the same
    # as the number above the reference threshold, use the reference threshold
    if len(confidence_values_b_above_threshold) == len(confidence_values_b_above_reference_threshold):
        print('Detection count for reference threshold matches target threshold')
        threshold_b = threshold_a

    if verbose:
        print('{} confidence values above threshold (A)'.format(
            len(confidence_values_a_above_threshold)))
        confidence_values_b_above_threshold = \
            [c for c in confidence_values_b if c >= threshold_b]
        print('{} confidence values above threshold (B)'.format(
            len(confidence_values_b_above_threshold)))

    return threshold_b

# ...def find_equivalent_threshold(...)

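# Illustrative worked example (not part of the original module) of the
# quantile-matching arithmetic in find_equivalent_threshold(), with made-up
# confidence values:

def _example_equivalent_threshold_arithmetic():

    # Per-image maximum confidences for result set B, sorted ascending
    confidence_values_b = [0.05, 0.10, 0.20, 0.40, 0.90]

    # Suppose 40% of result set A's images are at or above threshold_a
    target_detection_fraction = 0.4

    # We want round(5 * 0.4) = 2 detection images in B, i.e. 3 non-detections, so the
    # cutoff index is 3 and threshold_b is 0.40; applying 0.40 to B flags exactly the
    # top two images, matching A's detection fraction
    target_number_of_detections = round(len(confidence_values_b) * target_detection_fraction)
    target_number_of_non_detections = len(confidence_values_b) - target_number_of_detections
    threshold_b = confidence_values_b[target_number_of_non_detections]

    assert threshold_b == 0.40
    return threshold_b
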
#%% Interactive driver

if False:

    #%% Prepare test files

    from megadetector.utils.path_utils import insert_before_extension

    model_names = ['mdv5a','mdv5b']
    image_folder = 'g:/temp/md-test-images'
    output_filename_base = os.path.join(image_folder,'comparison_test.json')

    output_filenames = []

    commands = []

    for model_name in model_names:
        output_filename = insert_before_extension(output_filename_base,model_name)
        output_filenames.append(output_filename)
        cmd = 'python -m megadetector.detection.run_detector_batch'
        cmd += ' {} {} {} --recursive --output_relative_filenames'.format(
            model_name, image_folder,output_filename)
        commands.append(cmd)

    cmd = '\n\n'.join(commands)
    print(cmd)
    import clipboard
    clipboard.copy(cmd)


    #%% Test two-way comparison

    options = BatchComparisonOptions()

    options.parallelize_rendering_with_threads = True

    options.job_name = 'md-test-images'
    options.output_folder = r'g:\temp\comparisons'
    options.image_folder = image_folder
    options.max_images_per_category = 100
    options.sort_by_confidence = True

    options.pairwise_options = []

    results_base = os.path.expanduser('~/postprocessing/bellevue-camera-traps')

    detection_thresholds = [0.15,0.15]
    rendering_thresholds = None

    results = n_way_comparison(filenames=output_filenames,
                               options=options,
                               detection_thresholds=detection_thresholds,
                               rendering_thresholds=rendering_thresholds)

    from megadetector.utils.path_utils import open_file
    open_file(results.html_output_file)


    #%% Test three-way comparison

    options = BatchComparisonOptions()

    options.parallelize_rendering_with_threads = False

    options.job_name = 'KGA-test'
    options.output_folder = os.path.expanduser('~/tmp/md-comparison-test')
    options.image_folder = os.path.expanduser('~/data/KGA')

    options.pairwise_options = []

    filenames = [
        os.path.expanduser('~/data/KGA-4.json'),
        os.path.expanduser('~/data/KGA-5a.json'),
        os.path.expanduser('~/data/KGA-5b.json')
    ]

    detection_thresholds = [0.7,0.15,0.15]

    results = n_way_comparison(filenames,options,detection_thresholds,rendering_thresholds=None)

    from megadetector.utils.path_utils import open_file
    open_file(results.html_output_file)


#%% Command-line driver

"""
python compare_batch_results.py ~/tmp/comparison-test ~/data/KGA \
    ~/data/KGA-5a.json ~/data/KGA-5b.json ~/data/KGA-4.json \
    --detection_thresholds 0.15 0.15 0.7 --rendering_thresholds 0.1 0.1 0.6 --use_processes
"""

def main(): # noqa

    options = BatchComparisonOptions()

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''\
            Example:

            python compare_batch_results.py output_folder image_folder mdv5a.json mdv5b.json mdv4.json --detection_thresholds 0.15 0.15 0.7
            '''))

    parser.add_argument('output_folder', type=str, help='folder to which to write HTML results')

    parser.add_argument('image_folder', type=str, help='image source folder')

    parser.add_argument('results_files', nargs='*', type=str, help='list of .json files to be compared')

    parser.add_argument('--detection_thresholds', nargs='*', type=float,
                        help='list of detection thresholds, same length as the number of .json files, ' + \
                             'defaults to 0.15 for all files')

    parser.add_argument('--rendering_thresholds', nargs='*', type=float,
                        help='list of rendering thresholds, same length as the number of .json files, ' + \
                             'defaults to 2/3 of the corresponding detection threshold for each file')

    parser.add_argument('--max_images_per_category', type=int, default=options.max_images_per_category,
                        help='number of images to sample for each agreement category (common detections, etc.)')

    parser.add_argument('--target_width', type=int, default=options.target_width,
                        help='output image width, defaults to {}'.format(options.target_width))

    parser.add_argument('--use_processes', action='store_true',
                        help='use processes rather than threads for parallelization')

    parser.add_argument('--open_results', action='store_true',
                        help='open the output HTML file when done')

    parser.add_argument('--n_rendering_workers', type=int, default=options.n_rendering_workers,
                        help='number of workers for parallel rendering, defaults to {}'.format(
                            options.n_rendering_workers))

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    print('Output folder:')
    print(args.output_folder)

    print('\nResults files:')
    print(args.results_files)

    print('\nDetection thresholds:')
    print(args.detection_thresholds)

    print('\nRendering thresholds:')
    print(args.rendering_thresholds)

    # Convert to options objects
    options = BatchComparisonOptions()

    options.output_folder = args.output_folder
    options.image_folder = args.image_folder
    options.target_width = args.target_width
    options.n_rendering_workers = args.n_rendering_workers
    options.max_images_per_category = args.max_images_per_category

    if args.use_processes:
        options.parallelize_rendering_with_threads = False

    results = n_way_comparison(args.results_files,
                               options,
                               args.detection_thresholds,
                               args.rendering_thresholds)

    if args.open_results:
        path_utils.open_file(results.html_output_file)

    print('Wrote results to {}'.format(results.html_output_file))

# ...main()


if __name__ == '__main__':

    main()