megadetector 5.0.27-py3-none-any.whl → 5.0.29-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of megadetector has been flagged as potentially problematic.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +232 -223
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +341 -338
- megadetector/detection/pytorch_detector.py +308 -266
- megadetector/detection/run_detector.py +186 -166
- megadetector/detection/run_detector_batch.py +366 -364
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +312 -253
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +291 -283
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +808 -311
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +220 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -0
- megadetector/postprocessing/load_api_results.py +25 -22
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -302
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1019 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1511 -406
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +73 -60
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2868
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +424 -404
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +126 -98
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.27.dist-info/RECORD +0 -208
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
Expanded diff for megadetector/postprocessing/render_detection_confusion_matrix.py (the +205 -199 entry above). Most hunks only strip trailing whitespace, which shows up as paired -/+ lines with no visible text; the substantive change creates the parallel-rendering worker pool inside a try/finally block and closes and joins it when rendering finishes.

--- a/megadetector/postprocessing/render_detection_confusion_matrix.py
+++ b/megadetector/postprocessing/render_detection_confusion_matrix.py
@@ -36,12 +36,12 @@ def _image_to_output_file(im,preview_images_folder):
     """
     Produces a clean filename from im (if [im] is a str) or im['file'] (if [im] is a dict).
     """
-
+
     if isinstance(im,str):
         filename_relative = im
     else:
         filename_relative = im['file']
-
+
     fn_clean = flatten_path(filename_relative).replace(' ','_')
     return os.path.join(preview_images_folder,fn_clean)

@@ -50,7 +50,7 @@ def _render_image(im,render_image_constants):
     """
     Internal function for rendering a single image to the confusion matrix preview folder.
     """
-
+
     filename_to_ground_truth_im = render_image_constants['filename_to_ground_truth_im']
     image_folder = render_image_constants['image_folder']
     preview_images_folder = render_image_constants['preview_images_folder']
@@ -58,18 +58,18 @@ def _render_image(im,render_image_constants):
     results_category_id_to_name = render_image_constants['results_category_id_to_name']
     rendering_confidence_thresholds = render_image_constants['rendering_confidence_thresholds']
     target_image_size = render_image_constants['target_image_size']
-
+
     assert im['file'] in filename_to_ground_truth_im
-
+
     output_file = _image_to_output_file(im,preview_images_folder)
     if os.path.isfile(output_file) and not force_render_images:
         return output_file
-
+
     input_file = os.path.join(image_folder,im['file'])
     assert os.path.isfile(input_file)
-
+
     detections_to_render = []
-
+
     for det in im['detections']:
         category_name = results_category_id_to_name[det['category']]
         detection_threshold = rendering_confidence_thresholds['default']
@@ -77,11 +77,11 @@ def _render_image(im,render_image_constants):
            detection_threshold = rendering_confidence_thresholds[category_name]
        if det['conf'] > detection_threshold:
            detections_to_render.append(det)
-
+
     vis_utils.draw_bounding_boxes_on_file(input_file, output_file, detections_to_render,
                                           detector_label_map=results_category_id_to_name,
                                           label_font_size=20,target_size=target_image_size)
-
+
     return output_file


@@ -91,7 +91,7 @@ def render_detection_confusion_matrix(ground_truth_file,
                                       results_file,
                                       image_folder,
                                       preview_folder,
-                                      force_render_images=False,
+                                      force_render_images=False,
                                       confidence_thresholds=None,
                                       rendering_confidence_thresholds=None,
                                       target_image_size=(1280,-1),
@@ -102,15 +102,15 @@ def render_detection_confusion_matrix(ground_truth_file,
                                       model_file=None,
                                       empty_category_name='empty',
                                       html_image_list_options=None):
-    """
+    """
     Given a CCT-formatted ground truth file and a MegaDetector-formatted results file,
-    render an HTML confusion matrix in [preview_folder. Typically used for multi-class detectors.
+    render an HTML confusion matrix in [preview_folder. Typically used for multi-class detectors.
     Currently assumes a single class per image.
-
+
     confidence_thresholds and rendering_confidence_thresholds are dictionaries mapping
     class names to thresholds. "default" is a special token that will be used for all
     classes not otherwise assigned thresholds.
-
+
     Args:
         ground_truth_file (str): the CCT-formatted .json file with ground truth information
         results_file (str): the MegaDetector results .json file
@@ -123,7 +123,7 @@ def render_detection_confusion_matrix(ground_truth_file,
            all classes not explicitly named here will use the threshold for the "default" category.
        rendering_thresholds (dict, optional): a dictionary mapping class names to thresholds;
            all classes not explicitly named here will use the threshold for the "default" category.
-       target_image_size (tuple, optional): output image size, as a pair of ints (width,height). If one
+       target_image_size (tuple, optional): output image size, as a pair of ints (width,height). If one
            value is -1 and the other is not, aspect ratio is preserved. If both are -1, the original image
            sizes are preserved.
        parallelize_rendering (bool, optional): enable (default) or disable parallelization when rendering
@@ -135,12 +135,12 @@ def render_detection_confusion_matrix(ground_truth_file,
        model_file (str, optional) model filename to include in HTML output
        empty_category_name (str, optional): special category name that we should treat as empty, typically
            "empty"
-       html_image_list_options (dict, optional): options listed passed along to write_html_image_list;
-           see write_html_image_list for documentation.
+       html_image_list_options (dict, optional): options listed passed along to write_html_image_list;
+           see write_html_image_list for documentation.
    """
-
+
    ##%% Argument and path handling
-
+
    preview_images_folder = os.path.join(preview_folder,'images')
    os.makedirs(preview_images_folder,exist_ok=True)

@@ -148,75 +148,75 @@ def render_detection_confusion_matrix(ground_truth_file,
        confidence_thresholds = {'default':0.5}
    if rendering_confidence_thresholds is None:
        rendering_confidence_thresholds = {'default':0.4}
-

-
-
+
+    ##%% Load ground truth
+
    with open(ground_truth_file,'r') as f:
        ground_truth_data_cct = json.load(f)
-
+
    filename_to_ground_truth_im = {}
    for im in ground_truth_data_cct['images']:
        assert im['file_name'] not in filename_to_ground_truth_im
        filename_to_ground_truth_im[im['file_name']] = im
-
-
+
+
    ##%% Confirm that the ground truth images are present in the image folder
-
+
    ground_truth_images = find_images(image_folder,return_relative_paths=True,recursive=True)
    assert len(ground_truth_images) == len(ground_truth_data_cct['images'])
    del ground_truth_images
-
-
+
+
    ##%% Map images to categories
-
+
    # gt_image_id_to_image = {im['id']:im for im in ground_truth_data_cct['images']}
    gt_image_id_to_annotations = defaultdict(list)
-
+
    ground_truth_category_id_to_name = {}
    for c in ground_truth_data_cct['categories']:
        ground_truth_category_id_to_name[c['id']] = c['name']
-
+
    # Add the empty category if necessary
    if empty_category_name not in ground_truth_category_id_to_name.values():
        empty_category_id = max(ground_truth_category_id_to_name.keys())+1
        ground_truth_category_id_to_name[empty_category_id] = empty_category_name
-
+
    ground_truth_category_names = sorted(list(ground_truth_category_id_to_name.values()))
-
+
    for ann in ground_truth_data_cct['annotations']:
        gt_image_id_to_annotations[ann['image_id']].append(ann)
-
+
    gt_filename_to_category_names = defaultdict(set)
-
+
    for im in ground_truth_data_cct['images']:
        annotations_this_image = gt_image_id_to_annotations[im['id']]
        for ann in annotations_this_image:
            category_name = ground_truth_category_id_to_name[ann['category_id']]
            gt_filename_to_category_names[im['file_name']].add(category_name)
-
+
    for filename in gt_filename_to_category_names:
-
+
        category_names_this_file = gt_filename_to_category_names[filename]
-
+
        # The empty category should be exclusive
        if empty_category_name in category_names_this_file:
            assert len(category_names_this_file) == 1, \
                'Empty category assigned along with another category for {}'.format(filename)
        assert len(category_names_this_file) > 0, \
            'No ground truth category assigned to {}'.format(filename)
-
-
+
+
    ##%% Load results
-
+
    with open(results_file,'r') as f:
        md_formatted_results = json.load(f)
-
+
    results_category_id_to_name = md_formatted_results['detection_categories']
-
-
-    ##%% Render images with detections
-
+
+
+    ##%% Render images with detections
+
    render_image_constants = {}
    render_image_constants['filename_to_ground_truth_im'] = filename_to_ground_truth_im
    render_image_constants['image_folder'] = image_folder
@@ -224,46 +224,52 @@ def render_detection_confusion_matrix(ground_truth_file,
    render_image_constants['force_render_images'] = force_render_images
    render_image_constants['results_category_id_to_name'] = results_category_id_to_name
    render_image_constants['rendering_confidence_thresholds'] = rendering_confidence_thresholds
-   render_image_constants['target_image_size'] = target_image_size
-
+   render_image_constants['target_image_size'] = target_image_size
+
    if parallelize_rendering:

-       if parallelize_rendering_n_cores is None:
-           if parallelize_rendering_with_threads:
-               pool = ThreadPool()
-           else:
-               pool = Pool()
-       else:
-           if parallelize_rendering_with_threads:
-               pool = ThreadPool(parallelize_rendering_n_cores)
-               worker_string = 'threads'
+       pool = None
+       try:
+           if parallelize_rendering_n_cores is None:
+               if parallelize_rendering_with_threads:
+                   pool = ThreadPool()
+               else:
+                   pool = Pool()
            else:
-               pool = Pool(parallelize_rendering_n_cores)
-               worker_string = 'processes'
-           print('Rendering images with {} {}'.format(parallelize_rendering_n_cores,
-                                                      worker_string))
-
-       _ = list(tqdm(pool.imap(partial(_render_image,render_image_constants=render_image_constants),
-                               md_formatted_results['images']),
-                     total=len(md_formatted_results['images'])))
-
+               if parallelize_rendering_with_threads:
+                   pool = ThreadPool(parallelize_rendering_n_cores)
+                   worker_string = 'threads'
+               else:
+                   pool = Pool(parallelize_rendering_n_cores)
+                   worker_string = 'processes'
+               print('Rendering images with {} {}'.format(parallelize_rendering_n_cores,
+                                                          worker_string))
+
+           _ = list(tqdm(pool.imap(partial(_render_image,render_image_constants=render_image_constants),
+                                   md_formatted_results['images']),
+                         total=len(md_formatted_results['images'])))
+       finally:
+           pool.close()
+           pool.join()
+           print("Pool closed and joined for confusion matrix rendering")
+
    else:
-
+
        # im = md_formatted_results['images'][0]
-       for im in tqdm(md_formatted_results['images']):
+       for im in tqdm(md_formatted_results['images']):
            _render_image(im,render_image_constants)
-
-
+
+
    ##%% Map images to predicted categories, and vice-versa
-
+
    filename_to_predicted_categories = defaultdict(set)
    predicted_category_name_to_filenames = defaultdict(set)
-
+
    # im = md_formatted_results['images'][0]
    for im in tqdm(md_formatted_results['images']):
-
+
        assert im['file'] in filename_to_ground_truth_im
-
+
        # det = im['detections'][0]
        for det in im['detections']:
            category_name = results_category_id_to_name[det['category']]
@@ -273,34 +279,34 @@ def render_detection_confusion_matrix(ground_truth_file,
            if det['conf'] > detection_threshold:
                filename_to_predicted_categories[im['file']].add(category_name)
                predicted_category_name_to_filenames[category_name].add(im['file'])
-
+
        # ...for each detection
-
+
    # ...for each image
-
-
+
+
    ##%% Create TP/TN/FP/FN lists
-
+
    category_name_to_image_lists = {}
-
+
    sub_page_tokens = ['fn','tn','fp','tp']
-
+
    for category_name in ground_truth_category_names:
-
+
        category_name_to_image_lists[category_name] = {}
        for sub_page_token in sub_page_tokens:
            category_name_to_image_lists[category_name][sub_page_token] = []
-
+
    # filename = next(iter(gt_filename_to_category_names))
    for filename in gt_filename_to_category_names.keys():
-
+
        ground_truth_categories_this_image = gt_filename_to_category_names[filename]
        predicted_categories_this_image = filename_to_predicted_categories[filename]
-
+
        for category_name in ground_truth_category_names:
-
+
            assignment = None
-
+
            if category_name == empty_category_name:
                # If this is an empty image
                if category_name in ground_truth_categories_this_image:
@@ -315,7 +321,7 @@ def render_detection_confusion_matrix(ground_truth_file,
                        assignment = 'fp'
                    else:
                        assignment = 'tn'
-
+
            else:
                if category_name in ground_truth_categories_this_image:
                    if category_name in predicted_categories_this_image:
@@ -326,43 +332,43 @@ def render_detection_confusion_matrix(ground_truth_file,
                    if category_name in predicted_categories_this_image:
                        assignment = 'fp'
                    else:
-                        assignment = 'tn'
-
+                        assignment = 'tn'
+
            category_name_to_image_lists[category_name][assignment].append(filename)
-
+
    # ...for each filename
-
-
+
+
    ##%% Create confusion matrix
-
+
    gt_category_name_to_category_index = {}
-
+
    for i_category,category_name in enumerate(ground_truth_category_names):
        gt_category_name_to_category_index[category_name] = i_category
-
-    n_categories = len(gt_category_name_to_category_index)
-
+
+    n_categories = len(gt_category_name_to_category_index)
+
    # indexed as [true,predicted]
    confusion_matrix = np.zeros(shape=(n_categories,n_categories),dtype=int)
-
+
    filename_to_results_im = {im['file']:im for im in md_formatted_results['images']}
-
+
    true_predicted_to_file_list = defaultdict(list)
-
+
    # filename = next(iter(gt_filename_to_category_names.keys()))
    for filename in gt_filename_to_category_names.keys():
-
+
        ground_truth_categories_this_image = gt_filename_to_category_names[filename]
        assert len(ground_truth_categories_this_image) == 1
        ground_truth_category_name = next(iter(ground_truth_categories_this_image))
-
+
        results_im = filename_to_results_im[filename]
-
+
        # If there were no detections at all, call this image empty
        if len(results_im['detections']) == 0:
            predicted_category_name = empty_category_name
        # Otherwise look for above-threshold detections
-        else:
+        else:
            results_category_name_to_confidence = defaultdict(int)
            for det in results_im['detections']:
                category_name = results_category_id_to_name[det['category']]
@@ -378,23 +384,23 @@ def render_detection_confusion_matrix(ground_truth_file,
            else:
                predicted_category_name = max(results_category_name_to_confidence,
                                              key=results_category_name_to_confidence.get)
-
+
        ground_truth_category_index = gt_category_name_to_category_index[ground_truth_category_name]
        predicted_category_index = gt_category_name_to_category_index[predicted_category_name]
-
+
        true_predicted_token = ground_truth_category_name + '_' + predicted_category_name
        true_predicted_to_file_list[true_predicted_token].append(filename)
-
+
        confusion_matrix[ground_truth_category_index,predicted_category_index] += 1
-
+
    # ...for each file
-
-    plt.ioff()
-
+
+    plt.ioff()
+
    fig_h = 3 + 0.3 * n_categories
    fig_w = fig_h
    fig = plt.figure(figsize=(fig_w, fig_h),tight_layout=True)
-
+
    plot_utils.plot_confusion_matrix(
        matrix=confusion_matrix,
        classes=ground_truth_category_names,
@@ -405,32 +411,32 @@ def render_detection_confusion_matrix(ground_truth_file,
        use_colorbar=False,
        y_label=True,
        fig=fig)
-
+
    cm_figure_fn_relative = 'confusion_matrix.png'
    cm_figure_fn_abs = os.path.join(preview_folder, cm_figure_fn_relative)
    # fig.show()
    fig.savefig(cm_figure_fn_abs,dpi=100)
    plt.close(fig)
-
+
    # open_file(cm_figure_fn_abs)
-
-
+
+
    ##%% Create HTML confusion matrix
-
+
    html_confusion_matrix = '<table class="result-table">\n'
    html_confusion_matrix += '<tr>\n'
    html_confusion_matrix += '<td>{}</td>\n'.format('True category')
    for category_name in ground_truth_category_names:
        html_confusion_matrix += '<td>{}</td>\n'.format(' ')
    html_confusion_matrix += '</tr>\n'
-
+
    for true_category in ground_truth_category_names:
-
+
        html_confusion_matrix += '<tr>\n'
        html_confusion_matrix += '<td>{}</td>\n'.format(true_category)
-
+
        for predicted_category in ground_truth_category_names:
-
+
            true_predicted_token = true_category + '_' + predicted_category
            image_list = true_predicted_to_file_list[true_predicted_token]
            if len(image_list) == 0:
@@ -441,9 +447,9 @@ def render_detection_confusion_matrix(ground_truth_file,
                title_string = 'true: {}, predicted {}'.format(
                    true_category,predicted_category)
                html_image_list_options['headerHtml'] = '<h1>{}</h1>'.format(title_string)
-
+
                html_image_info_list = []
-
+
                for image_filename_relative in image_list:
                    html_image_info = {}
                    detections = filename_to_results_im[image_filename_relative]['detections']
@@ -451,7 +457,7 @@ def render_detection_confusion_matrix(ground_truth_file,
                        max_conf = 0
                    else:
                        max_conf = max([d['conf'] for d in detections])
-
+
                    title = '<b>Image</b>: {}, <b>Max conf</b>: {:0.3f}'.format(
                        image_filename_relative, max_conf)
                    image_link = 'images/' + os.path.basename(
@@ -462,100 +468,100 @@ def render_detection_confusion_matrix(ground_truth_file,
                        'textStyle':\
                            'font-family:verdana,arial,calibri;font-size:80%;' + \
                            'text-align:left;margin-top:20;margin-bottom:5'
-                    }
-
+                    }
+
                    html_image_info_list.append(html_image_info)
-
+
                target_html_file_relative = true_predicted_token + '.html'
                target_html_file_abs = os.path.join(preview_folder,target_html_file_relative)
                write_html_image_list(
                    filename=target_html_file_abs,
                    images=html_image_info_list,
                    options=html_image_list_options)
-
+
                td_content = '<a href="{}">{}</a>'.format(target_html_file_relative,
                                                          len(image_list))
-
+
            html_confusion_matrix += '<td>{}</td>\n'.format(td_content)
-
+
        # ...for each predicted category
-
+
        html_confusion_matrix += '</tr>\n'
-
-    # ...for each true category
-
+
+    # ...for each true category
+
    html_confusion_matrix += '<tr>\n'
    html_confusion_matrix += '<td> </td>\n'
-
+
    for category_name in ground_truth_category_names:
        html_confusion_matrix += '<td class="rotate"><p style="margin-left:20px;">{}</p></td>\n'.format(
            category_name)
    html_confusion_matrix += '</tr>\n'
-
+
    html_confusion_matrix += '</table>'
-
-
+
+
    ##%% Create HTML sub-pages and HTML table
-
+
    html_table = '<table class="result-table">\n'
-
+
    html_table += '<tr>\n'
    html_table += '<td>{}</td>\n'.format('True category')
    for sub_page_token in sub_page_tokens:
        html_table += '<td>{}</td>'.format(sub_page_token)
    html_table += '</tr>\n'
-
+
    filename_to_results_im = {im['file']:im for im in md_formatted_results['images']}
-
+
    sub_page_token_to_page_name = {
        'fp':'false positives',
        'tp':'true positives',
        'fn':'false negatives',
        'tn':'true negatives'
    }
-
+
    # category_name = ground_truth_category_names[0]
    for category_name in ground_truth_category_names:
-
+
        html_table += '<tr>\n'
-
+
        html_table += '<td>{}</td>\n'.format(category_name)
-
+
        # sub_page_token = sub_page_tokens[0]
        for sub_page_token in sub_page_tokens:
-
+
            html_table += '<td>\n'
-
+
            image_list = category_name_to_image_lists[category_name][sub_page_token]
-
+
            if len(image_list) == 0:
-
+
                html_table += '0\n'
-
+
            else:
-
+
                html_image_list_options = {}
                title_string = '{}: {}'.format(category_name,sub_page_token_to_page_name[sub_page_token])
                html_image_list_options['headerHtml'] = '<h1>{}</h1>'.format(title_string)
-
+
                target_html_file_relative = '{}_{}.html'.format(category_name,sub_page_token)
                target_html_file_abs = os.path.join(preview_folder,target_html_file_relative)
-
+
                html_image_info_list = []
-
+
                # image_filename_relative = image_list[0]
                for image_filename_relative in image_list:
-
+
                    source_file = os.path.join(image_folder,image_filename_relative)
                    assert os.path.isfile(source_file)
-
+
                    html_image_info = {}
                    detections = filename_to_results_im[image_filename_relative]['detections']
                    if len(detections) == 0:
                        max_conf = 0
                    else:
                        max_conf = max([d['conf'] for d in detections])
-
+
                    title = '<b>Image</b>: {}, <b>Max conf</b>: {:0.3f}'.format(
                        image_filename_relative, max_conf)
                    image_link = 'images/' + os.path.basename(
@@ -567,31 +573,31 @@ def render_detection_confusion_matrix(ground_truth_file,
                        'textStyle':\
                            'font-family:verdana,arial,calibri;font-size:80%;' + \
                            'text-align:left;margin-top:20;margin-bottom:5'
-                    }
-
+                    }
+
                    html_image_info_list.append(html_image_info)
-
+
                # ...for each image
-
+
                write_html_image_list(
                    filename=target_html_file_abs,
                    images=html_image_info_list,
                    options=html_image_list_options)
-
+
                html_table += '<a href="{}">{}</a>\n'.format(target_html_file_relative,len(image_list))
-
+
            html_table += '</td>\n'
-
+
        # ...for each sub-page
-
+
        html_table += '</tr>\n'
-
+
    # ...for each category
-
-    html_table += '</table>'
-
+
+    html_table += '</table>'
+
    html = '<html>\n'
-
+
    style_header = """<head>
    <style type="text/css">
    a { text-decoration: none; }
@@ -599,62 +605,62 @@ def render_detection_confusion_matrix(ground_truth_file,
    div.contentdiv { margin-left: 20px; }
    table.result-table { border:1px solid black; border-collapse: collapse; margin-left:50px;}
    td,th { padding:10px; }
-   .rotate {
+   .rotate {
      padding:0px;
      writing-mode:vertical-lr;
-     -webkit-transform: rotate(-180deg);
-     -moz-transform: rotate(-180deg);
-     -ms-transform: rotate(-180deg);
-     -o-transform: rotate(-180deg);
+     -webkit-transform: rotate(-180deg);
+     -moz-transform: rotate(-180deg);
+     -ms-transform: rotate(-180deg);
+     -o-transform: rotate(-180deg);
      transform: rotate(-180deg);
    }
    </style>
    </head>"""
-
+
    html += style_header + '\n'
-
+
    html += '<body>\n'
-
+
    html += '<h1>Results summary for {}</h1>\n'.format(job_name)
-
+
    if model_file is not None and len(model_file) > 0:
        html += '<p><b>Model file</b>: {}</p>'.format(os.path.basename(model_file))
-
+
    html += '<p><b>Confidence thresholds</b></p>'
-
+
    for c in confidence_thresholds.keys():
        html += '<p style="margin-left:15px;">{}: {}</p>'.format(c,confidence_thresholds[c])
-
+
    html += '<h2>Confusion matrix</h2>\n'
-
+
    html += '<p>...assuming a single category per image.</p>\n'
-
+
    html += '<img src="{}"/>\n'.format(cm_figure_fn_relative)
-
+
    html += '<h2>Confusion matrix (with links)</h2>\n'
-
+
    html += '<p>...assuming a single category per image.</p>\n'
-
+
    html += html_confusion_matrix
-
+
    html += '<h2>Per-class statistics</h2>\n'
-
+
    html += html_table
-
+
    html += '</body>\n'
    html += '<html>\n'
-
+
    target_html_file = os.path.join(preview_folder,'index.html')
-
+
    with open(target_html_file,'w') as f:
        f.write(html)
-
-
+
+
    ##%% Prepare return data
-
+
    confusion_matrix_info = {}
    confusion_matrix_info['html_file'] = target_html_file
-
+
    return confusion_matrix_info

# ...render_detection_confusion_matrix(...)