megadetector-5.0.28-py3-none-any.whl → megadetector-10.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/efficientnet/model.py +8 -8
- megadetector/classification/efficientnet/utils.py +6 -5
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +26 -26
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -2
- megadetector/data_management/camtrap_dp_to_coco.py +79 -46
- megadetector/data_management/cct_json_utils.py +103 -103
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +210 -193
- megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
- megadetector/data_management/databases/integrity_check_json_db.py +228 -200
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +88 -39
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +133 -125
- megadetector/data_management/labelme_to_yolo.py +159 -73
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
- megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +73 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
- megadetector/data_management/mewc_to_md.py +344 -340
- megadetector/data_management/ocr_tools.py +262 -255
- megadetector/data_management/read_exif.py +249 -227
- megadetector/data_management/remap_coco_categories.py +90 -28
- megadetector/data_management/remove_exif.py +81 -21
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +588 -120
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +248 -122
- megadetector/data_management/yolo_to_coco.py +333 -191
- megadetector/detection/change_detection.py +832 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +358 -278
- megadetector/detection/run_detector.py +399 -186
- megadetector/detection/run_detector_batch.py +404 -377
- megadetector/detection/run_inference_with_yolov5_val.py +340 -327
- megadetector/detection/run_tiled_inference.py +257 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +332 -295
- megadetector/postprocessing/add_max_conf.py +19 -11
- megadetector/postprocessing/categorize_detections_by_size.py +45 -45
- megadetector/postprocessing/classification_postprocessing.py +468 -433
- megadetector/postprocessing/combine_batch_outputs.py +23 -23
- megadetector/postprocessing/compare_batch_results.py +590 -525
- megadetector/postprocessing/convert_output_format.py +106 -102
- megadetector/postprocessing/create_crop_folder.py +347 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +48 -27
- megadetector/postprocessing/md_to_coco.py +133 -102
- megadetector/postprocessing/md_to_labelme.py +107 -90
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +92 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -301
- megadetector/postprocessing/remap_detection_categories.py +91 -38
- megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +156 -74
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/ct_utils.py +1049 -211
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +632 -529
- megadetector/utils/path_utils.py +1520 -431
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/split_locations_into_train_val.py +62 -62
- megadetector/utils/string_utils.py +148 -27
- megadetector/utils/url_utils.py +489 -176
- megadetector/utils/wi_utils.py +2658 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +34 -30
- megadetector/visualization/render_images_with_thumbnails.py +39 -74
- megadetector/visualization/visualization_utils.py +487 -435
- megadetector/visualization/visualize_db.py +232 -198
- megadetector/visualization/visualize_detector_output.py +82 -76
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
- megadetector-10.0.0.dist-info/RECORD +139 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
- megadetector/api/batch_processing/api_core/server.py +0 -294
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
- megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
- megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
- megadetector/api/batch_processing/api_core/server_utils.py +0 -88
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector/utils/azure_utils.py +0 -178
- megadetector/utils/sas_blob_utils.py +0 -509
- megadetector-5.0.28.dist-info/RECORD +0 -209
- /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
megadetector/postprocessing/render_detection_confusion_matrix.py

@@ -36,12 +36,12 @@ def _image_to_output_file(im,preview_images_folder):
     """
     Produces a clean filename from im (if [im] is a str) or im['file'] (if [im] is a dict).
     """
-
+
     if isinstance(im,str):
         filename_relative = im
     else:
         filename_relative = im['file']
-
+
     fn_clean = flatten_path(filename_relative).replace(' ','_')
     return os.path.join(preview_images_folder,fn_clean)
 
@@ -50,7 +50,7 @@ def _render_image(im,render_image_constants):
     """
     Internal function for rendering a single image to the confusion matrix preview folder.
     """
-
+
     filename_to_ground_truth_im = render_image_constants['filename_to_ground_truth_im']
     image_folder = render_image_constants['image_folder']
     preview_images_folder = render_image_constants['preview_images_folder']
@@ -58,18 +58,18 @@ def _render_image(im,render_image_constants):
     results_category_id_to_name = render_image_constants['results_category_id_to_name']
     rendering_confidence_thresholds = render_image_constants['rendering_confidence_thresholds']
     target_image_size = render_image_constants['target_image_size']
-
+
     assert im['file'] in filename_to_ground_truth_im
-
+
     output_file = _image_to_output_file(im,preview_images_folder)
     if os.path.isfile(output_file) and not force_render_images:
         return output_file
-
+
     input_file = os.path.join(image_folder,im['file'])
     assert os.path.isfile(input_file)
-
+
     detections_to_render = []
-
+
     for det in im['detections']:
         category_name = results_category_id_to_name[det['category']]
         detection_threshold = rendering_confidence_thresholds['default']
@@ -77,11 +77,11 @@ def _render_image(im,render_image_constants):
             detection_threshold = rendering_confidence_thresholds[category_name]
         if det['conf'] > detection_threshold:
             detections_to_render.append(det)
-
+
     vis_utils.draw_bounding_boxes_on_file(input_file, output_file, detections_to_render,
                                           detector_label_map=results_category_id_to_name,
                                           label_font_size=20,target_size=target_image_size)
-
+
     return output_file
 
 
@@ -91,7 +91,7 @@ def render_detection_confusion_matrix(ground_truth_file,
                                        results_file,
                                        image_folder,
                                        preview_folder,
-                                       force_render_images=False,
+                                       force_render_images=False,
                                        confidence_thresholds=None,
                                        rendering_confidence_thresholds=None,
                                        target_image_size=(1280,-1),
@@ -102,15 +102,15 @@ def render_detection_confusion_matrix(ground_truth_file,
                                        model_file=None,
                                        empty_category_name='empty',
                                        html_image_list_options=None):
-    """
+    """
     Given a CCT-formatted ground truth file and a MegaDetector-formatted results file,
-    render an HTML confusion matrix in [preview_folder. Typically used for multi-class detectors.
+    render an HTML confusion matrix in [preview_folder. Typically used for multi-class detectors.
     Currently assumes a single class per image.
-
+
     confidence_thresholds and rendering_confidence_thresholds are dictionaries mapping
     class names to thresholds. "default" is a special token that will be used for all
     classes not otherwise assigned thresholds.
-
+
     Args:
         ground_truth_file (str): the CCT-formatted .json file with ground truth information
         results_file (str): the MegaDetector results .json file
@@ -118,29 +118,32 @@ def render_detection_confusion_matrix(ground_truth_file,
             [results_file] should be relative to this folder.
         preview_folder (str): the output folder, i.e. the folder in which we'll create our nifty
             HTML stuff.
-
+        force_render_images (bool, optional): if False, skips images that already exist
         confidence_thresholds (dict, optional): a dictionary mapping class names to thresholds;
             all classes not explicitly named here will use the threshold for the "default" category.
-
+        rendering_confidence_thresholds (dict, optional): a dictionary mapping class names to thresholds;
            all classes not explicitly named here will use the threshold for the "default" category.
-        target_image_size (tuple, optional): output image size, as a pair of ints (width,height). If one
+        target_image_size (tuple, optional): output image size, as a pair of ints (width,height). If one
            value is -1 and the other is not, aspect ratio is preserved. If both are -1, the original image
            sizes are preserved.
        parallelize_rendering (bool, optional): enable (default) or disable parallelization when rendering
-
+        parallelize_rendering_n_cores (int, optional): number of threads or processes to use for rendering, only
            used if parallelize_rendering is True
-        parallelize_rendering_with_threads: whether to use threads (True) or processes (False)
-            only used if parallelize_rendering is True
+        parallelize_rendering_with_threads (bool, optional): whether to use threads (True) or processes (False)
+            when rendering, only used if parallelize_rendering is True
        job_name (str, optional): job name to include in big letters in the output file
-        model_file (str, optional) model filename to include in HTML output
+        model_file (str, optional): model filename to include in HTML output
        empty_category_name (str, optional): special category name that we should treat as empty, typically
            "empty"
-        html_image_list_options (dict, optional): options listed passed along to write_html_image_list;
-            see write_html_image_list for documentation.
+        html_image_list_options (dict, optional): options listed passed along to write_html_image_list;
+            see write_html_image_list for documentation.
+
+    Returns:
+        dict: confusion matrix information, containing at least the key "html_file"
     """
-
+
     ##%% Argument and path handling
-
+
     preview_images_folder = os.path.join(preview_folder,'images')
     os.makedirs(preview_images_folder,exist_ok=True)
 
@@ -148,75 +151,75 @@ def render_detection_confusion_matrix(ground_truth_file,
         confidence_thresholds = {'default':0.5}
     if rendering_confidence_thresholds is None:
         rendering_confidence_thresholds = {'default':0.4}
-
 
-
-
+
+    ##%% Load ground truth
+
     with open(ground_truth_file,'r') as f:
         ground_truth_data_cct = json.load(f)
-
+
     filename_to_ground_truth_im = {}
     for im in ground_truth_data_cct['images']:
         assert im['file_name'] not in filename_to_ground_truth_im
         filename_to_ground_truth_im[im['file_name']] = im
-
-
+
+
     ##%% Confirm that the ground truth images are present in the image folder
-
+
     ground_truth_images = find_images(image_folder,return_relative_paths=True,recursive=True)
     assert len(ground_truth_images) == len(ground_truth_data_cct['images'])
     del ground_truth_images
-
-
+
+
     ##%% Map images to categories
-
+
     # gt_image_id_to_image = {im['id']:im for im in ground_truth_data_cct['images']}
     gt_image_id_to_annotations = defaultdict(list)
-
+
     ground_truth_category_id_to_name = {}
     for c in ground_truth_data_cct['categories']:
         ground_truth_category_id_to_name[c['id']] = c['name']
-
+
     # Add the empty category if necessary
     if empty_category_name not in ground_truth_category_id_to_name.values():
         empty_category_id = max(ground_truth_category_id_to_name.keys())+1
         ground_truth_category_id_to_name[empty_category_id] = empty_category_name
-
+
     ground_truth_category_names = sorted(list(ground_truth_category_id_to_name.values()))
-
+
     for ann in ground_truth_data_cct['annotations']:
         gt_image_id_to_annotations[ann['image_id']].append(ann)
-
+
     gt_filename_to_category_names = defaultdict(set)
-
+
     for im in ground_truth_data_cct['images']:
         annotations_this_image = gt_image_id_to_annotations[im['id']]
         for ann in annotations_this_image:
             category_name = ground_truth_category_id_to_name[ann['category_id']]
             gt_filename_to_category_names[im['file_name']].add(category_name)
-
+
     for filename in gt_filename_to_category_names:
-
+
         category_names_this_file = gt_filename_to_category_names[filename]
-
+
         # The empty category should be exclusive
         if empty_category_name in category_names_this_file:
             assert len(category_names_this_file) == 1, \
                 'Empty category assigned along with another category for {}'.format(filename)
         assert len(category_names_this_file) > 0, \
             'No ground truth category assigned to {}'.format(filename)
-
-
+
+
     ##%% Load results
-
+
     with open(results_file,'r') as f:
         md_formatted_results = json.load(f)
-
+
     results_category_id_to_name = md_formatted_results['detection_categories']
-
-
-    ##%% Render images with detections
-
+
+
+    ##%% Render images with detections
+
     render_image_constants = {}
     render_image_constants['filename_to_ground_truth_im'] = filename_to_ground_truth_im
     render_image_constants['image_folder'] = image_folder
@@ -224,46 +227,52 @@ def render_detection_confusion_matrix(ground_truth_file,
     render_image_constants['force_render_images'] = force_render_images
     render_image_constants['results_category_id_to_name'] = results_category_id_to_name
     render_image_constants['rendering_confidence_thresholds'] = rendering_confidence_thresholds
-    render_image_constants['target_image_size'] = target_image_size
-
+    render_image_constants['target_image_size'] = target_image_size
+
     if parallelize_rendering:
 
-
-
-
-
-
-
-
-                    pool = ThreadPool(parallelize_rendering_n_cores)
-                    worker_string = 'threads'
+        pool = None
+        try:
+            if parallelize_rendering_n_cores is None:
+                if parallelize_rendering_with_threads:
+                    pool = ThreadPool()
+                else:
+                    pool = Pool()
             else:
-
-
-
-
-
-
-
-
-
+                if parallelize_rendering_with_threads:
+                    pool = ThreadPool(parallelize_rendering_n_cores)
+                    worker_string = 'threads'
+                else:
+                    pool = Pool(parallelize_rendering_n_cores)
+                    worker_string = 'processes'
+                print('Rendering images with {} {}'.format(parallelize_rendering_n_cores,
+                                                           worker_string))
+
+            _ = list(tqdm(pool.imap(partial(_render_image,render_image_constants=render_image_constants),
+                                    md_formatted_results['images']),
+                          total=len(md_formatted_results['images'])))
+        finally:
+            pool.close()
+            pool.join()
+            print("Pool closed and joined for confusion matrix rendering")
+
     else:
-
+
         # im = md_formatted_results['images'][0]
-        for im in tqdm(md_formatted_results['images']):
+        for im in tqdm(md_formatted_results['images']):
             _render_image(im,render_image_constants)
-
-
+
+
     ##%% Map images to predicted categories, and vice-versa
-
+
     filename_to_predicted_categories = defaultdict(set)
     predicted_category_name_to_filenames = defaultdict(set)
-
+
     # im = md_formatted_results['images'][0]
     for im in tqdm(md_formatted_results['images']):
-
+
         assert im['file'] in filename_to_ground_truth_im
-
+
         # det = im['detections'][0]
         for det in im['detections']:
             category_name = results_category_id_to_name[det['category']]
@@ -273,34 +282,34 @@ def render_detection_confusion_matrix(ground_truth_file,
             if det['conf'] > detection_threshold:
                 filename_to_predicted_categories[im['file']].add(category_name)
                 predicted_category_name_to_filenames[category_name].add(im['file'])
-
+
         # ...for each detection
-
+
     # ...for each image
-
-
+
+
     ##%% Create TP/TN/FP/FN lists
-
+
     category_name_to_image_lists = {}
-
+
     sub_page_tokens = ['fn','tn','fp','tp']
-
+
     for category_name in ground_truth_category_names:
-
+
         category_name_to_image_lists[category_name] = {}
         for sub_page_token in sub_page_tokens:
             category_name_to_image_lists[category_name][sub_page_token] = []
-
+
     # filename = next(iter(gt_filename_to_category_names))
     for filename in gt_filename_to_category_names.keys():
-
+
         ground_truth_categories_this_image = gt_filename_to_category_names[filename]
         predicted_categories_this_image = filename_to_predicted_categories[filename]
-
+
         for category_name in ground_truth_category_names:
-
+
             assignment = None
-
+
             if category_name == empty_category_name:
                 # If this is an empty image
                 if category_name in ground_truth_categories_this_image:
@@ -315,7 +324,7 @@ def render_detection_confusion_matrix(ground_truth_file,
                         assignment = 'fp'
                     else:
                         assignment = 'tn'
-
+
             else:
                 if category_name in ground_truth_categories_this_image:
                     if category_name in predicted_categories_this_image:
@@ -326,43 +335,43 @@ def render_detection_confusion_matrix(ground_truth_file,
                     if category_name in predicted_categories_this_image:
                         assignment = 'fp'
                     else:
-                        assignment = 'tn'
-
+                        assignment = 'tn'
+
             category_name_to_image_lists[category_name][assignment].append(filename)
-
+
     # ...for each filename
-
-
+
+
     ##%% Create confusion matrix
-
+
     gt_category_name_to_category_index = {}
-
+
     for i_category,category_name in enumerate(ground_truth_category_names):
         gt_category_name_to_category_index[category_name] = i_category
-
-    n_categories = len(gt_category_name_to_category_index)
-
+
+    n_categories = len(gt_category_name_to_category_index)
+
     # indexed as [true,predicted]
     confusion_matrix = np.zeros(shape=(n_categories,n_categories),dtype=int)
-
+
     filename_to_results_im = {im['file']:im for im in md_formatted_results['images']}
-
+
     true_predicted_to_file_list = defaultdict(list)
-
+
     # filename = next(iter(gt_filename_to_category_names.keys()))
     for filename in gt_filename_to_category_names.keys():
-
+
         ground_truth_categories_this_image = gt_filename_to_category_names[filename]
         assert len(ground_truth_categories_this_image) == 1
         ground_truth_category_name = next(iter(ground_truth_categories_this_image))
-
+
         results_im = filename_to_results_im[filename]
-
+
         # If there were no detections at all, call this image empty
         if len(results_im['detections']) == 0:
             predicted_category_name = empty_category_name
         # Otherwise look for above-threshold detections
-        else:
+        else:
             results_category_name_to_confidence = defaultdict(int)
             for det in results_im['detections']:
                 category_name = results_category_id_to_name[det['category']]
@@ -378,23 +387,23 @@ def render_detection_confusion_matrix(ground_truth_file,
             else:
                 predicted_category_name = max(results_category_name_to_confidence,
                                               key=results_category_name_to_confidence.get)
-
+
         ground_truth_category_index = gt_category_name_to_category_index[ground_truth_category_name]
         predicted_category_index = gt_category_name_to_category_index[predicted_category_name]
-
+
         true_predicted_token = ground_truth_category_name + '_' + predicted_category_name
         true_predicted_to_file_list[true_predicted_token].append(filename)
-
+
         confusion_matrix[ground_truth_category_index,predicted_category_index] += 1
-
+
     # ...for each file
-
-    plt.ioff()
-
+
+    plt.ioff()
+
     fig_h = 3 + 0.3 * n_categories
     fig_w = fig_h
     fig = plt.figure(figsize=(fig_w, fig_h),tight_layout=True)
-
+
     plot_utils.plot_confusion_matrix(
         matrix=confusion_matrix,
         classes=ground_truth_category_names,
@@ -405,32 +414,32 @@ def render_detection_confusion_matrix(ground_truth_file,
         use_colorbar=False,
         y_label=True,
         fig=fig)
-
+
     cm_figure_fn_relative = 'confusion_matrix.png'
     cm_figure_fn_abs = os.path.join(preview_folder, cm_figure_fn_relative)
     # fig.show()
     fig.savefig(cm_figure_fn_abs,dpi=100)
     plt.close(fig)
-
+
     # open_file(cm_figure_fn_abs)
-
-
+
+
     ##%% Create HTML confusion matrix
-
+
     html_confusion_matrix = '<table class="result-table">\n'
     html_confusion_matrix += '<tr>\n'
     html_confusion_matrix += '<td>{}</td>\n'.format('True category')
     for category_name in ground_truth_category_names:
         html_confusion_matrix += '<td>{}</td>\n'.format(' ')
     html_confusion_matrix += '</tr>\n'
-
+
     for true_category in ground_truth_category_names:
-
+
         html_confusion_matrix += '<tr>\n'
         html_confusion_matrix += '<td>{}</td>\n'.format(true_category)
-
+
         for predicted_category in ground_truth_category_names:
-
+
             true_predicted_token = true_category + '_' + predicted_category
             image_list = true_predicted_to_file_list[true_predicted_token]
             if len(image_list) == 0:
@@ -441,9 +450,9 @@ def render_detection_confusion_matrix(ground_truth_file,
                 title_string = 'true: {}, predicted {}'.format(
                     true_category,predicted_category)
                 html_image_list_options['headerHtml'] = '<h1>{}</h1>'.format(title_string)
-
+
                 html_image_info_list = []
-
+
                 for image_filename_relative in image_list:
                     html_image_info = {}
                     detections = filename_to_results_im[image_filename_relative]['detections']
@@ -451,7 +460,7 @@ def render_detection_confusion_matrix(ground_truth_file,
                        max_conf = 0
                    else:
                        max_conf = max([d['conf'] for d in detections])
-
+
                    title = '<b>Image</b>: {}, <b>Max conf</b>: {:0.3f}'.format(
                        image_filename_relative, max_conf)
                    image_link = 'images/' + os.path.basename(
@@ -462,100 +471,100 @@ def render_detection_confusion_matrix(ground_truth_file,
                        'textStyle':\
                            'font-family:verdana,arial,calibri;font-size:80%;' + \
                            'text-align:left;margin-top:20;margin-bottom:5'
-                    }
-
+                    }
+
                    html_image_info_list.append(html_image_info)
-
+
                target_html_file_relative = true_predicted_token + '.html'
                target_html_file_abs = os.path.join(preview_folder,target_html_file_relative)
                write_html_image_list(
                    filename=target_html_file_abs,
                    images=html_image_info_list,
                    options=html_image_list_options)
-
+
                td_content = '<a href="{}">{}</a>'.format(target_html_file_relative,
                                                          len(image_list))
-
+
            html_confusion_matrix += '<td>{}</td>\n'.format(td_content)
-
+
        # ...for each predicted category
-
+
        html_confusion_matrix += '</tr>\n'
-
-    # ...for each true category
-
+
+    # ...for each true category
+
    html_confusion_matrix += '<tr>\n'
    html_confusion_matrix += '<td> </td>\n'
-
+
    for category_name in ground_truth_category_names:
        html_confusion_matrix += '<td class="rotate"><p style="margin-left:20px;">{}</p></td>\n'.format(
            category_name)
    html_confusion_matrix += '</tr>\n'
-
+
    html_confusion_matrix += '</table>'
-
-
+
+
    ##%% Create HTML sub-pages and HTML table
-
+
    html_table = '<table class="result-table">\n'
-
+
    html_table += '<tr>\n'
    html_table += '<td>{}</td>\n'.format('True category')
    for sub_page_token in sub_page_tokens:
        html_table += '<td>{}</td>'.format(sub_page_token)
    html_table += '</tr>\n'
-
+
    filename_to_results_im = {im['file']:im for im in md_formatted_results['images']}
-
+
    sub_page_token_to_page_name = {
        'fp':'false positives',
        'tp':'true positives',
        'fn':'false negatives',
        'tn':'true negatives'
    }
-
+
    # category_name = ground_truth_category_names[0]
    for category_name in ground_truth_category_names:
-
+
        html_table += '<tr>\n'
-
+
        html_table += '<td>{}</td>\n'.format(category_name)
-
+
        # sub_page_token = sub_page_tokens[0]
        for sub_page_token in sub_page_tokens:
-
+
            html_table += '<td>\n'
-
+
            image_list = category_name_to_image_lists[category_name][sub_page_token]
-
+
            if len(image_list) == 0:
-
+
                html_table += '0\n'
-
+
            else:
-
+
                html_image_list_options = {}
                title_string = '{}: {}'.format(category_name,sub_page_token_to_page_name[sub_page_token])
                html_image_list_options['headerHtml'] = '<h1>{}</h1>'.format(title_string)
-
+
                target_html_file_relative = '{}_{}.html'.format(category_name,sub_page_token)
                target_html_file_abs = os.path.join(preview_folder,target_html_file_relative)
-
+
                html_image_info_list = []
-
+
                # image_filename_relative = image_list[0]
                for image_filename_relative in image_list:
-
+
                    source_file = os.path.join(image_folder,image_filename_relative)
                    assert os.path.isfile(source_file)
-
+
                    html_image_info = {}
                    detections = filename_to_results_im[image_filename_relative]['detections']
                    if len(detections) == 0:
                        max_conf = 0
                    else:
                        max_conf = max([d['conf'] for d in detections])
-
+
                    title = '<b>Image</b>: {}, <b>Max conf</b>: {:0.3f}'.format(
                        image_filename_relative, max_conf)
                    image_link = 'images/' + os.path.basename(
@@ -567,31 +576,31 @@ def render_detection_confusion_matrix(ground_truth_file,
                        'textStyle':\
                            'font-family:verdana,arial,calibri;font-size:80%;' + \
                            'text-align:left;margin-top:20;margin-bottom:5'
-                    }
-
+                    }
+
                    html_image_info_list.append(html_image_info)
-
+
                # ...for each image
-
+
                write_html_image_list(
                    filename=target_html_file_abs,
                    images=html_image_info_list,
                    options=html_image_list_options)
-
+
                html_table += '<a href="{}">{}</a>\n'.format(target_html_file_relative,len(image_list))
-
+
            html_table += '</td>\n'
-
+
        # ...for each sub-page
-
+
        html_table += '</tr>\n'
-
+
    # ...for each category
-
-    html_table += '</table>'
-
+
+    html_table += '</table>'
+
    html = '<html>\n'
-
+
    style_header = """<head>
        <style type="text/css">
        a { text-decoration: none; }
@@ -599,62 +608,62 @@ def render_detection_confusion_matrix(ground_truth_file,
        div.contentdiv { margin-left: 20px; }
        table.result-table { border:1px solid black; border-collapse: collapse; margin-left:50px;}
        td,th { padding:10px; }
-        .rotate {
+        .rotate {
        padding:0px;
        writing-mode:vertical-lr;
-        -webkit-transform: rotate(-180deg);
-        -moz-transform: rotate(-180deg);
-        -ms-transform: rotate(-180deg);
-        -o-transform: rotate(-180deg);
+        -webkit-transform: rotate(-180deg);
+        -moz-transform: rotate(-180deg);
+        -ms-transform: rotate(-180deg);
+        -o-transform: rotate(-180deg);
        transform: rotate(-180deg);
        }
        </style>
        </head>"""
-
+
    html += style_header + '\n'
-
+
    html += '<body>\n'
-
+
    html += '<h1>Results summary for {}</h1>\n'.format(job_name)
-
+
    if model_file is not None and len(model_file) > 0:
        html += '<p><b>Model file</b>: {}</p>'.format(os.path.basename(model_file))
-
+
    html += '<p><b>Confidence thresholds</b></p>'
-
+
    for c in confidence_thresholds.keys():
        html += '<p style="margin-left:15px;">{}: {}</p>'.format(c,confidence_thresholds[c])
-
+
    html += '<h2>Confusion matrix</h2>\n'
-
+
    html += '<p>...assuming a single category per image.</p>\n'
-
+
    html += '<img src="{}"/>\n'.format(cm_figure_fn_relative)
-
+
    html += '<h2>Confusion matrix (with links)</h2>\n'
-
+
    html += '<p>...assuming a single category per image.</p>\n'
-
+
    html += html_confusion_matrix
-
+
    html += '<h2>Per-class statistics</h2>\n'
-
+
    html += html_table
-
+
    html += '</body>\n'
    html += '<html>\n'
-
+
    target_html_file = os.path.join(preview_folder,'index.html')
-
+
    with open(target_html_file,'w') as f:
        f.write(html)
-
-
+
+
    ##%% Prepare return data
-
+
    confusion_matrix_info = {}
    confusion_matrix_info['html_file'] = target_html_file
-
+
    return confusion_matrix_info
 
 # ...render_detection_confusion_matrix(...)