megadetector 5.0.28__py3-none-any.whl → 10.0.0__py3-none-any.whl
This diff shows the content of two publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
Potentially problematic release.
This version of megadetector might be problematic.
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/efficientnet/model.py +8 -8
- megadetector/classification/efficientnet/utils.py +6 -5
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +26 -26
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -2
- megadetector/data_management/camtrap_dp_to_coco.py +79 -46
- megadetector/data_management/cct_json_utils.py +103 -103
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +210 -193
- megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
- megadetector/data_management/databases/integrity_check_json_db.py +228 -200
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +88 -39
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +133 -125
- megadetector/data_management/labelme_to_yolo.py +159 -73
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
- megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +73 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
- megadetector/data_management/mewc_to_md.py +344 -340
- megadetector/data_management/ocr_tools.py +262 -255
- megadetector/data_management/read_exif.py +249 -227
- megadetector/data_management/remap_coco_categories.py +90 -28
- megadetector/data_management/remove_exif.py +81 -21
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +588 -120
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +248 -122
- megadetector/data_management/yolo_to_coco.py +333 -191
- megadetector/detection/change_detection.py +832 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +358 -278
- megadetector/detection/run_detector.py +399 -186
- megadetector/detection/run_detector_batch.py +404 -377
- megadetector/detection/run_inference_with_yolov5_val.py +340 -327
- megadetector/detection/run_tiled_inference.py +257 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +332 -295
- megadetector/postprocessing/add_max_conf.py +19 -11
- megadetector/postprocessing/categorize_detections_by_size.py +45 -45
- megadetector/postprocessing/classification_postprocessing.py +468 -433
- megadetector/postprocessing/combine_batch_outputs.py +23 -23
- megadetector/postprocessing/compare_batch_results.py +590 -525
- megadetector/postprocessing/convert_output_format.py +106 -102
- megadetector/postprocessing/create_crop_folder.py +347 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +48 -27
- megadetector/postprocessing/md_to_coco.py +133 -102
- megadetector/postprocessing/md_to_labelme.py +107 -90
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +92 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -301
- megadetector/postprocessing/remap_detection_categories.py +91 -38
- megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +156 -74
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/ct_utils.py +1049 -211
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +632 -529
- megadetector/utils/path_utils.py +1520 -431
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/split_locations_into_train_val.py +62 -62
- megadetector/utils/string_utils.py +148 -27
- megadetector/utils/url_utils.py +489 -176
- megadetector/utils/wi_utils.py +2658 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +34 -30
- megadetector/visualization/render_images_with_thumbnails.py +39 -74
- megadetector/visualization/visualization_utils.py +487 -435
- megadetector/visualization/visualize_db.py +232 -198
- megadetector/visualization/visualize_detector_output.py +82 -76
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
- megadetector-10.0.0.dist-info/RECORD +139 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
- megadetector/api/batch_processing/api_core/server.py +0 -294
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
- megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
- megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
- megadetector/api/batch_processing/api_core/server_utils.py +0 -88
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector/utils/azure_utils.py +0 -178
- megadetector/utils/sas_blob_utils.py +0 -509
- megadetector-5.0.28.dist-info/RECORD +0 -209
- /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
--- megadetector/visualization/visualize_db.py (megadetector 5.0.28)
+++ megadetector/visualization/visualize_db.py (megadetector 10.0.0)
@@ -32,7 +32,7 @@ from megadetector.utils.write_html_image_list import write_html_image_list
 from megadetector.data_management.cct_json_utils import IndexedJsonDb
 from megadetector.visualization import visualization_utils as vis_utils

-def
+def _isnan(x):
     return (isinstance(x,float) and np.isnan(x))


@@ -42,104 +42,104 @@ class DbVizOptions:
     """
     Parameters controlling the behavior of visualize_db().
     """
-
+
     def __init__(self):
-
+
         #: Number of images to sample from the database, or None to visualize all images
         self.num_to_visualize = None
-
+
         #: Target size for rendering; set either dimension to -1 to preserve aspect ratio.
         #:
         #: If viz_size is None or (-1,-1), the original image size is used.
         self.viz_size = (1000, -1)
-
+
         #: HTML rendering options; see write_html_image_list for details
         #:
         #:The most relevant option one might want to set here is:
         #:
-        #:
+        #: html_options['maxFiguresPerHtmlFile']
         #:
         #: ...which can be used to paginate previews to a number of images that will load well
         #: in a browser (5000 is a reasonable limit).
-        self.
-
+        self.html_options = write_html_image_list()
+
         #: Whether to sort images by filename (True) or randomly (False)
         self.sort_by_filename = True
-
+
         #: Only show images that contain bounding boxes
         self.trim_to_images_with_bboxes = False
-
+
         #: Random seed to use for sampling images
         self.random_seed = 0
-
-        #: Should we include Web search links for each category name?
+
+        #: Should we include Web search links for each category name?
         self.add_search_links = False
-
+
         #: Should each thumbnail image link back to the original image?
         self.include_image_links = False
-
+
         #: Should there be a text link back to each original image?
         self.include_filename_links = False
-
+
         #: Line width in pixels
         self.box_thickness = 4
-
+
         #: Number of pixels to expand each bounding box
         self.box_expansion = 0
-
+
         #: Only include images that contain annotations with these class names (not IDs) (list)
         #:
         #: Mutually exclusive with classes_to_exclude
         self.classes_to_include = None
-
+
         #: Exclude images that contain annotations with these class names (not IDs) (list)
         #:
         #: Mutually exclusive with classes_to_include
         self.classes_to_exclude = None
-
+
         #: Special tag used to say "show me all images with multiple categories"
         #:
         #: :meta private:
         self.multiple_categories_tag = '*multiple*'
-
-        #: We sometimes flatten image directories by replacing a path separator with
+
+        #: We sometimes flatten image directories by replacing a path separator with
         #: another character. Leave blank for the typical case where this isn't necessary.
         self.pathsep_replacement = '' # '~'
-
+
         #: Parallelize rendering across multiple workers
         self.parallelize_rendering = False
-
+
         #: In theory, whether to parallelize with threads (True) or processes (False), but
         #: process-based parallelization in this function is currently unsupported
         self.parallelize_rendering_with_threads = True
-
+
         #: Number of workers to use for parallelization; ignored if parallelize_rendering
         #: is False
         self.parallelize_rendering_n_cores = 25
-
+
         #: Should we show absolute (True) or relative (False) paths for each image?
         self.show_full_paths = False
-
+
         #: List of additional fields in the image struct that we should print in image headers
         self.extra_image_fields_to_print = None
-
+
         #: List of additional fields in the annotation struct that we should print in image headers
         self.extra_annotation_fields_to_print = None
-
+
         #: Set to False to skip existing images
         self.force_rendering = True
-
+
         #: Enable additionald debug console output
         self.verbose = False
-
+
         #: COCO files used for evaluation may contain confidence scores, this
         #: determines the field name used for confidence scores
        self.confidence_field_name = 'score'
-
-        #: Optionally apply a confidence threshold; this requires that [confidence_field_name]
+
+        #: Optionally apply a confidence threshold; this requires that [confidence_field_name]
         #: be present in all detections.
         self.confidence_threshold = None
-
+

 #%% Helper functions

@@ -148,9 +148,9 @@ def _image_filename_to_path(image_file_name, image_base_dir, pathsep_replacement
     Translates the file name in an image entry in the json database to a path, possibly doing
     some manipulation of path separators.
     """
-
+
     if len(pathsep_replacement) > 0:
-        image_file_name = os.path.normpath(image_file_name).replace(os.pathsep,pathsep_replacement)
+        image_file_name = os.path.normpath(image_file_name).replace(os.pathsep,pathsep_replacement)
     return os.path.join(image_base_dir, image_file_name)


@@ -159,40 +159,41 @@ def _image_filename_to_path(image_file_name, image_base_dir, pathsep_replacement
 def visualize_db(db_path, output_dir, image_base_dir, options=None):
     """
     Writes images and html to output_dir to visualize the annotations in a .json file.
-
+
     Args:
         db_path (str or dict): the .json filename to load, or a previously-loaded database
+        output_dir (str): the folder to which we should write annotated images
         image_base_dir (str): the folder where the images live; filenames in [db_path] should
             be relative to this folder.
         options (DbVizOptions, optional): See DbVizOptions for details
-
+
     Returns:
         tuple: A length-two tuple containing (the html filename) and (the loaded database).
-    """
-
+    """
+
     if options is None:
         options = DbVizOptions()
-
+
     # Consistency checking for fields with specific format requirements
-
+
     # This should be a list, but if someone specifies a string, do a reasonable thing
     if isinstance(options.extra_image_fields_to_print,str):
         options.extra_image_fields_to_print = [options.extra_image_fields_to_print]
-
+
     if not options.parallelize_rendering_with_threads:
         print('Warning: process-based parallelization is not yet supported by visualize_db')
         options.parallelize_rendering_with_threads = True
-
+
     if image_base_dir.startswith('http'):
         if not image_base_dir.endswith('/'):
             image_base_dir += '/'
     else:
         assert(os.path.isdir(image_base_dir))
-
+
     os.makedirs(os.path.join(output_dir, 'rendered_images'), exist_ok=True)
-
+
     if isinstance(db_path,str):
-        assert(os.path.isfile(db_path))
+        assert(os.path.isfile(db_path))
         print('Loading database from {}...'.format(db_path))
         image_db = json.load(open(db_path))
         print('...done')
@@ -200,52 +201,55 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         print('Using previously-loaded DB')
         image_db = db_path
     else:
-        raise ValueError('Illegal dictionary or filename')
-
+        raise ValueError('Illegal dictionary or filename')
+
     annotations = image_db['annotations']
     images = image_db['images']
     categories = image_db['categories']
-
+
     # Optionally remove all images without bounding boxes, *before* sampling
     if options.trim_to_images_with_bboxes:
-
+
         b_has_bbox = [False] * len(annotations)
         for i_ann,ann in enumerate(annotations):
-            if 'bbox' in ann:
-
+            if 'bbox' in ann or 'bbox_relative' in ann:
+                if 'bbox' in ann:
+                    assert isinstance(ann['bbox'],list)
+                else:
+                    assert isinstance(ann['bbox_relative'],list)
                 b_has_bbox[i_ann] = True
         annotations_with_boxes = list(compress(annotations, b_has_bbox))
-
+
         image_ids_with_boxes = [x['image_id'] for x in annotations_with_boxes]
         image_ids_with_boxes = set(image_ids_with_boxes)
-
+
         image_has_box = [False] * len(images)
         for i_image,image in enumerate(images):
-
-            if
+            image_id = image['id']
+            if image_id in image_ids_with_boxes:
                 image_has_box[i_image] = True
         images_with_bboxes = list(compress(images, image_has_box))
         images = images_with_bboxes
-
+
     # Optionally include/remove images with specific labels, *before* sampling
-
+
     assert (not ((options.classes_to_exclude is not None) and \
                  (options.classes_to_include is not None))), \
         'Cannot specify an inclusion and exclusion list'
-
+
     if options.classes_to_exclude is not None:
         assert isinstance(options.classes_to_exclude,list), \
             'If supplied, classes_to_exclude should be a list'
-
+
     if options.classes_to_include is not None:
         assert isinstance(options.classes_to_include,list), \
             'If supplied, classes_to_include should be a list'
-
+
     if (options.classes_to_exclude is not None) or (options.classes_to_include is not None):
-
+
         print('Indexing database')
         indexed_db = IndexedJsonDb(image_db)
-        b_valid_class = [True] * len(images)
+        b_valid_class = [True] * len(images)
         for i_image,image in enumerate(images):
             classes = indexed_db.get_classes_for_image(image)
             if options.classes_to_exclude is not None:
@@ -257,85 +261,88 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
                 b_valid_class[i_image] = False
                 if options.multiple_categories_tag in options.classes_to_include:
                     if len(classes) > 1:
-                        b_valid_class[i_image] = True
+                        b_valid_class[i_image] = True
                 if not b_valid_class[i_image]:
                     for c in classes:
                         if c in options.classes_to_include:
                             b_valid_class[i_image] = True
-                            break
+                            break
             else:
                 raise ValueError('Illegal include/exclude combination')
-
+
         images_with_valid_classes = list(compress(images, b_valid_class))
-        images = images_with_valid_classes
-
+        images = images_with_valid_classes
+
     # ...if we need to include/exclude categories
-
+
     # Put the annotations in a dataframe so we can select all annotations for a given image
     print('Creating data frames')
     df_anno = pd.DataFrame(annotations)
     df_img = pd.DataFrame(images)
-
+
     # Construct label map
     label_map = {}
     for cat in categories:
         label_map[int(cat['id'])] = cat['name']
-
+
     # Take a sample of images
     if options.num_to_visualize is not None:
         if options.num_to_visualize > len(df_img):
             print('Warning: asked to visualize {} images, but only {} are available, keeping them all'.\
                   format(options.num_to_visualize,len(df_img)))
-        else:
+        else:
             df_img = df_img.sample(n=options.num_to_visualize,random_state=options.random_seed)
-
+
     images_html = []
-
+
     # Set of dicts representing inputs to render_db_bounding_boxes:
     #
     # bboxes, box_classes, image_path
     rendering_info = []
-
+
     print('Preparing rendering list')
-
+
     for i_image,img in tqdm(df_img.iterrows(),total=len(df_img)):
-
+
         img_id = img['id']
         assert img_id is not None
-
+
         img_relative_path = img['file_name']
-
+
         if image_base_dir.startswith('http'):
             img_path = image_base_dir + img_relative_path
         else:
-            img_path = os.path.join(image_base_dir,
+            img_path = os.path.join(image_base_dir,
                 _image_filename_to_path(img_relative_path, image_base_dir))
-
+
         annos_i = df_anno.loc[df_anno['image_id'] == img_id, :] # all annotations on this image
-
+
         bboxes = []
         box_classes = []
         box_score_strings = []
-
+
         # All the class labels we've seen for this image (with out without bboxes)
         image_categories = set()
-
+
         extra_annotation_field_string = ''
         annotation_level_for_image = ''
-
+
+        # Did this image come with already-normalized bounding boxes?
+        boxes_are_normalized = None
+
         # Iterate over annotations for this image
         # i_ann = 0; anno = annos_i.iloc[i_ann]
         for i_ann,anno in annos_i.iterrows():
-
+
             if options.extra_annotation_fields_to_print is not None:
                 field_names = list(anno.index)
                 for field_name in field_names:
                     if field_name in options.extra_annotation_fields_to_print:
                         field_value = anno[field_name]
-                        if (field_value is not None) and (not
+                        if (field_value is not None) and (not _isnan(field_value)):
                             extra_annotation_field_string += ' ({}:{})'.format(
-                                field_name,field_value)
-
+                                field_name,field_value)
+
             if options.confidence_threshold is not None:
                 assert options.confidence_field_name in anno, \
                     'Error: confidence thresholding requested, ' + \
@@ -343,18 +350,18 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
                     options.confidence_field_name)
                 if anno[options.confidence_field_name] < options.confidence_threshold:
                     continue
-
+
             if 'sequence_level_annotation' in anno:
-
-                if
-
+                b_sequence_level_annotation = anno['sequence_level_annotation']
+                if b_sequence_level_annotation:
+                    annotation_level = 'sequence'
                 else:
-
+                    annotation_level = 'image'
                 if annotation_level_for_image == '':
-                    annotation_level_for_image =
-                elif annotation_level_for_image !=
+                    annotation_level_for_image = annotation_level
+                elif annotation_level_for_image != annotation_level:
                     annotation_level_for_image = 'mixed'
-
+
             category_id = anno['category_id']
             category_name = label_map[category_id]
             if options.add_search_links:
@@ -362,66 +369,81 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
                 category_name = '<a href="https://www.google.com/search?tbm=isch&q={}">{}</a>'.format(
                     category_name,category_name)
             image_categories.add(category_name)
-
-
-
+
+            assert not ('bbox' in anno and 'bbox_relative' in anno), \
+                "An annotation can't have both an absolute and a relative bounding box"
+
+            box_field = 'bbox'
+            if 'bbox_relative' in anno:
+                box_field = 'bbox_relative'
+                assert (boxes_are_normalized is None) or (boxes_are_normalized), \
+                    "An image can't have both absolute and relative bounding boxes"
+                boxes_are_normalized = True
+            elif 'bbox' in anno:
+                assert (boxes_are_normalized is None) or (not boxes_are_normalized), \
+                    "An image can't have both absolute and relative bounding boxes"
+                boxes_are_normalized = False
+
+            if box_field in anno:
+                bbox = anno[box_field]
                 if isinstance(bbox,float):
                     assert math.isnan(bbox), "I shouldn't see a bbox that's neither a box nor NaN"
                     continue
                 bboxes.append(bbox)
                 box_classes.append(anno['category_id'])
-
+
                 box_score_string = ''
                 if options.confidence_field_name is not None and \
                     options.confidence_field_name in anno:
                     score = anno[options.confidence_field_name]
                     box_score_string = '({}%)'.format(round(100 * score))
                 box_score_strings.append(box_score_string)
-
+
         # ...for each of this image's annotations
-
+
         image_classes = ', '.join(image_categories)
-
-        img_id_string = str(img_id).lower()
+
+        img_id_string = str(img_id).lower()
         file_name = '{}_gt.jpg'.format(os.path.splitext(img_id_string)[0])
-
+
         # Replace characters that muck up image links
        illegal_characters = ['/','\\',':','\t','#',' ','%']
         for c in illegal_characters:
             file_name = file_name.replace(c,'~')
-
+
         rendering_info_this_image = {'bboxes':bboxes,
                                      'box_classes':box_classes,
                                      'tags':box_score_strings,
                                      'img_path':img_path,
-                                     'output_file_name':file_name
+                                     'output_file_name':file_name,
+                                     'boxes_are_normalized':boxes_are_normalized}
         rendering_info.append(rendering_info_this_image)
-
+
         label_level_string = ''
         if len(annotation_level_for_image) > 0:
             label_level_string = ' (annotation level: {})'.format(annotation_level_for_image)
-
+
         if 'frame_num' in img and 'seq_num_frames' in img:
             frame_string = ' frame: {} of {},'.format(img['frame_num'],img['seq_num_frames'])
         elif 'frame_num' in img:
             frame_string = ' frame: {},'.format(img['frame_num'])
         else:
             frame_string = ''
-
+
         if options.show_full_paths:
             filename_text = img_path
         else:
             filename_text = img_relative_path
         if options.include_filename_links:
             filename_text = '<a href="{}">{}</a>'.format(img_path,filename_text)
-
+
         flag_string = ''
-
-        if ('flags' in img) and (not
+
+        if ('flags' in img) and (not _isnan(img['flags'])):
             flag_string = ', flags: {}'.format(str(img['flags']))
-
+
         extra_field_string = ''
-
+
         if options.extra_image_fields_to_print is not None:
             for field_name in options.extra_image_fields_to_print:
                 if field_name in img:
@@ -429,7 +451,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
                     # previous field in [extra_fields_to_print] or from the rest of the string
                     extra_field_string += ', {}: {}'.format(
                         field_name,str(img[field_name]))
-
+
         # We're adding html for an image before we render it, so it's possible this image will
         # fail to render. For applications where this script is being used to debua a database
         # (the common case?), this is useful behavior, for other applications, this is annoying.
@@ -437,191 +459,203 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         {
             'filename': '{}/{}'.format('rendered_images', file_name),
             'title': '{}<br/>{}, num boxes: {},{} class labels: {}{}{}{}{}'.format(
-                filename_text, img_id, len(bboxes), frame_string, image_classes,
+                filename_text, img_id, len(bboxes), frame_string, image_classes,
                 label_level_string, flag_string, extra_field_string, extra_annotation_field_string),
             'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;' + \
                 'text-align:left;margin-top:20;margin-bottom:5'
         }
         if options.include_image_links:
             image_dict['linkTarget'] = img_path
-
+
         images_html.append(image_dict)
-
+
     # ...for each image

     def render_image_info(rendering_info):
-
+
         img_path = rendering_info['img_path']
         bboxes = rendering_info['bboxes']
         bbox_classes = rendering_info['box_classes']
+        boxes_are_normalized = rendering_info['boxes_are_normalized']
         bbox_tags = None
         if 'tags' in rendering_info:
-            bbox_tags = rendering_info['tags']
+            bbox_tags = rendering_info['tags']
         output_file_name = rendering_info['output_file_name']
         output_full_path = os.path.join(output_dir, 'rendered_images', output_file_name)
-
+
         if (os.path.isfile(output_full_path)) and (not options.force_rendering):
             if options.verbose:
                 print('Skipping existing image {}'.format(output_full_path))
             return True
-
+
         if not img_path.startswith('http'):
             if not os.path.exists(img_path):
                 print('Image {} cannot be found'.format(img_path))
                 return False
-
+
         try:
             original_image = vis_utils.open_image(img_path)
             original_size = original_image.size
-            if (options.viz_size is None) or
+            if (options.viz_size is None) or \
+                (options.viz_size[0] == -1 and options.viz_size[1] == -1):
                 image = original_image
             else:
-                image = vis_utils.resize_image(original_image,
-                                               options.viz_size[
+                image = vis_utils.resize_image(original_image,
+                                               options.viz_size[0],
+                                               options.viz_size[1],
+                                               no_enlarge_width=True)
         except Exception as e:
             print('Image {} failed to open, error: {}'.format(img_path, e))
             return False
-
-        vis_utils.render_db_bounding_boxes(boxes=bboxes,
+
+        vis_utils.render_db_bounding_boxes(boxes=bboxes,
                                            classes=bbox_classes,
-                                           image=image,
+                                           image=image,
                                            original_size=original_size,
                                            label_map=label_map,
                                            thickness=options.box_thickness,
                                            expansion=options.box_expansion,
-                                           tags=bbox_tags
-
+                                           tags=bbox_tags,
+                                           boxes_are_normalized=boxes_are_normalized)
+
         image.save(output_full_path)
-
+
         return True
-
+
     # ...def render_image_info
-
+
     print('Rendering images')
     start_time = time.time()
-
+
     if options.parallelize_rendering:
-
+
         if options.parallelize_rendering_with_threads:
             worker_string = 'threads'
         else:
             worker_string = 'processes'
-
-
-
-
-
-
-
-
-                pool = ThreadPool(options.parallelize_rendering_n_cores)
+
+        pool = None
+        try:
+            if options.parallelize_rendering_n_cores is None:
+                if options.parallelize_rendering_with_threads:
+                    pool = ThreadPool()
+                else:
+                    pool = Pool()
             else:
-
-
-
-
-
-
+                if options.parallelize_rendering_with_threads:
+                    pool = ThreadPool(options.parallelize_rendering_n_cores)
+                else:
+                    pool = Pool(options.parallelize_rendering_n_cores)
+                print('Rendering images with {} {}'.format(options.parallelize_rendering_n_cores,
+                                                           worker_string))
+            rendering_success = list(tqdm(pool.imap(render_image_info, rendering_info),
+                                          total=len(rendering_info)))
+        finally:
+            if pool is not None:
+                pool.close()
+                pool.join()
+                print("Pool closed and joined for DB visualization")
+
     else:
-
+
         rendering_success = []
-        for file_info in tqdm(rendering_info):
+        for file_info in tqdm(rendering_info):
             rendering_success.append(render_image_info(file_info))
-
+
     elapsed = time.time() - start_time
-
+
     print('Rendered {} images in {} ({} successful)'.format(
         len(rendering_info),humanfriendly.format_timespan(elapsed),sum(rendering_success)))
-
-    if options.sort_by_filename:
+
+    if options.sort_by_filename:
         images_html = sorted(images_html, key=lambda x: x['filename'])
     else:
         random.shuffle(images_html)
-
-
-
-
+
+    html_output_file = os.path.join(output_dir, 'index.html')
+
+    html_options = options.html_options
     if isinstance(db_path,str):
-
+        html_options['headerHtml'] = '<h1>Sample annotations from {}</h1>'.format(db_path)
     else:
-
-
+        html_options['headerHtml'] = '<h1>Sample annotations</h1>'
+
     write_html_image_list(
-        filename=
+        filename=html_output_file,
         images=images_html,
-        options=
+        options=html_options)

-    print('Visualized {} images, wrote results to {}'.format(len(images_html),
-
-    return
+    print('Visualized {} images, wrote results to {}'.format(len(images_html),html_output_file))
+
+    return html_output_file,image_db

 # def visualize_db(...)
-
-
+
+
 #%% Command-line driver
-
-# Copy all fields from a Namespace (i.e., the output from parse_args) to an object.
+
+# Copy all fields from a Namespace (i.e., the output from parse_args) to an object.
 #
 # Skips fields starting with _. Does not check existence in the target object.
-def
-
+def _args_to_object(args, obj):
+
     for n, v in inspect.getmembers(args):
         if not n.startswith('_'):
             setattr(obj, n, v)


-def main():
-
+def main(): # noqa
+
     parser = argparse.ArgumentParser()
-    parser.add_argument('db_path', action='store', type=str,
+    parser.add_argument('db_path', action='store', type=str,
                         help='.json file to visualize')
-    parser.add_argument('output_dir', action='store', type=str,
+    parser.add_argument('output_dir', action='store', type=str,
                         help='Output directory for html and rendered images')
-    parser.add_argument('image_base_dir', action='store', type=str,
+    parser.add_argument('image_base_dir', action='store', type=str,
                         help='Base directory (or URL) for input images')

-    parser.add_argument('--num_to_visualize', action='store', type=int, default=None,
+    parser.add_argument('--num_to_visualize', action='store', type=int, default=None,
                         help='Number of images to visualize (randomly drawn) (defaults to all)')
-    parser.add_argument('--random_sort', action='store_true',
+    parser.add_argument('--random_sort', action='store_true',
                         help='Sort randomly (rather than by filename) in output html')
-    parser.add_argument('--trim_to_images_with_bboxes', action='store_true',
+    parser.add_argument('--trim_to_images_with_bboxes', action='store_true',
                         help='Only include images with bounding boxes (defaults to false)')
     parser.add_argument('--random_seed', action='store', type=int, default=None,
                         help='Random seed for image selection')
     parser.add_argument('--pathsep_replacement', action='store', type=str, default='',
                         help='Replace path separators in relative filenames with another ' + \
                              'character (frequently ~)')
-
+
     if len(sys.argv[1:]) == 0:
         parser.print_help()
         parser.exit()
-
+
     args = parser.parse_args()
-
+
     # Convert to an options object
     options = DbVizOptions()
-
+    _args_to_object(args, options)
     if options.random_sort:
         options.sort_by_filename = False
-
-    visualize_db(options.db_path,options.output_dir,options.image_base_dir,options)

-
+    visualize_db(options.db_path,options.output_dir,options.image_base_dir,options)
+
+if __name__ == '__main__':
     main()


 #%% Interactive driver

 if False:
-
+
     #%%
-
+
     db_path = r'e:\wildlife_data\missouri_camera_traps\missouri_camera_traps_set1.json'
     output_dir = r'e:\wildlife_data\missouri_camera_traps\preview'
     image_base_dir = r'e:\wildlife_data\missouri_camera_traps'
-
+
     options = DbVizOptions()
     options.num_to_visualize = 100
-
-
-    # os.startfile(
+
+    html_output_file, db = visualize_db(db_path,output_dir,image_base_dir,options)
+    # os.startfile(html_output_file)