megadetector 5.0.28-py3-none-any.whl → 5.0.29-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +231 -224
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +304 -262
- megadetector/detection/run_detector.py +177 -164
- megadetector/detection/run_detector_batch.py +364 -363
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +256 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +290 -282
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +415 -415
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +219 -146
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +23 -20
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +313 -298
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -66
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1018 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1457 -398
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +61 -61
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +401 -397
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +79 -73
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/METADATA +135 -132
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.28.dist-info/RECORD +0 -209
megadetector/data_management/remap_coco_categories.py

@@ -25,24 +25,24 @@ def remap_coco_categories(input_data,
     """
     Given a COCO-formatted dataset, remap the categories to a new categories mapping, optionally
     writing the results to a new file.
-
+
     Args:
-        input_data (str or dict): a COCO-formatted dict or a filename. If it's a dict, it will
+        input_data (str or dict): a COCO-formatted dict or a filename. If it's a dict, it will
            be copied, not modified in place.
-        output_category_name_to_id (dict) a dict mapping strings to ints. Categories not in
+        output_category_name_to_id (dict) a dict mapping strings to ints. Categories not in
            this dict will be ignored or will result in errors, depending on allow_unused_categories.
-        input_category_name_to_output_category_name: a dict mapping strings to strings.
-           Annotations using categories not in this dict will be omitted or will result in
+        input_category_name_to_output_category_name: a dict mapping strings to strings.
+           Annotations using categories not in this dict will be omitted or will result in
            errors, depending on allow_unused_categories.
         output_file (str, optional): output file to which we should write remapped COCO data
         allow_unused_categories (bool, optional): should we ignore categories not present in the
            input/output mappings? If this is False and we encounter an unmapped category, we'll
            error.
-
+
     Returns:
         dict: COCO-formatted dict
     """
-
+
     if isinstance(input_data,str):
         assert os.path.isfile(input_data), "Can't find file {}".format(input_data)
         with open(input_data,'r') as f:
@@ -51,45 +51,45 @@ def remap_coco_categories(input_data,
     else:
         assert isinstance(input_data,dict), 'Illegal COCO input data'
         input_data = deepcopy(input_data)
-
+
     # It's safe to modify in-place now
     output_data = input_data
-
+
     # Read input name --> ID mapping
     input_category_name_to_input_category_id = {}
     for c in input_data['categories']:
         input_category_name_to_input_category_id[c['name']] = c['id']
     input_category_id_to_input_category_name = \
         invert_dictionary(input_category_name_to_input_category_id)
-
+
     # Map input IDs --> output IDs
     input_category_id_to_output_category_id = {}
     input_category_names = list(input_category_name_to_output_category_name.keys())
-
+
     # input_name = input_category_names[0]
     for input_name in input_category_names:
-
+
         output_name = input_category_name_to_output_category_name[input_name]
         assert output_name in output_category_name_to_id, \
             'No output ID for {} --> {}'.format(input_name,output_name)
         input_id = input_category_name_to_input_category_id[input_name]
         output_id = output_category_name_to_id[output_name]
         input_category_id_to_output_category_id[input_id] = output_id
-
+
     # ...for each category we want to keep
-
+
     printed_unused_category_warnings = set()
-
+
     valid_annotations = []
-
+
     # Map annotations
     for ann in output_data['annotations']:
-
+
         input_category_id = ann['category_id']
         if input_category_id not in input_category_id_to_output_category_id:
             if allow_unused_categories:
                 if input_category_id not in printed_unused_category_warnings:
-                    printed_unused_category_warnings.add(input_category_id)
+                    printed_unused_category_warnings.add(input_category_id)
                     input_category_name = \
                         input_category_id_to_input_category_name[input_category_id]
                     s = 'Skipping unmapped category ID {} ({})'.format(
@@ -98,31 +98,31 @@ def remap_coco_categories(input_data,
                 continue
             else:
                 s = 'Unmapped category ID {}'.format(input_category_id)
-                raise ValueError(s)
+                raise ValueError(s)
         output_category_id = input_category_id_to_output_category_id[input_category_id]
         ann['category_id'] = output_category_id
-        valid_annotations.append(ann)
-
+        valid_annotations.append(ann)
+
     # ...for each annotation
-
+
     # The only reason annotations should get excluded is the case where we allow
     # unused categories
     if not allow_unused_categories:
         assert len(valid_annotations) == len(output_data['annotations'])
-
+
     output_data['annotations'] = valid_annotations
-
+
     # Update the category list
     output_categories = []
     for output_name in output_category_name_to_id:
         category = {'name':output_name,'id':output_category_name_to_id[output_name]}
         output_categories.append(category)
     output_data['categories'] = output_categories
-
+
     if output_file is not None:
         with open(output_file,'w') as f:
             json.dump(output_data,f,indent=1)
-
+
     return input_data
 
 # ...def remap_coco_categories(...)
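The changes to remap_coco_categories.py above are whitespace-only and don't affect behavior. For reference, a minimal usage sketch based on the docstring shown in this diff; the module path comes from the file list above, and the exact keyword names should be treated as assumptions rather than confirmed API:

# Minimal usage sketch (not part of the diff); argument names follow the docstring above.
from megadetector.data_management.remap_coco_categories import remap_coco_categories

# Map two input category names onto two output categories with fixed IDs
output_category_name_to_id = {'animal': 1, 'person': 2}
input_name_to_output_name = {'deer': 'animal', 'hiker': 'person'}

remapped = remap_coco_categories(input_data='annotations.json',
                                 output_category_name_to_id=output_category_name_to_id,
                                 input_category_name_to_output_category_name=input_name_to_output_name,
                                 output_file='annotations_remapped.json',
                                 allow_unused_categories=True)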
megadetector/data_management/remove_exif.py

@@ -2,7 +2,7 @@
 
 remove_exif.py
 
-Removes all EXIF/IPTC/XMP metadata from a folder of images, without making
+Removes all EXIF/IPTC/XMP metadata from a folder of images, without making
 backup copies, using pyexiv2. Ignores non-jpeg images.
 
 This module is rarely used, and pyexiv2 is not thread-safe, so pyexiv2 is not
@@ -21,22 +21,26 @@ from tqdm import tqdm
 
 #%% Support functions
 
-# Pyexif2 is not thread safe, do not call this function in parallel within a process
-#
-# Parallelizing across processes is fine.
 def remove_exif_from_image(fn):
+    """
+    Remove EXIF information from a single image
+
+    pyexiv2 is not thread safe, do not call this function in parallel within a process.
+
+    Parallelizing across processes is fine.
+    """
+
+    import pyexiv2 # type: ignore
 
-    import pyexiv2
-
     try:
         img = pyexiv2.Image(fn)
         img.clear_exif()
         img.clear_iptc()
         img.clear_xmp()
-        img.close()
+        img.close()
     except Exception as e:
         print('EXIF error on {}: {}'.format(fn,str(e)))
-
+
     return True
 
 
@@ -44,22 +48,23 @@ def remove_exif_from_image(fn):
 
 def remove_exif(image_base_folder,recursive=True,n_processes=1):
     """
-    Removes all EXIF/IPTC/XMP metadata from a folder of images, without making
+    Removes all EXIF/IPTC/XMP metadata from a folder of images, without making
     backup copies, using pyexiv2. Ignores non-jpeg images.
-
+
     Args:
         image_base_folder (str): the folder from which we should remove EXIF data
         recursive (bool, optional): whether to process [image_base_folder] recursively
         n_processes (int, optional): number of concurrent workers. Because pyexiv2 is not
-            thread-safe, only process-based parallelism is supported.
+            thread-safe, only process-based parallelism is supported.
     """
+
     try:
-        import pyexiv2 #noqa
+        import pyexiv2 # type: ignore #noqa
     except:
         print('pyexiv2 not available; try "pip install pyexiv2"')
         raise
 
-
+
     ##%% List files
 
     assert os.path.isdir(image_base_folder), \
@@ -67,22 +72,28 @@ def remove_exif(image_base_folder,recursive=True,n_processes=1):
     all_files = [f for f in glob.glob(image_base_folder+ "*/**", recursive=recursive)]
     image_files = [s for s in all_files if \
                    (s.lower().endswith('.jpg') or s.lower().endswith('.jpeg'))]
-
+
 
     ##%% Remove EXIF data (execution)
 
     if n_processes == 1:
-
+
         # fn = image_files[0]
         for fn in tqdm(image_files):
             remove_exif_from_image(fn)
-
+
     else:
         # pyexiv2 is not thread-safe, so we need to use processes
-
-
-
-
+        pool = None
+        try:
+            print('Starting parallel process pool with {} workers'.format(n_processes))
+            pool = Pool(n_processes)
+            _ = list(tqdm(pool.imap(remove_exif_from_image,image_files),total=len(image_files)))
+        finally:
+            pool.close()
+            pool.join()
+            print("Pool closed and joined for EXIF removal")
+
 # ...remove_exif(...)
 
 
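The remove_exif.py change above moves the thread-safety note into a proper docstring and wraps the multiprocessing pool in a try/finally block. A minimal usage sketch of the public function, based on the docstring in this diff (it assumes pyexiv2 is installed and that the module path matches the file list above):

# Minimal usage sketch (not part of the diff). Strips EXIF/IPTC/XMP metadata from all
# .jpg/.jpeg files under a folder, in place, using process-based parallelism because
# pyexiv2 is not thread-safe.
from megadetector.data_management.remove_exif import remove_exif

remove_exif('/data/camera_trap_images', recursive=True, n_processes=4)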
megadetector/data_management/rename_images.py

@@ -1,187 +1,187 @@
[all 187 lines of the 5.0.28 version are replaced; the removed lines were not preserved in this view]
+"""
+
+rename_images.py
+
+Copies images from a possibly-nested folder structure to a flat folder structure, including EXIF
+timestamps in each filename. Loosely equivalent to camtrapR's imageRename() function.
+
+"""
+
+#%% Imports and constants
+
+import os
+import sys
+import argparse
+
+from megadetector.utils.path_utils import \
+    find_images, insert_before_extension, parallel_copy_files
+from megadetector.data_management.read_exif import \
+    ReadExifOptions, read_exif_from_folder
+
+
+#%% Functions
+
+def rename_images(input_folder,
+                  output_folder,
+                  dry_run=False,
+                  verbose=False,
+                  read_exif_options=None,
+                  n_copy_workers=8):
+    """
+    For the given image struct in COCO format and associated list of annotations, reformats the
+    detections into labelme format.
+
+    Args:
+        input_folder: the folder to search for images, always recursive
+        output_folder: the folder to which we will copy images; cannot be the
+            same as [input_folder]
+        dry_run: only map images, don't actually copy
+        verbose (bool, optional): enable additional debug output
+        read_exif_options (ReadExifOptions, optional): parameters controlling the reading of
+            EXIF information
+        n_copy_workers (int, optional): number of parallel threads to use for copying
+
+    Returns:
+        dict: a dict mapping relative filenames in the input folder to relative filenames in the output
+        folder
+    """
+
+    assert os.path.isdir(input_folder), 'Input folder {} does not exist'.format(
+        input_folder)
+
+    if not dry_run:
+        os.makedirs(output_folder,exist_ok=True)
+
+    # Read exif information
+    if read_exif_options is None:
+        read_exif_options = ReadExifOptions()
+
+        read_exif_options.tags_to_include = ['DateTime','Model','Make','ExifImageWidth','ExifImageHeight','DateTime',
+                                             'DateTimeOriginal']
+        read_exif_options.verbose = False
+
+    exif_info = read_exif_from_folder(input_folder=input_folder,
+                                      output_file=None,
+                                      options=read_exif_options,
+                                      filenames=None,recursive=True)
+
+    print('Read EXIF information for {} images'.format(len(exif_info)))
+
+    filename_to_exif_info = {info['file_name']:info for info in exif_info}
+
+    image_files = find_images(input_folder,return_relative_paths=True,convert_slashes=True,recursive=True)
+
+    for fn in image_files:
+        assert fn in filename_to_exif_info, 'No EXIF info available for {}'.format(fn)
+
+    input_fn_relative_to_output_fn_relative = {}
+
+    # fn_relative = image_files[0]
+    for fn_relative in image_files:
+
+        input_fn_abs = os.path.join(input_folder,fn_relative)
+        image_exif_info = filename_to_exif_info[fn_relative]
+        if 'exif_tags' in image_exif_info:
+            image_exif_info = image_exif_info['exif_tags']
+
+        if image_exif_info is None or \
+            'DateTimeOriginal' not in image_exif_info or \
+            image_exif_info['DateTimeOriginal'] is None:
+
+            dt_tag = 'unknown_datetime'
+            print('Warning: no datetime for {}'.format(fn_relative))
+
+        else:
+
+            dt_tag = str(image_exif_info['DateTimeOriginal']).replace(':','-').replace(' ','_').strip()
+
+        flat_filename = fn_relative.replace('\\','/').replace('/','_')
+
+        output_fn_relative = insert_before_extension(flat_filename,dt_tag)
+
+        input_fn_relative_to_output_fn_relative[fn_relative] = output_fn_relative
+
+    if not dry_run:
+
+        input_fn_abs_to_output_fn_abs = {}
+        for input_fn_relative in input_fn_relative_to_output_fn_relative:
+            output_fn_relative = input_fn_relative_to_output_fn_relative[input_fn_relative]
+            input_fn_abs = os.path.join(input_folder,input_fn_relative)
+            output_fn_abs = os.path.join(output_folder,output_fn_relative)
+            input_fn_abs_to_output_fn_abs[input_fn_abs] = output_fn_abs
+
+        parallel_copy_files(input_file_to_output_file=input_fn_abs_to_output_fn_abs,
+                            max_workers=n_copy_workers,
+                            use_threads=True,
+                            overwrite=True,
+                            verbose=verbose)
+
+    return input_fn_relative_to_output_fn_relative
+
+# ...def rename_images()
+
+
+#%% Interactive driver
+
+if False:
+
+    pass
+
+    #%% Configure options
+
+    input_folder = r'G:\camera_traps\camera_trap_videos\2024.05.25\cam3'
+    output_folder = r'G:\camera_traps\camera_trap_videos\2024.05.25\cam3_flat'
+    dry_run = False
+    verbose = True
+    read_exif_options = ReadExifOptions()
+    read_exif_options.tags_to_include = ['DateTime','Model','Make','ExifImageWidth','ExifImageHeight','DateTime',
+                                         'DateTimeOriginal']
+    read_exif_options.n_workers = 8
+    read_exif_options.verbose = verbose
+    n_copy_workers = 8
+
+
+    #%% Programmatic execution
+
+    input_fn_relative_to_output_fn_relative = rename_images(input_folder,
+                                                            output_folder,
+                                                            dry_run=dry_run,
+                                                            verbose=verbose,
+                                                            read_exif_options=read_exif_options,
+                                                            n_copy_workers=n_copy_workers)
+
+
+#%% Command-line driver
+
+def main(): # noqa
+
+    parser = argparse.ArgumentParser(
+        description='Copies images from a possibly-nested folder structure to a flat folder structure, ' + \
+            'adding datetime information from EXIF to each filename')
+
+    parser.add_argument(
+        'input_folder',
+        type=str,
+        help='The folder to search for images, always recursive')
+
+    parser.add_argument(
+        'output_folder',
+        type=str,
+        help='The folder to which we should write the flattened image structure')
+
+    parser.add_argument(
+        '--dry_run',
+        action='store_true',
+        help="Only map images, don't actually copy")
+
+    if len(sys.argv[1:]) == 0:
+        parser.print_help()
+        parser.exit()
+
+    args = parser.parse_args()
+
+    rename_images(args.input_folder,args.output_folder,dry_run=args.dry_run,
+                  verbose=True,read_exif_options=None)
+
+if __name__ == '__main__':
+    main()
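The new rename_images.py already includes interactive and command-line drivers (shown above); a programmatic dry run might look like the following sketch, where the module path and return-value handling are assumptions based on the docstring rather than confirmed behavior:

# Minimal usage sketch (not part of the diff): map input filenames to flattened,
# timestamped output filenames without copying anything (dry_run=True).
from megadetector.data_management.rename_images import rename_images

mapping = rename_images('/data/cam3', '/data/cam3_flat', dry_run=True, verbose=True)
print('Would copy {} images'.format(len(mapping)))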