megadetector 5.0.27__py3-none-any.whl → 5.0.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (176)
  1. megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
  2. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
  3. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  7. megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
  8. megadetector/classification/aggregate_classifier_probs.py +3 -3
  9. megadetector/classification/analyze_failed_images.py +5 -5
  10. megadetector/classification/cache_batchapi_outputs.py +5 -5
  11. megadetector/classification/create_classification_dataset.py +11 -12
  12. megadetector/classification/crop_detections.py +10 -10
  13. megadetector/classification/csv_to_json.py +8 -8
  14. megadetector/classification/detect_and_crop.py +13 -15
  15. megadetector/classification/evaluate_model.py +7 -7
  16. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  17. megadetector/classification/json_to_azcopy_list.py +1 -1
  18. megadetector/classification/json_validator.py +29 -32
  19. megadetector/classification/map_classification_categories.py +9 -9
  20. megadetector/classification/merge_classification_detection_output.py +12 -9
  21. megadetector/classification/prepare_classification_script.py +19 -19
  22. megadetector/classification/prepare_classification_script_mc.py +23 -23
  23. megadetector/classification/run_classifier.py +4 -4
  24. megadetector/classification/save_mislabeled.py +6 -6
  25. megadetector/classification/train_classifier.py +1 -1
  26. megadetector/classification/train_classifier_tf.py +9 -9
  27. megadetector/classification/train_utils.py +10 -10
  28. megadetector/data_management/annotations/annotation_constants.py +1 -1
  29. megadetector/data_management/camtrap_dp_to_coco.py +45 -45
  30. megadetector/data_management/cct_json_utils.py +101 -101
  31. megadetector/data_management/cct_to_md.py +49 -49
  32. megadetector/data_management/cct_to_wi.py +33 -33
  33. megadetector/data_management/coco_to_labelme.py +75 -75
  34. megadetector/data_management/coco_to_yolo.py +189 -189
  35. megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
  36. megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
  37. megadetector/data_management/databases/integrity_check_json_db.py +202 -188
  38. megadetector/data_management/databases/subset_json_db.py +33 -33
  39. megadetector/data_management/generate_crops_from_cct.py +38 -38
  40. megadetector/data_management/get_image_sizes.py +54 -49
  41. megadetector/data_management/labelme_to_coco.py +130 -124
  42. megadetector/data_management/labelme_to_yolo.py +78 -72
  43. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  44. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  45. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  46. megadetector/data_management/lila/download_lila_subset.py +21 -24
  47. megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
  48. megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
  49. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  50. megadetector/data_management/lila/lila_common.py +70 -70
  51. megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
  52. megadetector/data_management/mewc_to_md.py +339 -340
  53. megadetector/data_management/ocr_tools.py +258 -252
  54. megadetector/data_management/read_exif.py +232 -223
  55. megadetector/data_management/remap_coco_categories.py +26 -26
  56. megadetector/data_management/remove_exif.py +31 -20
  57. megadetector/data_management/rename_images.py +187 -187
  58. megadetector/data_management/resize_coco_dataset.py +41 -41
  59. megadetector/data_management/speciesnet_to_md.py +41 -41
  60. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  61. megadetector/data_management/yolo_output_to_md_output.py +117 -120
  62. megadetector/data_management/yolo_to_coco.py +195 -188
  63. megadetector/detection/change_detection.py +831 -0
  64. megadetector/detection/process_video.py +341 -338
  65. megadetector/detection/pytorch_detector.py +308 -266
  66. megadetector/detection/run_detector.py +186 -166
  67. megadetector/detection/run_detector_batch.py +366 -364
  68. megadetector/detection/run_inference_with_yolov5_val.py +328 -325
  69. megadetector/detection/run_tiled_inference.py +312 -253
  70. megadetector/detection/tf_detector.py +24 -24
  71. megadetector/detection/video_utils.py +291 -283
  72. megadetector/postprocessing/add_max_conf.py +15 -11
  73. megadetector/postprocessing/categorize_detections_by_size.py +44 -44
  74. megadetector/postprocessing/classification_postprocessing.py +808 -311
  75. megadetector/postprocessing/combine_batch_outputs.py +20 -21
  76. megadetector/postprocessing/compare_batch_results.py +528 -517
  77. megadetector/postprocessing/convert_output_format.py +97 -97
  78. megadetector/postprocessing/create_crop_folder.py +220 -147
  79. megadetector/postprocessing/detector_calibration.py +173 -168
  80. megadetector/postprocessing/generate_csv_report.py +508 -0
  81. megadetector/postprocessing/load_api_results.py +25 -22
  82. megadetector/postprocessing/md_to_coco.py +129 -98
  83. megadetector/postprocessing/md_to_labelme.py +89 -83
  84. megadetector/postprocessing/md_to_wi.py +40 -40
  85. megadetector/postprocessing/merge_detections.py +87 -114
  86. megadetector/postprocessing/postprocess_batch_results.py +319 -302
  87. megadetector/postprocessing/remap_detection_categories.py +36 -36
  88. megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
  89. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  90. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  91. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
  92. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  93. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  94. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  95. megadetector/postprocessing/validate_batch_results.py +70 -70
  96. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  97. megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
  98. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
  99. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
  100. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  101. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  102. megadetector/taxonomy_mapping/species_lookup.py +33 -33
  103. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  104. megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
  105. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  106. megadetector/utils/azure_utils.py +22 -22
  107. megadetector/utils/ct_utils.py +1019 -200
  108. megadetector/utils/directory_listing.py +21 -77
  109. megadetector/utils/gpu_test.py +22 -22
  110. megadetector/utils/md_tests.py +541 -518
  111. megadetector/utils/path_utils.py +1511 -406
  112. megadetector/utils/process_utils.py +41 -41
  113. megadetector/utils/sas_blob_utils.py +53 -49
  114. megadetector/utils/split_locations_into_train_val.py +73 -60
  115. megadetector/utils/string_utils.py +147 -26
  116. megadetector/utils/url_utils.py +463 -173
  117. megadetector/utils/wi_utils.py +2629 -2868
  118. megadetector/utils/write_html_image_list.py +137 -137
  119. megadetector/visualization/plot_utils.py +21 -21
  120. megadetector/visualization/render_images_with_thumbnails.py +37 -73
  121. megadetector/visualization/visualization_utils.py +424 -404
  122. megadetector/visualization/visualize_db.py +197 -190
  123. megadetector/visualization/visualize_detector_output.py +126 -98
  124. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
  125. megadetector-5.0.29.dist-info/RECORD +163 -0
  126. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
  127. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  128. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  129. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  130. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  131. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  132. megadetector/data_management/importers/awc_to_json.py +0 -191
  133. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  134. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  135. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  136. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  137. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  138. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  139. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  140. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  141. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  142. megadetector/data_management/importers/ena24_to_json.py +0 -276
  143. megadetector/data_management/importers/filenames_to_json.py +0 -386
  144. megadetector/data_management/importers/helena_to_cct.py +0 -283
  145. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  146. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  147. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  148. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  149. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  150. megadetector/data_management/importers/missouri_to_json.py +0 -490
  151. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  152. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  153. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  154. megadetector/data_management/importers/pc_to_json.py +0 -365
  155. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  156. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  157. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  158. megadetector/data_management/importers/rspb_to_json.py +0 -356
  159. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  160. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  161. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  162. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  163. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  164. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  165. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  166. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  167. megadetector/data_management/importers/ubc_to_json.py +0 -399
  168. megadetector/data_management/importers/umn_to_json.py +0 -507
  169. megadetector/data_management/importers/wellington_to_json.py +0 -263
  170. megadetector/data_management/importers/wi_to_json.py +0 -442
  171. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  172. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  173. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  174. megadetector-5.0.27.dist-info/RECORD +0 -208
  175. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
  176. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
megadetector/data_management/importers/channel_islands_to_cct.py
@@ -1,913 +0,0 @@
- """
-
- channel_islands_to_cct.py
-
- Convert the Channel Islands data set to a COCO-camera-traps .json file
-
- Uses the command-line tool ExifTool (exiftool.org) to pull EXIF tags from images,
- because every Python package we tried failed to pull the "Maker Notes" field properly.
-
- """
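
The script's core trick is shelling out to ExifTool rather than using a Python EXIF library, because (per the docstring) no Python package handled the "Maker Notes" field. A minimal sketch of that pattern, assuming exiftool is installed and on the PATH; read_exif_lines is an invented name, and the script's own version is process_exif, further down:

    import subprocess

    def read_exif_lines(image_path, exiftool_command='exiftool'):
        # -G prefixes each tag with its group, e.g. "[MakerNotes] Sequence : 1 of 3",
        # which is what lets the caller pick out the Maker Notes fields
        proc = subprocess.Popen([exiftool_command, '-G', image_path],
                                stdout=subprocess.PIPE, encoding='utf8')
        return [s.strip() for s in proc.stdout.readlines()]
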
-
- #%% Imports, constants, paths
-
- ## Imports ##
-
- import os
- import json
- import uuid
- import datetime
- import glob
- import subprocess
- import requests
- import shutil
-
- from multiprocessing.pool import ThreadPool
- from collections import defaultdict
- from urllib.parse import urlparse
- from tqdm import tqdm
- from PIL import Image
-
-
- ## Constants ##
-
- required_input_annotation_fields = set([
-     'task_id','batch_id','name','url','Object','Output','teams','task_url','Step A Agent'
- ])
-
- n_download_threads = 10
- n_exif_threads = 20
- n_copy_threads = n_exif_threads
-
-
- ## Paths ##
-
- input_base = r'e:\channel-islands-in'
- output_base = r'g:\channel-islands-out'
- exiftool_command_name = r'c:\exiftool-12.13\exiftool(-k).exe'
-
- input_annotation_folder = os.path.join(input_base,'SCI Cameratrap Samasource Labels')
- input_image_folder = os.path.join(input_base,'images')
-
- assert os.path.isdir(input_base)
- assert os.path.isdir(input_annotation_folder)
- assert os.path.isdir(input_image_folder)
- assert not input_annotation_folder.endswith('/')
-
- output_file = os.path.join(output_base,'channel_islands_camera_traps.json')
- output_image_folder = os.path.join(output_base,'images')
- output_image_folder_humans = os.path.join(output_base,'human_images')
-
- os.makedirs(output_base,exist_ok=True)
- os.makedirs(output_image_folder,exist_ok=True)
- os.makedirs(output_image_folder_humans,exist_ok=True)
-
- # Confirm that exiftool is available
- # assert which(exiftool_command_name) is not None, 'Could not locate the ExifTool executable'
- assert os.path.isfile(exiftool_command_name), 'Could not locate the ExifTool executable'
-
- parsed_input_file = os.path.join(output_base,'parsed_input.json')
- exif_load_results_file = os.path.join(output_base,'exif_load_results.json')
- sequence_info_results_file = os.path.join(output_base,'sequence_info_results.json')
-
-
- #%% Load information from every .json file
-
- json_files = glob.glob(input_annotation_folder+'/**/*.json', recursive=True)
- print('Found {} .json files'.format(len(json_files)))
-
- # Ignore the sample file... actually, first make sure there is a sample file
- sample_files = [fn for fn in json_files if 'sample' in fn]
- assert len(sample_files) == 1
-
- # ...and now ignore that sample file.
- json_files = [fn for fn in json_files if 'sample' not in fn]
- input_images = []
-
- json_basenames = set()
-
- # json_file = json_files[0]
- for json_file in tqdm(json_files):
-
-     json_filename = os.path.basename(json_file)
-     assert json_filename not in json_basenames
-     json_basenames.add(json_filename)
-
-     with open(json_file,'r') as f:
-         annotations = json.load(f)
-
-     # ann = annotations[0]
-     for ann in annotations:
-
-         assert isinstance(ann,dict)
-         ann_keys = set(ann.keys())
-         assert required_input_annotation_fields == ann_keys
-         ann['json_filename'] = json_filename
-         input_images.append(ann)
-
-     # ...for each annotation in this file
-
- # ...for each .json file
-
- print('\nLoaded {} image records from {} .json files'.format(len(input_images),len(json_files)))
-
- image_urls = [ann['url'] for ann in input_images]
-
-
- #%% Confirm URL uniqueness, handle redundant tags
-
- output_images = []
-
- urls = set()
- for im in tqdm(input_images):
-
-     url = im['url']
-
-     if url in urls:
-
-         for existing_im in input_images:
-
-             # Have we already added this image?
-             if url == existing_im['url']:
-
-                 # One .json file was basically duplicated, but as:
-                 #
-                 # Ellie_2016-2017 SC12.json
-                 # Ellie_2016-2017-SC12.json
-                 assert im['json_filename'].replace('-',' ') == existing_im['json_filename'].replace('-',' ')
-
-                 # If the new image has no output, just leave the old one there
-                 if im['Output'] is None:
-                     print('Warning: duplicate URL {}, keeping existing output'.format(url))
-                     break
-
-                 # If the old image has no output, and the new one has output, default to the one with output
-                 if (existing_im['Output'] is None) and (im['Output'] is not None):
-                     print('Warning: duplicate URL {}, adding new output'.format(url))
-                     existing_im['Output'] = im['Output']
-                     break
-
-                 else:
-                     # Don't worry about the cases where someone tagged 'fox' and someone tagged 'fox_partial'
-                     obj1 = im['Output'][0]['tags']['Object'].replace('_partial','')
-                     obj2 = existing_im['Output'][0]['tags']['Object'].replace('_partial','')
-                     if obj1 != obj2:
-                         print('Warning: image {} tagged with {} and {}'.format(url,obj1,obj2))
-
-         # ...for each image we've already added
-
-     else:
-
-         urls.add(url)
-         output_images.append(im)
-
-     # ...if this URL is/isn't in the list of URLs we've already processed
-
- # ...for each image
-
- print('Kept {} of {} annotation records'.format(len(output_images),len(input_images)))
-
- images = output_images
-
-
- #%% Save progress
-
- with open(parsed_input_file,'w') as f:
-     json.dump(images,f,indent=1)
-
- #%%
-
- if False:
-
-     #%%
-
-     with open(parsed_input_file,'r') as f:
-         images = json.load(f)
-     assert not any(['exif_tags' in im for im in images])
-
-
- #%% Download files (functions)
-
- # https://www.quickprogrammingtips.com/python/how-to-download-multiple-files-concurrently-in-python.html
-
- def download_relative_url(url,overwrite=False):
-     """
-     Download:
-
-     https://somestuff.com/my/relative/path/image.jpg
-
-     ...to:
-
-     [input_image_folder]/my/relative/path/image.jpg
-     """
-
-     parsed_url = urlparse(url)
-     relative_path = parsed_url.path
-
-     # This is returned with a leading slash, remove it
-     relative_path = relative_path[1:]
-
-     target_file = os.path.join(input_image_folder,relative_path).replace('\\','/')
-
-     if os.path.isfile(target_file) and not overwrite:
-         print('{} exists, skipping'.format(target_file))
-         return url
-
-     os.makedirs(os.path.dirname(target_file),exist_ok=True)
-
-     print('Downloading {} to {}'.format(url, target_file))
-
-     r = requests.get(url, stream=True)
-     if r.status_code == requests.codes.ok:
-         with open(target_file, 'wb') as f:
-             for data in r:
-                 f.write(data)
-     else:
-         print('Warning: failed to download {}'.format(url))
-
-     return url
-
-
- def download_relative_urls(urls,n_threads = n_download_threads):
-     """
-     Download all URLs in [urls]
-     """
-     if n_threads == 1:
-         results = []
-         for url in urls:
-             results.append(download_relative_url(url))
-     else:
-         results = ThreadPool(n_threads).map(download_relative_url, urls)
-     return results
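
The download target above is derived purely from the URL path; a quick illustration of the mapping described in the docstring, reusing its example URL:

    from urllib.parse import urlparse

    url = 'https://somestuff.com/my/relative/path/image.jpg'
    relative_path = urlparse(url).path[1:]   # 'my/relative/path/image.jpg' (leading '/' removed)
    # os.path.join(input_image_folder, relative_path) then gives the local target file
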
-
-
- #%% Download files (execution)
-
- download_relative_urls(image_urls)
-
-
- #%% Read required fields from EXIF data (functions)
-
- def process_exif(file_path):
-     """
-     Get relevant fields from EXIF data for an image
-     """
-
-     # -G means "Print group name for each tag", e.g. print:
-     #
-     # [File] Bits Per Sample : 8
-     #
-     # ...instead of:
-     #
-     # Bits Per Sample : 8
-
-     proc = subprocess.Popen([exiftool_command_name, '-G', file_path], stdout=subprocess.PIPE, encoding='utf8')
-     exif_lines = proc.stdout.readlines()
-     exif_lines = [s.strip() for s in exif_lines]
-     assert exif_lines is not None and len(exif_lines) > 0, 'Failed to read EXIF data from {}'.format(file_path)
-
-     # If we don't get any EXIF information, this probably isn't an image
-     assert any([s.lower().startswith('[exif]') for s in exif_lines])
-
-     exif_tags = {}
-
-     found_makernotes = False
-
-     # line_raw = exif_lines[0]
-     for line_raw in exif_lines:
-
-         line = line_raw.lower()
-
-         # Split on the first occurrence of ":"
-         tokens = line.split(':',1)
-
-         assert(len(tokens) == 2)
-         field_name = tokens[0].strip()
-         field_value = tokens[1].strip()
-
-         if field_name.startswith('[makernotes]'):
-
-             found_makernotes = True
-
-             if 'sequence' in field_name:
-                 # Typically:
-                 #
-                 # '[MakerNotes] Sequence ', '1 of 3']
-                 frame_num, seq_num_frames = field_value.split('of')
-                 exif_tags['frame_num'] = int(frame_num.strip())
-                 exif_tags['seq_num_frames'] = int(seq_num_frames.strip())
-
-             # Not a typo; we are using serial number as a location
-             elif 'serial number' in line:
-                 exif_tags['location'] = field_value
-
-             elif ('date/time original' in line and '[file]' not in line and '[composite]' not in line):
-
-                 previous_dt = None
-
-                 if 'datetime' in exif_tags:
-                     previous_dt = exif_tags['datetime']
-                 dt = datetime.datetime.strptime(field_value,'%Y:%m:%d %H:%M:%S')
-
-                 # If there are multiple timestamps, make sure they're *almost* the same
-                 if previous_dt is not None:
-                     delta = abs((dt-previous_dt).total_seconds())
-                     assert delta < 1.0
-
-                 exif_tags['datetime'] = dt
-
-         if False:
-             if 'time' in line:
-                 assert 'datetime' not in exif_tags
-                 exif_tags['datetime'] = field_value
-
-         if (('datetime original' in line) or ('create date' in line) or ('date/time created' in line) or ('date/time original' in line)) \
-             and ('[file]' not in line) and ('[composite]' not in line):
-
-             previous_dt = None
-
-             if 'datetime' in exif_tags:
-                 previous_dt = exif_tags['datetime']
-             dt = datetime.datetime.strptime(field_value,'%Y:%m:%d %H:%M:%S')
-
-             # If there are multiple timestamps, make sure they're *almost* the same
-             if previous_dt is not None:
-                 delta = abs((dt-previous_dt).total_seconds())
-                 assert delta < 1.0
-
-             exif_tags['datetime'] = dt
-
-         if 'image width' in line:
-             exif_tags['width'] = int(field_value)
-
-         if 'image height' in line:
-             exif_tags['height'] = int(field_value)
-
-         if 'temperature' in line and not 'fahrenheit' in line:
-             exif_tags['temperature'] = field_value
-
-     # ...for each line in the exiftool output
-
-     makernotes_fields = ['frame_num', 'seq_num_frames', 'location', 'temperature']
-
-     if not found_makernotes:
-
-         print('Warning: could not find maker notes in {}'.format(file_path))
-
-         # This isn't directly related to the lack of maker notes, but it happens that files that are missing
-         # maker notes also happen to be missing EXIF date information
-         if not 'datetime' in exif_tags:
-             print('Warning: could not find datetime information in {}'.format(file_path))
-
-         for field_name in makernotes_fields:
-             assert field_name not in exif_tags
-             exif_tags[field_name] = 'unknown'
-
-     else:
-
-         assert 'datetime' in exif_tags, 'Could not find datetime information in {}'.format(file_path)
-         for field_name in makernotes_fields:
-             assert field_name in exif_tags, 'Could not find {} in {}'.format(field_name,file_path)
-
-     return exif_tags
-
- # ...process_exif()
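
The MakerNotes "Sequence" field parsed above encodes both the frame number and the sequence length; a standalone sketch of that parse, using a sample line in the format shown in the comment above (already lower-cased, as in process_exif):

    line = '[makernotes] sequence : 1 of 3'
    field_value = line.split(':', 1)[1].strip()   # '1 of 3'
    frame_num, seq_num_frames = field_value.split('of')
    print(int(frame_num.strip()), int(seq_num_frames.strip()))   # 1 3
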
-
-
- def get_image_local_path(im):
-
-     url = im['url']
-     parsed_url = urlparse(url)
-     relative_path = parsed_url.path
-
-     # This is returned with a leading slash, remove it
-     relative_path = relative_path[1:]
-
-     absolute_path = os.path.join(input_image_folder,relative_path).replace('\\','/')
-     return absolute_path
-
-
- def add_exif_data(im, overwrite=False):
-
-     if ('exif_tags' in im) and (overwrite==False):
-         return None
-
-     url = im['url']
-
-     # Ignore non-image files
-     if url.lower().endswith('ds_store') or ('dropbox.device' in url.lower()):
-         im['exif_tags'] = None
-         return
-
-     try:
-         input_image_path = get_image_local_path(im)
-         assert os.path.isfile(input_image_path)
-         exif_tags = process_exif(input_image_path)
-         im['exif_tags'] = exif_tags
-     except Exception as e:
-         s = 'Error on {}: {}'.format(url,str(e))
-         print(s)
-         return s
-     return None
-
-
- #%% Read EXIF data (execution)
-
- if n_exif_threads == 1:
-     # ann = images[0]
-     for im in tqdm(images):
-         add_exif_data(im)
- else:
-     pool = ThreadPool(n_exif_threads)
-     exif_read_results = list(tqdm(pool.imap(add_exif_data, images), total=len(images)))
-
-
- #%% Save progress
-
- with open(exif_load_results_file,'w') as f:
-
-     # Use default=str to handle datetime objects
-     json.dump(images, f, indent=1, default=str)
-
- #%%
-
- if False:
-
-     #%%
-
-     with open(exif_load_results_file,'r') as f:
-         # Not deserializing datetimes yet, will do this if I actually need to run this
-         images = json.load(f)
-
-
- #%% Check for EXIF read errors
-
- for i_result,result in enumerate(exif_read_results):
-
-     if result is not None:
-
-         print('\nError found on image {}: {}'.format(i_result,result))
-         im = images[i_result]
-         file_path = get_image_local_path(im)
-         assert images[i_result] == im
-         result = add_exif_data(im)
-         assert result is None
-         print('\nFixed!\n')
-         exif_read_results[i_result] = result
-
-
- #%% Remove junk
-
- images_out = []
- for im in images:
-
-     url = im['url']
-
-     # Ignore non-image files
-     if ('ds_store' in url.lower()) or ('dropbox.device' in url.lower()):
-         continue
-     images_out.append(im)
-
- images = images_out
-
-
- #%% Fill in some None values
-
- # ...so we can sort by datetime later, letting Nones be sorted arbitrarily
-
- for im in images:
-     if 'exif_tags' not in im:
-         im['exif_tags'] = {}
-     if 'datetime' not in im['exif_tags']:
-         im['exif_tags']['datetime'] = None
-
- images = sorted(images, key = lambda im: im['url'])
-
-
- #%% Find unique locations
-
- locations = set()
-
- for ann in tqdm(images):
-
-     assert 'exif_tags' in ann
-     location = ann['exif_tags']['location']
-     assert location is not None and len(location) > 0
-     locations.add(location)
-
-
- #%% Synthesize sequence information
-
- print('Found {} locations'.format(len(locations)))
-
- locations = list(locations)
-
- sequences = set()
- sequence_to_images = defaultdict(list)
- max_seconds_within_sequence = 10
-
- # Sort images by time within each location
- # i_location=0; location = locations[i_location]
- for i_location,location in enumerate(locations):
-
-     images_this_location = [im for im in images if im['exif_tags']['location'] == location]
515
- sorted_images_this_location = sorted(images_this_location, key = lambda im: im['exif_tags']['datetime'])
516
-
517
- current_sequence_id = None
518
- next_frame_number = 0
519
- previous_datetime = None
520
-
521
- # previous_datetime = sorted_images_this_location[0]['datetime']
522
- # im = sorted_images_this_camera[1]
523
- for i_image,im in enumerate(sorted_images_this_location):
524
-
525
- # Timestamp for this image, may be None
526
- dt = im['exif_tags']['datetime']
527
-
528
- # Start a new sequence if:
529
- #
530
- # * This image has no timestamp
531
- # * This image has a frame number of zero
532
- # * We have no previous image timestamp
533
- #
534
- if dt is None:
535
- delta = None
536
- elif previous_datetime is None:
537
- delta = None
538
- else:
539
- assert isinstance(dt,datetime.datetime)
540
- delta = (dt - previous_datetime).total_seconds()
541
-
542
- # Start a new sequence if necessary
543
- if delta is None or delta > max_seconds_within_sequence:
544
- next_frame_number = 0
545
- current_sequence_id = str(uuid.uuid1())
546
- sequences.add(current_sequence_id)
547
- assert current_sequence_id is not None
548
-
549
- im['seq_id'] = current_sequence_id
550
- im['synthetic_frame_number'] = next_frame_number
551
- next_frame_number = next_frame_number + 1
552
- previous_datetime = dt
553
- sequence_to_images[im['seq_id']].append(im)
554
-
555
- # ...for each image in this location
556
-
557
- # ...for each location
558
-
559
-
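
The grouping rule above (start a new sequence whenever the gap to the previous image is unknown or exceeds max_seconds_within_sequence) is easy to sanity-check in isolation; a minimal sketch with invented timestamps:

    import datetime

    max_seconds_within_sequence = 10
    timestamps = [datetime.datetime(2020, 1, 1, 12, 0, s) for s in (0, 2, 4, 30)]

    sequences = []
    previous = None
    for ts in timestamps:
        # An unknown gap (first image) or a gap over the threshold starts a new sequence
        if previous is None or (ts - previous).total_seconds() > max_seconds_within_sequence:
            sequences.append([])
        sequences[-1].append(ts)
        previous = ts

    print(len(sequences))   # 2: [12:00:00, 12:00:02, 12:00:04] and [12:00:30]
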
- #%% Count frames in each sequence
-
- print('Created {} sequences from {} images'.format(len(sequences),len(images)))
-
- num_frames_per_sequence = {}
- for seq_id in sequences:
-     # images_this_sequence = [im for im in images if im['seq_id'] == seq_id]
-     images_this_sequence = sequence_to_images[seq_id]
-     num_frames_per_sequence[seq_id] = len(images_this_sequence)
-     for im in images_this_sequence:
-         im['synthetic_seq_num_frames'] = len(images_this_sequence)
-
-
- #%% Create output filenames for each image, store original filenames
-
- images_per_folder = 1000
- output_paths = set()
-
- # i_location = 0; location = locations[i_location]
- for i_location,location in enumerate(locations):
-
-     images_this_location = [im for im in images if im['exif_tags']['location'] == location]
582
- sorted_images_this_location = sorted(images_this_location, key = lambda im: im['exif_tags']['datetime'])
583
-
584
- # i_image = 0; im = sorted_images_this_location[i_image]
585
- for i_image,im in enumerate(sorted_images_this_location):
586
-
587
- url = im['url']
588
- parsed_url = urlparse(url)
589
- relative_path = parsed_url.path
590
- relative_path = relative_path[1:]
591
- im['original_relative_path'] = relative_path
592
- image_id = uuid.uuid1()
593
- im['id'] = image_id
594
- folder_number = i_image // images_per_folder
595
- image_number = i_image % images_per_folder
596
- output_relative_path = 'loc-' + location + '/' + '{0:03d}'.format(folder_number) + '/' + '{0:03d}'.format(image_number) + '.jpg'
597
- im['output_relative_path'] = output_relative_path
598
- assert output_relative_path not in output_paths
599
- output_paths.add(output_relative_path)
600
-
601
- assert len(output_paths) == len(images)
602
-
603
-
604
- #%% Save progress
605
-
606
- with open(sequence_info_results_file,'w') as f:
607
-
608
- # Use default=str to handle datetime objects
609
- json.dump(images, f, indent=1, default=str)
610
-
611
- #%%
612
-
613
- if False:
614
-
615
- #%%
616
-
617
- with open(sequence_info_results_file,'r') as f:
618
- images = json.load(f)
619
-
620
-
621
- #%% Copy images to their output files (functions)
622
-
623
- def copy_image_to_output(im):
624
-
625
- source_path = os.path.join(input_image_folder,im['original_relative_path'])
626
- assert(os.path.isfile(source_path))
627
- dest_path = os.path.join(output_image_folder,im['output_relative_path'])
628
- os.makedirs(os.path.dirname(dest_path),exist_ok=True)
629
- shutil.copyfile(source_path,dest_path)
630
- print('Copying {} to {}'.format(source_path,dest_path))
631
- return None
632
-
633
-
634
- #%% Copy images to output files (execution)
635
-
636
- if n_copy_threads == 1:
637
- for im in tqdm(images):
638
- copy_image_to_output(im)
639
- else:
640
- pool = ThreadPool(n_copy_threads)
641
- copy_image_results = list(tqdm(pool.imap(copy_image_to_output, images), total=len(images)))
642
-
643
-
644
- #%% Rename the main image list for consistency with other scripts
645
-
646
- all_image_info = images
647
-
648
-
649
- #%% Create CCT dictionaries
650
-
651
- def transform_bbox(coords):
652
- """
653
- Derive CCT-formatted bounding boxes from the SamaSource coordinate system.
654
-
655
- SamaSource provides a list of four points (x,y) that should make a box.
656
-
657
- CCT coordinates are absolute, with the origin at the upper-left, as x,y,w,h.
658
- """
659
-
660
- # Make sure this is really a box
661
- assert len(coords) == 4
662
- assert all(len(coord) == 2 for coord in coords)
663
- assert coords[0][1] == coords[1][1]
664
- assert coords[2][1] == coords[3][1]
665
- assert coords[0][0] == coords[2][0]
666
- assert coords[1][0] == coords[3][0]
667
-
668
- # Transform to CCT format
669
- x = coords[0][0]
670
- y = coords[0][1]
671
- h = coords[2][1] - coords[0][1]
672
- w = coords[1][0] - coords[0][0]
673
- return [x, y, w, h]
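
A quick worked example of the corner ordering transform_bbox's assertions imply (top-left, top-right, bottom-left, bottom-right), with invented coordinates:

    coords = [[10, 30], [60, 30], [10, 50], [60, 50]]   # TL, TR, BL, BR corners
    x, y = coords[0]
    w = coords[1][0] - coords[0][0]
    h = coords[2][1] - coords[0][1]
    print([x, y, w, h])   # [10, 30, 50, 20] -> a 50x20 box anchored at (10, 30)
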
-
- annotations = []
- image_ids_to_images = {}
- category_name_to_category = {}
-
- # Force the empty category to be ID 0
- empty_category = {}
- empty_category['id'] = 0
- empty_category['name'] = 'empty'
- category_name_to_category['empty'] = empty_category
- next_category_id = 1
-
- default_annotation = {}
- default_annotation['tags'] = {}
- default_annotation['tags']['Object'] = None
-
- # i_image = 0; input_im = all_image_info[0]
- for i_image,input_im in tqdm(enumerate(all_image_info),total=len(all_image_info)):
-
-     output_im = {}
-     output_im['id'] = input_im['id']
-     output_im['file_name'] = input_im['output_relative_path']
-     output_im['seq_id'] = input_im['seq_id']
-     output_im['seq_num_frames'] = input_im['synthetic_seq_num_frames']
-     output_im['frame_num'] = input_im['synthetic_frame_number']
-     output_im['original_relative_path'] = input_im['original_relative_path']
-
-     # This issue only impacted one image that wasn't a real image, it was just a screenshot
-     # showing "no images available for this camera"
-     if 'location' not in input_im['exif_tags'] or input_im['exif_tags']['location'] == 'unknown':
-         print('Warning: no location for image {}, skipping'.format(
-             input_im['url']))
-         continue
-     output_im['location'] = input_im['exif_tags']['location']
-
-     assert output_im['id'] not in image_ids_to_images
-     image_ids_to_images[output_im['id']] = output_im
-
-     exif_tags = input_im['exif_tags']
-
-     # Convert datetime if necessary
-     dt = exif_tags['datetime']
-     if dt is not None and isinstance(dt,str):
-         dt = datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')
-
-     # Process temperature if available
-     output_im['temperature'] = exif_tags['temperature'] if 'temperature' in exif_tags else None
-
-     # Read width and height if necessary
-     w = None
-     h = None
-
-     if 'width' in exif_tags:
-         w = exif_tags['width']
-     if 'height' in exif_tags:
-         h = exif_tags['height']
-
-     output_image_full_path = os.path.join(output_image_folder,input_im['output_relative_path'])
-
-     if w is None or h is None:
-         pil_image = Image.open(output_image_full_path)
-         w, h = pil_image.size
-
-     output_im['width'] = w
-     output_im['height'] = h
-
-     # I don't know what this field is; confirming that it's always None
-     assert input_im['Object'] is None
-
-     # Process object and bbox
-     input_annotations = input_im['Output']
-
-     if input_annotations is None:
-         input_annotations = [default_annotation]
-
-     # os.startfile(output_image_full_path)
-
-     for i_ann,input_annotation in enumerate(input_annotations):
-
-         bbox = None
-
-         assert isinstance(input_annotation,dict)
-
-         if input_annotation['tags']['Object'] is None:
-
-             # Zero is hard-coded as the empty category, but check to be safe
-             category_id = 0
-             assert category_name_to_category['empty']['id'] == category_id
-
-         else:
-
-             # I can't figure out the 'index' field, but I'm not losing sleep about it
-             # assert input_annotation['index'] == 1+i_ann
-
-             points = input_annotation['points']
-             assert points is not None and len(points) == 4
-             bbox = transform_bbox(points)
-             assert len(input_annotation['tags']) == 1 and 'Object' in input_annotation['tags']
-
-             # Some annotators (but not all) included "_partial" when animals were partially obscured
-             category_name = input_annotation['tags']['Object'].replace('_partial','').lower().strip()
-
-             # Annotators *mostly* used 'none', but sometimes 'empty'. 'empty' is CCT-correct.
-             if category_name == 'none':
-                 category_name = 'empty'
-
-             category_id = None
-
-             # If we've seen this category before...
-             if category_name in category_name_to_category:
-
-                 category = category_name_to_category[category_name]
-                 category_id = category['id']
-
-             # If this is a new category...
-             else:
-
-                 category_id = next_category_id
-                 category = {}
-                 category['id'] = category_id
-                 category['name'] = category_name
-                 category_name_to_category[category_name] = category
-                 next_category_id += 1
-
-         # ...if this is an empty/non-empty annotation
-
-         # Create an annotation
-         annotation = {}
-         annotation['id'] = str(uuid.uuid1())
-         annotation['image_id'] = output_im['id']
-         annotation['category_id'] = category_id
-         annotation['sequence_level_annotation'] = False
-         if bbox is not None:
-             annotation['bbox'] = bbox
-
-         annotations.append(annotation)
-
-     # ...for each annotation on this image
-
- # ...for each image
-
- images = list(image_ids_to_images.values())
- categories = list(category_name_to_category.values())
- print('Loaded {} annotations in {} categories for {} images'.format(
-     len(annotations),len(categories),len(images)))
-
-
- #%% Change *two* annotations on images that I discovered contain a human after running MDv4
-
- manual_human_ids = ['a07fc88a-6dd8-4d66-b552-d21d50fa39d0','285363f9-d76d-4727-b530-a6bd401bb4c7']
- human_id = [cat['id'] for cat in categories if cat['name'] == 'human'][0]
- for ann in tqdm(annotations):
-     if ann['image_id'] in manual_human_ids:
-         old_cat_id = ann['category_id']
-         print('Changing annotation for image {} from {} to {}'.format(
-             ann['image_id'],old_cat_id,human_id))
-         ann['category_id'] = human_id
-
-
- #%% Move human images
-
- human_image_ids = set()
- human_id = [cat['id'] for cat in categories if cat['name'] == 'human'][0]
-
- # ann = annotations[0]
- for ann in tqdm(annotations):
-     if ann['category_id'] == human_id:
-         human_image_ids.add(ann['image_id'])
-
- print('\nFound {} human images'.format(len(human_image_ids)))
-
- for im in tqdm(images):
-     if im['id'] not in human_image_ids:
-         continue
-     source_path = os.path.join(output_image_folder,im['file_name'])
-     if not os.path.isfile(source_path):
-         continue
-     target_path = os.path.join(output_image_folder_humans,im['file_name'])
-     print('Moving {} to {}'.format(source_path,target_path))
-     os.makedirs(os.path.dirname(target_path),exist_ok=True)
-     shutil.move(source_path,target_path)
-
-
- #%% Count images by location
-
- locations_to_images = defaultdict(list)
- for im in tqdm(images):
-     locations_to_images[im['location']].append(im)
-
-
- #%% Write output
-
- info = {}
- info['year'] = 2020
- info['version'] = 1.0
- info['description'] = 'Camera trap data collected from the Channel Islands, California'
- info['contributor'] = 'The Nature Conservancy of California'
-
- json_data = {}
- json_data['images'] = images
- json_data['annotations'] = annotations
- json_data['categories'] = categories
- json_data['info'] = info
- json.dump(json_data, open(output_file, 'w'), indent=1)
-
- print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
-     len(images),len(annotations),len(categories)))
-
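
For reference, the file written above follows the COCO Camera Traps conventions; a skeleton of the result, with every value invented for illustration:

    cct_skeleton = {
        'info': {'year': 2020, 'version': 1.0,
                 'description': '...', 'contributor': '...'},
        'images': [{'id': 'image-uuid', 'file_name': 'loc-1234/000/000.jpg',
                    'location': '1234', 'seq_id': 'sequence-uuid',
                    'seq_num_frames': 3, 'frame_num': 0,
                    'width': 2048, 'height': 1536, 'temperature': '20 c'}],
        'annotations': [{'id': 'annotation-uuid', 'image_id': 'image-uuid',
                         'category_id': 1, 'sequence_level_annotation': False,
                         'bbox': [100, 200, 50, 30]}],
        'categories': [{'id': 0, 'name': 'empty'}, {'id': 1, 'name': 'fox'}]
    }
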
-
- #%% Validate output
-
- from megadetector.data_management.databases import integrity_check_json_db
-
- fn = output_file
- options = integrity_check_json_db.IntegrityCheckOptions()
- options.baseDir = output_image_folder
- options.bCheckImageSizes = False
- options.bCheckImageExistence = False
- options.bFindUnusedImages = False
-
- sortedCategories, data, error = integrity_check_json_db.integrity_check_json_db(fn,options)
-
-
- #%% Preview labels
-
- from megadetector.visualization import visualize_db
-
- viz_options = visualize_db.DbVizOptions()
- viz_options.num_to_visualize = 159
- # viz_options.classes_to_exclude = [0]
- viz_options.classes_to_include = ['other']
- viz_options.trim_to_images_with_bboxes = False
- viz_options.add_search_links = False
- viz_options.sort_by_filename = False
- viz_options.parallelize_rendering = True
- html_output_file,image_db = visualize_db.visualize_db(db_path=output_file,
-                                                       output_dir=os.path.join(output_base,'preview'),
-                                                       image_base_dir=output_image_folder,
-                                                       options=viz_options)
- os.startfile(html_output_file)