megadetector 5.0.11__py3-none-any.whl → 5.0.13__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.
Files changed (203)
  1. megadetector/api/__init__.py +0 -0
  2. megadetector/api/batch_processing/__init__.py +0 -0
  3. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  4. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  5. megadetector/api/batch_processing/api_core/batch_service/score.py +439 -0
  6. megadetector/api/batch_processing/api_core/server.py +294 -0
  7. megadetector/api/batch_processing/api_core/server_api_config.py +97 -0
  8. megadetector/api/batch_processing/api_core/server_app_config.py +55 -0
  9. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +220 -0
  10. megadetector/api/batch_processing/api_core/server_job_status_table.py +149 -0
  11. megadetector/api/batch_processing/api_core/server_orchestration.py +360 -0
  12. megadetector/api/batch_processing/api_core/server_utils.py +88 -0
  13. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  14. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +46 -0
  15. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  16. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +152 -0
  17. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  18. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  19. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  20. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  21. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
  22. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  23. megadetector/api/synchronous/__init__.py +0 -0
  24. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  25. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +152 -0
  26. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +263 -0
  27. megadetector/api/synchronous/api_core/animal_detection_api/config.py +35 -0
  28. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  29. megadetector/api/synchronous/api_core/tests/load_test.py +110 -0
  30. megadetector/classification/__init__.py +0 -0
  31. megadetector/classification/aggregate_classifier_probs.py +108 -0
  32. megadetector/classification/analyze_failed_images.py +227 -0
  33. megadetector/classification/cache_batchapi_outputs.py +198 -0
  34. megadetector/classification/create_classification_dataset.py +627 -0
  35. megadetector/classification/crop_detections.py +516 -0
  36. megadetector/classification/csv_to_json.py +226 -0
  37. megadetector/classification/detect_and_crop.py +855 -0
  38. megadetector/classification/efficientnet/__init__.py +9 -0
  39. megadetector/classification/efficientnet/model.py +415 -0
  40. megadetector/classification/efficientnet/utils.py +607 -0
  41. megadetector/classification/evaluate_model.py +520 -0
  42. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  43. megadetector/classification/json_to_azcopy_list.py +63 -0
  44. megadetector/classification/json_validator.py +699 -0
  45. megadetector/classification/map_classification_categories.py +276 -0
  46. megadetector/classification/merge_classification_detection_output.py +506 -0
  47. megadetector/classification/prepare_classification_script.py +194 -0
  48. megadetector/classification/prepare_classification_script_mc.py +228 -0
  49. megadetector/classification/run_classifier.py +287 -0
  50. megadetector/classification/save_mislabeled.py +110 -0
  51. megadetector/classification/train_classifier.py +827 -0
  52. megadetector/classification/train_classifier_tf.py +725 -0
  53. megadetector/classification/train_utils.py +323 -0
  54. megadetector/data_management/__init__.py +0 -0
  55. megadetector/data_management/annotations/__init__.py +0 -0
  56. megadetector/data_management/annotations/annotation_constants.py +34 -0
  57. megadetector/data_management/camtrap_dp_to_coco.py +237 -0
  58. megadetector/data_management/cct_json_utils.py +404 -0
  59. megadetector/data_management/cct_to_md.py +176 -0
  60. megadetector/data_management/cct_to_wi.py +289 -0
  61. megadetector/data_management/coco_to_labelme.py +283 -0
  62. megadetector/data_management/coco_to_yolo.py +662 -0
  63. megadetector/data_management/databases/__init__.py +0 -0
  64. megadetector/data_management/databases/add_width_and_height_to_db.py +33 -0
  65. megadetector/data_management/databases/combine_coco_camera_traps_files.py +206 -0
  66. megadetector/data_management/databases/integrity_check_json_db.py +493 -0
  67. megadetector/data_management/databases/subset_json_db.py +115 -0
  68. megadetector/data_management/generate_crops_from_cct.py +149 -0
  69. megadetector/data_management/get_image_sizes.py +189 -0
  70. megadetector/data_management/importers/add_nacti_sizes.py +52 -0
  71. megadetector/data_management/importers/add_timestamps_to_icct.py +79 -0
  72. megadetector/data_management/importers/animl_results_to_md_results.py +158 -0
  73. megadetector/data_management/importers/auckland_doc_test_to_json.py +373 -0
  74. megadetector/data_management/importers/auckland_doc_to_json.py +201 -0
  75. megadetector/data_management/importers/awc_to_json.py +191 -0
  76. megadetector/data_management/importers/bellevue_to_json.py +273 -0
  77. megadetector/data_management/importers/cacophony-thermal-importer.py +793 -0
  78. megadetector/data_management/importers/carrizo_shrubfree_2018.py +269 -0
  79. megadetector/data_management/importers/carrizo_trail_cam_2017.py +289 -0
  80. megadetector/data_management/importers/cct_field_adjustments.py +58 -0
  81. megadetector/data_management/importers/channel_islands_to_cct.py +913 -0
  82. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +180 -0
  83. megadetector/data_management/importers/eMammal/eMammal_helpers.py +249 -0
  84. megadetector/data_management/importers/eMammal/make_eMammal_json.py +223 -0
  85. megadetector/data_management/importers/ena24_to_json.py +276 -0
  86. megadetector/data_management/importers/filenames_to_json.py +386 -0
  87. megadetector/data_management/importers/helena_to_cct.py +283 -0
  88. megadetector/data_management/importers/idaho-camera-traps.py +1407 -0
  89. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +294 -0
  90. megadetector/data_management/importers/jb_csv_to_json.py +150 -0
  91. megadetector/data_management/importers/mcgill_to_json.py +250 -0
  92. megadetector/data_management/importers/missouri_to_json.py +490 -0
  93. megadetector/data_management/importers/nacti_fieldname_adjustments.py +79 -0
  94. megadetector/data_management/importers/noaa_seals_2019.py +181 -0
  95. megadetector/data_management/importers/pc_to_json.py +365 -0
  96. megadetector/data_management/importers/plot_wni_giraffes.py +123 -0
  97. megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -0
  98. megadetector/data_management/importers/prepare_zsl_imerit.py +131 -0
  99. megadetector/data_management/importers/rspb_to_json.py +356 -0
  100. megadetector/data_management/importers/save_the_elephants_survey_A.py +320 -0
  101. megadetector/data_management/importers/save_the_elephants_survey_B.py +329 -0
  102. megadetector/data_management/importers/snapshot_safari_importer.py +758 -0
  103. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +665 -0
  104. megadetector/data_management/importers/snapshot_serengeti_lila.py +1067 -0
  105. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +150 -0
  106. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +153 -0
  107. megadetector/data_management/importers/sulross_get_exif.py +65 -0
  108. megadetector/data_management/importers/timelapse_csv_set_to_json.py +490 -0
  109. megadetector/data_management/importers/ubc_to_json.py +399 -0
  110. megadetector/data_management/importers/umn_to_json.py +507 -0
  111. megadetector/data_management/importers/wellington_to_json.py +263 -0
  112. megadetector/data_management/importers/wi_to_json.py +442 -0
  113. megadetector/data_management/importers/zamba_results_to_md_results.py +181 -0
  114. megadetector/data_management/labelme_to_coco.py +547 -0
  115. megadetector/data_management/labelme_to_yolo.py +272 -0
  116. megadetector/data_management/lila/__init__.py +0 -0
  117. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +97 -0
  118. megadetector/data_management/lila/add_locations_to_nacti.py +147 -0
  119. megadetector/data_management/lila/create_lila_blank_set.py +558 -0
  120. megadetector/data_management/lila/create_lila_test_set.py +152 -0
  121. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  122. megadetector/data_management/lila/download_lila_subset.py +178 -0
  123. megadetector/data_management/lila/generate_lila_per_image_labels.py +516 -0
  124. megadetector/data_management/lila/get_lila_annotation_counts.py +170 -0
  125. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  126. megadetector/data_management/lila/lila_common.py +300 -0
  127. megadetector/data_management/lila/test_lila_metadata_urls.py +132 -0
  128. megadetector/data_management/ocr_tools.py +870 -0
  129. megadetector/data_management/read_exif.py +809 -0
  130. megadetector/data_management/remap_coco_categories.py +84 -0
  131. megadetector/data_management/remove_exif.py +66 -0
  132. megadetector/data_management/rename_images.py +187 -0
  133. megadetector/data_management/resize_coco_dataset.py +189 -0
  134. megadetector/data_management/wi_download_csv_to_coco.py +247 -0
  135. megadetector/data_management/yolo_output_to_md_output.py +446 -0
  136. megadetector/data_management/yolo_to_coco.py +676 -0
  137. megadetector/detection/__init__.py +0 -0
  138. megadetector/detection/detector_training/__init__.py +0 -0
  139. megadetector/detection/detector_training/model_main_tf2.py +114 -0
  140. megadetector/detection/process_video.py +846 -0
  141. megadetector/detection/pytorch_detector.py +355 -0
  142. megadetector/detection/run_detector.py +779 -0
  143. megadetector/detection/run_detector_batch.py +1219 -0
  144. megadetector/detection/run_inference_with_yolov5_val.py +1087 -0
  145. megadetector/detection/run_tiled_inference.py +934 -0
  146. megadetector/detection/tf_detector.py +192 -0
  147. megadetector/detection/video_utils.py +698 -0
  148. megadetector/postprocessing/__init__.py +0 -0
  149. megadetector/postprocessing/add_max_conf.py +64 -0
  150. megadetector/postprocessing/categorize_detections_by_size.py +165 -0
  151. megadetector/postprocessing/classification_postprocessing.py +716 -0
  152. megadetector/postprocessing/combine_api_outputs.py +249 -0
  153. megadetector/postprocessing/compare_batch_results.py +966 -0
  154. megadetector/postprocessing/convert_output_format.py +396 -0
  155. megadetector/postprocessing/load_api_results.py +195 -0
  156. megadetector/postprocessing/md_to_coco.py +310 -0
  157. megadetector/postprocessing/md_to_labelme.py +330 -0
  158. megadetector/postprocessing/merge_detections.py +412 -0
  159. megadetector/postprocessing/postprocess_batch_results.py +1908 -0
  160. megadetector/postprocessing/remap_detection_categories.py +170 -0
  161. megadetector/postprocessing/render_detection_confusion_matrix.py +660 -0
  162. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +211 -0
  163. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +83 -0
  164. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1635 -0
  165. megadetector/postprocessing/separate_detections_into_folders.py +730 -0
  166. megadetector/postprocessing/subset_json_detector_output.py +700 -0
  167. megadetector/postprocessing/top_folders_to_bottom.py +223 -0
  168. megadetector/taxonomy_mapping/__init__.py +0 -0
  169. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  170. megadetector/taxonomy_mapping/map_new_lila_datasets.py +150 -0
  171. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -0
  172. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +588 -0
  173. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  174. megadetector/taxonomy_mapping/simple_image_download.py +219 -0
  175. megadetector/taxonomy_mapping/species_lookup.py +834 -0
  176. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  177. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  178. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  179. megadetector/utils/__init__.py +0 -0
  180. megadetector/utils/azure_utils.py +178 -0
  181. megadetector/utils/ct_utils.py +613 -0
  182. megadetector/utils/directory_listing.py +246 -0
  183. megadetector/utils/md_tests.py +1164 -0
  184. megadetector/utils/path_utils.py +1045 -0
  185. megadetector/utils/process_utils.py +160 -0
  186. megadetector/utils/sas_blob_utils.py +509 -0
  187. megadetector/utils/split_locations_into_train_val.py +228 -0
  188. megadetector/utils/string_utils.py +92 -0
  189. megadetector/utils/url_utils.py +323 -0
  190. megadetector/utils/write_html_image_list.py +225 -0
  191. megadetector/visualization/__init__.py +0 -0
  192. megadetector/visualization/plot_utils.py +293 -0
  193. megadetector/visualization/render_images_with_thumbnails.py +275 -0
  194. megadetector/visualization/visualization_utils.py +1536 -0
  195. megadetector/visualization/visualize_db.py +552 -0
  196. megadetector/visualization/visualize_detector_output.py +405 -0
  197. {megadetector-5.0.11.dist-info → megadetector-5.0.13.dist-info}/LICENSE +0 -0
  198. {megadetector-5.0.11.dist-info → megadetector-5.0.13.dist-info}/METADATA +2 -2
  199. megadetector-5.0.13.dist-info/RECORD +201 -0
  200. megadetector-5.0.13.dist-info/top_level.txt +1 -0
  201. megadetector-5.0.11.dist-info/RECORD +0 -5
  202. megadetector-5.0.11.dist-info/top_level.txt +0 -1
  203. {megadetector-5.0.11.dist-info → megadetector-5.0.13.dist-info}/WHEEL +0 -0
megadetector/data_management/importers/carrizo_shrubfree_2018.py
@@ -0,0 +1,269 @@
+ """
+
+ carrizo_shrubfree_2018.py
+
+ Convert the .csv file provided for the Carrizo Mojave data set to a
+ COCO Camera Traps .json file.
+
+ """
+
+ #%% Constants and environment
+
+ import pandas as pd
+ import os
+ import json
+ import uuid
+ import time
+ import humanfriendly
+
+ from tqdm import tqdm
+ from PIL import Image
+
+ import numpy as np
+
+ from megadetector.utils.path_utils import find_images
+
+ input_base = r'Z:\Shrub-free zone Carrizo 2018'
+ input_metadata_file = os.path.join(input_base,'shrub-free 2018.csv')
+
+ output_base = r'G:\carrizo-mojave'
+ output_json_file = os.path.join(output_base,'carrizo shrub-free 2018.json')
+
+ image_directory = input_base
+
+ load_width_and_height = False
+
+ assert(os.path.isdir(image_directory))
+
+ category_replacements = {'ammosphermophilus nelsoni':'ammospermophilus nelsoni'}
+
+ annotation_fields_to_copy = ['rep','photo.rep','timeblock','night.day','observations']
+
+
+ #%% Read source data
+
+ input_metadata = pd.read_csv(input_metadata_file)
+
+ # The original .csv file had superfluous spaces in column names
+ input_metadata = input_metadata.rename(columns=lambda x: x.strip())
+
+ print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
+     len(input_metadata)))
+
+ input_metadata['file'] = 1 + input_metadata.groupby('rep').cumcount()
+ input_metadata['file'] = input_metadata[['file', 'rep']].apply(
+     lambda x: '{0}\\IMG_{1}.JPG'.format(x['rep'], str(x['file']).zfill(4)), axis=1)
+
+
+ #%% Map filenames to rows, verify image existence
+
+ start_time = time.time()
+ filenames_to_rows = {}
+ image_filenames = input_metadata.file
+
+ missing_files = []
+ duplicate_rows = []
+
+ # Build up a map from filenames to a list of rows, checking image existence as we go
+ for iFile, fn in tqdm(enumerate(image_filenames),total=len(image_filenames)):
+     if fn in filenames_to_rows:
+         duplicate_rows.append(iFile)
+         filenames_to_rows[fn].append(iFile)
+     else:
+         filenames_to_rows[fn] = [iFile]
+     image_path = os.path.join(image_directory, fn)
+     if not os.path.isfile(image_path):
+         missing_files.append(fn)
+
+ elapsed = time.time() - start_time
+
+ print('Finished verifying image existence in {}, found {} missing files (of {})'.format(
+     humanfriendly.format_timespan(elapsed),
+     len(missing_files),len(image_filenames)))
+
+ assert len(duplicate_rows) == 0
+
+ # 58 missing files (of 17652)
+
+
+ #%% Check for images that aren't included in the metadata file
+
+ image_full_paths = find_images(image_directory, bRecursive=True)
+ images_missing_from_metadata = []
+
+ for iImage, image_path in tqdm(enumerate(image_full_paths),total=len(image_full_paths)):
+
+     relative_path = os.path.relpath(image_path,input_base)
+     if relative_path not in filenames_to_rows:
+         images_missing_from_metadata.append(relative_path)
+
+ print('{} of {} files are not in metadata'.format(len(images_missing_from_metadata),len(image_full_paths)))
+
+ # 3012 of 20606 files are not in metadata
+
+
+ #%% Create CCT dictionaries
+
+ images = []
+ annotations = []
+
+ # Map categories to integer IDs
+ #
+ # Category ID 0 is reserved for 'empty'
+
+ categories_to_category_id = {}
+ categories_to_counts = {}
+ categories_to_category_id['empty'] = 0
+ categories_to_counts['empty'] = 0
+
+ next_category_id = 1
+
+ # For each image...
+ #
+ # Because images are in practice 1:1 with annotations in this data set,
+ # this is also a loop over annotations.
+
+ start_time = time.time()
+
+ for image_name in image_filenames:
+
+     rows = filenames_to_rows[image_name]
+
+     # Each filename should match exactly one row
+     assert(len(rows) == 1)
+
+     iRow = rows[0]
+     row = input_metadata.iloc[iRow]
+     im = {}
+     im['id'] = image_name.split('.')[0]
+     im['file_name'] = image_name
+     im['datetime'] = row['date']
+     im['location'] = '{0}_{1}_{2}'.format(row['region'], row['site'], row['microsite'])
+
+     image_path = os.path.join(image_directory, image_name)
+
+     # Don't include images that don't exist on disk
+     if not os.path.isfile(image_path):
+         continue
+
+     if load_width_and_height:
+         pilImage = Image.open(image_path)
+         width, height = pilImage.size
+         im['width'] = width
+         im['height'] = height
+     else:
+         im['width'] = -1
+         im['height'] = -1
+
+     images.append(im)
+
+     is_image = row['animal.capture']
+
+     if is_image == 0:
+         category = 'empty'
+     else:
+         if row['latin.bionomial'] is np.nan:
+             category = 'unidentifiable'
+         else:
+             category = row['latin.bionomial'].replace('  ',' ').lower().strip()
+
+     if category in category_replacements:
+         category = category_replacements[category]
+
+     # Have we seen this category before?
+     if category in categories_to_category_id:
+         categoryID = categories_to_category_id[category]
+         categories_to_counts[category] += 1
+     else:
+         categoryID = next_category_id
+         categories_to_category_id[category] = categoryID
+         categories_to_counts[category] = 1
+         next_category_id += 1
+
+     # Create an annotation
+     ann = {}
+
+     # The Internet tells me this guarantees uniqueness to a reasonable extent, even
+     # beyond the sheer improbability of collisions.
+     ann['id'] = str(uuid.uuid1())
+     ann['image_id'] = im['id']
+     ann['category_id'] = categoryID
+
+     for fieldname in annotation_fields_to_copy:
+         ann[fieldname] = row[fieldname]
+         if ann[fieldname] is np.nan:
+             ann[fieldname] = ''
+         ann[fieldname] = str(ann[fieldname])
+
+     annotations.append(ann)
+
+ # ...for each image
+
+ # Convert categories to a CCT-style list of dictionaries
+ categories = []
+
+ for category in categories_to_counts:
+     print('Category {}, count {}'.format(category,categories_to_counts[category]))
+     categoryID = categories_to_category_id[category]
+     cat = {}
+     cat['name'] = category
+     cat['id'] = categoryID
+     categories.append(cat)
+
+ elapsed = time.time() - start_time
+ print('Finished creating CCT dictionaries in {}'.format(
+     humanfriendly.format_timespan(elapsed)))
+
+
+ #%% Create info struct
+
+ info = {}
+ info['year'] = 2018
+ info['version'] = 1
+ info['description'] = 'Carrizo Shrub-Free 2018'
+ info['contributor'] = 'York University'
+
+
+ #%% Write output
+
+ json_data = {}
+ json_data['images'] = images
+ json_data['annotations'] = annotations
+ json_data['categories'] = categories
+ json_data['info'] = info
+ json.dump(json_data, open(output_json_file, 'w'), indent=4)
+
+ print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
+     len(images),len(annotations),len(categories)))
+
+
+ #%% Validate output
+
+ from megadetector.data_management.databases import integrity_check_json_db
+
+ options = integrity_check_json_db.IntegrityCheckOptions()
+ options.baseDir = image_directory
+ options.bCheckImageSizes = False
+ options.bCheckImageExistence = False
+ options.bFindUnusedImages = False
+ data = integrity_check_json_db.integrity_check_json_db(output_json_file,options)
+
+
+ #%% Preview labels
+
+ from megadetector.visualization import visualize_db
+
+ viz_options = visualize_db.DbVizOptions()
+ viz_options.num_to_visualize = None
+ viz_options.trim_to_images_with_bboxes = False
+ viz_options.add_search_links = True
+ viz_options.sort_by_filename = False
+ viz_options.parallelize_rendering = True
+ viz_options.classes_to_exclude = ['empty']
+ html_output_file,image_db = visualize_db.visualize_db(db_path=output_json_file,
+     output_dir=os.path.join(output_base, 'carrizo shrub-free 2018/preview'),
+     image_base_dir=image_directory,
+     options=viz_options)
+ os.startfile(html_output_file)
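The least obvious part of the script above is the filename synthesis in the "Read source data" cell: groupby('rep').cumcount() recovers each camera's 1-based IMG_NNNN.JPG numbering from row order within each rep. A minimal, self-contained sketch of that idiom, using hypothetical 'rep' values (note the sketch uses label-based access rather than the script's positional indexing):

import pandas as pd

# Toy metadata: rows arrive interleaved across two reps (hypothetical values)
df = pd.DataFrame({'rep': [1, 1, 2, 1, 2]})

# cumcount() numbers rows within each 'rep' group starting at 0; adding 1
# reproduces the camera's 1-based image numbering within each folder
df['file'] = 1 + df.groupby('rep').cumcount()
df['file'] = df.apply(
    lambda r: '{0}\\IMG_{1}.JPG'.format(r['rep'], str(r['file']).zfill(4)), axis=1)

print(df['file'].tolist())
# ['1\\IMG_0001.JPG', '1\\IMG_0002.JPG', '2\\IMG_0001.JPG',
#  '1\\IMG_0003.JPG', '2\\IMG_0002.JPG']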
megadetector/data_management/importers/carrizo_trail_cam_2017.py
@@ -0,0 +1,289 @@
+ """
+
+ carrizo_trail_cam_2017.py
+
+ Convert the .csv files provided for the "Trail Cam Carrizo" 2017 data set to
+ a COCO Camera Traps .json file.
+
+ """
+
+ #%% Constants and environment
+
+ import os
+ import json
+ import uuid
+ import time
+ import humanfriendly
+
+ import pandas as pd
+ import numpy as np
+
+ from PIL import Image
+ from tqdm import tqdm
+
+ from megadetector.utils.path_utils import find_images
+
+ input_base = r'Z:\Trail Cam Carrizo 2017'
+ open_metadata_file = os.path.join(input_base, 'Carrizo open 2017.csv')
+ shrub_metadata_file = os.path.join(input_base, 'Carrizo Shrub 2017.csv')
+
+ output_base = r'G:\carrizo-mojave'
+ output_json_file = os.path.join(output_base, 'carrizo trail cam 2017.json')
+ image_directory = input_base
+ input_metadata_files = [open_metadata_file, shrub_metadata_file]
+
+ load_width_and_height = False
+
+ assert(os.path.isdir(image_directory))
+
+ category_replacements = {'unidnetifiable':'unidentifiable','unidentifiable animal':'unidentifiable'}
+
+ annotation_fields_to_copy = ['rep','photo rep','timeblock','night.day','observations']
+
+
+ #%% Read source data
+
+ final_data = pd.DataFrame()
+
+ for inp_file in input_metadata_files:
+
+     print('Reading: {0}'.format(inp_file))
+     input_metadata = pd.read_csv(inp_file)
+
+     print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
+         len(input_metadata)))
+
+     # Remove empty records
+     input_metadata = input_metadata[~np.isnan(input_metadata['rep'])]
+
+     input_metadata['file'] = input_metadata.groupby(['rep', 'week']).cumcount()
+     week_folder_format = {1: 'week 1- 2017', 2: 'week2- 2017', 3: 'week3-2017'}
+
+     input_metadata['file'] = input_metadata[['file', 'rep', 'week', 'microsite']].apply(
+         lambda x: '{3}/{4}{1}-week{0}-carrizo-2017/IMG_{2}.JPG'.format(
+             int(x['week']), int(x['rep']),
+             str(int(x['file']) + 1).zfill(4),
+             week_folder_format[int(x['week'])],
+             x['microsite'].lower()), axis=1)
+
+     final_data = pd.concat([final_data, input_metadata], ignore_index=True)
+
+ print('Read {} metadata rows'.format(len(final_data)))
+
+
+ #%% Map filenames to rows, verify image existence
+
+ start_time = time.time()
+ filenames_to_rows = {}
+ image_filenames = final_data.file
+
+ missing_files = []
+ duplicate_rows = []
+
+ # Build up a map from filenames to a list of rows, checking image existence as we go
+ for iFile, fn in tqdm(enumerate(image_filenames),total=len(image_filenames)):
+     if fn in filenames_to_rows:
+         duplicate_rows.append(iFile)
+         filenames_to_rows[fn].append(iFile)
+     else:
+         filenames_to_rows[fn] = [iFile]
+     image_path = os.path.join(image_directory, fn)
+     if not os.path.isfile(image_path):
+         missing_files.append(fn)
+
+ elapsed = time.time() - start_time
+
+ print('Finished verifying image existence in {}, found {} missing files (of {})'.format(
+     humanfriendly.format_timespan(elapsed),
+     len(missing_files),len(image_filenames)))
+
+ assert len(duplicate_rows) == 0
+
+ # 908 missing files (of 60562)
+
+
+ #%% Check for images that aren't included in the metadata file
+
+ image_full_paths = find_images(image_directory, bRecursive=True)
+ images_missing_from_metadata = []
+
+ for iImage, image_path in tqdm(enumerate(image_full_paths),total=len(image_full_paths)):
+
+     relative_path = os.path.relpath(image_path,input_base).replace('\\','/')
+     if relative_path not in filenames_to_rows:
+         images_missing_from_metadata.append(relative_path)
+
+ print('{} of {} files are not in metadata'.format(len(images_missing_from_metadata),len(image_full_paths)))
+
+ # 105329 of 164983 files are not in metadata
+
+
+ #%% Create CCT dictionaries
+
+ images = []
+ annotations = []
+
+ # Map categories to integer IDs
+ #
+ # Category ID 0 is reserved for 'empty'
+
+ categories_to_category_id = {}
+ categories_to_counts = {}
+ categories_to_category_id['empty'] = 0
+ categories_to_counts['empty'] = 0
+
+ next_category_id = 1
+
+ # For each image...
+ #
+ # Because images are in practice 1:1 with annotations in this data set,
+ # this is also a loop over annotations.
+
+ start_time = time.time()
+
+ for image_name in tqdm(image_filenames):
+
+     rows = filenames_to_rows[image_name]
+
+     # Each filename should match exactly one row
+     assert(len(rows) == 1)
+
+     iRow = rows[0]
+     row = final_data.iloc[iRow]
+     im = {}
+     im['id'] = image_name.replace('\\','/').replace('/','_').replace(' ','_')
+     im['file_name'] = image_name
+     im['region'] = row['region']
+     im['site'] = row['site']
+     im['microsite'] = row['microsite']
+     im['datetime'] = row['calendar date']
+     im['location'] = '{0}_{1}_{2}'.format(row['region'], row['site'], row['microsite'])
+
+     image_path = os.path.join(image_directory, image_name)
+
+     # Don't include images that don't exist on disk
+     if not os.path.isfile(image_path):
+         continue
+
+     if load_width_and_height:
+         pilImage = Image.open(image_path)
+         width, height = pilImage.size
+         im['width'] = width
+         im['height'] = height
+     else:
+         im['width'] = -1
+         im['height'] = -1
+
+     images.append(im)
+
+     is_image = row['animal.capture']
+
+     if is_image == 0:
+         category = 'empty'
+     else:
+         if row['animal'] is np.nan:
+             category = 'unidentifiable'
+         else:
+             category = row['animal'].strip()
+
+     if category in category_replacements:
+         category = category_replacements[category]
+
+     # Have we seen this category before?
+     if category in categories_to_category_id:
+         categoryID = categories_to_category_id[category]
+         categories_to_counts[category] += 1
+     else:
+         categoryID = next_category_id
+         categories_to_category_id[category] = categoryID
+         categories_to_counts[category] = 1
+         next_category_id += 1
+
+     # Create an annotation
+     ann = {}
+
+     # The Internet tells me this guarantees uniqueness to a reasonable extent, even
+     # beyond the sheer improbability of collisions.
+     ann['id'] = str(uuid.uuid1())
+     ann['image_id'] = im['id']
+     ann['category_id'] = categoryID
+
+     for fieldname in annotation_fields_to_copy:
+         ann[fieldname] = row[fieldname]
+         if ann[fieldname] is np.nan:
+             ann[fieldname] = ''
+         ann[fieldname] = str(ann[fieldname])
+
+     annotations.append(ann)
+
+ # ...for each image
+
+ # Convert categories to a CCT-style list of dictionaries
+
+ categories = []
+
+ for category in categories_to_counts:
+     print('Category {}, count {}'.format(category,categories_to_counts[category]))
+     categoryID = categories_to_category_id[category]
+     cat = {}
+     cat['name'] = category
+     cat['id'] = categoryID
+     categories.append(cat)
+
+ elapsed = time.time() - start_time
+ print('Finished creating CCT dictionaries in {}'.format(
+     humanfriendly.format_timespan(elapsed)))
+
+
+ #%% Create info struct
+
+ info = {}
+ info['year'] = 2017
+ info['version'] = 1
+ info['description'] = 'Carrizo Trail Cam 2017'
+ info['contributor'] = 'York University'
+
+
+ #%% Write output
+
+ json_data = {}
+ json_data['images'] = images
+ json_data['annotations'] = annotations
+ json_data['categories'] = categories
+ json_data['info'] = info
+ json.dump(json_data, open(output_json_file, 'w'), indent=1)
+
+ print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
+     len(images),len(annotations),len(categories)))
+
+
+ #%% Validate output
+
+ from megadetector.data_management.databases import integrity_check_json_db
+
+ options = integrity_check_json_db.IntegrityCheckOptions()
+ options.baseDir = image_directory
+ options.bCheckImageSizes = False
+ options.bCheckImageExistence = False
+ options.bFindUnusedImages = False
+ data = integrity_check_json_db.integrity_check_json_db(output_json_file,options)
+
+
+ #%% Preview labels
+
+ from megadetector.visualization import visualize_db
+
+ viz_options = visualize_db.DbVizOptions()
+ viz_options.num_to_visualize = None
+ viz_options.trim_to_images_with_bboxes = False
+ viz_options.add_search_links = False
+ viz_options.sort_by_filename = False
+ viz_options.parallelize_rendering = True
+ viz_options.classes_to_exclude = ['empty']
+ html_output_file,image_db = visualize_db.visualize_db(db_path=output_json_file,
+     output_dir=os.path.join(output_base, 'carrizo trail cam 2017/preview'),
+     image_base_dir=image_directory,
+     options=viz_options)
+ os.startfile(html_output_file)
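This script's path construction packs week, rep, and microsite into a nested folder layout. A quick sketch of one row's expansion, with hypothetical values (first image for rep 3 at an 'Open' microsite in week 1):

# Folder names per week, copied from the script above
week_folder_format = {1: 'week 1- 2017', 2: 'week2- 2017', 3: 'week3-2017'}

# Hypothetical row values
file_index, rep, week, microsite = 0, 3, 1, 'Open'

relative_path = '{3}/{4}{1}-week{0}-carrizo-2017/IMG_{2}.JPG'.format(
    week, rep, str(file_index + 1).zfill(4),
    week_folder_format[week], microsite.lower())

print(relative_path)
# week 1- 2017/open3-week1-carrizo-2017/IMG_0001.JPG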
megadetector/data_management/importers/cct_field_adjustments.py
@@ -0,0 +1,58 @@
+ """
+
+ cct_field_adjustments.py
+
+ CCT metadata was posted with int locations instead of strings.
+
+ This script fixes those issues and revs the version number.
+
+ """
+
+ #%% Constants and environment
+
+ import json
+ import os
+
+ from megadetector.data_management.databases import integrity_check_json_db
+
+ inputJsonFile = r'D:\temp\CaltechCameraTraps_v2.0.json'
+ outputJsonFile = r'D:\temp\CaltechCameraTraps_v2.1.json'
+
+ assert os.path.isfile(inputJsonFile)
+
+
+ #%% Read .json file
+
+ with open(inputJsonFile,'r') as f:
+     data = json.load(f)
+
+ images = data['images']
+ annotations = data['annotations']
+ categories = data['categories']
+ info = data['info']
+
+ print('Finished reading input .json')
+
+
+ #%% Rev version number, update field names and types
+
+ assert(info['version'] == 'Caltech Camera Traps - v2')
+ info['version'] = 2.1
+ info['description'] = 'Caltech Camera Traps: camera trap images collected from the NPS and the USGS with help from Justin Brown and Erin Boydston'
+
+ for image in images:
+
+     assert 'location' in image and isinstance(image['location'],int)
+     image['location'] = str(image['location'])
+
+
+ #%% Write .json file
+
+ json.dump(data, open(outputJsonFile, 'w'), indent=4)
+
+ print('Finished writing output .json to {}'.format(outputJsonFile))
+
+
+ #%% Check output data file
+
+ integrity_check_json_db.integrity_check_json_db(outputJsonFile)
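As a quick sanity check on the fix this script makes, a minimal sketch (the file path is hypothetical, pointing at the rewritten output) that confirms every image's 'location' is a string after conversion:

import json

# Hypothetical path; point this at the rewritten CCT file
with open(r'D:\temp\CaltechCameraTraps_v2.1.json', 'r') as f:
    data = json.load(f)

# After the conversion, every image's 'location' should be a string
bad = [im['id'] for im in data['images'] if not isinstance(im['location'], str)]
print('{} of {} images have non-string locations'.format(len(bad), len(data['images'])))
assert len(bad) == 0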