megadetector 5.0.11__py3-none-any.whl → 5.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. Click here for more details.

Files changed (201) hide show
  1. megadetector/api/__init__.py +0 -0
  2. megadetector/api/batch_processing/__init__.py +0 -0
  3. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  4. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  5. megadetector/api/batch_processing/api_core/batch_service/score.py +439 -0
  6. megadetector/api/batch_processing/api_core/server.py +294 -0
  7. megadetector/api/batch_processing/api_core/server_api_config.py +98 -0
  8. megadetector/api/batch_processing/api_core/server_app_config.py +55 -0
  9. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +220 -0
  10. megadetector/api/batch_processing/api_core/server_job_status_table.py +152 -0
  11. megadetector/api/batch_processing/api_core/server_orchestration.py +360 -0
  12. megadetector/api/batch_processing/api_core/server_utils.py +92 -0
  13. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  14. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +46 -0
  15. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  16. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +152 -0
  17. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  18. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  19. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  20. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  21. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +126 -0
  22. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  23. megadetector/api/synchronous/__init__.py +0 -0
  24. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  25. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +152 -0
  26. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -0
  27. megadetector/api/synchronous/api_core/animal_detection_api/config.py +35 -0
  28. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  29. megadetector/api/synchronous/api_core/tests/load_test.py +110 -0
  30. megadetector/classification/__init__.py +0 -0
  31. megadetector/classification/aggregate_classifier_probs.py +108 -0
  32. megadetector/classification/analyze_failed_images.py +227 -0
  33. megadetector/classification/cache_batchapi_outputs.py +198 -0
  34. megadetector/classification/create_classification_dataset.py +627 -0
  35. megadetector/classification/crop_detections.py +516 -0
  36. megadetector/classification/csv_to_json.py +226 -0
  37. megadetector/classification/detect_and_crop.py +855 -0
  38. megadetector/classification/efficientnet/__init__.py +9 -0
  39. megadetector/classification/efficientnet/model.py +415 -0
  40. megadetector/classification/efficientnet/utils.py +610 -0
  41. megadetector/classification/evaluate_model.py +520 -0
  42. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  43. megadetector/classification/json_to_azcopy_list.py +63 -0
  44. megadetector/classification/json_validator.py +699 -0
  45. megadetector/classification/map_classification_categories.py +276 -0
  46. megadetector/classification/merge_classification_detection_output.py +506 -0
  47. megadetector/classification/prepare_classification_script.py +194 -0
  48. megadetector/classification/prepare_classification_script_mc.py +228 -0
  49. megadetector/classification/run_classifier.py +287 -0
  50. megadetector/classification/save_mislabeled.py +110 -0
  51. megadetector/classification/train_classifier.py +827 -0
  52. megadetector/classification/train_classifier_tf.py +725 -0
  53. megadetector/classification/train_utils.py +323 -0
  54. megadetector/data_management/__init__.py +0 -0
  55. megadetector/data_management/annotations/__init__.py +0 -0
  56. megadetector/data_management/annotations/annotation_constants.py +34 -0
  57. megadetector/data_management/camtrap_dp_to_coco.py +239 -0
  58. megadetector/data_management/cct_json_utils.py +395 -0
  59. megadetector/data_management/cct_to_md.py +176 -0
  60. megadetector/data_management/cct_to_wi.py +289 -0
  61. megadetector/data_management/coco_to_labelme.py +272 -0
  62. megadetector/data_management/coco_to_yolo.py +662 -0
  63. megadetector/data_management/databases/__init__.py +0 -0
  64. megadetector/data_management/databases/add_width_and_height_to_db.py +33 -0
  65. megadetector/data_management/databases/combine_coco_camera_traps_files.py +206 -0
  66. megadetector/data_management/databases/integrity_check_json_db.py +477 -0
  67. megadetector/data_management/databases/subset_json_db.py +115 -0
  68. megadetector/data_management/generate_crops_from_cct.py +149 -0
  69. megadetector/data_management/get_image_sizes.py +189 -0
  70. megadetector/data_management/importers/add_nacti_sizes.py +52 -0
  71. megadetector/data_management/importers/add_timestamps_to_icct.py +79 -0
  72. megadetector/data_management/importers/animl_results_to_md_results.py +158 -0
  73. megadetector/data_management/importers/auckland_doc_test_to_json.py +373 -0
  74. megadetector/data_management/importers/auckland_doc_to_json.py +201 -0
  75. megadetector/data_management/importers/awc_to_json.py +191 -0
  76. megadetector/data_management/importers/bellevue_to_json.py +273 -0
  77. megadetector/data_management/importers/cacophony-thermal-importer.py +796 -0
  78. megadetector/data_management/importers/carrizo_shrubfree_2018.py +269 -0
  79. megadetector/data_management/importers/carrizo_trail_cam_2017.py +289 -0
  80. megadetector/data_management/importers/cct_field_adjustments.py +58 -0
  81. megadetector/data_management/importers/channel_islands_to_cct.py +913 -0
  82. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +180 -0
  83. megadetector/data_management/importers/eMammal/eMammal_helpers.py +249 -0
  84. megadetector/data_management/importers/eMammal/make_eMammal_json.py +223 -0
  85. megadetector/data_management/importers/ena24_to_json.py +276 -0
  86. megadetector/data_management/importers/filenames_to_json.py +386 -0
  87. megadetector/data_management/importers/helena_to_cct.py +283 -0
  88. megadetector/data_management/importers/idaho-camera-traps.py +1407 -0
  89. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +294 -0
  90. megadetector/data_management/importers/jb_csv_to_json.py +150 -0
  91. megadetector/data_management/importers/mcgill_to_json.py +250 -0
  92. megadetector/data_management/importers/missouri_to_json.py +490 -0
  93. megadetector/data_management/importers/nacti_fieldname_adjustments.py +79 -0
  94. megadetector/data_management/importers/noaa_seals_2019.py +181 -0
  95. megadetector/data_management/importers/pc_to_json.py +365 -0
  96. megadetector/data_management/importers/plot_wni_giraffes.py +123 -0
  97. megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -0
  98. megadetector/data_management/importers/prepare_zsl_imerit.py +131 -0
  99. megadetector/data_management/importers/rspb_to_json.py +356 -0
  100. megadetector/data_management/importers/save_the_elephants_survey_A.py +320 -0
  101. megadetector/data_management/importers/save_the_elephants_survey_B.py +329 -0
  102. megadetector/data_management/importers/snapshot_safari_importer.py +758 -0
  103. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +665 -0
  104. megadetector/data_management/importers/snapshot_serengeti_lila.py +1067 -0
  105. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +150 -0
  106. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +153 -0
  107. megadetector/data_management/importers/sulross_get_exif.py +65 -0
  108. megadetector/data_management/importers/timelapse_csv_set_to_json.py +490 -0
  109. megadetector/data_management/importers/ubc_to_json.py +399 -0
  110. megadetector/data_management/importers/umn_to_json.py +507 -0
  111. megadetector/data_management/importers/wellington_to_json.py +263 -0
  112. megadetector/data_management/importers/wi_to_json.py +442 -0
  113. megadetector/data_management/importers/zamba_results_to_md_results.py +181 -0
  114. megadetector/data_management/labelme_to_coco.py +547 -0
  115. megadetector/data_management/labelme_to_yolo.py +272 -0
  116. megadetector/data_management/lila/__init__.py +0 -0
  117. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +97 -0
  118. megadetector/data_management/lila/add_locations_to_nacti.py +147 -0
  119. megadetector/data_management/lila/create_lila_blank_set.py +558 -0
  120. megadetector/data_management/lila/create_lila_test_set.py +152 -0
  121. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  122. megadetector/data_management/lila/download_lila_subset.py +178 -0
  123. megadetector/data_management/lila/generate_lila_per_image_labels.py +516 -0
  124. megadetector/data_management/lila/get_lila_annotation_counts.py +170 -0
  125. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  126. megadetector/data_management/lila/lila_common.py +300 -0
  127. megadetector/data_management/lila/test_lila_metadata_urls.py +132 -0
  128. megadetector/data_management/ocr_tools.py +874 -0
  129. megadetector/data_management/read_exif.py +681 -0
  130. megadetector/data_management/remap_coco_categories.py +84 -0
  131. megadetector/data_management/remove_exif.py +66 -0
  132. megadetector/data_management/resize_coco_dataset.py +189 -0
  133. megadetector/data_management/wi_download_csv_to_coco.py +246 -0
  134. megadetector/data_management/yolo_output_to_md_output.py +441 -0
  135. megadetector/data_management/yolo_to_coco.py +676 -0
  136. megadetector/detection/__init__.py +0 -0
  137. megadetector/detection/detector_training/__init__.py +0 -0
  138. megadetector/detection/detector_training/model_main_tf2.py +114 -0
  139. megadetector/detection/process_video.py +702 -0
  140. megadetector/detection/pytorch_detector.py +341 -0
  141. megadetector/detection/run_detector.py +779 -0
  142. megadetector/detection/run_detector_batch.py +1219 -0
  143. megadetector/detection/run_inference_with_yolov5_val.py +917 -0
  144. megadetector/detection/run_tiled_inference.py +934 -0
  145. megadetector/detection/tf_detector.py +189 -0
  146. megadetector/detection/video_utils.py +606 -0
  147. megadetector/postprocessing/__init__.py +0 -0
  148. megadetector/postprocessing/add_max_conf.py +64 -0
  149. megadetector/postprocessing/categorize_detections_by_size.py +163 -0
  150. megadetector/postprocessing/combine_api_outputs.py +249 -0
  151. megadetector/postprocessing/compare_batch_results.py +958 -0
  152. megadetector/postprocessing/convert_output_format.py +396 -0
  153. megadetector/postprocessing/load_api_results.py +195 -0
  154. megadetector/postprocessing/md_to_coco.py +310 -0
  155. megadetector/postprocessing/md_to_labelme.py +330 -0
  156. megadetector/postprocessing/merge_detections.py +401 -0
  157. megadetector/postprocessing/postprocess_batch_results.py +1902 -0
  158. megadetector/postprocessing/remap_detection_categories.py +170 -0
  159. megadetector/postprocessing/render_detection_confusion_matrix.py +660 -0
  160. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +211 -0
  161. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +83 -0
  162. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1631 -0
  163. megadetector/postprocessing/separate_detections_into_folders.py +730 -0
  164. megadetector/postprocessing/subset_json_detector_output.py +696 -0
  165. megadetector/postprocessing/top_folders_to_bottom.py +223 -0
  166. megadetector/taxonomy_mapping/__init__.py +0 -0
  167. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  168. megadetector/taxonomy_mapping/map_new_lila_datasets.py +150 -0
  169. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -0
  170. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +590 -0
  171. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  172. megadetector/taxonomy_mapping/simple_image_download.py +219 -0
  173. megadetector/taxonomy_mapping/species_lookup.py +834 -0
  174. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  175. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  176. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  177. megadetector/utils/__init__.py +0 -0
  178. megadetector/utils/azure_utils.py +178 -0
  179. megadetector/utils/ct_utils.py +612 -0
  180. megadetector/utils/directory_listing.py +246 -0
  181. megadetector/utils/md_tests.py +968 -0
  182. megadetector/utils/path_utils.py +1044 -0
  183. megadetector/utils/process_utils.py +157 -0
  184. megadetector/utils/sas_blob_utils.py +509 -0
  185. megadetector/utils/split_locations_into_train_val.py +228 -0
  186. megadetector/utils/string_utils.py +92 -0
  187. megadetector/utils/url_utils.py +323 -0
  188. megadetector/utils/write_html_image_list.py +225 -0
  189. megadetector/visualization/__init__.py +0 -0
  190. megadetector/visualization/plot_utils.py +293 -0
  191. megadetector/visualization/render_images_with_thumbnails.py +275 -0
  192. megadetector/visualization/visualization_utils.py +1536 -0
  193. megadetector/visualization/visualize_db.py +550 -0
  194. megadetector/visualization/visualize_detector_output.py +405 -0
  195. {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/METADATA +1 -1
  196. megadetector-5.0.12.dist-info/RECORD +199 -0
  197. megadetector-5.0.12.dist-info/top_level.txt +1 -0
  198. megadetector-5.0.11.dist-info/RECORD +0 -5
  199. megadetector-5.0.11.dist-info/top_level.txt +0 -1
  200. {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/LICENSE +0 -0
  201. {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/WHEEL +0 -0
@@ -0,0 +1,201 @@
1
+ """
2
+
3
+ auckland_doc_to_json.py
4
+
5
+ Convert Auckland DOC data set to COCO camera traps format. This was
6
+ for a training data set where class names were encoded in path names.
7
+
8
+ """
9
+
10
+ #%% Constants and imports
11
+
12
+ import json
13
+ import os
14
+ import uuid
15
+ import datetime
16
+
17
+ from tqdm import tqdm
18
+
19
+ from megadetector.visualization import visualize_db
20
+ from megadetector.data_management.databases import integrity_check_json_db
21
+ from megadetector.utils.path_utils import find_images, split_path, insert_before_extension
22
+
23
# Filenames will be stored in the output .json relative to this base dir
input_base_dir = 'y:\\'
output_base_dir = r'f:\auckland-doc'
output_json_filename = os.path.join(output_base_dir, 'auckland-doc-Maukahuka_Auckland_Island.json')

assert os.path.isdir(input_base_dir)
os.makedirs(output_base_dir, exist_ok=True)

# Encoding used when writing the output .json files
output_encoding = 'utf-8'
read_image_sizes = True

# COCO Camera Traps "info" struct included in the output .json
info = {}
info['year'] = 2019
info['version'] = '1.0'
# Fixed typo in the original description string: "Auckaland" --> "Auckland"
info['description'] = 'Auckland DOC Camera Traps'
info['contributor'] = 'Auckland DOC'
info['date_created'] = str(datetime.date.today())
40
+
41
+
42
#%% Enumerate files

print('Enumerating files from {}'.format(input_base_dir))
image_files = find_images(input_base_dir, bRecursive=True)
print('Enumerated {} images'.format(len(image_files)))


#%% Assemble dictionaries

images = []
image_id_to_image = {}
annotations = []
categories = []

category_name_to_category = {}
category_id_to_category = {}

# Force the empty category to be ID 0.  Register it in the name map too and
# give it a 'count' field; the original only put it in the ID map, so an
# image in an "empty" folder would have created a second, duplicate 'empty'
# category with a different ID.
empty_category = {}
empty_category['name'] = 'empty'
empty_category['id'] = 0
empty_category['count'] = 0
category_id_to_category[0] = empty_category
category_name_to_category['empty'] = empty_category
categories.append(empty_category)
next_id = 1

behaviors = set()

# fn = image_files[0]; print(fn)
for fn in tqdm(image_files):

    # Typically y:\Maukahuka_Auckland_Island\1_Training\Winter_Trial_2019\cat\cat\eat\20190903_IDdY_34_E3_tmp_201908240051.JPG
    relative_path = os.path.relpath(fn, input_base_dir)
    tokens = split_path(fn)
    assert tokens[1] == 'Maukahuka_Auckland_Island'

    trainval_split = tokens[2]
    assert trainval_split in ['1_Training', '2_Testing']

    # This data set has two top-level folders, "1_Training" (which has class names encoded
    # in paths) and "2_Testing" (which has no class information).
    if trainval_split == '2_Testing':
        category_name = 'test'
    else:
        category_name = tokens[-3]
        # Strip only the leading "2_" folder-numbering prefix; the original
        # str.replace('2_','') would also have removed any later occurrence
        # of "2_" within the name.
        if category_name.startswith('2_'):
            category_name = category_name[len('2_'):]
        category_name = category_name.lower().strip()

    if category_name not in category_name_to_category:

        # First time we've seen this class name; create a new category
        category_id = next_id
        next_id += 1
        category = {}
        category['id'] = category_id
        category['name'] = category_name
        category['count'] = 0
        categories.append(category)
        category_name_to_category[category_name] = category
        category_id_to_category[category_id] = category

    else:

        category = category_name_to_category[category_name]

    category_id = category['id']

    category['count'] += 1

    # Training images also encode a behavior in the last folder name
    behavior = None
    if (category_name) != 'test':
        behavior = fn.split('\\')[-2]
        behaviors.add(behavior)

    im = {}
    im['id'] = str(uuid.uuid1())
    im['file_name'] = relative_path
    image_id_to_image[im['id']] = im

    images.append(im)

    ann = {}

    ann['id'] = str(uuid.uuid1())
    ann['image_id'] = im['id']
    ann['category_id'] = category_id
    if behavior is not None:
        ann['behavior'] = behavior
    annotations.append(ann)

# ...for each image
132
+
133
#%% Write output .json

data = {}
data['info'] = info
data['images'] = images
data['annotations'] = annotations
data['categories'] = categories

# Use a context manager so the file handle is closed promptly (the original
# leaked the handle from json.dump(data, open(...))), and honor the declared
# output_encoding instead of the platform default.
with open(output_json_filename, 'w', encoding=output_encoding) as f:
    json.dump(data, f, indent=2)
print('Finished writing json to {}'.format(output_json_filename))


#%% Write train/test .jsons

train_images = []; test_images = []
train_annotations = []; test_annotations = []

# Split annotations (and their images) into train/test based on the synthetic
# 'test' category assigned to everything under "2_Testing"
for ann in tqdm(annotations):
    category_id = ann['category_id']
    image_id = ann['image_id']
    category_name = category_id_to_category[category_id]['name']
    im = image_id_to_image[image_id]
    if category_name == 'test':
        test_images.append(im)
        test_annotations.append(ann)
    else:
        train_images.append(im)
        train_annotations.append(ann)

train_fn = insert_before_extension(output_json_filename, 'train')
test_fn = insert_before_extension(output_json_filename, 'test')

data['images'] = train_images
data['annotations'] = train_annotations
with open(train_fn, 'w', encoding=output_encoding) as f:
    json.dump(data, f, indent=2)

data['images'] = test_images
data['annotations'] = test_annotations
with open(test_fn, 'w', encoding=output_encoding) as f:
    json.dump(data, f, indent=2)
173
+
174
#%% Validate .json files

options = integrity_check_json_db.IntegrityCheckOptions()
options.baseDir = input_base_dir
options.bCheckImageSizes = False
options.bCheckImageExistence = True
options.bFindUnusedImages = True

# Run the integrity check on all three output files; the return values are
# only kept for interactive inspection
sorted_categories, data, _ = integrity_check_json_db.integrity_check_json_db(output_json_filename, options)
sorted_categories, data, _ = integrity_check_json_db.integrity_check_json_db(train_fn, options)
sorted_categories, data, _ = integrity_check_json_db.integrity_check_json_db(test_fn, options)


#%% Preview labels

viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 2000
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
# Don't render the synthetic 'test' category
viz_options.classes_to_exclude = ['test']

html_output_file, image_db = visualize_db.visualize_db(
    db_path=output_json_filename,
    output_dir=os.path.join(output_base_dir, 'preview'),
    image_base_dir=input_base_dir,
    options=viz_options)

# Windows-only: open the rendered preview page in the default browser
os.startfile(html_output_file)
@@ -0,0 +1,191 @@
1
+ """
2
+
3
+ awc_to_json.py
4
+
5
+ Convert a particular .csv file from Australian Wildlife Conservancy to CCT format.
6
+
7
+ """
8
+
9
+ #%% Constants and environment
10
+
11
+ import pandas as pd
12
+ import uuid
13
+ import json
14
+ import time
15
+ import humanfriendly
16
+ import os
17
+ import PIL
18
+
19
+ from tqdm import tqdm
20
+
21
+ from megadetector.visualization import visualize_db
22
+ from megadetector.utils import path_utils
23
+
24
+ input_metadata_file = r"D:\wildlife_data\awc\awc_imageinfo.csv"
25
+ output_file = r"D:\wildlife_data\awc\awc_imageinfo.json"
26
+ image_base = r"D:\wildlife_data\awc"
27
+ preview_base = r"D:\wildlife_data\awc\label_preview"
28
+
29
+ filename_replacements = {'D:\\Wet Tropics':'WetTropics'}
30
+ category_mappings = {'none':'empty'}
31
+
32
+
33
#%% Read source data

input_metadata = pd.read_csv(input_metadata_file)

print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
                                                              len(input_metadata)))


#%% Main loop over labels

# "import PIL" alone does not guarantee the Image submodule is loaded; import
# it explicitly before using PIL.Image.open below.
import PIL.Image

startTime = time.time()

relativePathToImage = {}

images = []
annotations = []
categoryIDToCategories = {}
missingFiles = []

duplicateImageIDs = set()

# Force the empty category to be ID 0
emptyCat = {}
emptyCat['name'] = 'empty'
emptyCat['id'] = 0
categoryIDToCategories[0] = emptyCat

# iRow = 0; row = input_metadata.iloc[iRow]
for iRow, row in tqdm(input_metadata.iterrows(), total=len(input_metadata)):

    # ImageID,FileName,FilePath,SpeciesID,CommonName
    imageID = str(row['ImageID'])
    fn = row['FileName']

    # Apply *all* filename replacements cumulatively; the original re-read
    # row['FilePath'] on every loop iteration, so only the last replacement
    # would have taken effect had more than one been defined.
    dirName = row['FilePath']
    for k in filename_replacements:
        dirName = dirName.replace(k, filename_replacements[k])
    relativePath = os.path.join(dirName, fn)

    # This makes an assumption of one annotation per image, which happens to be
    # true in this data set.
    if relativePath in relativePathToImage:

        im = relativePathToImage[relativePath]
        assert im['id'] == imageID
        duplicateImageIDs.add(imageID)

    else:
        im = {}
        im['id'] = imageID
        im['file_name'] = relativePath
        im['seq_id'] = '-1'
        images.append(im)
        relativePathToImage[relativePath] = im

        fullPath = os.path.join(image_base, relativePath)

        if not os.path.isfile(fullPath):

            missingFiles.append(fullPath)

        else:
            # Retrieve image width and height; use a context manager so the
            # file handle is closed promptly instead of leaking.
            with PIL.Image.open(fullPath) as pilImage:
                width, height = pilImage.size
            im['width'] = width
            im['height'] = height

    categoryName = row['CommonName'].lower()
    if categoryName in category_mappings:
        categoryName = category_mappings[categoryName]

    categoryID = row['SpeciesID']
    assert isinstance(categoryID, int)

    if categoryID not in categoryIDToCategories:
        category = {}
        category['name'] = categoryName
        category['id'] = categoryID
        categoryIDToCategories[categoryID] = category
    else:
        # Category IDs must map consistently to the same name
        assert categoryIDToCategories[categoryID]['name'] == categoryName

    # Create an annotation
    ann = {}

    # The Internet tells me this guarantees uniqueness to a reasonable extent, even
    # beyond the sheer improbability of collisions.
    ann['id'] = str(uuid.uuid1())
    ann['image_id'] = im['id']
    ann['category_id'] = categoryID

    annotations.append(ann)

categories = list(categoryIDToCategories.values())

elapsed = time.time() - startTime
print('Finished verifying file loop in {}, {} images, {} missing images, {} repeat labels'.format(
    humanfriendly.format_timespan(elapsed), len(images), len(missingFiles), len(duplicateImageIDs)))
131
+
132
+ #%% Check for images that aren't included in the metadata file
133
+
134
+ # Enumerate all images
135
+ # list(relativePathToImage.keys())[0]
136
+
137
+ imageFullPaths = path_utils.find_images(image_base,bRecursive=True)
138
+ unmatchedFiles = []
139
+
140
+ for iImage,imagePath in enumerate(imageFullPaths):
141
+
142
+ fn = os.path.relpath(imagePath,image_base)
143
+ if fn not in relativePathToImage:
144
+ unmatchedFiles.append(fn)
145
+
146
+ print('Finished checking {} images to make sure they\'re in the metadata, found {} mismatches'.format(
147
+ len(imageFullPaths),len(unmatchedFiles)))
148
+
149
+
150
#%% Create info struct

info = {}
info['year'] = 2019
info['version'] = 1
info['description'] = 'COCO style database'
info['secondary_contributor'] = 'Converted to COCO .json by Dan Morris'
info['contributor'] = ''


#%% Write output

json_data = {}
json_data['images'] = images
json_data['annotations'] = annotations
json_data['categories'] = categories
json_data['info'] = info

# Use a context manager so the output file handle is closed promptly (the
# original leaked the handle from json.dump(..., open(...)))
with open(output_file, 'w') as f:
    json.dump(json_data, f, indent=4)

print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
    len(images), len(annotations), len(categories)))
171
+
172
+
173
#%% Validate the database's integrity

from megadetector.data_management.databases import integrity_check_json_db

# Renamed from "options" to avoid shadowing by the viz options created below
check_options = integrity_check_json_db.IntegrityCheckOptions()
sortedCategories, data = integrity_check_json_db.integrity_check_json_db(output_file, check_options)


#%% Render a bunch of images to make sure the labels got carried along correctly

bbox_db_path = output_file
output_dir = preview_base

viz_options = visualize_db.BboxDbVizOptions()
viz_options.num_to_visualize = 1000
viz_options.sort_by_filename = False

htmlOutputFile = visualize_db.visualize_db(bbox_db_path, output_dir, image_base, viz_options)
191
+
@@ -0,0 +1,273 @@
1
+ """
2
+
3
+ bellevue_to_json.py
4
+
5
+ "Bellevue Camera Traps" is the rather unremarkable camera trap data set
6
+ used by one of the repo's maintainers for testing. It's organized as:
7
+
8
+ approximate_date/[loose_camera_specifier/]/species
9
+
10
+ E.g.:
11
+
12
+ "2018.03.30\coyote\DSCF0091.JPG"
13
+ "2018.07.18\oldcam\empty\DSCF0001.JPG"
14
+
15
+ """
16
+
17
+ #%% Constants and imports
18
+
19
+ import json
20
+ import os
21
+ import uuid
22
+ import datetime
23
+
24
+ from PIL import Image
25
+ from PIL.ExifTags import TAGS
26
+ from tqdm import tqdm
27
+
28
+ from megadetector.utils.path_utils import find_images
29
+
30
# Filenames will be stored in the output .json relative to this base dir
base_dir = r'C:\temp\camera_trap_images_no_people'
output_base = r'c:\temp\previews'
output_filename = os.path.join(base_dir, 'bellevue_camera_traps.{}.json'.format(str(datetime.date.today())))

# Map folder names onto output class names.  The original dict literal
# repeated the 'transitional', 'blurry', and 'unknown' keys (duplicates
# silently collapse in a literal) and then overwrote 'unknown' again; this
# is the net mapping, so behavior is unchanged.
class_mappings = {
    'transitional': 'unlabeled',
    'moving': 'unlabeled',
    'setup': 'unlabeled',
    'blurry': 'unlabeled',
    'junk': 'unlabeled',
    'unknown': 'unknown animal',
    'dan': 'human',
    'dan_and_dog': 'human,dog',
    'dan and dog': 'human,dog',
    'racoon': 'raccoon'
}

# COCO Camera Traps "info" struct included in the output .json
info = {}
info['year'] = 2020
info['version'] = '2.0'
info['description'] = 'Bellevue Camera Traps'
info['contributor'] = 'Dan Morris'
info['date_created'] = str(datetime.date.today())

# If >= 0, stop after this many images (debugging aid)
max_files = -1

# Images this close together (in seconds) on the same camera are grouped
# into one sequence
max_seconds_within_sequence = 10.0

assert os.path.isdir(base_dir)
55
+
56
#%% Exif functions

def get_exif_tags(fn=None, im=None):
    """
    Read EXIF tags from an image, returning a dict mapping decoded tag names
    (or raw tag IDs when no name is known) to values.

    Provide either [fn] (a path to open) or [im] (an already-open PIL Image).
    Returns an empty dict when the image carries no EXIF block.
    """

    assert (fn is not None) or (im is not None)
    ret = {}
    if im is None:
        im = Image.open(fn)
    # _getexif() returns None for images with no EXIF data; the original
    # code crashed on .items() in that case.
    info = im._getexif()
    if info is not None:
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            ret[decoded] = value

    return ret
70
+
71
+
72
#%% Enumerate files, create image/annotation/category info

annotations = []

category_name_to_category = {}

# Force the empty category to be ID 0
empty_category = {}
empty_category['id'] = 0
empty_category['name'] = 'empty'
category_name_to_category['empty'] = empty_category
next_category_id = 1

# Keep track of unique camera folders
camera_folders = set()

# Each element will be a dictionary with fields:
#
# relative_path, width, height, datetime
images = []
non_image_files = []

print('Enumerating files from {}'.format(base_dir))

image_files = find_images(base_dir, recursive=True)
print('Enumerated {} images'.format(len(image_files)))

# fname = image_files[0]
for fname in tqdm(image_files):

    if max_files >= 0 and len(images) > max_files:
        print('Warning: early break at {} files'.format(max_files))
        break

    full_path = fname
    relative_path = os.path.relpath(full_path, base_dir)

    try:
        im = Image.open(full_path)
        h = im.height
        w = im.width
        tags = get_exif_tags(None, im)
        s = tags['DateTimeOriginal']
        dt = datetime.datetime.strptime(s, '%Y:%m:%d %H:%M:%S')
    except Exception:
        # Corrupt, not an image, or missing an EXIF timestamp.  Narrowed
        # from a bare "except:", which also swallowed KeyboardInterrupt and
        # SystemExit.
        print('Warning: could not read {}'.format(fname))
        non_image_files.append(full_path)
        continue

    # Store file info
    image_info = {}
    image_info['file_name'] = relative_path
    image_info['width'] = w
    image_info['height'] = h
    image_info['datetime'] = dt
    image_info['location'] = 'unknown'
    image_info['id'] = str(uuid.uuid4())

    images.append(image_info)

    # E.g. 2018.03.30/coyote/DSCF0091.JPG
    relative_path = image_info['file_name'].replace('\\', '/')
    tokens = relative_path.split('/')
    camera_path_tokens = tokens[0:-2]
    camera_path = '/'.join(camera_path_tokens)
    camera_folders.add(camera_path)
    image_info['camera_path'] = camera_path

    # The parent folder name is the class label
    category_name = tokens[-2].lower()
    if category_name in class_mappings:
        category_name = class_mappings[category_name]

    if category_name not in category_name_to_category:
        category = {}
        category['id'] = next_category_id
        category['name'] = category_name
        next_category_id = next_category_id + 1
        category_name_to_category[category_name] = category
    else:
        category = category_name_to_category[category_name]

    annotation = {}
    annotation['sequence_level_annotation'] = False
    annotation['id'] = str(uuid.uuid4())
    annotation['category_id'] = category['id']
    annotation['image_id'] = image_info['id']
    annotations.append(annotation)

# ...for each image file

assert len(annotations) == len(images)

categories = list(category_name_to_category.values())
166
+
167
+
168
#%% Synthesize sequence information

print('Found {} camera folders'.format(len(camera_folders)))

camera_folders = list(camera_folders)

all_sequences = set()

# Within each camera folder, walk the images in chronological order and start
# a new sequence whenever the time gap to the previous frame exceeds the
# threshold.
for camera_path in camera_folders:

    camera_images = sorted(
        (im for im in images if im['camera_path'] == camera_path),
        key=lambda im: im['datetime'])

    sequence_id = None
    frame_index = 0
    last_timestamp = None

    for im in camera_images:

        gap_seconds = None
        if last_timestamp is not None:
            gap_seconds = (im['datetime'] - last_timestamp).total_seconds()

        # Start a new sequence if necessary
        if (gap_seconds is None) or (gap_seconds > max_seconds_within_sequence):
            frame_index = 0
            sequence_id = str(uuid.uuid4())
            all_sequences.add(sequence_id)

        im['seq_id'] = sequence_id
        im['seq_num_frames'] = None
        im['frame_num'] = frame_index
        frame_index += 1
        last_timestamp = im['datetime']

    # ...for each image in this camera

# ...for each camera

print('Created {} sequences from {} images'.format(len(all_sequences), len(images)))

# Fill in seq_num_frames now that every sequence is complete
num_frames_per_sequence = {}
for seq_id in all_sequences:
    images_this_sequence = [im for im in images if im['seq_id'] == seq_id]
    num_frames_per_sequence[seq_id] = len(images_this_sequence)
    for im in images_this_sequence:
        im['seq_num_frames'] = len(images_this_sequence)
221
+
222
+
223
#%% A little cleanup

# Drop the bookkeeping field and stringify datetimes so every image dict is
# JSON-serializable
for im in tqdm(images):
    if 'camera_path' in im:
        del im['camera_path']
    if not isinstance(im['datetime'], str):
        im['datetime'] = str(im['datetime'])


#%% Write output .json

data = {}
data['info'] = info
data['images'] = images
data['annotations'] = annotations
data['categories'] = categories

# Use a context manager so the file handle is closed promptly (the original
# leaked the handle from json.dump(data, open(...)))
with open(output_filename, 'w') as f:
    json.dump(data, f, indent=1)

print('Finished writing json to {}'.format(output_filename))
243
+
244
+
245
#%% Validate data

from megadetector.data_management.databases import integrity_check_json_db

options = integrity_check_json_db.IntegrityCheckOptions()
options.baseDir = base_dir
options.bCheckImageSizes = False
options.bCheckImageExistence = True
options.bFindUnusedImages = False

sorted_categories = integrity_check_json_db.integrity_check_json_db(output_filename, options)


#%% Label previews

from megadetector.visualization import visualize_db

viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = None
viz_options.parallelize_rendering_n_cores = 8
viz_options.parallelize_rendering = True
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = True
viz_options.sort_by_filename = False

html_output_file, image_db = visualize_db.visualize_db(
    output_filename,
    os.path.join(output_base, 'preview'),
    base_dir, viz_options)

# Windows-only: open the rendered preview page in the default browser
os.startfile(html_output_file)
273
+