megadetector 5.0.28__py3-none-any.whl → 5.0.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic; see the package registry's advisory page for more details.

Files changed (176)
  1. megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
  2. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
  3. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  7. megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
  8. megadetector/classification/aggregate_classifier_probs.py +3 -3
  9. megadetector/classification/analyze_failed_images.py +5 -5
  10. megadetector/classification/cache_batchapi_outputs.py +5 -5
  11. megadetector/classification/create_classification_dataset.py +11 -12
  12. megadetector/classification/crop_detections.py +10 -10
  13. megadetector/classification/csv_to_json.py +8 -8
  14. megadetector/classification/detect_and_crop.py +13 -15
  15. megadetector/classification/evaluate_model.py +7 -7
  16. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  17. megadetector/classification/json_to_azcopy_list.py +1 -1
  18. megadetector/classification/json_validator.py +29 -32
  19. megadetector/classification/map_classification_categories.py +9 -9
  20. megadetector/classification/merge_classification_detection_output.py +12 -9
  21. megadetector/classification/prepare_classification_script.py +19 -19
  22. megadetector/classification/prepare_classification_script_mc.py +23 -23
  23. megadetector/classification/run_classifier.py +4 -4
  24. megadetector/classification/save_mislabeled.py +6 -6
  25. megadetector/classification/train_classifier.py +1 -1
  26. megadetector/classification/train_classifier_tf.py +9 -9
  27. megadetector/classification/train_utils.py +10 -10
  28. megadetector/data_management/annotations/annotation_constants.py +1 -1
  29. megadetector/data_management/camtrap_dp_to_coco.py +45 -45
  30. megadetector/data_management/cct_json_utils.py +101 -101
  31. megadetector/data_management/cct_to_md.py +49 -49
  32. megadetector/data_management/cct_to_wi.py +33 -33
  33. megadetector/data_management/coco_to_labelme.py +75 -75
  34. megadetector/data_management/coco_to_yolo.py +189 -189
  35. megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
  36. megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
  37. megadetector/data_management/databases/integrity_check_json_db.py +202 -188
  38. megadetector/data_management/databases/subset_json_db.py +33 -33
  39. megadetector/data_management/generate_crops_from_cct.py +38 -38
  40. megadetector/data_management/get_image_sizes.py +54 -49
  41. megadetector/data_management/labelme_to_coco.py +130 -124
  42. megadetector/data_management/labelme_to_yolo.py +78 -72
  43. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  44. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  45. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  46. megadetector/data_management/lila/download_lila_subset.py +21 -24
  47. megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
  48. megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
  49. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  50. megadetector/data_management/lila/lila_common.py +70 -70
  51. megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
  52. megadetector/data_management/mewc_to_md.py +339 -340
  53. megadetector/data_management/ocr_tools.py +258 -252
  54. megadetector/data_management/read_exif.py +231 -224
  55. megadetector/data_management/remap_coco_categories.py +26 -26
  56. megadetector/data_management/remove_exif.py +31 -20
  57. megadetector/data_management/rename_images.py +187 -187
  58. megadetector/data_management/resize_coco_dataset.py +41 -41
  59. megadetector/data_management/speciesnet_to_md.py +41 -41
  60. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  61. megadetector/data_management/yolo_output_to_md_output.py +117 -120
  62. megadetector/data_management/yolo_to_coco.py +195 -188
  63. megadetector/detection/change_detection.py +831 -0
  64. megadetector/detection/process_video.py +340 -337
  65. megadetector/detection/pytorch_detector.py +304 -262
  66. megadetector/detection/run_detector.py +177 -164
  67. megadetector/detection/run_detector_batch.py +364 -363
  68. megadetector/detection/run_inference_with_yolov5_val.py +328 -325
  69. megadetector/detection/run_tiled_inference.py +256 -249
  70. megadetector/detection/tf_detector.py +24 -24
  71. megadetector/detection/video_utils.py +290 -282
  72. megadetector/postprocessing/add_max_conf.py +15 -11
  73. megadetector/postprocessing/categorize_detections_by_size.py +44 -44
  74. megadetector/postprocessing/classification_postprocessing.py +415 -415
  75. megadetector/postprocessing/combine_batch_outputs.py +20 -21
  76. megadetector/postprocessing/compare_batch_results.py +528 -517
  77. megadetector/postprocessing/convert_output_format.py +97 -97
  78. megadetector/postprocessing/create_crop_folder.py +219 -146
  79. megadetector/postprocessing/detector_calibration.py +173 -168
  80. megadetector/postprocessing/generate_csv_report.py +508 -499
  81. megadetector/postprocessing/load_api_results.py +23 -20
  82. megadetector/postprocessing/md_to_coco.py +129 -98
  83. megadetector/postprocessing/md_to_labelme.py +89 -83
  84. megadetector/postprocessing/md_to_wi.py +40 -40
  85. megadetector/postprocessing/merge_detections.py +87 -114
  86. megadetector/postprocessing/postprocess_batch_results.py +313 -298
  87. megadetector/postprocessing/remap_detection_categories.py +36 -36
  88. megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
  89. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  90. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  91. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
  92. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  93. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  94. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  95. megadetector/postprocessing/validate_batch_results.py +70 -70
  96. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  97. megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
  98. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
  99. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -66
  100. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  101. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  102. megadetector/taxonomy_mapping/species_lookup.py +33 -33
  103. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  104. megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
  105. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  106. megadetector/utils/azure_utils.py +22 -22
  107. megadetector/utils/ct_utils.py +1018 -200
  108. megadetector/utils/directory_listing.py +21 -77
  109. megadetector/utils/gpu_test.py +22 -22
  110. megadetector/utils/md_tests.py +541 -518
  111. megadetector/utils/path_utils.py +1457 -398
  112. megadetector/utils/process_utils.py +41 -41
  113. megadetector/utils/sas_blob_utils.py +53 -49
  114. megadetector/utils/split_locations_into_train_val.py +61 -61
  115. megadetector/utils/string_utils.py +147 -26
  116. megadetector/utils/url_utils.py +463 -173
  117. megadetector/utils/wi_utils.py +2629 -2526
  118. megadetector/utils/write_html_image_list.py +137 -137
  119. megadetector/visualization/plot_utils.py +21 -21
  120. megadetector/visualization/render_images_with_thumbnails.py +37 -73
  121. megadetector/visualization/visualization_utils.py +401 -397
  122. megadetector/visualization/visualize_db.py +197 -190
  123. megadetector/visualization/visualize_detector_output.py +79 -73
  124. {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/METADATA +135 -132
  125. megadetector-5.0.29.dist-info/RECORD +163 -0
  126. {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
  127. {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
  128. {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
  129. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  130. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  131. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  132. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  133. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  134. megadetector/data_management/importers/awc_to_json.py +0 -191
  135. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  136. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  137. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  138. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  139. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  140. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  141. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  142. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  143. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  144. megadetector/data_management/importers/ena24_to_json.py +0 -276
  145. megadetector/data_management/importers/filenames_to_json.py +0 -386
  146. megadetector/data_management/importers/helena_to_cct.py +0 -283
  147. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  148. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  149. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  150. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  151. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  152. megadetector/data_management/importers/missouri_to_json.py +0 -490
  153. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  154. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  155. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  156. megadetector/data_management/importers/pc_to_json.py +0 -365
  157. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  158. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  159. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  160. megadetector/data_management/importers/rspb_to_json.py +0 -356
  161. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  162. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  163. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  164. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  165. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  166. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  167. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  168. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  169. megadetector/data_management/importers/ubc_to_json.py +0 -399
  170. megadetector/data_management/importers/umn_to_json.py +0 -507
  171. megadetector/data_management/importers/wellington_to_json.py +0 -263
  172. megadetector/data_management/importers/wi_to_json.py +0 -442
  173. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  174. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  175. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  176. megadetector-5.0.28.dist-info/RECORD +0 -209
@@ -1,276 +0,0 @@
1
- """
2
-
3
- ena24_to_json_2017.py
4
-
5
- Convert the ENA24 data set to a COCO-camera-traps .json file
6
-
7
- """
8
-
9
- #%% Constants and environment
10
-
11
- import os
12
- import json
13
- import uuid
14
- import time
15
- import humanfriendly
16
- import numpy as np
17
- import shutil
18
- import zipfile
19
-
20
- from PIL import Image
21
- from tqdm import tqdm
22
-
23
- base_directory = r'e:\wildlife_data\ena24'
24
- output_file = os.path.join(base_directory,'ena24.json')
25
- image_directory = os.path.join(base_directory,'images')
26
- label_directory = os.path.join(base_directory,'labels')
27
-
28
- assert(os.path.isdir(label_directory))
29
- assert(os.path.isdir(image_directory))
30
-
31
- # Temporary folders for human and non-human images
32
- human_dir = os.path.join(base_directory, 'human')
33
- non_human_dir = os.path.join(base_directory, 'non-human')
34
-
35
- human_zipfile = os.path.join(base_directory, 'ena24_humans.zip')
36
- non_human_zipfile = os.path.join(base_directory, 'ena24.zip')
37
-
38
- # Clean existing output folders/zipfiles
39
- if os.path.isdir(human_dir):
40
- shutil.rmtree(human_dir)
41
- if os.path.isdir(non_human_dir):
42
- shutil.rmtree(non_human_dir)
43
-
44
- if os.path.isfile(human_zipfile):
45
- os.remove(human_zipfile)
46
- if os.path.isfile(human_zipfile):
47
- os.remove(non_human_zipfile)
48
-
49
- os.makedirs(human_dir,exist_ok=True)
50
- os.makedirs(non_human_dir,exist_ok=True)
51
-
52
- labels = ['White_Tailed_Deer', 'Dog', 'Bobcat', 'Red Fox', 'Horse',
53
- 'Domestic Cat', 'American Black Bear', 'Eastern Cottontail', 'Grey Fox', 'Coyote',
54
- 'Eastern Fox Squirrel', 'Eastern Gray Squirrel', 'Vehicle', 'Eastern Chipmunk', 'Wild Turkey',
55
- 'Northern Raccoon', 'Striped Skunk', 'Woodchuck', 'Virginia Opossum', 'Human',
56
- 'Bird', 'American Crow', 'Chicken']
57
-
58
-
59
- #%% Support functions
60
-
61
- def zipdir(path, zipfilename, basepath=None):
62
- """
63
- Zip everything in [path] into [zipfilename], with paths in the zipfile relative to [basepath]
64
- """
65
- ziph = zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_STORED)
66
-
67
- for root, dirs, files in os.walk(path):
68
- for file in files:
69
- src = os.path.join(root, file)
70
- if basepath is None:
71
- dst = file
72
- else:
73
- dst = os.path.relpath(src,basepath)
74
- ziph.write(src, dst, zipfile.ZIP_STORED)
75
-
76
- ziph.close()
77
-
78
-
79
- #%% Read source data
80
-
81
- image_list = os.listdir(label_directory)
82
- print('Enumerated {} label files'.format(len(image_list)))
83
-
84
-
85
- #%% Map filenames to rows, verify image existence
86
-
87
- startTime = time.time()
88
-
89
- # Build up a map from filenames to a list of rows, checking image existence as we go
90
- for filename in image_list:
91
- imagePath = os.path.join(image_directory, "{}.jpg".format(filename.split(".")[0]))
92
- assert(os.path.isfile(imagePath))
93
-
94
- elapsed = time.time() - startTime
95
- print('Finished verifying image existence for {} files in {}'.format(
96
- len(image_list),humanfriendly.format_timespan(elapsed)))
97
-
98
-
99
- #%% Create CCT dictionaries
100
-
101
- # Also gets image sizes, so this takes ~6 minutes
102
- #
103
- # Implicitly checks images for overt corruptness, i.e. by not crashing.
104
-
105
- images = []
106
- annotations = []
107
-
108
- # Map categories to integer IDs (that's what COCO likes)
109
- nextCategoryID = 0
110
- categoriesToCategoryId = {}
111
- categoriesToCounts = {}
112
-
113
- # For each image
114
- #
115
- # Because in practice images are 1:1 with annotations in this data set,
116
- # this is also a loop over annotations.
117
-
118
- startTime = time.time()
119
- for filename in tqdm(image_list):
120
-
121
- contains_human = False
122
- im = {}
123
- im['id'] = filename.split('.')[0]
124
- fn = "{}.jpg".format(filename.split('.')[0])
125
- im['file_name'] = fn
126
-
127
- # Check image height and width
128
- imagePath = os.path.join(image_directory, fn)
129
- assert(os.path.isfile(imagePath))
130
- pilImage = Image.open(imagePath)
131
- width, height = pilImage.size
132
- im['width'] = width
133
- im['height'] = height
134
-
135
- images.append(im)
136
-
137
- label_path = os.path.join(label_directory, filename)
138
- file_data = open(label_path, 'r').read()
139
- row = file_data.split()
140
- category = labels[int(row[0])-1]
141
-
142
- rows = np.loadtxt(label_path)
143
-
144
- # Each row is category, [box coordinates]
145
-
146
- # If there's just one row, loadtxt reads it as a 1d array; make it a 2d array
147
- # with one row
148
- if len(rows.shape)==1:
149
- rows = rows.reshape(1,-5)
150
-
151
- assert (len(rows.shape)==2 and rows.shape[1] == 5)
152
-
153
- categories_this_image = set()
154
-
155
- # Each row is a bounding box
156
- for row in rows:
157
-
158
- i_category = int(row[0])-1
159
- category = labels[i_category]
160
- if category == 'Human':
161
- contains_human = True
162
- categories_this_image.add(category)
163
-
164
- # Have we seen this category before?
165
- if category in categoriesToCategoryId:
166
- categoryID = categoriesToCategoryId[category]
167
- categoriesToCounts[category] += 1
168
- else:
169
- categoryID = nextCategoryID
170
- categoriesToCategoryId[category] = categoryID
171
- categoriesToCounts[category] = 0
172
- nextCategoryID += 1
173
-
174
- # Create an annotation
175
- ann = {}
176
-
177
- ann['id'] = str(uuid.uuid1())
178
- ann['image_id'] = im['id']
179
- ann['category_id'] = categoryID
180
- ann['bbox'] = [row[1]*width, row[2]*height, row[3]*width, row[4]*height]
181
- annotations.append(ann)
182
-
183
- # ...for each bounding box
184
-
185
- # This was here for debugging; nearly every instance is Human+Horse, Human+Vehicle,
186
- # or Human+Dog, but there is one Rabbit+Opossium, and a few Deer+Chicken!
187
- if False:
188
- if len(categories_this_image) > 1:
189
- print('Image {} has multiple categories: '.format(filename),end='')
190
- for c in categories_this_image:
191
- print(c, end=',')
192
- print('')
193
-
194
- if contains_human:
195
- shutil.copy(imagePath, os.path.join(base_directory, human_dir))
196
- else:
197
- shutil.copy(imagePath, os.path.join(base_directory, non_human_dir))
198
-
199
- # ...for each image
200
-
201
- # Convert categories to a CCT-style dictionary
202
-
203
- categories = []
204
-
205
- for category in categoriesToCounts:
206
- print('Category {}, count {}'.format(category, categoriesToCounts[category]))
207
- categoryID = categoriesToCategoryId[category]
208
- cat = {}
209
- cat['name'] = category
210
- cat['id'] = categoryID
211
- categories.append(cat)
212
-
213
- elapsed = time.time() - startTime
214
- print('Finished creating CCT dictionaries in {}'.format(
215
- humanfriendly.format_timespan(elapsed)))
216
-
217
-
218
- #%% Create info struct
219
-
220
- info = {}
221
- info['year'] = 2016
222
- info['version'] = 1
223
- info['description'] = ''
224
- info['secondary_contributor'] = 'Converted to COCO .json by Vardhan Duvvuri'
225
- info['contributor'] = 'University of Missouri'
226
-
227
-
228
- #%% Write output
229
-
230
- json_data = {}
231
- json_data['images'] = images
232
- json_data['annotations'] = annotations
233
- json_data['categories'] = categories
234
- json_data['info'] = info
235
- json.dump(json_data, open(output_file, 'w'), indent=2)
236
-
237
- print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
238
- len(images),len(annotations),len(categories)))
239
-
240
-
241
- #%% Create ZIP files for human and non human
242
-
243
- zipdir(human_dir,human_zipfile)
244
- zipdir(non_human_dir,non_human_zipfile)
245
-
246
-
247
- #%% Validate output
248
-
249
- from megadetector.data_management.databases import integrity_check_json_db
250
-
251
- fn = output_file
252
- options = integrity_check_json_db.IntegrityCheckOptions()
253
- options.baseDir = image_directory
254
- options.bCheckImageSizes = False
255
- options.bCheckImageExistence = True
256
- options.bFindUnusedImages = True
257
-
258
- sortedCategories, data = integrity_check_json_db.integrity_check_json_db(fn,options)
259
-
260
-
261
- #%% Preview labels
262
-
263
- from megadetector.visualization import visualize_db
264
- from megadetector.data_management.databases import integrity_check_json_db
265
-
266
- viz_options = visualize_db.DbVizOptions()
267
- viz_options.num_to_visualize = None
268
- viz_options.trim_to_images_with_bboxes = False
269
- viz_options.add_search_links = True
270
- viz_options.sort_by_filename = False
271
- viz_options.parallelize_rendering = True
272
- html_output_file,image_db = visualize_db.visualize_db(db_path=output_file,
273
- output_dir=os.path.join(base_directory,'preview'),
274
- image_base_dir=image_directory,
275
- options=viz_options)
276
- os.startfile(html_output_file)
@@ -1,386 +0,0 @@
1
- """
2
-
3
- filenames_to_json.py
4
-
5
- Take a directory of images in which species labels are encoded by folder
6
- names, and produces a COCO-style .json file
7
-
8
- """
9
-
10
- #%% Constants and imports
11
-
12
- import json
13
- import io
14
- import os
15
- import uuid
16
- import csv
17
- import warnings
18
- import datetime
19
-
20
- from PIL import Image
21
-
22
- from megadetector.utils.path_utils import find_images
23
-
24
- # ignoring all "PIL cannot read EXIF metainfo for the images" warnings
25
- warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
26
- # Metadata Warning, tag 256 had too many entries: 42, expected 1
27
- warnings.filterwarnings("ignore", "Metadata warning", UserWarning)
28
-
29
- # Filenames will be stored in the output .json relative to this base dir
30
- baseDir = r'D:\wildlife_data\bellevue_camera_traps\bellevue_camera_traps.19.06.02.1320'
31
- outputJsonFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320.json')
32
- outputCsvFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320.csv')
33
-
34
- # rawClassListFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320_classes.csv')
35
- # classMappingsFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320_class_mapping.csv')
36
- outputEncoding = 'utf-8'
37
-
38
- classMappings = {'transitional':'unlabeled','moving':'unlabeled','setup':'unlabeled','blurry':'unlabeled','transitional':'unlabeled','junk':'unlabeled','unknown':'unlabeled'}
39
-
40
- bLoadFileListIfAvailable = True
41
-
42
- info = {}
43
- info['year'] = 2019
44
- info['version'] = '1.0'
45
- info['description'] = 'Bellevue Camera Traps'
46
- info['contributor'] = 'Dan Morris'
47
- info['date_created'] = str(datetime.date.today())
48
-
49
- maxFiles = -1
50
- bReadImageSizes = False
51
- bUseExternalRemappingTable = False
52
-
53
-
54
- #%% Enumerate files, read image sizes
55
-
56
- # Each element will be a list of relative path/full path/width/height
57
- fileInfo = []
58
- nonImages = []
59
- nFiles = 0
60
-
61
- if bLoadFileListIfAvailable and os.path.isfile(outputCsvFilename):
62
-
63
- print('Loading file list from {}'.format(outputCsvFilename))
64
-
65
- with open(outputCsvFilename,'r') as f:
66
- reader = csv.reader(f)
67
- csvInfo = list(list(item) for item in csv.reader(f, delimiter=','))
68
-
69
- for iRow in range(len(csvInfo)):
70
- csvInfo[iRow][2] = int(csvInfo[iRow][2])
71
- csvInfo[iRow][3] = int(csvInfo[iRow][3])
72
-
73
- fileInfo = csvInfo
74
-
75
- print('Finished reading list of {} files'.format(len(fileInfo)))
76
-
77
- else:
78
-
79
- print('Enumerating files from {} to {}'.format(baseDir,outputCsvFilename))
80
-
81
- image_files = find_images(baseDir,bRecursive=True)
82
- print('Enumerated {} images'.format(len(image_files)))
83
-
84
- with io.open(outputCsvFilename, "w", encoding=outputEncoding) as outputFileHandle:
85
-
86
- for fname in image_files:
87
-
88
- nFiles = nFiles + 1
89
- if maxFiles >= 0 and nFiles > maxFiles:
90
- print('Warning: early break at {} files'.format(maxFiles))
91
- break
92
-
93
- fullPath = fname
94
- relativePath = os.path.relpath(fullPath,baseDir)
95
-
96
- if maxFiles >= 0:
97
- print(relativePath)
98
-
99
- h = -1
100
- w = -1
101
-
102
- if bReadImageSizes:
103
-
104
- # Read the image
105
- try:
106
-
107
- im = Image.open(fullPath)
108
- h = im.height
109
- w = im.width
110
-
111
- except:
112
- # Corrupt or not an image
113
- nonImages.append(fullPath)
114
- continue
115
-
116
- # Store file info
117
- imageInfo = [relativePath, fullPath, w, h]
118
- fileInfo.append(imageInfo)
119
-
120
- # Write to output file
121
- outputFileHandle.write('"' + relativePath + '"' + ',' +
122
- '"' + fullPath + '"' + ',' +
123
- str(w) + ',' + str(h) + '\n')
124
-
125
- # ...for each image file
126
-
127
- # ...csv file output
128
-
129
- print("Finished writing {} file names to {}".format(nFiles,outputCsvFilename))
130
-
131
- # ...if the file list is/isn't available
132
-
133
-
134
- #%% Enumerate classes
135
-
136
- # Maps classes to counts
137
- classList = {}
138
-
139
- for iRow,row in enumerate(fileInfo):
140
-
141
- fullPath = row[0]
142
- className = os.path.split(os.path.dirname(fullPath))[1]
143
- className = className.lower().strip()
144
- if className in classList:
145
- classList[className] += 1
146
- else:
147
- classList[className] = 1
148
- row.append(className)
149
-
150
- classNames = list(classList.keys())
151
-
152
- # We like 'empty' to be class 0
153
- if 'empty' in classNames:
154
- classNames.remove('empty')
155
- classNames.insert(0,'empty')
156
-
157
- print('Finished enumerating {} classes'.format(len(classList)))
158
-
159
-
160
- #%% Assemble dictionaries
161
-
162
- images = []
163
- annotations = []
164
- categories = []
165
-
166
- categoryNameToId = {}
167
- idToCategory = {}
168
- imageIdToImage = {}
169
-
170
- nextId = 0
171
-
172
- for categoryName in classNames:
173
-
174
- catId = nextId
175
- nextId += 1
176
- categoryNameToId[categoryName] = catId
177
- newCat = {}
178
- newCat['id'] = categoryNameToId[categoryName]
179
- newCat['name'] = categoryName
180
- newCat['count'] = 0
181
- categories.append(newCat)
182
- idToCategory[catId] = newCat
183
-
184
- # ...for each category
185
-
186
-
187
- # Each element is a list of relative path/full path/width/height/className
188
-
189
- for iRow,row in enumerate(fileInfo):
190
-
191
- relativePath = row[0]
192
- w = row[2]
193
- h = row[3]
194
- className = row[4]
195
-
196
- assert className in categoryNameToId
197
- categoryId = categoryNameToId[className]
198
-
199
- im = {}
200
- im['id'] = str(uuid.uuid1())
201
- im['file_name'] = relativePath
202
- im['height'] = h
203
- im['width'] = w
204
- images.append(im)
205
- imageIdToImage[im['id']] = im
206
-
207
- ann = {}
208
- ann['id'] = str(uuid.uuid1())
209
- ann['image_id'] = im['id']
210
- ann['category_id'] = categoryId
211
- annotations.append(ann)
212
-
213
- cat = idToCategory[categoryId]
214
- cat['count'] += 1
215
-
216
- # ...for each image
217
-
218
- oldNameToOldId = categoryNameToId
219
- originalCategories = categories
220
-
221
- print('Finished assembling dictionaries')
222
-
223
-
224
- #%% External class mapping
225
-
226
- if bUseExternalRemappingTable:
227
-
228
- assert classMappings is None
229
-
230
-
231
- #%% Write raw class table
232
-
233
- # cat = categories[0]
234
- if os.path.isfile(rawClassListFilename):
235
-
236
- print('Not over-writing raw class table')
237
-
238
- else:
239
-
240
- with io.open(rawClassListFilename, "w", encoding=outputEncoding) as classListFileHandle:
241
- for cat in categories:
242
- catId = cat['id']
243
- categoryName = cat['name']
244
- categoryCount = cat['count']
245
- classListFileHandle.write(str(catId) + ',"' + categoryName + '",' + str(categoryCount) + '\n')
246
-
247
- print('Finished writing raw class table')
248
-
249
-
250
- #%% Read the mapped class table
251
-
252
- classMappings = {}
253
-
254
- if os.path.isfile(classMappingsFilename):
255
-
256
- print('Loading file list from {}'.format(classMappingsFilename))
257
-
258
- with open(classMappingsFilename,'r') as f:
259
- reader = csv.reader(f)
260
- mappingInfo = list(list(item) for item in csv.reader(f, delimiter=','))
261
-
262
- for mapping in mappingInfo:
263
- assert len(mapping) == 4
264
-
265
- # id, source, count, target
266
- sourceClass = mapping[1]
267
- targetClass = mapping[3]
268
- assert sourceClass not in classMappings
269
- classMappings[sourceClass] = targetClass
270
-
271
- print('Finished reading list of {} class mappings'.format(len(mappingInfo)))
272
-
273
- else:
274
-
275
- #%% Make classMappings contain *all* classes, not just remapped classes
276
-
277
- # cat = categories[0]
278
- for cat in categories:
279
- if cat['name'] not in classMappings:
280
- classMappings[cat['name']] = cat['name']
281
-
282
-
283
- #%% Create new class list
284
-
285
- categories = []
286
- categoryNameToId = {}
287
- oldIdToNewId = {}
288
-
289
- # Start at 1, explicitly assign 0 to "empty"
290
- nextCategoryId = 1
291
- for sourceClass in classMappings:
292
- targetClass = classMappings[sourceClass]
293
-
294
- if targetClass not in categoryNameToId:
295
-
296
- if targetClass == 'empty':
297
- categoryId = 0
298
- else:
299
- categoryId = nextCategoryId
300
- nextCategoryId = nextCategoryId + 1
301
-
302
- categoryNameToId[targetClass] = categoryId
303
- newCat = {}
304
- newCat['id'] = categoryId
305
- newCat['name'] = targetClass
306
- newCat['count'] = 0
307
-
308
- if targetClass == 'empty':
309
- categories.insert(0,newCat)
310
- else:
311
- categories.append(newCat)
312
-
313
- else:
314
-
315
- categoryId = categoryNameToId[targetClass]
316
-
317
- # One-off issue with character encoding
318
- if sourceClass == 'human':
319
- sourceClass = 'human'
320
-
321
- assert sourceClass in oldNameToOldId
322
- oldId = oldNameToOldId[sourceClass]
323
- oldIdToNewId[oldId] = categoryId
324
-
325
- categoryIdToCat = {}
326
- for cat in categories:
327
- categoryIdToCat[cat['id']] = cat
328
-
329
- print('Mapped {} original classes to {} new classes'.format(len(originalCategories),len(categories)))
330
-
331
-
332
- #%% Re-map annotations
333
-
334
- # ann = annotations[0]
335
- for ann in annotations:
336
-
337
- ann['category_id'] = oldIdToNewId[ann['category_id']]
338
-
339
-
340
- #%% Write output .json
341
-
342
- data = {}
343
- data['info'] = info
344
- data['images'] = images
345
- data['annotations'] = annotations
346
- data['categories'] = categories
347
-
348
- json.dump(data, open(outputJsonFilename,'w'), indent=4)
349
-
350
- print('Finished writing json to {}'.format(outputJsonFilename))
351
-
352
-
353
- #%% Utilities
354
-
355
- if False:
356
-
357
- #%%
358
- # Find images with a particular tag
359
- className = 'hum'
360
- matches = []
361
- assert className in categoryNameToId
362
- catId = categoryNameToId[className]
363
- for ann in annotations:
364
- if ann['category_id'] == catId:
365
- imageId = ann['image_id']
366
- im = imageIdToImage[imageId]
367
- matches.append(im['file_name'])
368
- print('Found {} matches'.format(len(matches)))
369
-
370
- os.startfile(os.path.join(baseDir,matches[0]))
371
-
372
-
373
- #%% Randomly sample annotations
374
-
375
- import random
376
- nAnnotations = len(annotations)
377
- iAnn = random.randint(0,nAnnotations)
378
- ann = annotations[iAnn]
379
- catId = ann['category_id']
380
- imageId = ann['image_id']
381
- im = imageIdToImage[imageId]
382
- fn = os.path.join(baseDir,im['file_name'])
383
- cat = categoryIdToCat[catId]
384
- className = cat['name']
385
- print('This should be a {}'.format(className))
386
- os.startfile(fn)