megadetector 5.0.28__py3-none-any.whl → 10.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector may warrant closer review.

Files changed (197)
  1. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  2. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  3. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  4. megadetector/classification/aggregate_classifier_probs.py +3 -3
  5. megadetector/classification/analyze_failed_images.py +5 -5
  6. megadetector/classification/cache_batchapi_outputs.py +5 -5
  7. megadetector/classification/create_classification_dataset.py +11 -12
  8. megadetector/classification/crop_detections.py +10 -10
  9. megadetector/classification/csv_to_json.py +8 -8
  10. megadetector/classification/detect_and_crop.py +13 -15
  11. megadetector/classification/efficientnet/model.py +8 -8
  12. megadetector/classification/efficientnet/utils.py +6 -5
  13. megadetector/classification/evaluate_model.py +7 -7
  14. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  15. megadetector/classification/json_to_azcopy_list.py +1 -1
  16. megadetector/classification/json_validator.py +29 -32
  17. megadetector/classification/map_classification_categories.py +9 -9
  18. megadetector/classification/merge_classification_detection_output.py +12 -9
  19. megadetector/classification/prepare_classification_script.py +19 -19
  20. megadetector/classification/prepare_classification_script_mc.py +26 -26
  21. megadetector/classification/run_classifier.py +4 -4
  22. megadetector/classification/save_mislabeled.py +6 -6
  23. megadetector/classification/train_classifier.py +1 -1
  24. megadetector/classification/train_classifier_tf.py +9 -9
  25. megadetector/classification/train_utils.py +10 -10
  26. megadetector/data_management/annotations/annotation_constants.py +1 -2
  27. megadetector/data_management/camtrap_dp_to_coco.py +79 -46
  28. megadetector/data_management/cct_json_utils.py +103 -103
  29. megadetector/data_management/cct_to_md.py +49 -49
  30. megadetector/data_management/cct_to_wi.py +33 -33
  31. megadetector/data_management/coco_to_labelme.py +75 -75
  32. megadetector/data_management/coco_to_yolo.py +210 -193
  33. megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
  34. megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
  35. megadetector/data_management/databases/integrity_check_json_db.py +228 -200
  36. megadetector/data_management/databases/subset_json_db.py +33 -33
  37. megadetector/data_management/generate_crops_from_cct.py +88 -39
  38. megadetector/data_management/get_image_sizes.py +54 -49
  39. megadetector/data_management/labelme_to_coco.py +133 -125
  40. megadetector/data_management/labelme_to_yolo.py +159 -73
  41. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  42. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  43. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  44. megadetector/data_management/lila/download_lila_subset.py +21 -24
  45. megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
  46. megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
  47. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  48. megadetector/data_management/lila/lila_common.py +73 -70
  49. megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
  50. megadetector/data_management/mewc_to_md.py +344 -340
  51. megadetector/data_management/ocr_tools.py +262 -255
  52. megadetector/data_management/read_exif.py +249 -227
  53. megadetector/data_management/remap_coco_categories.py +90 -28
  54. megadetector/data_management/remove_exif.py +81 -21
  55. megadetector/data_management/rename_images.py +187 -187
  56. megadetector/data_management/resize_coco_dataset.py +588 -120
  57. megadetector/data_management/speciesnet_to_md.py +41 -41
  58. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  59. megadetector/data_management/yolo_output_to_md_output.py +248 -122
  60. megadetector/data_management/yolo_to_coco.py +333 -191
  61. megadetector/detection/change_detection.py +832 -0
  62. megadetector/detection/process_video.py +340 -337
  63. megadetector/detection/pytorch_detector.py +358 -278
  64. megadetector/detection/run_detector.py +399 -186
  65. megadetector/detection/run_detector_batch.py +404 -377
  66. megadetector/detection/run_inference_with_yolov5_val.py +340 -327
  67. megadetector/detection/run_tiled_inference.py +257 -249
  68. megadetector/detection/tf_detector.py +24 -24
  69. megadetector/detection/video_utils.py +332 -295
  70. megadetector/postprocessing/add_max_conf.py +19 -11
  71. megadetector/postprocessing/categorize_detections_by_size.py +45 -45
  72. megadetector/postprocessing/classification_postprocessing.py +468 -433
  73. megadetector/postprocessing/combine_batch_outputs.py +23 -23
  74. megadetector/postprocessing/compare_batch_results.py +590 -525
  75. megadetector/postprocessing/convert_output_format.py +106 -102
  76. megadetector/postprocessing/create_crop_folder.py +347 -147
  77. megadetector/postprocessing/detector_calibration.py +173 -168
  78. megadetector/postprocessing/generate_csv_report.py +508 -499
  79. megadetector/postprocessing/load_api_results.py +48 -27
  80. megadetector/postprocessing/md_to_coco.py +133 -102
  81. megadetector/postprocessing/md_to_labelme.py +107 -90
  82. megadetector/postprocessing/md_to_wi.py +40 -40
  83. megadetector/postprocessing/merge_detections.py +92 -114
  84. megadetector/postprocessing/postprocess_batch_results.py +319 -301
  85. megadetector/postprocessing/remap_detection_categories.py +91 -38
  86. megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
  87. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  88. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  89. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
  90. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  91. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  92. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  93. megadetector/postprocessing/validate_batch_results.py +70 -70
  94. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  95. megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
  96. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
  97. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
  98. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  99. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  100. megadetector/taxonomy_mapping/species_lookup.py +156 -74
  101. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  102. megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
  103. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  104. megadetector/utils/ct_utils.py +1049 -211
  105. megadetector/utils/directory_listing.py +21 -77
  106. megadetector/utils/gpu_test.py +22 -22
  107. megadetector/utils/md_tests.py +632 -529
  108. megadetector/utils/path_utils.py +1520 -431
  109. megadetector/utils/process_utils.py +41 -41
  110. megadetector/utils/split_locations_into_train_val.py +62 -62
  111. megadetector/utils/string_utils.py +148 -27
  112. megadetector/utils/url_utils.py +489 -176
  113. megadetector/utils/wi_utils.py +2658 -2526
  114. megadetector/utils/write_html_image_list.py +137 -137
  115. megadetector/visualization/plot_utils.py +34 -30
  116. megadetector/visualization/render_images_with_thumbnails.py +39 -74
  117. megadetector/visualization/visualization_utils.py +487 -435
  118. megadetector/visualization/visualize_db.py +232 -198
  119. megadetector/visualization/visualize_detector_output.py +82 -76
  120. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
  121. megadetector-10.0.0.dist-info/RECORD +139 -0
  122. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
  123. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  124. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  125. megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
  126. megadetector/api/batch_processing/api_core/server.py +0 -294
  127. megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
  128. megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
  129. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  130. megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
  131. megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
  132. megadetector/api/batch_processing/api_core/server_utils.py +0 -88
  133. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  134. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  135. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  136. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  137. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  138. megadetector/api/synchronous/__init__.py +0 -0
  139. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  140. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
  141. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
  142. megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
  143. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  144. megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
  145. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  146. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  147. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  148. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  149. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  150. megadetector/data_management/importers/awc_to_json.py +0 -191
  151. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  152. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  153. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  154. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  155. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  156. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  157. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  158. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  159. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  160. megadetector/data_management/importers/ena24_to_json.py +0 -276
  161. megadetector/data_management/importers/filenames_to_json.py +0 -386
  162. megadetector/data_management/importers/helena_to_cct.py +0 -283
  163. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  164. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  165. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  166. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  167. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  168. megadetector/data_management/importers/missouri_to_json.py +0 -490
  169. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  170. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  171. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  172. megadetector/data_management/importers/pc_to_json.py +0 -365
  173. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  174. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  175. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  176. megadetector/data_management/importers/rspb_to_json.py +0 -356
  177. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  178. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  179. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  180. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  181. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  182. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  183. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  184. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  185. megadetector/data_management/importers/ubc_to_json.py +0 -399
  186. megadetector/data_management/importers/umn_to_json.py +0 -507
  187. megadetector/data_management/importers/wellington_to_json.py +0 -263
  188. megadetector/data_management/importers/wi_to_json.py +0 -442
  189. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  190. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  191. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  192. megadetector/utils/azure_utils.py +0 -178
  193. megadetector/utils/sas_blob_utils.py +0 -509
  194. megadetector-5.0.28.dist-info/RECORD +0 -209
  195. /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
  196. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
  197. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py
@@ -1,150 +0,0 @@
- #
- # make_full_SS_json.py
- #
- # Create a COCO-camera-traps .json file for Snapshot Serengeti data from
- # the original .csv files provided on Dryad.
- #
- # This was used for "version 1.0" of the public Snapshot Serengeti archive; it's no
- # longer used as of v2.0 (early 2020). See snapshot_serengeti_lila.py for the updated
- # Snapshot Safari preparation process.
- #
-
- #%% Imports and constants
-
- import csv
- import json
- import uuid
- import datetime
-
- output_file = '/datadrive/snapshotserengeti/databases/SnapshotSerengeti_multiple_classes.json'
- csv_file_name = '/datadrive/snapshotserengeti/databases/consensus_data.csv'
- all_image_file = '/datadrive/snapshotserengeti/databases/all_images.csv'
-
-
- #%% Read annotation .csv file, format into a dictionary mapping field names to data arrays
-
- data = []
- with open(csv_file_name,'r') as f:
-     reader = csv.reader(f, dialect = 'excel')
-     for row in reader:
-         data.append(row)
-
- data_fields = data[0]
-
- data_dicts = {}
- for event in data[1:]:
-     if event[0] not in data_dicts:
-         data_dicts[event[0]] = []
-     data_dicts[event[0]].append({data_fields[i]:event[i] for i in range(len(data_fields))})
-
- # Count the number of images with multiple species
- mult_species = 0
- for event in data_dicts:
-     if len(data_dicts[event]) > 1:
-         mult_species += 1
-
-
- #%% Read image .csv file, format into a dictionary mapping images to capture events
-
- with open(all_image_file,'r') as f:
-     reader = csv.reader(f,dialect = 'excel')
-     next(reader)
-     im_name_to_cap_id = {row[1]:row[0] for row in reader}
-
- total_ims = len(im_name_to_cap_id)
- total_seqs = len(data_dicts)
- print('Percent seqs with mult species: ',float(mult_species)/float(total_seqs))
-
-
- #%% Create CCT-style .json
-
- images = []
- annotations = []
- categories = []
-
- capture_ims = {i:[] for i in im_name_to_cap_id.values()}
- for im_id in im_name_to_cap_id:
-     capture_ims[im_name_to_cap_id[im_id]].append(im_id)
-
- im_to_seq_num = {im:None for im in im_name_to_cap_id}
- for event in capture_ims:
-     capture_ims[event] = sorted(capture_ims[event])
-     seq_count = 0
-     for im in capture_ims[event]:
-         im_to_seq_num[im] = seq_count
-         seq_count += 1
-
- cat_to_id = {}
- cat_to_id['empty'] = 0
- cat_count = 1
- seasons = []
-
- for im_id in im_name_to_cap_id:
-
-     im = {}
-     im['id'] = im_id.split('.')[0]
-     im['file_name'] = im_id
-
-     im['location'] = im_id.split('/')[1]
-     im['season'] = im_id.split('/')[0]
-     if im['season'] not in seasons:
-         seasons.append(im['season'])
-     im['seq_id'] = im_name_to_cap_id[im_id]
-     im['frame_num'] = im_to_seq_num[im_id]
-     im['seq_num_frames'] = len(capture_ims[im['seq_id']])
-
-     ann = {}
-     ann['id'] = str(uuid.uuid1())
-     ann['image_id'] = im['id']
-
-     if im_name_to_cap_id[im_id] in data_dicts:
-         im_data_per_ann = data_dicts[im_name_to_cap_id[im_id]]
-         for im_data in im_data_per_ann:
-             im['datetime'] = im_data['DateTime']
-             if im_data['Species'] not in cat_to_id:
-                 cat_to_id[im_data['Species']] = cat_count
-                 cat_count += 1
-             ann = {}
-             ann['id'] = str(uuid.uuid1())
-             ann['image_id'] = im['id']
-             ann['category_id'] = cat_to_id[im_data['Species']]
-             annotations.append(ann)
-     else:
-         ann = {}
-         ann['id'] = str(uuid.uuid1())
-         ann['image_id'] = im['id']
-         ann['category_id'] = 0
-         annotations.append(ann)
-
-     # still need image width and height
-     images.append(im)
-
- # ...for each image
-
- print(seasons)
-
- for cat in cat_to_id:
-     new_cat = {}
-     new_cat['id'] = cat_to_id[cat]
-     new_cat['name'] = cat
-     categories.append(new_cat)
-
-
- #%% Write output files
-
- json_data = {}
- json_data['images'] = images
- json_data['annotations'] = annotations
- json_data['categories'] = categories
- info = {}
- info['year'] = 2018
- info['version'] = 1
- info['description'] = 'COCO style Snapshot Serengeti database'
- info['contributor'] = 'SMB'
- info['date_created'] = str(datetime.date.today())
- json_data['info'] = info
- json.dump(json_data,open(output_file,'w'))
-
- print(images[0])
- print(annotations[0])
-
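
For reference, the removed script wrote a single COCO-camera-traps-style dictionary with 'images', 'annotations', 'categories', and 'info' keys. A minimal sketch of that layout is shown below; the field names come from the removed script, while every value is illustrative only (not taken from real Snapshot Serengeti data):

# Sketch of the CCT-style structure make_full_SS_json.py produced.
# Keys mirror the removed script; the values are made-up examples.
example_cct = {
    'images': [{
        'id': 'S1/B04/B04_R1/S1_B04_R1_PICT0001',      # file name without extension
        'file_name': 'S1/B04/B04_R1/S1_B04_R1_PICT0001.JPG',
        'location': 'B04',                              # second path component
        'season': 'S1',                                 # first path component
        'seq_id': 'ASG000b1',                           # capture-event ID from all_images.csv
        'frame_num': 0,
        'seq_num_frames': 3,
        'datetime': '2010-07-20 06:14:06'               # only present for non-empty sequences
    }],
    'annotations': [{
        'id': '0c2b3f06-...',                           # uuid generated per annotation
        'image_id': 'S1/B04/B04_R1/S1_B04_R1_PICT0001',
        'category_id': 1                                # 0 is reserved for 'empty'
    }],
    'categories': [{'id': 0, 'name': 'empty'}, {'id': 1, 'name': 'wildebeest'}],
    'info': {'year': 2018, 'version': 1,
             'description': 'COCO style Snapshot Serengeti database',
             'contributor': 'SMB', 'date_created': '2019-01-01'}
}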

megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py
@@ -1,153 +0,0 @@
- #
- # make_per_season_SS_json.py
- #
- # Create a COCO-camera-traps .json file for each Snapshot Serengeti season from
- # the original .csv files provided on Dryad.
- #
- # This was used for "version 1.0" of the public Snapshot Serengeti archive; it's no
- # longer used as of v2.0 (early 2020). See snapshot_serengeti_lila.py for the updated
- # Snapshot Safari preparation process.
- #
-
- #%% Imports and constants
-
- import csv
- import json
- import uuid
- import datetime
-
- output_file_folder = 'C:/Users/t-sabeer/Documents/databases/'
- csv_file_name = 'D:/consensus_data.csv'
-
-
- #%% Read annotation .csv file, format into a dictionary mapping field names to data arrays
-
- data = []
- with open(csv_file_name,'r') as f:
-     reader = csv.reader(f, dialect = 'excel')
-     for row in reader:
-         data.append(row)
-
- data_fields = data[0]
-
- data_dicts = {}
- for event in data[1:]:
-     data_dicts[event[0]] = {data_fields[i]:event[i] for i in range(len(data_fields))}
-
-
- #%% Read image .csv file, format into a dictionary mapping images to capture events
-
- all_image_file = 'D:/all_images.csv'
- with open(all_image_file,'r') as f:
-     reader = csv.reader(f,dialect = 'excel')
-     next(reader)
-     im_name_to_cap_id = {row[1]:row[0] for row in reader}
-
-
- #%% Create CCT-style .json
-
- images = []
- annotations = []
- categories = []
-
- capture_ims = {i:[] for i in im_name_to_cap_id.values()}
- for im_id in im_name_to_cap_id:
-     capture_ims[im_name_to_cap_id[im_id]].append(im_id)
-
- im_to_seq_num = {im:None for im in im_name_to_cap_id}
- for event in capture_ims:
-     capture_ims[event] = sorted(capture_ims[event])
-     seq_count = 0
-     for im in capture_ims[event]:
-         im_to_seq_num[im] = seq_count
-         seq_count += 1
-
- cat_to_id = {}
- cat_to_id['empty'] = 0
- cat_count = 1
- seasons = []
-
- for im_id in im_name_to_cap_id:
-     im = {}
-     im['id'] = im_id.split('.')[0]
-     im['file_name'] = im_id
-
-     im['location'] = im_id.split('/')[1]
-     im['season'] = im_id.split('/')[0]
-     im['seq_id'] = im_name_to_cap_id[im_id]
-     im['frame_num'] = im_to_seq_num[im_id]
-     im['seq_num_frames'] = len(capture_ims[im['seq_id']])
-     if im['season'] not in seasons:
-         seasons.append(im['season'])
-
-     ann = {}
-     ann['id'] = str(uuid.uuid1())
-     ann['image_id'] = im['id']
-
-     if im_name_to_cap_id[im_id] in data_dicts:
-         im_data = data_dicts[im_name_to_cap_id[im_id]]
-         im['datetime'] = im_data['DateTime']
-         if im_data['Species'] not in cat_to_id:
-             cat_to_id[im_data['Species']] = cat_count
-             cat_count += 1
-         ann['category_id'] = cat_to_id[im_data['Species']]
-     else:
-         ann['category_id'] = 0
-
-     #still need image width and height
-     images.append(im)
-     annotations.append(ann)
-
- # ...for each image ID
-
- for cat in cat_to_id:
-     new_cat = {}
-     new_cat['id'] = cat_to_id[cat]
-     new_cat['name'] = cat
-     categories.append(new_cat)
-
-
- #%% Write output files
-
- output_file = output_file_folder + 'SnapshotSerengeti.json'
- json_data = {}
- json_data['images'] = images
- json_data['annotations'] = annotations
- json_data['categories'] = categories
- info = {}
- info['year'] = 2018
- info['version'] = 1
- info['description'] = 'COCO style Snapshot Serengeti database'
- info['contributor'] = 'SMB'
- info['date_created'] = str(datetime.date.today())
- json_data['info'] = info
- json.dump(json_data,open(output_file,'w'))
-
- for season in seasons:
-
-     output_file = output_file_folder + season + '.json'
-     inSeason = {im['id']:False for im in images}
-     for im in images:
-         if im['season'] == season:
-             inSeason[im['id']] = True
-     new_ims = [im for im in images if inSeason[im['id']]]
-     new_anns = [ann for ann in annotations if inSeason[ann['image_id']]]
-
-     json_data = {}
-     json_data['images'] = new_ims
-     json_data['annotations'] = new_anns
-     json_data['categories'] = categories
-     info = {}
-     info['year'] = 2018
-     info['version'] = 1
-     info['description'] = 'COCO style Snapshot Serengeti database. season ' + season
-     info['contributor'] = 'SMB'
-     info['date_created'] = str(datetime.date.today())
-     json_data['info'] = info
-     json.dump(json_data,open(output_file,'w'))
-
-     print('Season ' + season)
-     print(str(len(new_ims)) + ' images')
-     print(str(len(new_anns)) + ' annotations')
-
- # ...for each season

megadetector/data_management/importers/sulross_get_exif.py
@@ -1,65 +0,0 @@
- """
-
- sulross_get_exif.py
-
- For the Sul Ross dataset, species informationw was stored in XMP metadata; pull
- all that metadata out to .json.
-
- """
-
- import os
- import json
- from tqdm import tqdm
-
- import exiftool
-
- image_ids_path = '/home/beaver/cameratraps/data/sulross/20190522_image_ids.json'
- data_dir = '/home/beaver/cameratraps/mnt/sulross'
-
-
- def get_metadata():
-
-     image_ids = json.load(open(image_ids_path))
-
-     image_id_to_metadata = {}
-
-     # exiftool can process a batch of images at a time, but bottleneck is blobfuse reading the images
-     batch_size = 20
-
-     num_images_processed = 0
-
-     with exiftool.ExifTool() as et:
-         for i in tqdm(range(0, len(image_ids), batch_size)):
-             batch_ids = image_ids[i: i + batch_size]
-
-             batch_paths = [os.path.join(data_dir, i) for i in batch_ids]
-
-             try:
-                 metadatas = et.get_metadata_batch(batch_paths)
-
-                 for id, metadata in zip(batch_ids, metadatas):
-                     image_id_to_metadata[id] = metadata['XMP:HierarchicalSubject']
-             except Exception as e:
-                 print('Exception! {}'.format(e))
-                 continue
-
-             num_images_processed += batch_size
-             if num_images_processed % 1000 == 0:
-                 print('Finished processing {} images; image ID {}'.format(
-                     num_images_processed, image_ids[num_images_processed - 1]))
-                 print(image_id_to_metadata[id])
-                 print()
-
-             # checkpoint
-             if num_images_processed % 10000 == 0:
-                 print('Saving results so far...')
-                 with open('/home/beaver/cameratraps/data/sulross/20190522_metadata.json', 'w') as f:
-                     json.dump(image_id_to_metadata, f, indent=1)
-
-     print('Length of meta data read: ', len(image_id_to_metadata))
-     with open('/home/beaver/cameratraps/data/sulross/20190522_metadata.json', 'w') as f:
-         json.dump(image_id_to_metadata, f, indent=1)
-     print('Results saved. Done!')
-
- if __name__ == '__main__':
-     get_metadata()
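
The removed sulross_get_exif.py script wrote a .json file mapping each image ID to its XMP:HierarchicalSubject value. A minimal, hypothetical sketch of reading that output back is shown below; the pipe-delimited tag convention and the possibility of list-valued entries are assumptions about typical exiftool/XMP output, not behavior guaranteed by the removed script:

import json

# Hypothetical consumer of the metadata file the removed script wrote.
# Output path is taken from the removed script; parsing details are assumptions.
with open('/home/beaver/cameratraps/data/sulross/20190522_metadata.json', 'r') as f:
    image_id_to_metadata = json.load(f)

for image_id, hierarchical_subject in image_id_to_metadata.items():
    if isinstance(hierarchical_subject, str):
        # XMP hierarchical subjects are commonly pipe-delimited, e.g. 'Species|Mule Deer'
        tags = hierarchical_subject.split('|')
    else:
        # exiftool may return a list when an image carries multiple subject values
        tags = list(hierarchical_subject)
    print(image_id, tags)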