megadetector 5.0.28-py3-none-any.whl → 10.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (197)
  1. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  2. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  3. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  4. megadetector/classification/aggregate_classifier_probs.py +3 -3
  5. megadetector/classification/analyze_failed_images.py +5 -5
  6. megadetector/classification/cache_batchapi_outputs.py +5 -5
  7. megadetector/classification/create_classification_dataset.py +11 -12
  8. megadetector/classification/crop_detections.py +10 -10
  9. megadetector/classification/csv_to_json.py +8 -8
  10. megadetector/classification/detect_and_crop.py +13 -15
  11. megadetector/classification/efficientnet/model.py +8 -8
  12. megadetector/classification/efficientnet/utils.py +6 -5
  13. megadetector/classification/evaluate_model.py +7 -7
  14. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  15. megadetector/classification/json_to_azcopy_list.py +1 -1
  16. megadetector/classification/json_validator.py +29 -32
  17. megadetector/classification/map_classification_categories.py +9 -9
  18. megadetector/classification/merge_classification_detection_output.py +12 -9
  19. megadetector/classification/prepare_classification_script.py +19 -19
  20. megadetector/classification/prepare_classification_script_mc.py +26 -26
  21. megadetector/classification/run_classifier.py +4 -4
  22. megadetector/classification/save_mislabeled.py +6 -6
  23. megadetector/classification/train_classifier.py +1 -1
  24. megadetector/classification/train_classifier_tf.py +9 -9
  25. megadetector/classification/train_utils.py +10 -10
  26. megadetector/data_management/annotations/annotation_constants.py +1 -2
  27. megadetector/data_management/camtrap_dp_to_coco.py +79 -46
  28. megadetector/data_management/cct_json_utils.py +103 -103
  29. megadetector/data_management/cct_to_md.py +49 -49
  30. megadetector/data_management/cct_to_wi.py +33 -33
  31. megadetector/data_management/coco_to_labelme.py +75 -75
  32. megadetector/data_management/coco_to_yolo.py +210 -193
  33. megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
  34. megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
  35. megadetector/data_management/databases/integrity_check_json_db.py +228 -200
  36. megadetector/data_management/databases/subset_json_db.py +33 -33
  37. megadetector/data_management/generate_crops_from_cct.py +88 -39
  38. megadetector/data_management/get_image_sizes.py +54 -49
  39. megadetector/data_management/labelme_to_coco.py +133 -125
  40. megadetector/data_management/labelme_to_yolo.py +159 -73
  41. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  42. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  43. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  44. megadetector/data_management/lila/download_lila_subset.py +21 -24
  45. megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
  46. megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
  47. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  48. megadetector/data_management/lila/lila_common.py +73 -70
  49. megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
  50. megadetector/data_management/mewc_to_md.py +344 -340
  51. megadetector/data_management/ocr_tools.py +262 -255
  52. megadetector/data_management/read_exif.py +249 -227
  53. megadetector/data_management/remap_coco_categories.py +90 -28
  54. megadetector/data_management/remove_exif.py +81 -21
  55. megadetector/data_management/rename_images.py +187 -187
  56. megadetector/data_management/resize_coco_dataset.py +588 -120
  57. megadetector/data_management/speciesnet_to_md.py +41 -41
  58. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  59. megadetector/data_management/yolo_output_to_md_output.py +248 -122
  60. megadetector/data_management/yolo_to_coco.py +333 -191
  61. megadetector/detection/change_detection.py +832 -0
  62. megadetector/detection/process_video.py +340 -337
  63. megadetector/detection/pytorch_detector.py +358 -278
  64. megadetector/detection/run_detector.py +399 -186
  65. megadetector/detection/run_detector_batch.py +404 -377
  66. megadetector/detection/run_inference_with_yolov5_val.py +340 -327
  67. megadetector/detection/run_tiled_inference.py +257 -249
  68. megadetector/detection/tf_detector.py +24 -24
  69. megadetector/detection/video_utils.py +332 -295
  70. megadetector/postprocessing/add_max_conf.py +19 -11
  71. megadetector/postprocessing/categorize_detections_by_size.py +45 -45
  72. megadetector/postprocessing/classification_postprocessing.py +468 -433
  73. megadetector/postprocessing/combine_batch_outputs.py +23 -23
  74. megadetector/postprocessing/compare_batch_results.py +590 -525
  75. megadetector/postprocessing/convert_output_format.py +106 -102
  76. megadetector/postprocessing/create_crop_folder.py +347 -147
  77. megadetector/postprocessing/detector_calibration.py +173 -168
  78. megadetector/postprocessing/generate_csv_report.py +508 -499
  79. megadetector/postprocessing/load_api_results.py +48 -27
  80. megadetector/postprocessing/md_to_coco.py +133 -102
  81. megadetector/postprocessing/md_to_labelme.py +107 -90
  82. megadetector/postprocessing/md_to_wi.py +40 -40
  83. megadetector/postprocessing/merge_detections.py +92 -114
  84. megadetector/postprocessing/postprocess_batch_results.py +319 -301
  85. megadetector/postprocessing/remap_detection_categories.py +91 -38
  86. megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
  87. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  88. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  89. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
  90. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  91. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  92. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  93. megadetector/postprocessing/validate_batch_results.py +70 -70
  94. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  95. megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
  96. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
  97. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
  98. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  99. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  100. megadetector/taxonomy_mapping/species_lookup.py +156 -74
  101. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  102. megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
  103. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  104. megadetector/utils/ct_utils.py +1049 -211
  105. megadetector/utils/directory_listing.py +21 -77
  106. megadetector/utils/gpu_test.py +22 -22
  107. megadetector/utils/md_tests.py +632 -529
  108. megadetector/utils/path_utils.py +1520 -431
  109. megadetector/utils/process_utils.py +41 -41
  110. megadetector/utils/split_locations_into_train_val.py +62 -62
  111. megadetector/utils/string_utils.py +148 -27
  112. megadetector/utils/url_utils.py +489 -176
  113. megadetector/utils/wi_utils.py +2658 -2526
  114. megadetector/utils/write_html_image_list.py +137 -137
  115. megadetector/visualization/plot_utils.py +34 -30
  116. megadetector/visualization/render_images_with_thumbnails.py +39 -74
  117. megadetector/visualization/visualization_utils.py +487 -435
  118. megadetector/visualization/visualize_db.py +232 -198
  119. megadetector/visualization/visualize_detector_output.py +82 -76
  120. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
  121. megadetector-10.0.0.dist-info/RECORD +139 -0
  122. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
  123. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  124. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  125. megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
  126. megadetector/api/batch_processing/api_core/server.py +0 -294
  127. megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
  128. megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
  129. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  130. megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
  131. megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
  132. megadetector/api/batch_processing/api_core/server_utils.py +0 -88
  133. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  134. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  135. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  136. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  137. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  138. megadetector/api/synchronous/__init__.py +0 -0
  139. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  140. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
  141. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
  142. megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
  143. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  144. megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
  145. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  146. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  147. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  148. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  149. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  150. megadetector/data_management/importers/awc_to_json.py +0 -191
  151. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  152. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  153. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  154. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  155. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  156. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  157. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  158. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  159. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  160. megadetector/data_management/importers/ena24_to_json.py +0 -276
  161. megadetector/data_management/importers/filenames_to_json.py +0 -386
  162. megadetector/data_management/importers/helena_to_cct.py +0 -283
  163. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  164. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  165. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  166. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  167. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  168. megadetector/data_management/importers/missouri_to_json.py +0 -490
  169. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  170. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  171. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  172. megadetector/data_management/importers/pc_to_json.py +0 -365
  173. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  174. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  175. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  176. megadetector/data_management/importers/rspb_to_json.py +0 -356
  177. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  178. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  179. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  180. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  181. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  182. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  183. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  184. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  185. megadetector/data_management/importers/ubc_to_json.py +0 -399
  186. megadetector/data_management/importers/umn_to_json.py +0 -507
  187. megadetector/data_management/importers/wellington_to_json.py +0 -263
  188. megadetector/data_management/importers/wi_to_json.py +0 -442
  189. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  190. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  191. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  192. megadetector/utils/azure_utils.py +0 -178
  193. megadetector/utils/sas_blob_utils.py +0 -509
  194. megadetector-5.0.28.dist-info/RECORD +0 -209
  195. /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
  196. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
  197. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
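The list above is a file-level summary of the two wheels. As a rough, hand-written sketch (not part of the package, and assuming both wheel files have already been downloaded locally under the hypothetical paths used below), the added and removed file lists can be reproduced with nothing but the Python standard library:

# Hedged sketch only: reproduce an added/removed file comparison between two
# locally downloaded wheels. The filenames below are hypothetical local paths.
import zipfile

old_wheel = 'megadetector-5.0.28-py3-none-any.whl'   # hypothetical local path
new_wheel = 'megadetector-10.0.0-py3-none-any.whl'   # hypothetical local path

# A wheel is a zip archive, so namelist() gives every packaged file
with zipfile.ZipFile(old_wheel) as z:
    old_files = set(z.namelist())
with zipfile.ZipFile(new_wheel) as z:
    new_files = set(z.namelist())

print('Removed in 10.0.0:')
for fn in sorted(old_files - new_files):
    print('  ' + fn)

print('Added in 10.0.0:')
for fn in sorted(new_files - old_files):
    print('  ' + fn)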
megadetector/data_management/importers/idfg_iwildcam_lila_prep.py
@@ -1,294 +0,0 @@
-"""
-
-idfg_iwildcam_lila_prep.py
-
-Adding class labels (from the private test .csv) to the iWildCam 2019 IDFG
-test set, in preparation for release on LILA.
-
-This version works with the public iWildCam release images.
-
-"""
-
-#%% ############ Take one, from iWildCam .json files ############
-
-#%% Imports and constants
-
-import uuid
-import json
-import os
-from tqdm import tqdm
-
-base_folder = r'h:\iWildCam_2019_IDFG'
-input_json = os.path.join(base_folder,'iWildCam_2019_IDFG_info.json')
-input_csv = os.path.join(base_folder,'IDFG_eval_public_v_private.csv')
-output_json = os.path.join(base_folder,'idaho_camera_traps.json')
-
-assert os.path.isfile(input_json)
-assert os.path.isfile(input_csv)
-
-
-#%% Read input files
-
-with open(input_json,'r') as f:
-    input_data = json.load(f)
-
-with open(input_csv,'r') as f:
-    private_csv_lines = f.readlines()
-
-private_csv_lines = [s.strip() for s in private_csv_lines]
-
-# Remove the header line
-assert private_csv_lines[0] == 'Id,Category,Usage'
-private_csv_lines = private_csv_lines[1:]
-
-print('Read {} annotations for {} images'.format(len(private_csv_lines),len(input_data['images'])))
-
-assert len(private_csv_lines) == len(input_data['images'])
-n_images = len(input_data['images'])
-
-
-#%% Parse annotations
-
-image_id_to_category_ids = {}
-for line in tqdm(private_csv_lines):
-
-    # Lines look like:
-    #
-    # b005e5b2-2c0b-11e9-bcad-06f1011196c4,1,Private
-
-    tokens = line.split(',')
-    assert len(tokens) == 3
-    assert tokens[2] in ['Private','Public']
-    image_id_to_category_ids[tokens[0]] = int(tokens[1])
-
-assert len(image_id_to_category_ids) == n_images
-
-
-#%% Minor cleanup re: images
-
-for im in tqdm(input_data['images']):
-    image_id = im['id']
-    im['file_name'] = im['file_name'].replace('iWildCam_IDFG_images/','')
-    assert isinstance(im['location'],int)
-    im['location'] = str(im['location'])
-
-
-#%% Create annotations
-
-annotations = []
-
-for image_id in tqdm(image_id_to_category_ids):
-    category_id = image_id_to_category_ids[image_id]
-    ann = {}
-    ann['id'] = str(uuid.uuid1())
-    ann['image_id'] = image_id
-    ann['category_id'] = category_id
-    annotations.append(ann)
-
-
-#%% Prepare info
-
-info = input_data['info']
-info['contributor'] = 'Images acquired by the Idaho Department of Fish and Game, dataset curated by Sara Beery'
-info['description'] = 'Idaho Camera traps'
-info['version'] = '2021.07.19'
-
-
-#%% Minor adjustments to categories
-
-input_categories = input_data['categories']
-
-category_id_to_name = {cat['id']:cat['name'] for cat in input_categories}
-category_name_to_id = {cat['name']:cat['id'] for cat in input_categories}
-assert category_id_to_name[0] == 'empty'
-
-category_names_to_counts = {}
-for category in input_categories:
-    category_names_to_counts[category['name']] = 0
-
-for ann in annotations:
-    category_id = ann['category_id']
-    category_name = category_id_to_name[category_id]
-    category_names_to_counts[category_name] = category_names_to_counts[category_name] + 1
-
-categories = []
-
-for category_name in category_names_to_counts:
-    count = category_names_to_counts[category_name]
-
-    # Remove unused categories
-    if count == 0:
-        continue
-
-    category_id = category_name_to_id[category_name]
-
-    # Name adjustments
-    if category_name == 'prongs':
-        category_name = 'pronghorn'
-
-    categories.append({'id':category_id,'name':category_name})
-
-
-#%% Create output
-
-output_data = {}
-output_data['images'] = input_data['images']
-output_data['annotations'] = annotations
-output_data['categories'] = categories
-output_data['info'] = info
-
-
-#%% Write output
-
-with open(output_json,'w') as f:
-    json.dump(output_data,f,indent=2)
-
-
-#%% Validate .json file
-
-from megadetector.data_management.databases import integrity_check_json_db
-
-options = integrity_check_json_db.IntegrityCheckOptions()
-options.baseDir = os.path.join(base_folder,'images'); assert os.path.isdir(options.baseDir)
-options.bCheckImageSizes = False
-options.bCheckImageExistence = False
-options.bFindUnusedImages = False
-
-_, _, _ = integrity_check_json_db.integrity_check_json_db(output_json, options)
-
-
-#%% Preview labels
-
-from megadetector.visualization import visualize_db
-
-viz_options = visualize_db.DbVizOptions()
-viz_options.num_to_visualize = 100
-viz_options.trim_to_images_with_bboxes = False
-viz_options.add_search_links = False
-viz_options.sort_by_filename = False
-viz_options.parallelize_rendering = True
-viz_options.include_filename_links = True
-
-# viz_options.classes_to_exclude = ['test']
-html_output_file, _ = visualize_db.visualize_db(db_path=output_json,
-                                                output_dir=os.path.join(
-                                                    base_folder,'preview'),
-                                                image_base_dir=os.path.join(base_folder,'images'),
-                                                options=viz_options)
-os.startfile(html_output_file)
-
-
-#%% ############ Take two, from pre-iWildCam .json files created from IDFG .csv files ############
-
-#%% Imports and constants
-
-import json
-import os
-
-base_folder = r'h:\idaho-camera-traps'
-input_json_sl = os.path.join(base_folder,'iWildCam_IDFG.json')
-input_json = os.path.join(base_folder,'iWildCam_IDFG_ml.json')
-output_json = os.path.join(base_folder,'idaho_camera_traps.json')
-remote_image_base_dir = r'z:\idfg'
-
-assert os.path.isfile(input_json)
-
-
-#%% One-time line break addition
-
-if not os.path.isfile(input_json):
-
-    sl_json = input_json_sl
-    ml_json = input_json
-
-    with open(sl_json,'r') as f:
-        d = json.load(f)
-    with open(ml_json,'w') as f:
-        json.dump(d,f,indent=2)
-
-
-#%% Read input files
-
-with open(input_json,'r') as f:
-    input_data = json.load(f)
-
-print('Read {} annotations for {} images'.format(len(input_data['annotations']),len(input_data['images'])))
-
-n_images = len(input_data['images'])
-
-
-#%% Prepare info
-
-info = {}
-info['contributor'] = 'Images acquired by the Idaho Department of Fish and Game, dataset curated by Sara Beery'
-info['description'] = 'Idaho Camera traps'
-info['version'] = '2021.07.19'
-
-
-#%% Minor adjustments to categories
-
-input_categories = input_data['categories']
-output_categories = []
-
-for c in input_categories:
-    category_name = c['name']
-    category_id = c['id']
-    if category_name == 'prong':
-        category_name = 'pronghorn'
-    category_name = category_name.lower()
-    output_categories.append({'name':category_name,'id':category_id})
-
-
-#%% Minor adjustments to annotations
-
-for ann in input_data['annotations']:
-    ann['id'] = str(ann['id'])
-
-
-#%% Create output
-
-output_data = {}
-output_data['images'] = input_data['images']
-output_data['annotations'] = input_data['annotations']
-output_data['categories'] = output_categories
-output_data['info'] = info
-
-
-#%% Write output
-
-with open(output_json,'w') as f:
-    json.dump(output_data,f,indent=2)
-
-
-#%% Validate .json file
-
-from megadetector.data_management.databases import integrity_check_json_db
-
-options = integrity_check_json_db.IntegrityCheckOptions()
-options.baseDir = remote_image_base_dir
-options.bCheckImageSizes = False
-options.bCheckImageExistence = False
-options.bFindUnusedImages = False
-
-_, _, _ = integrity_check_json_db.integrity_check_json_db(output_json, options)
-
-
-#%% Preview labels
-
-from megadetector.visualization import visualize_db
-
-viz_options = visualize_db.DbVizOptions()
-viz_options.num_to_visualize = 100
-viz_options.trim_to_images_with_bboxes = False
-viz_options.add_search_links = False
-viz_options.sort_by_filename = False
-viz_options.parallelize_rendering = True
-viz_options.include_filename_links = True

-# viz_options.classes_to_exclude = ['test']
-html_output_file, _ = visualize_db.visualize_db(db_path=output_json,
-                                                output_dir=os.path.join(
-                                                    base_folder,'preview'),
-                                                image_base_dir=remote_image_base_dir,
-                                                options=viz_options)
-os.startfile(html_output_file)
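Both preview cells in the removed script above open the rendered HTML with os.startfile(), which exists only on Windows. As a hedged, standard-library-only sketch (the helper name below is hypothetical; the next removed script instead uses open_file from megadetector.utils.path_utils), a portable equivalent might look like this:

# Hedged sketch only: a cross-platform stand-in for os.startfile().
# The helper name is hypothetical and not part of the megadetector package.
import os
import subprocess
import sys

def open_file_cross_platform(path):
    """Open a file with the platform's default application."""
    if sys.platform.startswith('win'):
        os.startfile(path)
    elif sys.platform == 'darwin':
        subprocess.run(['open', path], check=False)
    else:
        subprocess.run(['xdg-open', path], check=False)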
megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py
@@ -1,387 +0,0 @@
-"""
-
-import_desert_lion_conservation_camera_traps.py
-
-Prepare the Desert Lion Conservation Camera Traps dataset for release on LILA.
-
-"""
-
-#%% Imports and constants
-
-import os
-import json
-
-input_base_folder = r'i:/data/desert-lion'
-assert os.path.isdir(input_base_folder)
-
-# md_results_file = r'i:/data/desert-lion/desert-lion-camera-traps-2024-07-14-v5a.0.0_detections-all.json'
-md_results_file = r'i:/data/desert-lion/desert-lion-camera-traps-2024-07-14-v5a.0.0_detections.json'
-assert os.path.isfile(md_results_file)
-
-export_base = os.path.join(input_base_folder,'annotated-imgs')
-assert os.path.isdir(export_base)
-
-preview_dir = r'g:\temp\desert-lion-viz'
-output_file = os.path.join(input_base_folder,'desert_lion_camera_traps.json')
-output_zipfile = os.path.join(input_base_folder,'desert-lion-camera-traps-images.zip')
-
-exif_cache_file_post_exif_removal = os.path.join(input_base_folder,'exif_data_post_exif_removal.json')
-exif_cache_file = os.path.join(input_base_folder,'exif_data.json')
-
-
-#%% Find images and videos
-
-from megadetector.detection.video_utils import find_videos
-from megadetector.utils.path_utils import find_images
-
-video_files = find_videos(input_base_folder,recursive=True,return_relative_paths=True,convert_slashes=True)
-image_files = find_images(input_base_folder,recursive=True,return_relative_paths=True,convert_slashes=True)
-
-n_annotated_imgs = len([fn for fn in image_files if 'annotated-imgs' in fn])
-print('Found {} images ({} in the annotated-imgs folder), {} videos'.format(
-    len(image_files),n_annotated_imgs,len(video_files)))
-
-
-#%% Read EXIF data
-
-from megadetector.data_management.read_exif import read_exif_from_folder, ReadExifOptions
-
-exif_options = ReadExifOptions()
-exif_options.n_workers = 10
-
-if os.path.isfile(exif_cache_file):
-    print('EXIF cache {} exists, skipping EXIF read'.format(exif_cache_file))
-    with open(exif_cache_file,'r') as f:
-        exif_data = json.load(f)
-else:
-    exif_data = read_exif_from_folder(input_folder=input_base_folder,
-                                      output_file=exif_cache_file,
-                                      options=exif_options,
-                                      filenames=None,
-                                      recursive=True)
-
-assert len(exif_data) == len(image_files)
-
-
-#%% Remove EXIF data
-
-from megadetector.data_management.remove_exif import remove_exif
-remove_exif(input_base_folder,recursive=True,n_processes=1)
-
-
-#%% Read EXIF data again
-
-exif_data_post_exif_removal = read_exif_from_folder(input_folder=input_base_folder,
-                                                    output_file=exif_cache_file_post_exif_removal,
-                                                    options=exif_options,
-                                                    filenames=None,
-                                                    recursive=True)
-
-
-#%% Make sure no lat/lon data is present
-
-from tqdm import tqdm
-
-for i_image,im in enumerate(tqdm(exif_data_post_exif_removal)):
-    tags = im['exif_tags']
-    if tags is None:
-        continue
-    for k in tags:
-        assert 'gps' not in str(k).lower()
-
-
-#%% Look for images that contain humans
-
-with open(md_results_file,'r') as f:
-    md_results = json.load(f)
-
-assert len(md_results['images']) == len(image_files)
-
-human_threshold = 0.1
-human_categories = ['2','3']
-
-candidate_human_images = set()
-failed_images = set()
-
-# i_image = 0; im = md_results['images'][0]
-for i_image,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):
-
-    if 'failure' in im:
-        failed_images.add(im['file'])
-        continue
-
-    for det in im['detections']:
-        if det['category'] in human_categories and det['conf'] >= human_threshold:
-            candidate_human_images.add(im['file'])
-            break
-
-    # ...for each detection
-
-# ...for each image
-
-print('Found {} failed images and {} candidate human images'.format(
-    len(failed_images),len(candidate_human_images)))
-
-
-#%% Copy failed images and human images to a temporary folder for review
-
-review_folder_base = r'g:/temp/review_images'
-os.makedirs(review_folder_base,exist_ok=True)
-
-images_to_review = failed_images.union(candidate_human_images)
-images_to_review = sorted(list(images_to_review))
-
-source_file_to_target_file = {}
-
-# fn_relative = images_to_review[0]
-for fn_relative in images_to_review:
-    assert '\\' not in fn_relative
-    fn_abs_source = input_base_folder + '/' + fn_relative
-    assert os.path.isfile(fn_abs_source)
-    fn_abs_dest = review_folder_base + '/' + fn_relative.replace('/','_')
-    source_file_to_target_file[fn_abs_source] = fn_abs_dest
-
-from megadetector.utils.path_utils import parallel_copy_files
-
-parallel_copy_files(input_file_to_output_file=source_file_to_target_file,
-                    max_workers=16,
-                    use_threads=True,
-                    overwrite=False,verbose=False)
-
-
-#%% Copy videos to a temporary folder for review
-
-review_folder_base = r'g:/temp/review_videos'
-os.makedirs(review_folder_base,exist_ok=True)
-
-source_file_to_target_file = {}
-
-# fn_relative = video_files[0]
-for fn_relative in video_files:
-    assert '\\' not in fn_relative
-    fn_abs_source = input_base_folder + '/' + fn_relative
-    assert os.path.isfile(fn_abs_source)
-    fn_abs_dest = review_folder_base + '/' + fn_relative.replace('/','_')
-    source_file_to_target_file[fn_abs_source] = fn_abs_dest
-
-from megadetector.utils.path_utils import parallel_copy_files
-
-parallel_copy_files(input_file_to_output_file=source_file_to_target_file,
-                    max_workers=16,
-                    use_threads=True,
-                    overwrite=False,verbose=False)
-
-
-#%% Track removed images
-
-removed_images = [
-    "annotated-imgs\panthera leo\Camera Trap\Events\X73Okngwe\2013\02\PvL_seq_41468415-4518-44d6-acac-2113b442f723\PICT0190.JPG",
-    "annotated-imgs\panthera leo\Camera Trap\Hoanib\FldPln_Arch\211011\PvL_seq_5a9c6379-6980-4ab8-903a-b3bcba2ad21b\PICT0039.JPG",
-    "annotated-imgs\panthera leo\Camera Trap\Hoanib\FldPln_Arch\211011\PvL_seq_5a9c6379-6980-4ab8-903a-b3bcba2ad21b\PICT0037.JPG",
-    "annotated-imgs\panthera leo\Camera Trap\Hoanib\FldPln_Arch\211011\PvL_seq_5a9c6379-6980-4ab8-903a-b3bcba2ad21b\PICT0038.JPG",
-    "annotated-imgs\panthera leo\Camera Trap\2015\09\PvL_seq_da9c9ab1-74a2-485e-b6e7-3827b0c2a2f0\20150924-RCX_0835.JPG",
-    "annotated-imgs\panthera leo\Camera Trap\2015\09\PvL_seq_b0c1c6c5-474e-4844-a66c-e2bf5513d47a\20150924-RCX_0841.JPG",
-    "annotated-imgs\oryx gazella\Camera Trap\Video_Clips\Leylands\CDY_0003.AVI"
-]
-
-removed_images = [fn.replace('\\','/') for fn in removed_images]
-
-
-#%% Map filenames to datetimes
-
-filename_to_datetime = {}
-n_valid_datetimes = 0
-
-# im = exif_data[0]
-for im in tqdm(exif_data):
-    if im['exif_tags'] is None or len(im['exif_tags']) == 0:
-        filename_to_datetime[im['file_name']] = None
-        continue
-    dt = im['exif_tags']['DateTime']
-    assert len(dt) == 19
-    filename_to_datetime[im['file_name']] = dt
-    n_valid_datetimes += 1
-
-print('\nFound datetime information for {} of {} images'.format(
-    n_valid_datetimes,len(exif_data)))
-
-
-#%% Convert "annotated_imgs" folder to COCO Camera Traps
-
-from megadetector.utils.path_utils import recursive_file_list
-
-species_name_to_category_id = {}
-
-filenames_relative = \
-    recursive_file_list(export_base,return_relative_paths=True,recursive=True,convert_slashes=True)
-
-short_species_names = ['aves','cn-owls','cn-francolins','cn-raptors',
-                       'columbidae','equus zebra hartmannae','numididae',
-                       'pteroclidae']
-
-images = []
-annotations = []
-n_datetimes = 0
-
-for fn in filenames_relative:
-
-    assert fn.lower().endswith('.jpg') or fn.lower().endswith('.avi') or fn.lower().endswith('.json')
-
-    if fn.lower().endswith('.json'):
-        continue
-
-    tokens = fn.split('/')
-    species_name = tokens[0]
-    assert species_name in short_species_names or len(species_name.split(' ')) == 2
-
-    if species_name not in species_name_to_category_id:
-        category_id = len(species_name_to_category_id)
-        species_name_to_category_id[species_name] = category_id
-    else:
-        category_id = species_name_to_category_id[species_name]
-
-    im = {}
-    im['id'] = fn
-    im['file_name'] = fn
-    im['location'] = 'unknown'
-
-    fn_for_datetime_lookup = 'annotated-imgs/' + fn
-    if (fn_for_datetime_lookup in filename_to_datetime) and \
-        (filename_to_datetime[fn_for_datetime_lookup] is not None):
-        im['datetime'] = filename_to_datetime[fn_for_datetime_lookup]
-        n_datetimes += 1
-
-    ann = {}
-    ann['image_id'] = im['id']
-    ann['id'] = im['id'] + ':ann_00'
-    ann['sequence_level_annotation'] = False
-    ann['category_id'] = category_id
-
-    images.append(im)
-    annotations.append(ann)
-
-# ...for each filename
-
-categories = []
-for species_name in species_name_to_category_id:
-    category = {}
-    category['name'] = species_name
-    category['id'] = species_name_to_category_id[species_name]
-    categories.append(category)
-
-info = {}
-info['version'] = '2024.07.15_00'
-info['description'] = 'Desert Lion Camera Traps'
-
-d = {}
-d['info'] = info
-d['images'] = images
-d['annotations'] = annotations
-d['categories'] = categories
-
-with open(output_file,'w') as f:
-    json.dump(d,f,indent=1)
-
-
-#%% Integrity check
-
-from megadetector.data_management.databases.integrity_check_json_db import \
-    IntegrityCheckOptions, integrity_check_json_db
-
-integrity_check_options = IntegrityCheckOptions()
-
-integrity_check_options.baseDir = export_base
-integrity_check_options.bCheckImageExistence = True
-integrity_check_options.bRequireLocation = True
-integrity_check_options.nThreads = 10
-integrity_check_options.verbose = True
-integrity_check_options.allowIntIDs = False
-
-integrity_check_results = integrity_check_json_db(output_file,integrity_check_options)
-
-
-#%% Preview
-
-from megadetector.visualization.visualize_db \
-    import DbVizOptions, visualize_db
-
-viz_options = DbVizOptions()
-viz_options.num_to_visualize = 2500
-
-html_output_file,_ = visualize_db(output_file, preview_dir, export_base, options=viz_options)
-
-from megadetector.utils.path_utils import open_file
-open_file(html_output_file)
-
-
-#%% Make MD results paths line up with the output
-
-md_results_remapped_file = md_results_file.replace('-all','')
-assert md_results_remapped_file != md_results_file
-
-with open(output_file,'r') as f:
-    d = json.load(f)
-
-image_filenames = [im['file_name'] for im in d['images']]
-image_filenames_set = set(image_filenames)
-
-with open(md_results_file,'r') as f:
-    md_results = json.load(f)
-
-md_results_images_remapped = []
-
-# im = md_results['images'][0]
-for im in md_results['images']:
-    assert im['file'].startswith('annotated-imgs/') or im['file'].startswith('bboxes/')
-    if im['file'].startswith('bboxes/'):
-        continue
-    im['file'] = im['file'].replace('annotated-imgs/','')
-    md_results_images_remapped.append(im)
-
-print('Keeping {} of {} images in MD results'.format(
-    len(md_results_images_remapped),len(md_results['images'])))
-
-d['images'] = md_results_images_remapped
-
-with open(md_results_remapped_file,'w') as f:
-    json.dump(d,f,indent=1)
-
-
-#%% Zip MD results and COCO file
-
-from megadetector.utils.path_utils import zip_file
-
-zip_file(input_fn=md_results_remapped_file, output_fn=None, overwrite=True, verbose=True, compresslevel=9)
-zip_file(input_fn=output_file, output_fn=None, overwrite=True, verbose=True, compresslevel=9)
-
-
-#%% Zip images
-
-from megadetector.utils.path_utils import zip_folder
-
-zip_folder(input_folder=export_base, output_fn=output_zipfile, overwrite=True, verbose=True, compresslevel=0)
-
-
-#%% Copy lion images to a folder for thumbnail selection
-
-review_folder_base = r'g:/temp/thumbnail-candidates'
-os.makedirs(review_folder_base,exist_ok=True)
-
-source_file_to_target_file = {}
-
-# fn_relative = image_files[0]
-for fn_relative in image_files:
-    assert '\\' not in fn_relative
-    if '/lion/' not in fn_relative and '/panthera leo/' not in fn_relative:
-        continue
-    fn_abs_source = input_base_folder + '/' + fn_relative
-    assert os.path.isfile(fn_abs_source)
-    fn_abs_dest = review_folder_base + '/' + fn_relative.replace('/','_')
-    source_file_to_target_file[fn_abs_source] = fn_abs_dest
-
-from megadetector.utils.path_utils import parallel_copy_files
-
-parallel_copy_files(input_file_to_output_file=source_file_to_target_file,
-                    max_workers=16,
-                    use_threads=True,
-                    overwrite=False,verbose=False)
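Both removed importer scripts finish by assembling the same COCO Camera Traps layout (an 'info' dict plus 'images', 'annotations', and 'categories' lists) and writing it with json.dump. A minimal sketch of that output structure follows; the 'info' strings come from the scripts above, while the image, annotation, and category entries are hypothetical placeholders for illustration only.

# Hedged sketch only: the COCO Camera Traps structure built by the removed
# importer scripts. Entry values below are hypothetical.
import json

output_data = {
    'info': {
        'version': '2021.07.19',
        'description': 'Idaho Camera traps',
        'contributor': 'Images acquired by the Idaho Department of Fish and Game, dataset curated by Sara Beery',
    },
    'images': [
        # one record per image; ids and paths are placeholders
        {'id': 'image_000', 'file_name': 'loc01/image_000.jpg', 'location': '1'},
    ],
    'annotations': [
        # one image-level label per image
        {'id': 'ann_000', 'image_id': 'image_000', 'category_id': 0},
    ],
    'categories': [
        {'id': 0, 'name': 'empty'},
    ],
}

with open('idaho_camera_traps.json', 'w') as f:
    json.dump(output_data, f, indent=2)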