megadetector-5.0.9-py3-none-any.whl → megadetector-5.0.11-py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (226)
  1. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.9.dist-info/RECORD +0 -224
  214. megadetector-5.0.9.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
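
Taken together, the file list shows that every code package shipped at the top level of the 5.0.9 wheel (api, classification, data_management, detection, md_utils, md_visualization, taxonomy_mapping, plus docs) is removed in 5.0.11, and the new RECORD's five entries suggest the 5.0.11 wheel ships little beyond its dist-info metadata. A minimal sketch for reproducing this file list locally, assuming the two wheels have already been fetched (for example with `pip download megadetector==5.0.9 --no-deps`, and likewise for 5.0.11); wheels are ordinary zip archives, so their contents can be diffed directly:

from zipfile import ZipFile

def wheel_contents(path):
    # A wheel is a zip archive; namelist() returns every file it ships
    with ZipFile(path) as zf:
        return set(zf.namelist())

old = wheel_contents('megadetector-5.0.9-py3-none-any.whl')
new = wheel_contents('megadetector-5.0.11-py3-none-any.whl')

print('removed in 5.0.11:')
for name in sorted(old - new):
    print('  -', name)
print('added in 5.0.11:')
for name in sorted(new - old):
    print('  +', name)
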
data_management/importers/snapshot_safari_importer.py
@@ -1,758 +0,0 @@
- """
-
- Import a Snapshot Safari project (one project, one season)
-
- Before running this script:
-
- * Mount the blob container where the images live, or copy the
-   images to local storage
-
- What this script does:
-
- * Creates a .json file
- * Creates zip archives of the season without humans.
- * Copies animals and humans to separate folders
-
- After running this script:
-
- * Create or update LILA page
- * Push zipfile and unzipped images to LILA
- * Push unzipped humans to wildlifeblobssc
- * Delete images from UMN upload storage
-
- Snapshot Serengeti is handled specially, because we're dealing with bounding
- boxes too. See snapshot_serengeti_lila.py.
-
- """
-
- #%% Imports
-
- import pandas as pd
- import json
- import os
- import uuid
- import humanfriendly
- import time
- import pprint
- import numpy as np
- import shutil
-
- from PIL import Image
- from multiprocessing.pool import ThreadPool
- from tqdm import tqdm
- from zipfile import ZipFile
- import zipfile
-
- from md_utils import path_utils
- from md_visualization import visualize_db
-
-
- #%% Constants
-
- # project_name = 'XXX'; season_name = 'S1'; project_friendly_name = 'Snapshot Unknown'
- # project_name = 'SER'; season_name = 'S1-11'; project_friendly_name = 'Snapshot Serengeti'
- # project_name = 'KRU'; season_name = 'S1'; project_friendly_name = 'Snapshot Kruger'
- # project_name = 'CDB'; season_name = 'S1'; project_friendly_name = 'Snapshot Camdeboo'
- # project_name = 'MTZ'; season_name = 'S1'; project_friendly_name = 'Snapshot Mountain Zebra'
- # project_name = 'ENO'; season_name = 'S1'; project_friendly_name = 'Snapshot Enonkishu'
- # project_name = 'KAR'; season_name = 'S1'; project_friendly_name = 'Snapshot Karoo'
- # project_name = 'KGA'; season_name = 'S1'; project_friendly_name = 'Snapshot Kgalagadi'
- project_name = 'SER'; season_name = 'S1'; project_friendly_name = 'APN'
-
- json_version = '2.1'
-
- snapshot_safari_input_base = 'f:\\'
- snapshot_safari_output_base = r'g:\temp\snapshot-safari-out'
-
- category_mappings = {'blank':'empty'}
-
- process_images_n_threads = 20
-
- max_files_per_archive = None
-
- #%% Folder/file creation
-
- # E.g. KRU_S1
- project_season_name = project_name + '_' + season_name
-
- # E.g. Z:\KRU
- project_base = os.path.join(snapshot_safari_input_base,project_name)
- assert(os.path.isdir(project_base))
-
- # E.g. Z:\KRU\KRU_S1
- season_base = os.path.join(project_base,project_season_name)
- assert(os.path.isdir(season_base))
-
- # Contains annotations for each capture event (sequence)
- annotation_file = os.path.join(project_base,project_season_name + '_report_lila.csv')
-
- # Maps image IDs to filenames; each line looks like:
- #
- # KRU_S1#1#1#2,3,KRU_S1/1/1_R1/KRU_S1_1_R1_IMAG0004.JPG
- image_inventory_file = os.path.join(project_base,project_season_name + '_report_lila_image_inventory.csv')
-
- # Total number of each answer to each question, e.g. total number of times each species was identified
- #
- # Not used here
- response_overview_file = os.path.join(project_base,project_season_name + '_report_lila_overview.csv')
-
- assert(os.path.isfile(annotation_file))
- assert(os.path.isfile(image_inventory_file))
- assert(os.path.isfile(response_overview_file))
-
- # Create output folders
- assert(os.path.isdir(snapshot_safari_output_base))
-
- output_base = os.path.join(snapshot_safari_output_base,project_name)
-
- json_filename = os.path.join(output_base,project_friendly_name.replace(' ','') + '_' + season_name \
-     + '_v' + json_version + '.json')
- species_list_filename = os.path.join(output_base,project_friendly_name.replace(' ','') + '_' + season_name \
-     + '_v' + json_version + '.species_list.csv')
- summary_info_filename = os.path.join(output_base,project_friendly_name.replace(' ','') + '_' + season_name \
-     + '_v' + json_version + '.summary_info.txt')
-
- # Images will be placed in a season-specific folder inside this (the source data includes
- # this in path names)
- output_public_folder = os.path.join(output_base,project_name + '_public')
-
- output_public_zipfile = os.path.join(output_base,project_season_name + '.lila.zip')
- output_private_folder = os.path.join(output_base,project_season_name + '_private')
- output_preview_folder = os.path.join(output_base,project_season_name + '_preview')
-
- os.makedirs(output_base,exist_ok=True)
- os.makedirs(output_public_folder,exist_ok=True)
- os.makedirs(output_private_folder,exist_ok=True)
- os.makedirs(output_preview_folder,exist_ok=True)
-
-
- #%% Load metadata files
-
- image_table = pd.read_csv(image_inventory_file)
- annotation_table = pd.read_csv(annotation_file)
-
- print('Finished loading {} image mappings and {} annotations'.format(len(image_table),len(annotation_table)))
-
-
- #%% Convert to dictionaries (prep)
-
- im_id_to_image = {}
- images = []
- seq_id_to_images = {}
- seq_id_to_annotations = {}
-
- annotations = []
- categories = []
-
- species_to_category = {}
-
- empty_category_id = 0
- empty_category_name = 'empty'
-
- empty_cat = {}
- empty_cat['id'] = empty_category_id
- empty_cat['name'] = empty_category_name
- empty_cat['count'] = 0
- species_to_category['empty'] = empty_cat
- categories.append(empty_cat)
-
- next_category_id = empty_category_id + 1
-
-
- #%% Convert to dictionaries (loops)
-
- # iterrows() is a terrible way to do this, but this is one of those days
- # where I want to get this done, not get better at Python.
-
- print('Processing image table')
-
- start_time = time.time()
-
- # irow = 0; row = image_table.iloc[0]
- for iRow,row in tqdm(image_table.iterrows(),total=len(image_table)):
-
-     # Loaded as an int64, converting to int here
-     frame_num = int(row['image_rank_in_capture'])
-     assert frame_num > 0
-     sequence_id = row['capture_id']
-     frame_num = int(frame_num)
-     filename = row['image_path_rel']
-     tokens = filename.split('.')
-     assert(len(tokens)==2)
-     assert(tokens[1] == 'JPG')
-     id = tokens[0]
-     im = {}
-     im['id'] = id
-     im['file_name'] = filename
-     im['frame_num'] = frame_num
-     im['seq_id'] = sequence_id
-
-     assert id not in im_id_to_image
-     im_id_to_image[id] = im
-     seq_id_to_images.setdefault(sequence_id,[]).append(im)
-
-     images.append(im)
-
- # ...for each row in the image table
-
- # Make sure image IDs are what we think they are
- for im in tqdm(images):
-     assert im['id'] == im['file_name'].replace('.JPG','')
-
- print('Processing annotation table')
-
- def is_float_and_nan(x):
-     return isinstance(x,float) and np.isnan(x)
-
- n_invalid_dates = 0
-
- for iRow,row in tqdm(annotation_table.iterrows(),total=len(annotation_table)):
-
-     sequence_id = row['capture_id']
-
-     species = row['question__species'].lower()
-     if species in category_mappings:
-         species = category_mappings[species]
-
-     category = None
-
-     if species not in species_to_category:
-         category = {}
-         category['id'] = next_category_id
-         next_category_id = next_category_id + 1
-         category['name'] = species
-         category['count'] = 1
-         categories.append(category)
-         species_to_category[species] = category
-     else:
-         category = species_to_category[species]
-         category['count'] += 1
-
-     ann = {}
-     ann['sequence_level_annotation'] = True
-     ann['id'] = str(uuid.uuid1())
-     ann['category_id'] = category['id']
-     ann['seq_id'] = sequence_id
-
-     ann['season'] = row['season']
-     ann['site'] = row['site']
-     if is_float_and_nan(row['capture_date_local']) or is_float_and_nan(row['capture_time_local']):
-         ann['datetime'] = ''
-         n_invalid_dates += 1
-     else:
-         ann['datetime'] = row['capture_date_local'] + ' ' + row['capture_time_local']
-     ann['subject_id'] = row['subject_id']
-     ann['count'] = row['question__count_median']
-     ann['standing'] = row['question__standing']
-     ann['resting'] = row['question__resting']
-     ann['moving'] = row['question__moving']
-     ann['interacting'] = row['question__interacting']
-     ann['young_present'] = row['question__young_present']
-
-     seq_id_to_annotations.setdefault(sequence_id,[]).append(ann)
-
-     annotations.append(ann)
-
- # ...for each row in the annotation table
-
- elapsed = time.time() - start_time
- print('Done converting tables to dictionaries in {}'.format(humanfriendly.format_timespan(elapsed)))
-
- print('Converted {} annotations, {} images, {} categories ({} invalid dates)'.format(
-     len(annotations),len(images),len(categories),n_invalid_dates))
-
-
- #%% Take a look at categories
-
- assert(len(im_id_to_image)==len(images))
- print('Loaded metadata about {} images and {} sequences'.format(len(images),len(seq_id_to_annotations)))
-
- categories_by_species = sorted(categories, key = lambda i: i['name'])
- categories_by_count = sorted(categories, key = lambda i: i['count'])
-
- pp = pprint.PrettyPrinter(depth=6)
-
- # print('\nCategories by species:')
- # pp.pprint(categories_by_species)
- print('\nCategories by count:')
- pp.pprint(categories_by_count)
-
-
- #%% Fill in some image fields we didn't have when we created the image table
-
- # width, height, corrupt, seq_num_frames, location, datetime
-
- def process_image(im):
-
-     im['width'] = -1
-     im['height'] = -1
-     im['corrupt'] = False
-     im['location'] = 'unknown'
-     im['seq_num_frames'] = -1
-     im['datetime'] = 'unknown'
-     im['status'] = ''
-
-     if im['seq_id'] not in seq_id_to_annotations:
-         im['status'] = 'no_annotation'
-         return im
-
-     seq_annotations = seq_id_to_annotations[im['seq_id']]
-
-     # Every annotation in this list should have the same sequence ID
-     assert all(ann['seq_id'] == im['seq_id'] for ann in seq_annotations) , 'Error on image {}'.format(im['id'])
-
-     # Figure out "seq_num_frames", which really should be done in a separate loop;
-     # there's no reason to do this redundantly for every image
-     images_in_sequence = seq_id_to_images[im['seq_id']]
-
-     # Every image in this sequence should point back to the same sequence
-     assert all(seqim['seq_id'] == im['seq_id'] for seqim in images_in_sequence), 'Error on image {}'.format(im['id'])
-
-     frame_nums = [seqim['frame_num'] for seqim in images_in_sequence]
-     seq_num_frames = max(frame_nums)
-     im['seq_num_frames'] = seq_num_frames
-
-     im['location'] = str(seq_annotations[0]['site'])
-
-     # Every annotation in this list should have the same location
-     assert all(str(ann['site']) == im['location'] for ann in seq_annotations), 'Error on image {}'.format(im['id'])
-
-     im['datetime'] = seq_annotations[0]['datetime']
-
-     # Every annotation in this list should have the same datetime
-     assert all(ann['datetime'] == im['datetime'] for ann in seq_annotations), 'Error on image {}'.format(im['id'])
-
-     # Is this image on disk?
-     fullpath = os.path.join(project_base,im['file_name'])
-     if not os.path.isfile(fullpath):
-         im['status'] = 'not_on_disk'
-         return im
-
-     try:
-
-         pil_im = Image.open(fullpath)
-         im['height'] = pil_im.height
-         im['width'] = pil_im.width
-
-     except:
-
-         im['corrupt'] = True
-
-     return im
-
-
- if process_images_n_threads <= 1:
-
-     # iImage = 0; im = images[0]
-     for iImage,im in tqdm(enumerate(images),total=len(images)):
-         process_image(im)
-     # ...for each image
-
- else:
-
-     pool = ThreadPool(process_images_n_threads)
-
-     # images_processed = pool.map(process_image, images)
-     # images_processed = list(tqdm(pool.imap_unordered(process_image, images), total=len(images)))
-     images_processed = list(tqdm(pool.imap(process_image, images), total=len(images)))
-
- print('Finished adding missing fields to {} images'.format(len(images_processed)))
-
-
- #%% Count missing/corrupted images
-
- n_missing = 0
- n_corrupt = 0
- n_no_annotation = 0
-
- corrupted_images = []
- missing_images = []
- no_annotation_images = []
-
- for im in tqdm(images):
-
-     if im['corrupt']:
-         n_corrupt += 1
-         corrupted_images.append(im['file_name'])
-     if im['status'] == '':
-         continue
-     elif im['status'] == 'not_on_disk':
-         n_missing += 1
-         missing_images.append(im['file_name'])
-     elif im['status'] == 'no_annotation':
-         n_no_annotation += 1
-         no_annotation_images.append(im['file_name'])
-     else:
-         raise ValueError('Unrecognized status {}'.format(im['status']))
-
- print('\nOf {} images: {} missing, {} corrupt, {} no annotation'.format(len(images),
-     n_missing, n_corrupt, n_no_annotation))
-
-
- #%% Print distribution of sequence lengths
-
- seq_id_to_sequence_length = {}
-
- for im in tqdm(images):
-
-     seq_id = im['seq_id']
-     seq_num_frames = im['seq_num_frames']
-     if seq_id not in seq_id_to_sequence_length:
-         seq_id_to_sequence_length[seq_id] = seq_num_frames
-
- sequence_lengths = list(seq_id_to_sequence_length.values())
-
- print('\nMean/min/max sequence length is {}/{}/{}'.format(np.mean(sequence_lengths),min(sequence_lengths),max(sequence_lengths)))
-
-
- #%% Replicate annotations across images
-
- annotations_replicated = []
-
- # iAnn = 0; ann = annotations[iAnn]
- for iAnn,ann in tqdm(enumerate(annotations), total=len(annotations)):
-
-     associated_images = seq_id_to_images[ann['seq_id']]
-     assert len(associated_images) > 0
-     for associated_image in associated_images:
-         new_ann = ann.copy()
-         new_ann['image_id'] = associated_image['id']
-         new_ann['id'] = str(uuid.uuid1())
-         annotations_replicated.append(new_ann)
-
- print('\nCreated {} replicated annotations from {} original annotations'.format(len(annotations_replicated),
-     len(annotations)))
-
- annotations = annotations_replicated
-
-
- #%% See what files are on disk but not annotated
-
- print('Listing images from disk...')
- start_time = time.time()
- image_files = path_utils.find_images(project_base,bRecursive=True)
- elapsed = time.time() - start_time
- print('Finished listing {} files in {}'.format(len(image_files),humanfriendly.format_timespan(elapsed)))
-
- files_not_in_db = []
-
- for fn in tqdm(image_files):
-     id = os.path.relpath(fn,project_base).replace('\\','/').replace('.JPG','')
-     if id not in im_id_to_image:
-         files_not_in_db.append(fn)
-
- print('{} files not in the database (of {})'.format(len(files_not_in_db),len(image_files)))
- del fn
-
-
- #%% Validate image and annotation uniqueness
-
- tmp_img_ids = set()
- tmp_ann_ids = set()
-
- for im in tqdm(images):
-     assert im['id'] not in tmp_img_ids
-     tmp_img_ids.add(im['id'])
-
- for ann in tqdm(annotations):
-     assert ann['id'] not in tmp_ann_ids
-     tmp_ann_ids.add(ann['id'])
-
- print('Finished uniqueness check')
-
-
- #%% Minor updates to fields
-
- for ann in tqdm(annotations):
-     ann['location'] = ann['site']
-     del ann['site']
-     try:
-         icount = ann['count']
-     except:
-         icount = -1
-     ann['count'] = icount
-
- for im in tqdm(images):
-     del im['status']
-
- for c in categories:
-     del c['count']
-
-
- #%% Write .json file
-
- info = {}
- info['version'] = json_version
- info['description'] = 'Camera trap data from the {} program'.format(project_friendly_name)
- info['date_created'] = '2019'
- info['contributor'] = 'Snapshot Safari'
-
- data = {}
- data['info'] = info
- data['categories'] = categories
- data['annotations'] = annotations
- data['images'] = images
-
- print('Writing data to {}'.format(json_filename))
-
- s = json.dumps(data,indent=1)
- with open(json_filename, "w+") as f:
-     f.write(s)
-
-
- #%% Create a list of human files
-
- human_image_ids = set()
- human_id = species_to_category['human']['id']
-
- # ann = annotations[0]
- for ann in tqdm(annotations):
-     if ann['category_id'] == human_id:
-         human_image_ids.add(ann['image_id'])
-
- print('Found {} images with humans'.format(len(human_image_ids)))
-
-
- #%% Create public archive and public/private folders
-
- debug_max_files = -1
- n_dot = 1000
- n_print = 10000
-
- n_images_added = 0
- zipfilename = output_public_zipfile
- zip = ZipFile(zipfilename,'w')
-
- print('Creating archive {}'.format(output_public_zipfile))
-
- # im = images[0]
- for iImage,im in tqdm(enumerate(images),total=len(images)):
-
-     # E.g. KRU_S1/1/1_R1/KRU_S1_1_R1_IMAG0001.JPG
-     im_relative_path = im['file_name']
-     im_absolute_path = os.path.join(project_base,im_relative_path)
-     assert(os.path.isfile(im_absolute_path))
-
-     image_is_private = (im['id'] in human_image_ids)
-
-     if image_is_private:
-
-         # Copy to private output folder
-         output_file = os.path.join(output_private_folder,im_relative_path)
-         os.makedirs(os.path.dirname(output_file),exist_ok=True)
-         shutil.copyfile(src=im_absolute_path,dst=output_file)
-         continue
-
-     # Add to zipfile
-     n_images_added += 1
-
-     # Possibly start a new archive
-     if (max_files_per_archive is not None) and (n_images_added >= max_files_per_archive):
-         zip.close()
-         zipfilename = zipfilename.replace('.zip','.{}.zip'.format(n_images_added))
-         print('Starting new archive: {}'.format(zipfilename))
-         zip = ZipFile(zipfilename,'w')
-         n_images_added = 0
-
-     if (n_images_added % n_dot)==0:
-         print('.',end='')
-     if (n_images_added % n_print)==0:
-         print('{} images added to {}'.format(n_images_added,zipfilename))
-     if debug_max_files > 0 and n_images_added > debug_max_files:
-         break
-
-     source_file = os.path.join(project_base,im_relative_path)
-     dest_file = im['file_name']
-     zip.write(source_file,dest_file,zipfile.ZIP_STORED)
-
-     # Copy to public output folder
-     output_file = os.path.join(output_public_folder,im_relative_path)
-     os.makedirs(os.path.dirname(output_file),exist_ok=True)
-     shutil.copyfile(src=im_absolute_path,dst=output_file)
-
- # ...for each image
-
- zip.close()
-
- print('\nFinished writing {}, added {} files'.format(zipfilename,n_images_added))
-
-
- #%% Consistency-check .json file
-
- from data_management.databases import integrity_check_json_db
-
- options = integrity_check_json_db.IntegrityCheckOptions()
- options.baseDir = output_public_folder
- options.bCheckImageSizes = False
- options.bCheckImageExistence = True
- options.bFindUnusedImages = False
-
- sortedCategories, data, errorInfo = integrity_check_json_db.integrity_check_json_db(json_filename,options)
-
- # This will produce some validation errors, because this zipfile doesn't include humans
- assert(len(errorInfo['validationErrors']) == len(human_image_ids))
-
-
- #%% Zip up .json and .csv files
-
- def zip_single_file(fn,zipfilename=None):
-     '''
-     Zips a single file fn, by default to fn.zip
-
-     Discards path information, only uses fn's base name.
-     '''
-     if zipfilename is None:
-         zipfilename = fn + '.zip'
-
-     print('Zipping {} to {}'.format(fn,zipfilename))
-     with ZipFile(zipfilename,'w') as zip:
-         source_file = fn
-         dest_file = os.path.basename(fn)
-         zip.write(source_file,dest_file,zipfile.ZIP_DEFLATED)
-     return zipfilename
-
- def zip_files_to_flat(filenames,zipfilename):
-     '''
-     Remove path information from everything in [filenames] and
-     zip to a flat file. Does not check uniqueness.
-     '''
-     with ZipFile(zipfilename,'w') as zip:
-         for fn in filenames:
-             source_file = fn
-             dest_file = os.path.basename(fn)
-             zip.write(source_file,dest_file,zipfile.ZIP_DEFLATED)
-
- zip_single_file(json_filename)
- zip_files_to_flat([annotation_file,image_inventory_file],os.path.join(output_base,project_season_name + '.csv.zip'))
-
- print('Finished zipping .csv and .json files')
-
-
- #%% When I skip to this part (using a pre-rendered .json file)
-
- if False:
-
-     #%%
-
-     species_to_category = {}
-     for cat in categories:
-         species_to_category[cat['name']] = cat
-
-     #%%
-
-     human_image_ids = set()
-     human_id = species_to_category['human']['id']
-
-     # ann = annotations[0]
-     for ann in tqdm(annotations):
-         if ann['category_id'] == human_id:
-             human_image_ids.add(ann['image_id'])
-
-     print('Found {} images with humans'.format(len(human_image_ids)))
-
-
- #%% Summary prep for LILA
-
- with open(json_filename,'r') as f:
-     data = json.load(f)
-
- categories = data['categories']
- annotations = data['annotations']
- images = data['images']
-
- n_empty = 0
- n_species = len(categories)
- n_images = len(images)
-
- sequences = set()
- for im in tqdm(images):
-     sequences.add(im['seq_id'])
-
- category_id_to_count = {}
- for ann in tqdm(annotations):
-     if ann['category_id'] == 0:
-         n_empty += 1
-     if ann['category_id'] in category_id_to_count:
-         category_id_to_count[ann['category_id']] += 1
-     else:
-         category_id_to_count[ann['category_id']] = 1
-
- empty_categories = []
- for c in categories:
-     if c['id'] in category_id_to_count:
-         c['count'] = category_id_to_count[c['id']]
-     else:
-         empty_categories.append(c)
-         c['count'] = 0
-
- categories = [c for c in categories if c['count'] > 0]
- sorted_categories = sorted(categories, key=lambda k: k['count'], reverse=True)
-
- with open(species_list_filename,'w') as f:
-     for c in sorted_categories:
-         f.write(c['name'] + ',' + str(c['count']) + '\n')
-
- n_images = len(images) - len(human_image_ids)
- n_sequences = len(sequences)
- percent_empty = (100*n_empty)/len(images)
- n_categories = len(categories)
- top_categories = []
-
- for i_category in range(0,len(sorted_categories)):
-     c = sorted_categories[i_category]
-     cat_name = c['name']
-     if cat_name != 'human' and cat_name != 'empty':
-         top_categories.append(cat_name)
-     if len(top_categories) == 3:
-         break
-
- s = 'This data set contains {} sequences of camera trap images, totaling {} images, from the {} project. Labels are provided for {} categories, primarily at the species level (for example, the most common labels are {}, {}, and {}). Approximately {:.2f}% of images are labeled as empty. A full list of species and associated image counts is available <a href="{}">here</a>.'.format(
-     n_sequences,n_images,project_friendly_name,n_categories,
-     top_categories[0],top_categories[1],top_categories[2],
-     percent_empty,
-     'https://lilablobssc.blob.core.windows.net/snapshot-safari/{}/{}_{}_v{}.species_list.csv'.format(
-         project_name,project_friendly_name.replace(' ',''),season_name,json_version))
- print(s)
-
- with open(summary_info_filename,'w') as f:
-     f.write(s)
-
-
- #%% Generate preview, integrity-check labels
-
- viz_options = visualize_db.DbVizOptions()
- viz_options.num_to_visualize = 5000
- viz_options.trim_to_images_with_bboxes = False
- viz_options.add_search_links = True
- viz_options.sort_by_filename = False
- viz_options.parallelize_rendering = True
- viz_options.classes_to_exclude = ['test','empty']
- # viz_options.classes_to_include = ['jackalblackbacked','bustardkori']
- html_output_file, image_db = visualize_db.visualize_db(db_path=json_filename,
-     output_dir=output_preview_folder,
-     image_base_dir=output_public_folder,
-     options=viz_options)
- os.startfile(html_output_file)
-
-
- #%% Scrap
-
- if False:
-
-     pass
-
-     #%% Find annotations for a particular image
-
-     fn = missing_images[1000]
-     id = fn.replace('.JPG','')
-     im = im_id_to_image[id]
-     seq_id = im['seq_id']
-     matching_annotations = [ann for ann in annotations if ann['seq_id'] == seq_id]
-     print(matching_annotations)
-
-     #%% Write a list of missing images
-
-     with open(os.path.join(output_base,project_name + '_' + season_name + '_missing_images.txt'), 'w') as f:
-         for fn in missing_images:
-             f.write('{}\n'.format(fn))
-
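
Because all of the 5.0.9 top-level packages are deleted in this release, code that imports them the way the script above does (e.g. `from md_utils import path_utils`) will raise ImportError after upgrading. A small audit sketch; the package list is read off the file list in this diff and is not an official megadetector API surface:

import importlib.util

# Top-level packages shipped by megadetector 5.0.9 and removed in 5.0.11,
# as read off the file list above (assumption: this list is complete)
legacy_packages = ['api', 'classification', 'data_management', 'detection',
                   'md_utils', 'md_visualization', 'taxonomy_mapping']

# find_spec() returns None when a module cannot be imported
still_importable = [p for p in legacy_packages
                    if importlib.util.find_spec(p) is not None]

print('legacy top-level packages still importable:', still_importable or 'none')

Note that names this generic can also resolve to unrelated installed packages, which is one plausible reason not to ship them at the top level of a wheel.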