megadetector 5.0.11__py3-none-any.whl → 5.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. Click here for more details.

Files changed (201)
  1. megadetector/api/__init__.py +0 -0
  2. megadetector/api/batch_processing/__init__.py +0 -0
  3. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  4. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  5. megadetector/api/batch_processing/api_core/batch_service/score.py +439 -0
  6. megadetector/api/batch_processing/api_core/server.py +294 -0
  7. megadetector/api/batch_processing/api_core/server_api_config.py +98 -0
  8. megadetector/api/batch_processing/api_core/server_app_config.py +55 -0
  9. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +220 -0
  10. megadetector/api/batch_processing/api_core/server_job_status_table.py +152 -0
  11. megadetector/api/batch_processing/api_core/server_orchestration.py +360 -0
  12. megadetector/api/batch_processing/api_core/server_utils.py +92 -0
  13. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  14. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +46 -0
  15. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  16. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +152 -0
  17. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  18. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  19. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  20. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  21. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +126 -0
  22. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  23. megadetector/api/synchronous/__init__.py +0 -0
  24. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  25. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +152 -0
  26. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -0
  27. megadetector/api/synchronous/api_core/animal_detection_api/config.py +35 -0
  28. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  29. megadetector/api/synchronous/api_core/tests/load_test.py +110 -0
  30. megadetector/classification/__init__.py +0 -0
  31. megadetector/classification/aggregate_classifier_probs.py +108 -0
  32. megadetector/classification/analyze_failed_images.py +227 -0
  33. megadetector/classification/cache_batchapi_outputs.py +198 -0
  34. megadetector/classification/create_classification_dataset.py +627 -0
  35. megadetector/classification/crop_detections.py +516 -0
  36. megadetector/classification/csv_to_json.py +226 -0
  37. megadetector/classification/detect_and_crop.py +855 -0
  38. megadetector/classification/efficientnet/__init__.py +9 -0
  39. megadetector/classification/efficientnet/model.py +415 -0
  40. megadetector/classification/efficientnet/utils.py +610 -0
  41. megadetector/classification/evaluate_model.py +520 -0
  42. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  43. megadetector/classification/json_to_azcopy_list.py +63 -0
  44. megadetector/classification/json_validator.py +699 -0
  45. megadetector/classification/map_classification_categories.py +276 -0
  46. megadetector/classification/merge_classification_detection_output.py +506 -0
  47. megadetector/classification/prepare_classification_script.py +194 -0
  48. megadetector/classification/prepare_classification_script_mc.py +228 -0
  49. megadetector/classification/run_classifier.py +287 -0
  50. megadetector/classification/save_mislabeled.py +110 -0
  51. megadetector/classification/train_classifier.py +827 -0
  52. megadetector/classification/train_classifier_tf.py +725 -0
  53. megadetector/classification/train_utils.py +323 -0
  54. megadetector/data_management/__init__.py +0 -0
  55. megadetector/data_management/annotations/__init__.py +0 -0
  56. megadetector/data_management/annotations/annotation_constants.py +34 -0
  57. megadetector/data_management/camtrap_dp_to_coco.py +239 -0
  58. megadetector/data_management/cct_json_utils.py +395 -0
  59. megadetector/data_management/cct_to_md.py +176 -0
  60. megadetector/data_management/cct_to_wi.py +289 -0
  61. megadetector/data_management/coco_to_labelme.py +272 -0
  62. megadetector/data_management/coco_to_yolo.py +662 -0
  63. megadetector/data_management/databases/__init__.py +0 -0
  64. megadetector/data_management/databases/add_width_and_height_to_db.py +33 -0
  65. megadetector/data_management/databases/combine_coco_camera_traps_files.py +206 -0
  66. megadetector/data_management/databases/integrity_check_json_db.py +477 -0
  67. megadetector/data_management/databases/subset_json_db.py +115 -0
  68. megadetector/data_management/generate_crops_from_cct.py +149 -0
  69. megadetector/data_management/get_image_sizes.py +189 -0
  70. megadetector/data_management/importers/add_nacti_sizes.py +52 -0
  71. megadetector/data_management/importers/add_timestamps_to_icct.py +79 -0
  72. megadetector/data_management/importers/animl_results_to_md_results.py +158 -0
  73. megadetector/data_management/importers/auckland_doc_test_to_json.py +373 -0
  74. megadetector/data_management/importers/auckland_doc_to_json.py +201 -0
  75. megadetector/data_management/importers/awc_to_json.py +191 -0
  76. megadetector/data_management/importers/bellevue_to_json.py +273 -0
  77. megadetector/data_management/importers/cacophony-thermal-importer.py +796 -0
  78. megadetector/data_management/importers/carrizo_shrubfree_2018.py +269 -0
  79. megadetector/data_management/importers/carrizo_trail_cam_2017.py +289 -0
  80. megadetector/data_management/importers/cct_field_adjustments.py +58 -0
  81. megadetector/data_management/importers/channel_islands_to_cct.py +913 -0
  82. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +180 -0
  83. megadetector/data_management/importers/eMammal/eMammal_helpers.py +249 -0
  84. megadetector/data_management/importers/eMammal/make_eMammal_json.py +223 -0
  85. megadetector/data_management/importers/ena24_to_json.py +276 -0
  86. megadetector/data_management/importers/filenames_to_json.py +386 -0
  87. megadetector/data_management/importers/helena_to_cct.py +283 -0
  88. megadetector/data_management/importers/idaho-camera-traps.py +1407 -0
  89. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +294 -0
  90. megadetector/data_management/importers/jb_csv_to_json.py +150 -0
  91. megadetector/data_management/importers/mcgill_to_json.py +250 -0
  92. megadetector/data_management/importers/missouri_to_json.py +490 -0
  93. megadetector/data_management/importers/nacti_fieldname_adjustments.py +79 -0
  94. megadetector/data_management/importers/noaa_seals_2019.py +181 -0
  95. megadetector/data_management/importers/pc_to_json.py +365 -0
  96. megadetector/data_management/importers/plot_wni_giraffes.py +123 -0
  97. megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -0
  98. megadetector/data_management/importers/prepare_zsl_imerit.py +131 -0
  99. megadetector/data_management/importers/rspb_to_json.py +356 -0
  100. megadetector/data_management/importers/save_the_elephants_survey_A.py +320 -0
  101. megadetector/data_management/importers/save_the_elephants_survey_B.py +329 -0
  102. megadetector/data_management/importers/snapshot_safari_importer.py +758 -0
  103. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +665 -0
  104. megadetector/data_management/importers/snapshot_serengeti_lila.py +1067 -0
  105. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +150 -0
  106. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +153 -0
  107. megadetector/data_management/importers/sulross_get_exif.py +65 -0
  108. megadetector/data_management/importers/timelapse_csv_set_to_json.py +490 -0
  109. megadetector/data_management/importers/ubc_to_json.py +399 -0
  110. megadetector/data_management/importers/umn_to_json.py +507 -0
  111. megadetector/data_management/importers/wellington_to_json.py +263 -0
  112. megadetector/data_management/importers/wi_to_json.py +442 -0
  113. megadetector/data_management/importers/zamba_results_to_md_results.py +181 -0
  114. megadetector/data_management/labelme_to_coco.py +547 -0
  115. megadetector/data_management/labelme_to_yolo.py +272 -0
  116. megadetector/data_management/lila/__init__.py +0 -0
  117. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +97 -0
  118. megadetector/data_management/lila/add_locations_to_nacti.py +147 -0
  119. megadetector/data_management/lila/create_lila_blank_set.py +558 -0
  120. megadetector/data_management/lila/create_lila_test_set.py +152 -0
  121. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  122. megadetector/data_management/lila/download_lila_subset.py +178 -0
  123. megadetector/data_management/lila/generate_lila_per_image_labels.py +516 -0
  124. megadetector/data_management/lila/get_lila_annotation_counts.py +170 -0
  125. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  126. megadetector/data_management/lila/lila_common.py +300 -0
  127. megadetector/data_management/lila/test_lila_metadata_urls.py +132 -0
  128. megadetector/data_management/ocr_tools.py +874 -0
  129. megadetector/data_management/read_exif.py +681 -0
  130. megadetector/data_management/remap_coco_categories.py +84 -0
  131. megadetector/data_management/remove_exif.py +66 -0
  132. megadetector/data_management/resize_coco_dataset.py +189 -0
  133. megadetector/data_management/wi_download_csv_to_coco.py +246 -0
  134. megadetector/data_management/yolo_output_to_md_output.py +441 -0
  135. megadetector/data_management/yolo_to_coco.py +676 -0
  136. megadetector/detection/__init__.py +0 -0
  137. megadetector/detection/detector_training/__init__.py +0 -0
  138. megadetector/detection/detector_training/model_main_tf2.py +114 -0
  139. megadetector/detection/process_video.py +702 -0
  140. megadetector/detection/pytorch_detector.py +341 -0
  141. megadetector/detection/run_detector.py +779 -0
  142. megadetector/detection/run_detector_batch.py +1219 -0
  143. megadetector/detection/run_inference_with_yolov5_val.py +917 -0
  144. megadetector/detection/run_tiled_inference.py +934 -0
  145. megadetector/detection/tf_detector.py +189 -0
  146. megadetector/detection/video_utils.py +606 -0
  147. megadetector/postprocessing/__init__.py +0 -0
  148. megadetector/postprocessing/add_max_conf.py +64 -0
  149. megadetector/postprocessing/categorize_detections_by_size.py +163 -0
  150. megadetector/postprocessing/combine_api_outputs.py +249 -0
  151. megadetector/postprocessing/compare_batch_results.py +958 -0
  152. megadetector/postprocessing/convert_output_format.py +396 -0
  153. megadetector/postprocessing/load_api_results.py +195 -0
  154. megadetector/postprocessing/md_to_coco.py +310 -0
  155. megadetector/postprocessing/md_to_labelme.py +330 -0
  156. megadetector/postprocessing/merge_detections.py +401 -0
  157. megadetector/postprocessing/postprocess_batch_results.py +1902 -0
  158. megadetector/postprocessing/remap_detection_categories.py +170 -0
  159. megadetector/postprocessing/render_detection_confusion_matrix.py +660 -0
  160. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +211 -0
  161. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +83 -0
  162. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1631 -0
  163. megadetector/postprocessing/separate_detections_into_folders.py +730 -0
  164. megadetector/postprocessing/subset_json_detector_output.py +696 -0
  165. megadetector/postprocessing/top_folders_to_bottom.py +223 -0
  166. megadetector/taxonomy_mapping/__init__.py +0 -0
  167. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  168. megadetector/taxonomy_mapping/map_new_lila_datasets.py +150 -0
  169. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -0
  170. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +590 -0
  171. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  172. megadetector/taxonomy_mapping/simple_image_download.py +219 -0
  173. megadetector/taxonomy_mapping/species_lookup.py +834 -0
  174. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  175. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  176. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  177. megadetector/utils/__init__.py +0 -0
  178. megadetector/utils/azure_utils.py +178 -0
  179. megadetector/utils/ct_utils.py +612 -0
  180. megadetector/utils/directory_listing.py +246 -0
  181. megadetector/utils/md_tests.py +968 -0
  182. megadetector/utils/path_utils.py +1044 -0
  183. megadetector/utils/process_utils.py +157 -0
  184. megadetector/utils/sas_blob_utils.py +509 -0
  185. megadetector/utils/split_locations_into_train_val.py +228 -0
  186. megadetector/utils/string_utils.py +92 -0
  187. megadetector/utils/url_utils.py +323 -0
  188. megadetector/utils/write_html_image_list.py +225 -0
  189. megadetector/visualization/__init__.py +0 -0
  190. megadetector/visualization/plot_utils.py +293 -0
  191. megadetector/visualization/render_images_with_thumbnails.py +275 -0
  192. megadetector/visualization/visualization_utils.py +1536 -0
  193. megadetector/visualization/visualize_db.py +550 -0
  194. megadetector/visualization/visualize_detector_output.py +405 -0
  195. {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/METADATA +1 -1
  196. megadetector-5.0.12.dist-info/RECORD +199 -0
  197. megadetector-5.0.12.dist-info/top_level.txt +1 -0
  198. megadetector-5.0.11.dist-info/RECORD +0 -5
  199. megadetector-5.0.11.dist-info/top_level.txt +0 -1
  200. {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/LICENSE +0 -0
  201. {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/WHEEL +0 -0
@@ -0,0 +1,665 @@
1
+ """
2
+
3
+ snapshot_safari_importer_reprise.py
4
+
5
+ This is a 2023 update to snapshot_safari_importer.py. We do a bunch of things now that
6
+ we didn't do the last time we imported Snapshot data (like updating the big taxonomy)
7
+ file, and we skip a bunch of things now that we used to do (like generating massive
8
+ zipfiles). So, new year, new importer.
9
+
10
+ """
11
+
12
+ #%% Constants and imports
13
+
14
+ import os
15
+ import glob
16
+ import json
17
+ import shutil
18
+ import random
19
+
20
+ import pandas as pd
21
+
22
+ from tqdm import tqdm
23
+ from collections import defaultdict
24
+
25
+ from megadetector.utils import path_utils
26
+
27
+ input_base = '/media/user/Elements'
28
+ output_base = os.path.expanduser('~/data/snapshot-safari-metadata')
29
+ file_list_cache_file = os.path.join(output_base,'file_list.json')
30
+
31
+ assert os.path.isdir(input_base)
32
+ os.makedirs(output_base,exist_ok=True)
33
+
34
+ # We're going to copy all the .csv files to a faster location
35
+ annotation_cache_dir = os.path.join(output_base,'csv_files')
36
+ os.makedirs(annotation_cache_dir,exist_ok=True)
37
+
38
+
39
#%% List files

# Do a one-time enumeration of the entire drive; this will take a long time,
# but will save a lot of hassle later.

if os.path.isfile(file_list_cache_file):
    # Re-use the cached enumeration from a previous run
    print('Loading file list from {}'.format(file_list_cache_file))
    with open(file_list_cache_file,'r') as f:
        all_files = json.load(f)
else:
    all_files = glob.glob(os.path.join(input_base,'**','*.*'),recursive=True)
    # Remove Windows recycle-bin and system-metadata artifacts
    all_files = [fn for fn in all_files if '$RECYCLE.BIN' not in fn]
    all_files = [fn for fn in all_files if 'System Volume Information' not in fn]
    print('Enumerated {} files'.format(len(all_files)))
    with open(file_list_cache_file,'w') as f:
        json.dump(all_files,f,indent=1)
    print('Wrote file list to {}'.format(file_list_cache_file))
#%% Create derived lists

# Takes about 60 seconds

# Absolute paths --> paths relative to the drive root, with forward slashes
all_files_relative = [os.path.relpath(fn,input_base) for fn in all_files]
all_files_relative = [fn.replace('\\','/') for fn in all_files_relative]
all_files_relative_set = set(all_files_relative)

# CSV files are one of:
#
# _report_lila.csv (species/count/etc. for each capture)
# _report_lila_image_inventory.csv (maps captures to images)
# _report_lila_overview.csv (distribution of species)
csv_files = [fn for fn in all_files_relative if fn.endswith('.csv')]

all_image_files = path_utils.find_image_strings(all_files_relative)

print('Found a total of {} files, {} of which are images'.format(
    len(all_files_relative),len(all_image_files)))
#%% Copy all csv files to the annotation cache folder

# NOTE(review): paths are flattened to basenames here; if two projects ever
# use the same .csv filename, the later copy silently overwrites the earlier
# one — confirm basenames are unique across the drive.

# fn = csv_files[0]
for fn in csv_files:
    target_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
    source_file = os.path.join(input_base,fn)
    shutil.copyfile(source_file,target_file)
def read_cached_csv_file(fn):
    """
    Read the cached copy of a .csv file.

    Later cells refer to .csv files by their paths on the original hard
    drive; this loads the identically-named copy from the (faster)
    annotation cache folder instead.
    """

    basename = os.path.basename(fn)
    return pd.read_csv(os.path.join(annotation_cache_dir,basename))
#%% List project folders

# There are two formats for project folder names:
#
# APN
# Snapshot Cameo/DEB

# Maps a three-letter project code (e.g. "APN") to its folder relative to the
# drive root (e.g. "APN" or "Snapshot South Africa/BLO")
project_code_to_project_folder = {}

folders = os.listdir(input_base)
folders = [fn for fn in folders if (not fn.startswith('$') and \
    not 'System Volume' in fn)]

for fn in folders:
    if len(fn) == 3:
        # Top-level three-letter folder, e.g. "APN"
        assert fn not in project_code_to_project_folder
        project_code_to_project_folder[fn] = fn
    else:
        # "Snapshot <region>" folder containing three-letter subfolders
        assert 'Snapshot' in fn
        subfolders = os.listdir('/'.join([input_base,fn]))
        for subfn in subfolders:
            assert len(subfn) == 3
            # Project codes must be globally unique across regions
            assert subfn not in project_code_to_project_folder
            project_code_to_project_folder[subfn] = '/'.join([fn,subfn])

project_folder_to_project_code = {v: k for k, v in project_code_to_project_folder.items()}
project_codes = sorted(list(project_code_to_project_folder.keys()))
project_folders = sorted(list(project_code_to_project_folder.values()))
def file_to_project_folder(fn):
    """
    For a given filename relative to the drive root, return the corresponding
    project folder (also relative to the drive root).

    Args:
        fn (str): file path relative to the drive root, using forward slashes

    Returns:
        str: project folder, e.g. "APN" or "Snapshot South Africa/BLO"
    """

    tokens = fn.split('/')
    if len(tokens[0]) == 3:
        # E.g. "APN/..."
        project_folder = tokens[0]
    else:
        # E.g. "Snapshot South Africa/BLO/..."
        assert 'Snapshot' in tokens[0]
        project_folder = '/'.join(tokens[0:2])
    # Validate the result on both branches (previously only the "Snapshot"
    # branch was checked against the known project folders)
    assert project_folder in project_folders
    return project_folder
def file_to_project_code(fn):
    """
    For a given filename relative to the drive root, return the corresponding
    three-letter project code (e.g. "CDB").
    """

    project_folder = file_to_project_folder(fn)
    return project_folder_to_project_code[project_folder]
# Spot-check the folder/code mapping functions on both folder-naming formats
assert file_to_project_folder(
    'APN/APN_S2/DW/DW_R5/APN_S2_DW_R5_IMAG0003.JPG') == 'APN'
assert file_to_project_folder(
    'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
    'Snapshot South Africa/BLO'
assert file_to_project_code(
    'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
    'BLO'
#%% Map report and inventory files to codes

# Maps a three-letter project code to a list of per-season _report_lila.csv files
#
# E.g.:
#
# 'DHP': ['Snapshot South Africa/DHP/LILA_Reports/DHP_S1_report_lila.csv',
#         'Snapshot South Africa/DHP/LILA_Reports/DHP_S2_report_lila.csv',
#         'Snapshot South Africa/DHP/LILA_Reports/DHP_S3_report_lila.csv']
#
project_code_to_report_files = defaultdict(list)

# fn = csv_files[0]
for fn in csv_files:
    # Skip overview/inventory files; only the per-season report itself matches
    if 'report_lila.csv' not in fn:
        continue
    project_code = project_folder_to_project_code[file_to_project_folder(fn)]
    project_code_to_report_files[project_code].append(fn)

project_codes_with_no_reports = set()

for project_code in project_code_to_project_folder.keys():
    if project_code not in project_code_to_report_files:
        project_codes_with_no_reports.add(project_code)
        print('Warning: no report files available for {}'.format(project_code))
#%% Make sure that every report has a corresponding inventory file

# Flatten {code: [report files]} into a single list of report files
all_report_files = [item for sublist in project_code_to_report_files.values() \
    for item in sublist]

for fn in all_report_files:
    inventory_file = fn.replace('.csv','_image_inventory.csv')
    assert inventory_file in csv_files
#%% Count species based on overview and report files

# The overview and report files should produce the same counts; we'll verify this
# in the next cell.

species_to_count_overview = defaultdict(int)
species_to_count_report = defaultdict(int)

for report_file in all_report_files:

    overview_file = report_file.replace('.csv','_overview.csv')

    df = read_cached_csv_file(overview_file)

    for i_row,row in df.iterrows():

        # Overview files contain rows for several question types; we only
        # want the species rows
        if row['question'] == 'question__species':

            assert isinstance(row['answer'],str)
            assert isinstance(row['count'],int)
            species = row['answer']

            # Very short "species" strings should only be the mystery
            # annotations "0" and "1", explored in a later cell
            if len(species) < 3:
                assert species == '0' or species == '1'

            species_to_count_overview[species] += row['count']

    # ...for each capture in the overview file

    df = read_cached_csv_file(report_file)

    for i_row,row in df.iterrows():

        species = row['question__species']
        assert isinstance(species,str)

        # Ignore results from the blank/non-blank workflow
        if len(species) < 3:
            assert species == '0' or species == '1'
        species_to_count_report[species] += 1

    # ...for each capture in the report file

# ...for each report file
#%% Print counts

# Sort both count dicts in descending order of count
species_to_count_overview_sorted = \
    {k: v for k, v in sorted(species_to_count_overview.items(),
                             key=lambda item: item[1], reverse=True)}
species_to_count_report_sorted = \
    {k: v for k, v in sorted(species_to_count_report.items(),
                             key=lambda item: item[1], reverse=True)}

string_count = 0
non_blank_count = 0

for species in species_to_count_overview_sorted.keys():

    # The overview and report files should produce the same counts
    assert species_to_count_overview_sorted[species] == \
        species_to_count_report[species]
    count = species_to_count_overview_sorted[species]
    # "0" and "1" aren't real species names; exclude them from the
    # real-annotation totals
    if species not in ('0','1'):
        string_count += count
        if species != 'blank':
            non_blank_count += count

    print('{}{}'.format(species.ljust(25),count))

# This previously printed len(all_files) as "total images", but all_files
# includes non-image files (.csv files, etc.); count actual images instead.
n_images = len(all_image_files)
n_sequences = sum(species_to_count_overview_sorted.values())

print('\n{} total images\n{} total sequences'.format(n_images,n_sequences))

print('\nString count: {}'.format(string_count))
print('Non-blank count: {}'.format(non_blank_count))
#%% Make sure that capture IDs in the reports/inventory files match

# ...and confirm that (almost) all the images in the inventory tables are
# present on disk.

all_relative_paths_in_inventory = set()
files_missing_on_disk = []

for report_file in all_report_files:

    project_base = file_to_project_folder(report_file)
    inventory_file = report_file.replace('.csv','_image_inventory.csv')

    inventory_df = read_cached_csv_file(inventory_file)
    report_df = read_cached_csv_file(report_file)

    capture_ids_in_report = set()
    for i_row,row in report_df.iterrows():
        capture_ids_in_report.add(row['capture_id'])

    capture_ids_in_inventory = set()
    for i_row,row in inventory_df.iterrows():

        capture_ids_in_inventory.add(row['capture_id'])
        # Inventory paths are relative to the project folder; make them
        # relative to the drive root
        image_path_relative = project_base + '/' + row['image_path_rel']

        # assert image_path_relative in all_files_relative_set
        if image_path_relative not in all_files_relative_set:

            # Make sure this isn't just a case issue
            assert image_path_relative.replace('.JPG','.jpg') \
                not in all_files_relative_set
            assert image_path_relative.replace('.jpg','.JPG') \
                not in all_files_relative_set
            files_missing_on_disk.append(image_path_relative)

        # Each image should appear in exactly one inventory row
        assert image_path_relative not in all_relative_paths_in_inventory
        all_relative_paths_in_inventory.add(image_path_relative)

    # Make sure the set of capture IDs appearing in this report is
    # the same as the set of capture IDs appearing in the corresponding
    # inventory file.
    assert capture_ids_in_report == capture_ids_in_inventory

# ...for each report file

print('\n{} missing files (of {})'.format(
    len(files_missing_on_disk),len(all_relative_paths_in_inventory)))
#%% For all the files we have on disk, see which are and aren't in the inventory files

# There aren't any capital-P .PNG files, but if I don't include .PNG
# in this list, I'll look at this in a year and wonder whether I forgot
# to include it.
image_extensions = set(['.JPG','.jpg','.PNG','.png'])

images_not_in_inventory = []
n_images_in_inventoried_projects = 0

# fn = all_files_relative[0]
for fn in tqdm(all_files_relative):

    if os.path.splitext(fn)[1] not in image_extensions:
        continue
    project_code = file_to_project_code(fn)
    # Only consider projects for which we have report files at all
    if project_code in project_codes_with_no_reports:
        # print('Skipping project {}'.format(project_code))
        continue
    n_images_in_inventoried_projects += 1
    if fn not in all_relative_paths_in_inventory:
        images_not_in_inventory.append(fn)

print('\n{} images on disk are not in inventory (of {} in eligible projects)'.format(
    len(images_not_in_inventory),n_images_in_inventoried_projects))
#%% Map captures to images, and vice-versa

# Maps capture_id to a list of image paths (relative to the drive root),
# and each image path back to its capture_id
capture_id_to_images = defaultdict(list)
image_to_capture_id = {}

# report_file = all_report_files[0]
for report_file in tqdm(all_report_files):

    inventory_file = report_file.replace('.csv','_image_inventory.csv')
    inventory_df = read_cached_csv_file(inventory_file)

    project_folder = file_to_project_folder(inventory_file)

    # row = inventory_df.iloc[0]
    for i_row,row in inventory_df.iterrows():

        capture_id = row['capture_id']
        # NOTE(review): this uses os.path.join where the earlier
        # missing-file cell used '/' concatenation; equivalent on Linux,
        # but would produce backslashes (and set mismatches) on Windows
        image_file_relative = os.path.join(project_folder,row['image_path_rel'])
        capture_id_to_images[capture_id].append(image_file_relative)
        # Each image should belong to exactly one capture
        assert image_file_relative not in image_to_capture_id
        image_to_capture_id[image_file_relative] = capture_id

    # ...for each row (one image per row)

# ...for each report file
#%% Map captures to species (just species for now, we'll go back and get other metadata later)

# Maps capture_id to the list of species annotations for that capture (a
# capture may have more than one species row in a report)
capture_id_to_species = defaultdict(list)

for project_code in tqdm(project_codes):

    report_files = project_code_to_report_files[project_code]

    for report_file in report_files:

        report_df = read_cached_csv_file(report_file)

        for i_row,row in report_df.iterrows():

            capture_id = row['capture_id']
            species = row['question__species']
            capture_id_to_species[capture_id].append(species)

        # ...for each row

    # ...for each report file in this project

# ...for each project
#%% Take a look at the annotations "0" and "1"

captures_0 = []
captures_1 = []
captures_1_alone = []
captures_1_with_species = []

for capture_id in tqdm(capture_id_to_species):

    species_this_capture_id = capture_id_to_species[capture_id]

    # Multiple rows may be present for a capture, but they should be unique
    assert len(species_this_capture_id) == len(set(species_this_capture_id))

    if '0' in species_this_capture_id:
        captures_0.append(capture_id)
        # '0' should always appear alone
        assert len(species_this_capture_id) == 1

    if '1' in species_this_capture_id:
        captures_1.append(capture_id)
        assert '0' not in species_this_capture_id
        # '1' should never appear alone
        # assert len(species_this_capture_id) > 1
        if len(species_this_capture_id) == 1:
            captures_1_alone.append(capture_id)
        else:
            captures_1_with_species.append(capture_id)

# ...for each capture ID

print('')
print('Number of captures with "0" as the species (always appears alone): {}'.format(len(captures_0)))
print('Number of captures with "1" as the species: {}'.format(len(captures_1)))
print('Number of captures with "1" as the species, with no other species: {}'.format(
    len(captures_1_alone)))
print('Number of captures with "1" as the species, with other species: {}'.format(
    len(captures_1_with_species)))
#%% Sample some of those captures with mysterious "0" and "1" annotations

# Fixed seed so the samples are reproducible across runs
random.seed(0)
n_to_sample = 500
captures_0_samples = random.sample(captures_0,n_to_sample)
captures_1_samples = random.sample(captures_1,n_to_sample)

capture_0_sample_output_folder = os.path.join(output_base,'capture_0_samples')
capture_1_sample_output_folder = os.path.join(output_base,'capture_1_samples')
os.makedirs(capture_0_sample_output_folder,exist_ok=True)
os.makedirs(capture_1_sample_output_folder,exist_ok=True)
def copy_sampled_captures(sampled_captures,sample_capture_output_folder):
    """
    Copy every image belonging to each capture in [sampled_captures] into
    [sample_capture_output_folder], flattening paths to basenames.  Images
    listed in the inventory but not present on the drive are skipped with a
    console warning.
    """

    for capture_id in tqdm(sampled_captures):
        for image_fn_relative in capture_id_to_images[capture_id]:
            # assert image_fn_relative in all_files_relative_set
            if image_fn_relative not in all_files_relative_set:
                print('Warning: missing file {}'.format(image_fn_relative))
                continue
            shutil.copyfile(os.path.join(input_base,image_fn_relative),
                            os.path.join(sample_capture_output_folder,
                                         os.path.basename(image_fn_relative)))
        # ...for each image
    # ...for each capture

copy_sampled_captures(captures_0_samples,capture_0_sample_output_folder)
copy_sampled_captures(captures_1_samples,capture_1_sample_output_folder)
#%% Find images that MD thinks contain people

md_results_folder = os.path.expanduser(
    '~/postprocessing/snapshot-safari/snapshot-safari-2023-04-21-v5a.0.0/json_subsets')
md_results_files = os.listdir(md_results_folder)

# Confidence thresholds above which we treat a MegaDetector detection as real
md_human_detection_threshold = 0.2
md_vehicle_detection_threshold = 0.2

# We'll make sure this is actually correct for all the files we load
md_human_category = '2'
md_vehicle_category = '3'

md_human_images = set()
md_vehicle_images = set()

# project_code = project_codes[0]
for project_code in project_codes:

    print('Finding human images for {}'.format(project_code))

    project_folder = project_code_to_project_folder[project_code]

    # NOTE(review): substring match on filenames; assumes exactly one
    # results file contains each three-letter code (the assert enforces it)
    md_results_file = [fn for fn in md_results_files if project_code in fn]
    assert len(md_results_file) == 1
    md_results_file = os.path.join(md_results_folder,md_results_file[0])

    with open(md_results_file,'r') as f:
        md_results = json.load(f)
    assert md_results['detection_categories'][md_human_category] == 'person'
    assert md_results['detection_categories'][md_vehicle_category] == 'vehicle'

    # im = md_results['images'][0]
    for im in tqdm(md_results['images']):

        # Skip images with no 'detections' field (e.g. processing failures)
        if 'detections' not in im:
            continue

        # MD results files are each relative to their own projects, we want
        # filenames to be relative to the base of the drive
        fn = os.path.join(project_folder,im['file'])
        for det in im['detections']:
            if det['category'] == md_human_category and \
                det['conf'] >= md_human_detection_threshold:
                md_human_images.add(fn)
            if det['category'] == md_vehicle_category and \
                det['conf'] >= md_vehicle_detection_threshold:
                md_vehicle_images.add(fn)

        # ...for each detection

    # ...for each image

# ...for each project

print('MD found {} human images, {} vehicle images'.format(
    len(md_human_images),len(md_vehicle_images)))

md_human_or_vehicle_images = \
    set(md_human_images).union(set(md_vehicle_images))

# next(iter(md_human_or_vehicle_images))
541
#%% Find images where the ground truth says humans or vehicles are present

# Species IDs used by the ground truth for humans and human vehicles
human_species_id = 'human'
vehicle_species_id = 'humanvehicle'

# Capture IDs whose ground-truth species list includes a human/vehicle label
gt_human_capture_ids = set()
gt_vehicle_capture_ids = set()

for capture_id in capture_id_to_species:

    species_this_capture_id = capture_id_to_species[capture_id]

    for species in species_this_capture_id:
        if species == human_species_id:
            gt_human_capture_ids.add(capture_id)
        elif species == vehicle_species_id:
            gt_vehicle_capture_ids.add(capture_id)

# ...for each capture ID

# Expand capture IDs to the corresponding image filenames
gt_human_images = [fn for capture_id in gt_human_capture_ids
                   for fn in capture_id_to_images[capture_id]]
gt_vehicle_images = [fn for capture_id in gt_vehicle_capture_ids
                     for fn in capture_id_to_images[capture_id]]

print('Ground truth includes {} human images ({} captures), {} vehicle images ({} captures)'.format(
    len(gt_human_images),len(gt_human_capture_ids),
    len(gt_vehicle_images),len(gt_vehicle_capture_ids)))

# set.union() accepts any iterable, so the second list needs no explicit
# set() conversion
ground_truth_human_or_vehicle_images = \
    set(gt_human_images).union(gt_vehicle_images)

# next(iter(ground_truth_human_or_vehicle_images))
579
+
580
+
581
#%% Find mismatches

# Images where MD found a person/vehicle but the ground truth has no
# human or vehicle label for the corresponding capture.  List
# comprehensions replace the original append loops (same contents).
gt_missing_human_images = [
    fn for fn in md_human_images
    if fn not in ground_truth_human_or_vehicle_images]

gt_missing_vehicle_images = [
    fn for fn in md_vehicle_images
    if fn not in ground_truth_human_or_vehicle_images]

print('Of {} images where MD found a human, {} are not in the ground truth'.format(
    len(md_human_images),len(gt_missing_human_images)))

print('Of {} images where MD found a vehicle, {} are not in the ground truth'.format(
    len(md_vehicle_images),len(gt_missing_vehicle_images)))
599
+
600
+
601
#%% Sample mismatches

# Copy a reproducible random sample of the mismatched images to local
# folders for manual review.

random.seed(0)
n_to_sample = 1000

# random.sample() raises ValueError when the population is smaller than the
# requested sample size, so clamp to the population size.
sampled_human_mismatches = random.sample(
    gt_missing_human_images,min(n_to_sample,len(gt_missing_human_images)))
sampled_vehicle_mismatches = random.sample(
    gt_missing_vehicle_images,min(n_to_sample,len(gt_missing_vehicle_images)))

human_mismatch_output_folder = os.path.join(output_base,'mismatches_human')
vehicle_mismatch_output_folder = os.path.join(output_base,'mismatches_vehicle')
os.makedirs(human_mismatch_output_folder,exist_ok=True)
os.makedirs(vehicle_mismatch_output_folder,exist_ok=True)

def copy_sampled_images(sampled_images,sampled_images_output_folder):
    """
    Copy each file in [sampled_images] (paths relative to input_base) into
    the flat folder [sampled_images_output_folder], skipping (with a printed
    warning) files not present in all_files_relative_set.

    NOTE(review): files are flattened to their basenames, so basename
    collisions would silently overwrite earlier copies - confirm basenames
    are unique within a sample.
    """

    for fn in tqdm(sampled_images):
        if fn not in all_files_relative_set:
            print('Warning: missing file {}'.format(fn))
            continue
        source_image = os.path.join(input_base,fn)
        target_image = os.path.join(sampled_images_output_folder,os.path.basename(fn))
        shutil.copyfile(source_image,target_image)

copy_sampled_images(sampled_human_mismatches,human_mismatch_output_folder)
copy_sampled_images(sampled_vehicle_mismatches,vehicle_mismatch_output_folder)
625
+
626
+
627
#%% See what's up with some of the mismatches

from megadetector.utils.path_utils import is_image_file

# Map each image's basename back to its full relative path, so a file seen
# in a review folder can be traced to its original location.  The assert
# guarantees the mapping is unambiguous.
filename_base_to_filename = {}

for image_path in tqdm(all_files_relative):

    # Skip non-image files and the individual-recognition subtree
    if (not is_image_file(image_path)) or ('Indiv_Recognition' in image_path):
        continue
    image_basename = os.path.basename(image_path)
    assert image_basename not in filename_base_to_filename
    filename_base_to_filename[image_basename] = image_path


if False:

    # Interactive scratch cell: look up one specific image's capture/species
    bn = 'TSW_S2_KA02_R3_IMAG0002.JPG'
    fn = filename_base_to_filename[bn]
    capture_id = image_to_capture_id[fn]
    species = capture_id_to_species[capture_id]
653
#%% Look at the distribution of labels for the mismatched images

# All images where MD found a human or vehicle that the ground truth lacks
gt_missing_images = set(gt_missing_human_images).union(set(gt_missing_vehicle_images))

# Maps ground-truth species name to the number of mismatched images
# carrying that label
missing_image_species_to_count = defaultdict(int)

for fn in gt_missing_images:
    # Skip images with no capture record at all
    if fn not in image_to_capture_id:
        continue
    capture_id = image_to_capture_id[fn]
    species = capture_id_to_species[capture_id]
    for s in species:
        missing_image_species_to_count[s] += 1