megadetector 5.0.10__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (226)
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,665 +0,0 @@
1
- """
2
-
3
- snapshot_safari_importer_reprise.py
4
-
5
- This is a 2023 update to snapshot_safari_importer.py. We do a bunch of things now that
6
- we didn't do the last time we imported Snapshot data (like updating the big taxonomy)
7
- file, and we skip a bunch of things now that we used to do (like generating massive
8
- zipfiles). So, new year, new importer.
9
-
10
- """
11
-
12
- #%% Constants and imports
13
-
14
- import os
15
- import glob
16
- import json
17
- import shutil
18
- import random
19
-
20
- import pandas as pd
21
-
22
- from tqdm import tqdm
23
- from collections import defaultdict
24
-
25
- from md_utils import path_utils
26
-
27
- input_base = '/media/user/Elements'
28
- output_base = os.path.expanduser('~/data/snapshot-safari-metadata')
29
- file_list_cache_file = os.path.join(output_base,'file_list.json')
30
-
31
- assert os.path.isdir(input_base)
32
- os.makedirs(output_base,exist_ok=True)
33
-
34
- # We're going to copy all the .csv files to a faster location
35
- annotation_cache_dir = os.path.join(output_base,'csv_files')
36
- os.makedirs(annotation_cache_dir,exist_ok=True)
37
-
38
-
39
- #%% List files
40
-
41
- # Do a one-time enumeration of the entire drive; this will take a long time,
42
- # but will save a lot of hassle later.
43
-
44
- if os.path.isfile(file_list_cache_file):
45
- print('Loading file list from {}'.format(file_list_cache_file))
46
- with open(file_list_cache_file,'r') as f:
47
- all_files = json.load(f)
48
- else:
49
- all_files = glob.glob(os.path.join(input_base,'**','*.*'),recursive=True)
50
- all_files = [fn for fn in all_files if '$RECYCLE.BIN' not in fn]
51
- all_files = [fn for fn in all_files if 'System Volume Information' not in fn]
52
- print('Enumerated {} files'.format(len(all_files)))
53
- with open(file_list_cache_file,'w') as f:
54
- json.dump(all_files,f,indent=1)
55
- print('Wrote file list to {}'.format(file_list_cache_file))
56
-
57
-
58
- #%% Create derived lists
59
-
60
- # Takes about 60 seconds
61
-
62
- all_files_relative = [os.path.relpath(fn,input_base) for fn in all_files]
63
- all_files_relative = [fn.replace('\\','/') for fn in all_files_relative]
64
- all_files_relative_set = set(all_files_relative)
65
-
66
- # CSV files are one of:
67
- #
68
- # _report_lila.csv (species/count/etc. for each capture)
69
- # _report_lila_image_inventory.csv (maps captures to images)
70
- # _report_lila_overview.csv (distribution of species)
71
- csv_files = [fn for fn in all_files_relative if fn.endswith('.csv')]
72
-
73
- all_image_files = path_utils.find_image_strings(all_files_relative)
74
-
75
- print('Found a total of {} files, {} of which are images'.format(
76
- len(all_files_relative),len(all_image_files)))
77
-
78
-
79
- #%% Copy all csv files to the annotation cache folder
80
-
81
- # fn = csv_files[0]
82
- for fn in csv_files:
83
- target_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
84
- source_file = os.path.join(input_base,fn)
85
- shutil.copyfile(source_file,target_file)
86
-
87
- def read_cached_csv_file(fn):
88
- """
89
- Later cells will ask to read a .csv file from the original hard drive;
90
- read from the annotation cache instead.
91
- """
92
-
93
- cached_csv_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
94
- df = pd.read_csv(cached_csv_file)
95
- return df
96
-
97
-
98
- #%% List project folders
99
-
100
- # There are two formats for project folder names:
101
- #
102
- # APN
103
- # Snapshot Cameo/DEB
104
- project_code_to_project_folder = {}
105
-
106
- folders = os.listdir(input_base)
107
- folders = [fn for fn in folders if (not fn.startswith('$') and \
108
- not 'System Volume' in fn)]
109
-
110
- for fn in folders:
111
- if len(fn) == 3:
112
- assert fn not in project_code_to_project_folder
113
- project_code_to_project_folder[fn] = fn
114
- else:
115
- assert 'Snapshot' in fn
116
- subfolders = os.listdir('/'.join([input_base,fn]))
117
- for subfn in subfolders:
118
- assert len(subfn) == 3
119
- assert subfn not in project_code_to_project_folder
120
- project_code_to_project_folder[subfn] = '/'.join([fn,subfn])
121
-
122
- project_folder_to_project_code = {v: k for k, v in project_code_to_project_folder.items()}
123
- project_codes = sorted(list(project_code_to_project_folder.keys()))
124
- project_folders = sorted(list(project_code_to_project_folder.values()))
125
-
126
- def file_to_project_folder(fn):
127
- """
128
- For a given filename relative to the drive root, return the corresponding
129
- project folder (also relative to the drive root).
130
- """
131
-
132
- tokens = fn.split('/')
133
- if len(tokens[0]) == 3:
134
- project_folder = tokens[0]
135
- else:
136
- assert 'Snapshot' in tokens[0]
137
- project_folder = '/'.join(tokens[0:2])
138
- assert project_folder in project_folders
139
- return project_folder
140
-
141
-
142
- def file_to_project_code(fn):
143
- """
144
- For a given filename relative to the drive root, return the corresponding
145
- three-letter project code (e.g. "CDB").
146
- """
147
-
148
- return project_folder_to_project_code[file_to_project_folder(fn)]
149
-
150
- assert file_to_project_folder(
151
- 'APN/APN_S2/DW/DW_R5/APN_S2_DW_R5_IMAG0003.JPG') == 'APN'
152
- assert file_to_project_folder(
153
- 'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
154
- 'Snapshot South Africa/BLO'
155
- assert file_to_project_code(
156
- 'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
157
- 'BLO'
158
-
159
-
160
- #%% Map report and inventory files to codes
161
-
162
- # Maps a three-letter project code to a list of per-season _report_lila.csv files
163
- #
164
- # E.g.:
165
- #
166
- # 'DHP': ['Snapshot South Africa/DHP/LILA_Reports/DHP_S1_report_lila.csv',
167
- # 'Snapshot South Africa/DHP/LILA_Reports/DHP_S2_report_lila.csv',
168
- # 'Snapshot South Africa/DHP/LILA_Reports/DHP_S3_report_lila.csv']
169
- #
170
- project_code_to_report_files = defaultdict(list)
171
-
172
- # fn = csv_files[0]
173
- for fn in csv_files:
174
- if 'report_lila.csv' not in fn:
175
- continue
176
- project_code = project_folder_to_project_code[file_to_project_folder(fn)]
177
- project_code_to_report_files[project_code].append(fn)
178
-
179
- project_codes_with_no_reports = set()
180
-
181
- for project_code in project_code_to_project_folder.keys():
182
- if project_code not in project_code_to_report_files:
183
- project_codes_with_no_reports.add(project_code)
184
- print('Warning: no report files available for {}'.format(project_code))
185
-
186
-
187
- #%% Make sure that every report has a corresponding inventory file
188
-
189
- all_report_files = [item for sublist in project_code_to_report_files.values() \
190
- for item in sublist]
191
-
192
- for fn in all_report_files:
193
- inventory_file = fn.replace('.csv','_image_inventory.csv')
194
- assert inventory_file in csv_files
195
-
196
-
197
- #%% Count species based on overview and report files
198
-
199
- # The overview and report files should produce the same counts; we'll verify this
200
- # in the next cell.
201
-
202
- species_to_count_overview = defaultdict(int)
203
- species_to_count_report = defaultdict(int)
204
-
205
- for report_file in all_report_files:
206
-
207
- overview_file = report_file.replace('.csv','_overview.csv')
208
-
209
- df = read_cached_csv_file(overview_file)
210
-
211
- for i_row,row in df.iterrows():
212
-
213
- if row['question'] == 'question__species':
214
-
215
- assert isinstance(row['answer'],str)
216
- assert isinstance(row['count'],int)
217
- species = row['answer']
218
-
219
- if len(species) < 3:
220
- assert species == '0' or species == '1'
221
-
222
- species_to_count_overview[species] += row['count']
223
-
224
- # ...for each capture in the overview file
225
-
226
- df = read_cached_csv_file(report_file)
227
-
228
- for i_row,row in df.iterrows():
229
-
230
- species = row['question__species']
231
- assert isinstance(species,str)
232
-
233
- # Ignore results from the blank/non-blank workflow
234
- if len(species) < 3:
235
- assert species == '0' or species == '1'
236
- species_to_count_report[species] += 1
237
-
238
- # ...for each capture in the report file
239
-
240
- # ...for each report file
241
-
242
-
243
- #%% Print counts
244
-
245
- species_to_count_overview_sorted = \
246
- {k: v for k, v in sorted(species_to_count_overview.items(),
247
- key=lambda item: item[1], reverse=True)}
248
- species_to_count_report_sorted = \
249
- {k: v for k, v in sorted(species_to_count_report.items(),
250
- key=lambda item: item[1], reverse=True)}
251
-
252
- string_count = 0
253
- non_blank_count = 0
254
-
255
- for species in species_to_count_overview_sorted.keys():
256
-
257
- # The overview and report files should produce the same counts
258
- assert species_to_count_overview_sorted[species] == \
259
- species_to_count_report[species]
260
- count = species_to_count_overview_sorted[species]
261
- if species not in ('0','1'):
262
- string_count += count
263
- if species != 'blank':
264
- non_blank_count += count
265
-
266
- print('{}{}'.format(species.ljust(25),count))
267
-
268
- n_images = len(all_files)
269
- n_sequences = sum(species_to_count_overview_sorted.values())
270
-
271
- print('\n{} total images\n{} total sequences'.format(n_images,n_sequences))
272
-
273
- print('\nString count: {}'.format(string_count))
274
- print('Non-blank count: {}'.format(non_blank_count))
275
-
276
-
277
- #%% Make sure that capture IDs in the reports/inventory files match
278
-
279
- # ...and confirm that (almost) all the images in the inventory tables are
280
- # present on disk.
281
-
282
- all_relative_paths_in_inventory = set()
283
- files_missing_on_disk = []
284
-
285
- for report_file in all_report_files:
286
-
287
- project_base = file_to_project_folder(report_file)
288
- inventory_file = report_file.replace('.csv','_image_inventory.csv')
289
-
290
- inventory_df = read_cached_csv_file(inventory_file)
291
- report_df = read_cached_csv_file(report_file)
292
-
293
- capture_ids_in_report = set()
294
- for i_row,row in report_df.iterrows():
295
- capture_ids_in_report.add(row['capture_id'])
296
-
297
- capture_ids_in_inventory = set()
298
- for i_row,row in inventory_df.iterrows():
299
-
300
- capture_ids_in_inventory.add(row['capture_id'])
301
- image_path_relative = project_base + '/' + row['image_path_rel']
302
-
303
- # assert image_path_relative in all_files_relative_set
304
- if image_path_relative not in all_files_relative_set:
305
-
306
- # Make sure this isn't just a case issue
307
- assert image_path_relative.replace('.JPG','.jpg') \
308
- not in all_files_relative_set
309
- assert image_path_relative.replace('.jpg','.JPG') \
310
- not in all_files_relative_set
311
- files_missing_on_disk.append(image_path_relative)
312
-
313
- assert image_path_relative not in all_relative_paths_in_inventory
314
- all_relative_paths_in_inventory.add(image_path_relative)
315
-
316
- # Make sure the set of capture IDs appearing in this report is
317
- # the same as the set of capture IDs appearing in the corresponding
318
- # inventory file.
319
- assert capture_ids_in_report == capture_ids_in_inventory
320
-
321
- # ...for each report file
322
-
323
- print('\n{} missing files (of {})'.format(
324
- len(files_missing_on_disk),len(all_relative_paths_in_inventory)))
325
-
326
-
327
- #%% For all the files we have on disk, see which are and aren't in the inventory files
328
-
329
- # There aren't any capital-P .PNG files, but if I don't include .PNG
330
- # in this list, I'll look at this in a year and wonder whether I forgot
331
- # to include it.
332
- image_extensions = set(['.JPG','.jpg','.PNG','.png'])
333
-
334
- images_not_in_inventory = []
335
- n_images_in_inventoried_projects = 0
336
-
337
- # fn = all_files_relative[0]
338
- for fn in tqdm(all_files_relative):
339
-
340
- if os.path.splitext(fn)[1] not in image_extensions:
341
- continue
342
- project_code = file_to_project_code(fn)
343
- if project_code in project_codes_with_no_reports:
344
- # print('Skipping project {}'.format(project_code))
345
- continue
346
- n_images_in_inventoried_projects += 1
347
- if fn not in all_relative_paths_in_inventory:
348
- images_not_in_inventory.append(fn)
349
-
350
- print('\n{} images on disk are not in inventory (of {} in eligible projects)'.format(
351
- len(images_not_in_inventory),n_images_in_inventoried_projects))
352
-
353
-
354
#%% Map captures to images, and vice-versa

# Build the capture <--> image mappings from the per-season inventory files.
#
# Paths are stored relative to the base of the drive using forward slashes, so
# they can be compared directly against all_files_relative_set (which the
# "Create derived lists" cell normalized to '/').

capture_id_to_images = defaultdict(list)
image_to_capture_id = {}

# report_file = all_report_files[0]
for report_file in tqdm(all_report_files):

    inventory_file = report_file.replace('.csv','_image_inventory.csv')
    inventory_df = read_cached_csv_file(inventory_file)

    project_folder = file_to_project_folder(inventory_file)

    # row = inventory_df.iloc[0]
    for i_row,row in inventory_df.iterrows():

        capture_id = row['capture_id']

        # Join with '/' rather than os.path.join() for consistency with the
        # rest of this script: all_files_relative_set uses forward slashes, and
        # os.path.join() would produce backslashes on Windows, silently breaking
        # the set lookups in copy_sampled_captures().  (Identical behavior on
        # POSIX platforms, where the separator is already '/'.)
        image_file_relative = project_folder + '/' + row['image_path_rel']
        capture_id_to_images[capture_id].append(image_file_relative)
        assert image_file_relative not in image_to_capture_id
        image_to_capture_id[image_file_relative] = capture_id

    # ...for each row (one image per row)

# ...for each report file
379
-
380
-
381
- #%% Map captures to species (just species for now, we'll go back and get other metadata later)
382
-
383
- capture_id_to_species = defaultdict(list)
384
-
385
- for project_code in tqdm(project_codes):
386
-
387
- report_files = project_code_to_report_files[project_code]
388
-
389
- for report_file in report_files:
390
-
391
- report_df = read_cached_csv_file(report_file)
392
-
393
- for i_row,row in report_df.iterrows():
394
-
395
- capture_id = row['capture_id']
396
- species = row['question__species']
397
- capture_id_to_species[capture_id].append(species)
398
-
399
- # ...for each row
400
-
401
- # ...for each report file in this project
402
-
403
- # ...for each project
404
-
405
-
406
- #%% Take a look at the annotations "0" and "1"
407
-
408
- captures_0 = []
409
- captures_1 = []
410
- captures_1_alone = []
411
- captures_1_with_species = []
412
-
413
- for capture_id in tqdm(capture_id_to_species):
414
-
415
- species_this_capture_id = capture_id_to_species[capture_id]
416
-
417
- # Multiple rows may be present for a capture, but they should be unique
418
- assert len(species_this_capture_id) == len(set(species_this_capture_id))
419
-
420
- if '0' in species_this_capture_id:
421
- captures_0.append(capture_id)
422
- # '0' should always appear alone
423
- assert len(species_this_capture_id) == 1
424
-
425
- if '1' in species_this_capture_id:
426
- captures_1.append(capture_id)
427
- assert '0' not in species_this_capture_id
428
- # '1' should never appear alone
429
- # assert len(species_this_capture_id) > 1
430
- if len(species_this_capture_id) == 1:
431
- captures_1_alone.append(capture_id)
432
- else:
433
- captures_1_with_species.append(capture_id)
434
-
435
- # ...for each capture ID
436
-
437
- print('')
438
- print('Number of captures with "0" as the species (always appears alone): {}'.format(len(captures_0)))
439
- print('Number of captures with "1" as the species: {}'.format(len(captures_1)))
440
- print('Number of captures with "1" as the species, with no other species: {}'.format(
441
- len(captures_1_alone)))
442
- print('Number of captures with "1" as the species, with other species: {}'.format(
443
- len(captures_1_with_species)))
444
-
445
-
446
- #%% Sample some of those captures with mysterious "0" and "1" annotations
447
-
448
- random.seed(0)
449
- n_to_sample = 500
450
- captures_0_samples = random.sample(captures_0,n_to_sample)
451
- captures_1_samples = random.sample(captures_1,n_to_sample)
452
-
453
- capture_0_sample_output_folder = os.path.join(output_base,'capture_0_samples')
454
- capture_1_sample_output_folder = os.path.join(output_base,'capture_1_samples')
455
- os.makedirs(capture_0_sample_output_folder,exist_ok=True)
456
- os.makedirs(capture_1_sample_output_folder,exist_ok=True)
457
-
458
- def copy_sampled_captures(sampled_captures,sample_capture_output_folder):
459
-
460
- for capture_id in tqdm(sampled_captures):
461
- images_this_capture = capture_id_to_images[capture_id]
462
- for fn in images_this_capture:
463
- # assert fn in all_files_relative_set
464
- if fn not in all_files_relative_set:
465
- print('Warning: missing file {}'.format(fn))
466
- continue
467
- source_image = os.path.join(input_base,fn)
468
- target_image = os.path.join(sample_capture_output_folder,os.path.basename(fn))
469
- shutil.copyfile(source_image,target_image)
470
- # ....for each image
471
- # ...for each capture
472
-
473
- copy_sampled_captures(captures_0_samples,capture_0_sample_output_folder)
474
- copy_sampled_captures(captures_1_samples,capture_1_sample_output_folder)
475
-
476
-
477
#%% Find images that MD thinks contain people

md_results_folder = os.path.expanduser(
    '~/postprocessing/snapshot-safari/snapshot-safari-2023-04-21-v5a.0.0/json_subsets')
md_results_files = os.listdir(md_results_folder)

md_human_detection_threshold = 0.2
md_vehicle_detection_threshold = 0.2

# We'll make sure this is actually correct for all the files we load
md_human_category = '2'
md_vehicle_category = '3'

# Absolute-ish filenames (relative to the drive base) where MD found a
# person/vehicle above the corresponding threshold
md_human_images = set()
md_vehicle_images = set()

# project_code = project_codes[0]
for project_code in project_codes:

    print('Finding human images for {}'.format(project_code))

    project_folder = project_code_to_project_folder[project_code]

    # Substring match against the results filenames; the assert guarantees
    # the match is unambiguous
    matching_results_files = [fn for fn in md_results_files if project_code in fn]
    assert len(matching_results_files) == 1
    md_results_file = os.path.join(md_results_folder,matching_results_files[0])

    with open(md_results_file,'r') as f:
        md_results = json.load(f)

    # Verify that the category IDs we assumed above are correct for this file
    assert md_results['detection_categories'][md_human_category] == 'person'
    assert md_results['detection_categories'][md_vehicle_category] == 'vehicle'

    # im = md_results['images'][0]
    for im in tqdm(md_results['images']):

        # Images that failed to process carry no 'detections' field
        if 'detections' not in im:
            continue

        # MD results files are each relative to their own projects, we want
        # filenames to be relative to the base of the drive
        fn = os.path.join(project_folder,im['file'])
        for det in im['detections']:
            # A detection has exactly one category, so these two tests are
            # mutually exclusive; use elif to make that explicit
            if det['category'] == md_human_category and \
                det['conf'] >= md_human_detection_threshold:
                md_human_images.add(fn)
            elif det['category'] == md_vehicle_category and \
                det['conf'] >= md_vehicle_detection_threshold:
                md_vehicle_images.add(fn)

        # ...for each detection

    # ...for each image

# ...for each project

print('MD found {} human images, {} vehicle images'.format(
    len(md_human_images),len(md_vehicle_images)))

# Both operands are already sets, so no set() conversion is necessary
md_human_or_vehicle_images = md_human_images.union(md_vehicle_images)

# next(iter(md_human_or_vehicle_images))
539
-
540
-
541
#%% Find images where the ground truth says humans or vehicles are present

human_species_id = 'human'
vehicle_species_id = 'humanvehicle'

gt_human_capture_ids = set()
gt_vehicle_capture_ids = set()

# Assign each capture to the human and/or vehicle set based on its species list
for capture_id,species_this_capture_id in capture_id_to_species.items():
    for species in species_this_capture_id:
        if species == human_species_id:
            gt_human_capture_ids.add(capture_id)
        elif species == vehicle_species_id:
            gt_vehicle_capture_ids.add(capture_id)
# ...for each capture ID

# Expand capture IDs into their constituent image filenames
gt_human_images = []
for capture_id in gt_human_capture_ids:
    gt_human_images.extend(capture_id_to_images[capture_id])

gt_vehicle_images = []
for capture_id in gt_vehicle_capture_ids:
    gt_vehicle_images.extend(capture_id_to_images[capture_id])

print('Ground truth includes {} human images ({} captures), {} vehicle images ({} captures)'.format(
    len(gt_human_images),len(gt_human_capture_ids),
    len(gt_vehicle_images),len(gt_vehicle_capture_ids)))

ground_truth_human_or_vehicle_images = \
    set(gt_human_images).union(set(gt_vehicle_images))

# next(iter(ground_truth_human_or_vehicle_images))
579
-
580
-
581
#%% Find mismatches

# MD human/vehicle detections that have no corresponding ground truth label
gt_missing_human_images = [fn for fn in md_human_images
                           if fn not in ground_truth_human_or_vehicle_images]
gt_missing_vehicle_images = [fn for fn in md_vehicle_images
                             if fn not in ground_truth_human_or_vehicle_images]

print('Of {} images where MD found a human, {} are not in the ground truth'.format(
    len(md_human_images),len(gt_missing_human_images)))

print('Of {} images where MD found a vehicle, {} are not in the ground truth'.format(
    len(md_vehicle_images),len(gt_missing_vehicle_images)))
599
-
600
-
601
#%% Sample mismatches

random.seed(0)
n_to_sample = 1000

# random.sample() raises ValueError when the population is smaller than the
# requested sample size; there may be fewer than n_to_sample mismatches, so
# clamp to the available count.
sampled_human_mismatches = random.sample(
    gt_missing_human_images,min(n_to_sample,len(gt_missing_human_images)))
sampled_vehicle_mismatches = random.sample(
    gt_missing_vehicle_images,min(n_to_sample,len(gt_missing_vehicle_images)))

# Review folders for the sampled mismatches
human_mismatch_output_folder = os.path.join(output_base,'mismatches_human')
vehicle_mismatch_output_folder = os.path.join(output_base,'mismatches_vehicle')
os.makedirs(human_mismatch_output_folder,exist_ok=True)
os.makedirs(vehicle_mismatch_output_folder,exist_ok=True)
612
-
613
def copy_sampled_images(sampled_images,sampled_images_output_folder):
    """
    Copy each image in [sampled_images] (paths relative to input_base) into
    [sampled_images_output_folder], flattening paths to basenames.

    Files missing from all_files_relative_set are skipped with a printed
    warning rather than raising.
    """

    for relative_fn in tqdm(sampled_images):
        if relative_fn not in all_files_relative_set:
            print('Warning: missing file {}'.format(relative_fn))
            continue
        src = os.path.join(input_base,relative_fn)
        dst = os.path.join(sampled_images_output_folder,os.path.basename(relative_fn))
        shutil.copyfile(src,dst)
623
# Copy both mismatch samples to their review folders
for _samples,_folder in ((sampled_human_mismatches,human_mismatch_output_folder),
                         (sampled_vehicle_mismatches,vehicle_mismatch_output_folder)):
    copy_sampled_images(_samples,_folder)
625
-
626
-
627
#%% See what's up with some of the mismatches

from md_utils.path_utils import is_image_file

# Map image basenames to full relative paths, asserting that basenames are
# unique once the Indiv_Recognition folders are excluded
filename_base_to_filename = {}

# fn = all_files_relative[0]
for image_fn in tqdm(all_files_relative):
    if (not is_image_file(image_fn)) or ('Indiv_Recognition' in image_fn):
        continue
    image_bn = os.path.basename(image_fn)
    assert image_bn not in filename_base_to_filename
    filename_base_to_filename[image_bn] = image_fn


# Interactive scratch code for looking up a single image's annotations
if False:

    bn = 'TSW_S2_KA02_R3_IMAG0002.JPG'
    fn = filename_base_to_filename[bn]
    capture_id = image_to_capture_id[fn]
    species = capture_id_to_species[capture_id]
652
-
653
#%% Look at the distribution of labels for the mismatched images

gt_missing_images = set(gt_missing_human_images).union(set(gt_missing_vehicle_images))

# Count ground-truth species labels over all mismatched images for which we
# have a capture ID
missing_image_species_to_count = defaultdict(int)

for missing_fn in gt_missing_images:
    if missing_fn not in image_to_capture_id:
        continue
    species_this_image = capture_id_to_species[image_to_capture_id[missing_fn]]
    for species_name in species_this_image:
        missing_image_species_to_count[species_name] += 1