megadetector-5.0.8-py3-none-any.whl → megadetector-5.0.10-py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -1
- api/batch_processing/api_core/server_job_status_table.py +0 -1
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -1
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -1
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +65 -65
- api/batch_processing/data_preparation/manage_video_batch.py +8 -8
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -1
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -1
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +12 -12
- api/batch_processing/postprocessing/categorize_detections_by_size.py +32 -14
- api/batch_processing/postprocessing/combine_api_outputs.py +68 -54
- api/batch_processing/postprocessing/compare_batch_results.py +113 -43
- api/batch_processing/postprocessing/convert_output_format.py +41 -16
- api/batch_processing/postprocessing/load_api_results.py +16 -17
- api/batch_processing/postprocessing/md_to_coco.py +31 -21
- api/batch_processing/postprocessing/md_to_labelme.py +52 -22
- api/batch_processing/postprocessing/merge_detections.py +14 -14
- api/batch_processing/postprocessing/postprocess_batch_results.py +246 -174
- api/batch_processing/postprocessing/remap_detection_categories.py +32 -25
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +60 -27
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +53 -44
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +25 -14
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +242 -158
- api/batch_processing/postprocessing/separate_detections_into_folders.py +159 -114
- api/batch_processing/postprocessing/subset_json_detector_output.py +146 -169
- api/batch_processing/postprocessing/top_folders_to_bottom.py +77 -43
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -2
- api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -268
- api/synchronous/api_core/animal_detection_api/config.py +35 -35
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +109 -109
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +21 -24
- classification/analyze_failed_images.py +11 -13
- classification/cache_batchapi_outputs.py +51 -51
- classification/create_classification_dataset.py +69 -68
- classification/crop_detections.py +54 -53
- classification/csv_to_json.py +97 -100
- classification/detect_and_crop.py +105 -105
- classification/evaluate_model.py +43 -42
- classification/identify_mislabeled_candidates.py +47 -46
- classification/json_to_azcopy_list.py +10 -10
- classification/json_validator.py +72 -71
- classification/map_classification_categories.py +44 -43
- classification/merge_classification_detection_output.py +68 -68
- classification/prepare_classification_script.py +157 -154
- classification/prepare_classification_script_mc.py +228 -228
- classification/run_classifier.py +27 -26
- classification/save_mislabeled.py +30 -30
- classification/train_classifier.py +20 -20
- classification/train_classifier_tf.py +21 -22
- classification/train_utils.py +10 -10
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +18 -31
- data_management/camtrap_dp_to_coco.py +238 -0
- data_management/cct_json_utils.py +102 -59
- data_management/cct_to_md.py +176 -158
- data_management/cct_to_wi.py +247 -219
- data_management/coco_to_labelme.py +272 -263
- data_management/coco_to_yolo.py +79 -58
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +20 -16
- data_management/databases/combine_coco_camera_traps_files.py +35 -31
- data_management/databases/integrity_check_json_db.py +62 -24
- data_management/databases/subset_json_db.py +24 -15
- data_management/generate_crops_from_cct.py +27 -45
- data_management/get_image_sizes.py +188 -162
- data_management/importers/add_nacti_sizes.py +8 -8
- data_management/importers/add_timestamps_to_icct.py +78 -78
- data_management/importers/animl_results_to_md_results.py +158 -158
- data_management/importers/auckland_doc_test_to_json.py +9 -9
- data_management/importers/auckland_doc_to_json.py +8 -8
- data_management/importers/awc_to_json.py +7 -7
- data_management/importers/bellevue_to_json.py +15 -15
- data_management/importers/cacophony-thermal-importer.py +13 -13
- data_management/importers/carrizo_shrubfree_2018.py +8 -8
- data_management/importers/carrizo_trail_cam_2017.py +8 -8
- data_management/importers/cct_field_adjustments.py +9 -9
- data_management/importers/channel_islands_to_cct.py +10 -10
- data_management/importers/eMammal/copy_and_unzip_emammal.py +1 -0
- data_management/importers/ena24_to_json.py +7 -7
- data_management/importers/filenames_to_json.py +8 -8
- data_management/importers/helena_to_cct.py +7 -7
- data_management/importers/idaho-camera-traps.py +7 -7
- data_management/importers/idfg_iwildcam_lila_prep.py +10 -10
- data_management/importers/jb_csv_to_json.py +9 -9
- data_management/importers/mcgill_to_json.py +8 -8
- data_management/importers/missouri_to_json.py +18 -18
- data_management/importers/nacti_fieldname_adjustments.py +10 -10
- data_management/importers/noaa_seals_2019.py +7 -7
- data_management/importers/pc_to_json.py +7 -7
- data_management/importers/plot_wni_giraffes.py +7 -7
- data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -359
- data_management/importers/prepare_zsl_imerit.py +7 -7
- data_management/importers/rspb_to_json.py +8 -8
- data_management/importers/save_the_elephants_survey_A.py +8 -8
- data_management/importers/save_the_elephants_survey_B.py +9 -9
- data_management/importers/snapshot_safari_importer.py +26 -26
- data_management/importers/snapshot_safari_importer_reprise.py +665 -665
- data_management/importers/snapshot_serengeti_lila.py +14 -14
- data_management/importers/sulross_get_exif.py +8 -9
- data_management/importers/timelapse_csv_set_to_json.py +11 -11
- data_management/importers/ubc_to_json.py +13 -13
- data_management/importers/umn_to_json.py +7 -7
- data_management/importers/wellington_to_json.py +8 -8
- data_management/importers/wi_to_json.py +9 -9
- data_management/importers/zamba_results_to_md_results.py +181 -181
- data_management/labelme_to_coco.py +65 -24
- data_management/labelme_to_yolo.py +8 -8
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +9 -9
- data_management/lila/add_locations_to_nacti.py +147 -147
- data_management/lila/create_lila_blank_set.py +13 -13
- data_management/lila/create_lila_test_set.py +8 -8
- data_management/lila/create_links_to_md_results_files.py +106 -106
- data_management/lila/download_lila_subset.py +44 -110
- data_management/lila/generate_lila_per_image_labels.py +55 -42
- data_management/lila/get_lila_annotation_counts.py +18 -15
- data_management/lila/get_lila_image_counts.py +11 -11
- data_management/lila/lila_common.py +96 -33
- data_management/lila/test_lila_metadata_urls.py +132 -116
- data_management/ocr_tools.py +173 -128
- data_management/read_exif.py +110 -97
- data_management/remap_coco_categories.py +83 -83
- data_management/remove_exif.py +58 -62
- data_management/resize_coco_dataset.py +30 -23
- data_management/wi_download_csv_to_coco.py +246 -239
- data_management/yolo_output_to_md_output.py +86 -73
- data_management/yolo_to_coco.py +300 -60
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/process_video.py +85 -33
- detection/pytorch_detector.py +43 -25
- detection/run_detector.py +157 -72
- detection/run_detector_batch.py +179 -113
- detection/run_inference_with_yolov5_val.py +108 -48
- detection/run_tiled_inference.py +111 -40
- detection/tf_detector.py +51 -29
- detection/video_utils.py +606 -521
- docs/source/conf.py +43 -0
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +9 -9
- md_utils/ct_utils.py +228 -68
- md_utils/directory_listing.py +59 -64
- md_utils/md_tests.py +968 -871
- md_utils/path_utils.py +460 -134
- md_utils/process_utils.py +157 -133
- md_utils/sas_blob_utils.py +20 -20
- md_utils/split_locations_into_train_val.py +45 -32
- md_utils/string_utils.py +33 -10
- md_utils/url_utils.py +176 -60
- md_utils/write_html_image_list.py +40 -33
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +102 -109
- md_visualization/render_images_with_thumbnails.py +34 -34
- md_visualization/visualization_utils.py +597 -291
- md_visualization/visualize_db.py +76 -48
- md_visualization/visualize_detector_output.py +61 -42
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/METADATA +13 -7
- megadetector-5.0.10.dist-info/RECORD +224 -0
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/top_level.txt +1 -0
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +342 -335
- taxonomy_mapping/map_new_lila_datasets.py +154 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -134
- taxonomy_mapping/preview_lila_taxonomy.py +591 -591
- taxonomy_mapping/retrieve_sample_image.py +12 -12
- taxonomy_mapping/simple_image_download.py +11 -11
- taxonomy_mapping/species_lookup.py +10 -10
- taxonomy_mapping/taxonomy_csv_checker.py +18 -18
- taxonomy_mapping/taxonomy_graph.py +47 -47
- taxonomy_mapping/validate_lila_category_mappings.py +83 -76
- data_management/cct_json_to_filename_json.py +0 -89
- data_management/cct_to_csv.py +0 -140
- data_management/databases/remove_corrupted_images_from_db.py +0 -191
- detection/detector_training/copy_checkpoints.py +0 -43
- megadetector-5.0.8.dist-info/RECORD +0 -205
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/LICENSE +0 -0
- {megadetector-5.0.8.dist-info → megadetector-5.0.10.dist-info}/WHEEL +0 -0
|
@@ -1,665 +1,665 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
#%% Constants and imports
|
|
13
|
-
|
|
14
|
-
import os
|
|
15
|
-
import glob
|
|
16
|
-
import json
|
|
17
|
-
import shutil
|
|
18
|
-
import random
|
|
19
|
-
|
|
20
|
-
import pandas as pd
|
|
21
|
-
|
|
22
|
-
from tqdm import tqdm
|
|
23
|
-
from collections import defaultdict
|
|
24
|
-
|
|
25
|
-
from md_utils import path_utils
|
|
26
|
-
|
|
27
|
-
input_base = '/media/user/Elements'
|
|
28
|
-
output_base = os.path.expanduser('~/data/snapshot-safari-metadata')
|
|
29
|
-
file_list_cache_file = os.path.join(output_base,'file_list.json')
|
|
30
|
-
|
|
31
|
-
assert os.path.isdir(input_base)
|
|
32
|
-
os.makedirs(output_base,exist_ok=True)
|
|
33
|
-
|
|
34
|
-
# We're going to copy all the .csv files to a faster location
|
|
35
|
-
annotation_cache_dir = os.path.join(output_base,'csv_files')
|
|
36
|
-
os.makedirs(annotation_cache_dir,exist_ok=True)
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
#%% List files
|
|
40
|
-
|
|
41
|
-
# Do a one-time enumeration of the entire drive; this will take a long time,
|
|
42
|
-
# but will save a lot of hassle later.
|
|
43
|
-
|
|
44
|
-
if os.path.isfile(file_list_cache_file):
|
|
45
|
-
print('Loading file list from {}'.format(file_list_cache_file))
|
|
46
|
-
with open(file_list_cache_file,'r') as f:
|
|
47
|
-
all_files = json.load(f)
|
|
48
|
-
else:
|
|
49
|
-
all_files = glob.glob(os.path.join(input_base,'**','*.*'),recursive=True)
|
|
50
|
-
all_files = [fn for fn in all_files if '$RECYCLE.BIN' not in fn]
|
|
51
|
-
all_files = [fn for fn in all_files if 'System Volume Information' not in fn]
|
|
52
|
-
print('Enumerated {} files'.format(len(all_files)))
|
|
53
|
-
with open(file_list_cache_file,'w') as f:
|
|
54
|
-
json.dump(all_files,f,indent=1)
|
|
55
|
-
print('Wrote file list to {}'.format(file_list_cache_file))
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
#%% Create derived lists
|
|
59
|
-
|
|
60
|
-
# Takes about 60 seconds
|
|
61
|
-
|
|
62
|
-
all_files_relative = [os.path.relpath(fn,input_base) for fn in all_files]
|
|
63
|
-
all_files_relative = [fn.replace('\\','/') for fn in all_files_relative]
|
|
64
|
-
all_files_relative_set = set(all_files_relative)
|
|
65
|
-
|
|
66
|
-
# CSV files are one of:
|
|
67
|
-
#
|
|
68
|
-
# _report_lila.csv (species/count/etc. for each capture)
|
|
69
|
-
# _report_lila_image_inventory.csv (maps captures to images)
|
|
70
|
-
# _report_lila_overview.csv (distribution of species)
|
|
71
|
-
csv_files = [fn for fn in all_files_relative if fn.endswith('.csv')]
|
|
72
|
-
|
|
73
|
-
all_image_files = path_utils.find_image_strings(all_files_relative)
|
|
74
|
-
|
|
75
|
-
print('Found a total of {} files, {} of which are images'.format(
|
|
76
|
-
len(all_files_relative),len(all_image_files)))
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
#%% Copy all csv files to the annotation cache folder
|
|
80
|
-
|
|
81
|
-
# fn = csv_files[0]
|
|
82
|
-
for fn in csv_files:
|
|
83
|
-
target_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
|
|
84
|
-
source_file = os.path.join(input_base,fn)
|
|
85
|
-
shutil.copyfile(source_file,target_file)
|
|
86
|
-
|
|
87
|
-
def read_cached_csv_file(fn):
|
|
88
|
-
"""
|
|
89
|
-
Later cells will ask to read a .csv file from the original hard drive;
|
|
90
|
-
read from the annotation cache instead.
|
|
91
|
-
"""
|
|
92
|
-
|
|
93
|
-
cached_csv_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
|
|
94
|
-
df = pd.read_csv(cached_csv_file)
|
|
95
|
-
return df
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
#%% List project folders
|
|
99
|
-
|
|
100
|
-
# There are two formats for project folder names:
|
|
101
|
-
#
|
|
102
|
-
# APN
|
|
103
|
-
# Snapshot Cameo/DEB
|
|
104
|
-
project_code_to_project_folder = {}
|
|
105
|
-
|
|
106
|
-
folders = os.listdir(input_base)
|
|
107
|
-
folders = [fn for fn in folders if (not fn.startswith('$') and \
|
|
108
|
-
not 'System Volume' in fn)]
|
|
109
|
-
|
|
110
|
-
for fn in folders:
|
|
111
|
-
if len(fn) == 3:
|
|
112
|
-
assert fn not in project_code_to_project_folder
|
|
113
|
-
project_code_to_project_folder[fn] = fn
|
|
114
|
-
else:
|
|
115
|
-
assert 'Snapshot' in fn
|
|
116
|
-
subfolders = os.listdir('/'.join([input_base,fn]))
|
|
117
|
-
for subfn in subfolders:
|
|
118
|
-
assert len(subfn) == 3
|
|
119
|
-
assert subfn not in project_code_to_project_folder
|
|
120
|
-
project_code_to_project_folder[subfn] = '/'.join([fn,subfn])
|
|
121
|
-
|
|
122
|
-
project_folder_to_project_code = {v: k for k, v in project_code_to_project_folder.items()}
|
|
123
|
-
project_codes = sorted(list(project_code_to_project_folder.keys()))
|
|
124
|
-
project_folders = sorted(list(project_code_to_project_folder.values()))
|
|
125
|
-
|
|
126
|
-
def file_to_project_folder(fn):
|
|
127
|
-
"""
|
|
128
|
-
For a given filename relative to the drive root, return the corresponding
|
|
129
|
-
project folder (also relative to the drive root).
|
|
130
|
-
"""
|
|
131
|
-
|
|
132
|
-
tokens = fn.split('/')
|
|
133
|
-
if len(tokens[0]) == 3:
|
|
134
|
-
project_folder = tokens[0]
|
|
135
|
-
else:
|
|
136
|
-
assert 'Snapshot' in tokens[0]
|
|
137
|
-
project_folder = '/'.join(tokens[0:2])
|
|
138
|
-
assert project_folder in project_folders
|
|
139
|
-
return project_folder
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
def file_to_project_code(fn):
|
|
143
|
-
"""
|
|
144
|
-
For a given filename relative to the drive root, return the corresponding
|
|
145
|
-
three-letter project code (e.g. "CDB").
|
|
146
|
-
"""
|
|
147
|
-
|
|
148
|
-
return project_folder_to_project_code[file_to_project_folder(fn)]
|
|
149
|
-
|
|
150
|
-
assert file_to_project_folder(
|
|
151
|
-
'APN/APN_S2/DW/DW_R5/APN_S2_DW_R5_IMAG0003.JPG') == 'APN'
|
|
152
|
-
assert file_to_project_folder(
|
|
153
|
-
'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
|
|
154
|
-
'Snapshot South Africa/BLO'
|
|
155
|
-
assert file_to_project_code(
|
|
156
|
-
'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
|
|
157
|
-
'BLO'
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
#%% Map report and inventory files to codes
|
|
161
|
-
|
|
162
|
-
# Maps a three-letter project code to a list of per-season _report_lila.csv files
|
|
163
|
-
#
|
|
164
|
-
# E.g.:
|
|
165
|
-
#
|
|
166
|
-
# 'DHP': ['Snapshot South Africa/DHP/LILA_Reports/DHP_S1_report_lila.csv',
|
|
167
|
-
# 'Snapshot South Africa/DHP/LILA_Reports/DHP_S2_report_lila.csv',
|
|
168
|
-
# 'Snapshot South Africa/DHP/LILA_Reports/DHP_S3_report_lila.csv']
|
|
169
|
-
#
|
|
170
|
-
project_code_to_report_files = defaultdict(list)
|
|
171
|
-
|
|
172
|
-
# fn = csv_files[0]
|
|
173
|
-
for fn in csv_files:
|
|
174
|
-
if 'report_lila.csv' not in fn:
|
|
175
|
-
continue
|
|
176
|
-
project_code = project_folder_to_project_code[file_to_project_folder(fn)]
|
|
177
|
-
project_code_to_report_files[project_code].append(fn)
|
|
178
|
-
|
|
179
|
-
project_codes_with_no_reports = set()
|
|
180
|
-
|
|
181
|
-
for project_code in project_code_to_project_folder.keys():
|
|
182
|
-
if project_code not in project_code_to_report_files:
|
|
183
|
-
project_codes_with_no_reports.add(project_code)
|
|
184
|
-
print('Warning: no report files available for {}'.format(project_code))
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
#%% Make sure that every report has a corresponding inventory file
|
|
188
|
-
|
|
189
|
-
all_report_files = [item for sublist in project_code_to_report_files.values() \
|
|
190
|
-
for item in sublist]
|
|
191
|
-
|
|
192
|
-
for fn in all_report_files:
|
|
193
|
-
inventory_file = fn.replace('.csv','_image_inventory.csv')
|
|
194
|
-
assert inventory_file in csv_files
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
#%% Count species based on overview and report files
|
|
198
|
-
|
|
199
|
-
# The overview and report files should produce the same counts; we'll verify this
|
|
200
|
-
# in the next cell.
|
|
201
|
-
|
|
202
|
-
species_to_count_overview = defaultdict(int)
|
|
203
|
-
species_to_count_report = defaultdict(int)
|
|
204
|
-
|
|
205
|
-
for report_file in all_report_files:
|
|
206
|
-
|
|
207
|
-
overview_file = report_file.replace('.csv','_overview.csv')
|
|
208
|
-
|
|
209
|
-
df = read_cached_csv_file(overview_file)
|
|
210
|
-
|
|
211
|
-
for i_row,row in df.iterrows():
|
|
212
|
-
|
|
213
|
-
if row['question'] == 'question__species':
|
|
214
|
-
|
|
215
|
-
assert isinstance(row['answer'],str)
|
|
216
|
-
assert isinstance(row['count'],int)
|
|
217
|
-
species = row['answer']
|
|
218
|
-
|
|
219
|
-
if len(species) < 3:
|
|
220
|
-
assert species == '0' or species == '1'
|
|
221
|
-
|
|
222
|
-
species_to_count_overview[species] += row['count']
|
|
223
|
-
|
|
224
|
-
# ...for each capture in the overview file
|
|
225
|
-
|
|
226
|
-
df = read_cached_csv_file(report_file)
|
|
227
|
-
|
|
228
|
-
for i_row,row in df.iterrows():
|
|
229
|
-
|
|
230
|
-
species = row['question__species']
|
|
231
|
-
assert isinstance(species,str)
|
|
232
|
-
|
|
233
|
-
# Ignore results from the blank/non-blank workflow
|
|
234
|
-
if len(species) < 3:
|
|
235
|
-
assert species == '0' or species == '1'
|
|
236
|
-
species_to_count_report[species] += 1
|
|
237
|
-
|
|
238
|
-
# ...for each capture in the report file
|
|
239
|
-
|
|
240
|
-
# ...for each report file
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
#%% Print counts
|
|
244
|
-
|
|
245
|
-
species_to_count_overview_sorted = \
|
|
246
|
-
{k: v for k, v in sorted(species_to_count_overview.items(),
|
|
247
|
-
key=lambda item: item[1], reverse=True)}
|
|
248
|
-
species_to_count_report_sorted = \
|
|
249
|
-
{k: v for k, v in sorted(species_to_count_report.items(),
|
|
250
|
-
key=lambda item: item[1], reverse=True)}
|
|
251
|
-
|
|
252
|
-
string_count = 0
|
|
253
|
-
non_blank_count = 0
|
|
254
|
-
|
|
255
|
-
for species in species_to_count_overview_sorted.keys():
|
|
256
|
-
|
|
257
|
-
# The overview and report files should produce the same counts
|
|
258
|
-
assert species_to_count_overview_sorted[species] == \
|
|
259
|
-
species_to_count_report[species]
|
|
260
|
-
count = species_to_count_overview_sorted[species]
|
|
261
|
-
if species not in ('0','1'):
|
|
262
|
-
string_count += count
|
|
263
|
-
if species != 'blank':
|
|
264
|
-
non_blank_count += count
|
|
265
|
-
|
|
266
|
-
print('{}{}'.format(species.ljust(25),count))
|
|
267
|
-
|
|
268
|
-
n_images = len(all_files)
|
|
269
|
-
n_sequences = sum(species_to_count_overview_sorted.values())
|
|
270
|
-
|
|
271
|
-
print('\n{} total images\n{} total sequences'.format(n_images,n_sequences))
|
|
272
|
-
|
|
273
|
-
print('\nString count: {}'.format(string_count))
|
|
274
|
-
print('Non-blank count: {}'.format(non_blank_count))
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
#%% Make sure that capture IDs in the reports/inventory files match
|
|
278
|
-
|
|
279
|
-
# ...and confirm that (almost) all the images in the inventory tables are
|
|
280
|
-
# present on disk.
|
|
281
|
-
|
|
282
|
-
all_relative_paths_in_inventory = set()
|
|
283
|
-
files_missing_on_disk = []
|
|
284
|
-
|
|
285
|
-
for report_file in all_report_files:
|
|
286
|
-
|
|
287
|
-
project_base = file_to_project_folder(report_file)
|
|
288
|
-
inventory_file = report_file.replace('.csv','_image_inventory.csv')
|
|
289
|
-
|
|
290
|
-
inventory_df = read_cached_csv_file(inventory_file)
|
|
291
|
-
report_df = read_cached_csv_file(report_file)
|
|
292
|
-
|
|
293
|
-
capture_ids_in_report = set()
|
|
294
|
-
for i_row,row in report_df.iterrows():
|
|
295
|
-
capture_ids_in_report.add(row['capture_id'])
|
|
296
|
-
|
|
297
|
-
capture_ids_in_inventory = set()
|
|
298
|
-
for i_row,row in inventory_df.iterrows():
|
|
299
|
-
|
|
300
|
-
capture_ids_in_inventory.add(row['capture_id'])
|
|
301
|
-
image_path_relative = project_base + '/' + row['image_path_rel']
|
|
302
|
-
|
|
303
|
-
# assert image_path_relative in all_files_relative_set
|
|
304
|
-
if image_path_relative not in all_files_relative_set:
|
|
305
|
-
|
|
306
|
-
# Make sure this isn't just a case issue
|
|
307
|
-
assert image_path_relative.replace('.JPG','.jpg') \
|
|
308
|
-
not in all_files_relative_set
|
|
309
|
-
assert image_path_relative.replace('.jpg','.JPG') \
|
|
310
|
-
not in all_files_relative_set
|
|
311
|
-
files_missing_on_disk.append(image_path_relative)
|
|
312
|
-
|
|
313
|
-
assert image_path_relative not in all_relative_paths_in_inventory
|
|
314
|
-
all_relative_paths_in_inventory.add(image_path_relative)
|
|
315
|
-
|
|
316
|
-
# Make sure the set of capture IDs appearing in this report is
|
|
317
|
-
# the same as the set of capture IDs appearing in the corresponding
|
|
318
|
-
# inventory file.
|
|
319
|
-
assert capture_ids_in_report == capture_ids_in_inventory
|
|
320
|
-
|
|
321
|
-
# ...for each report file
|
|
322
|
-
|
|
323
|
-
print('\n{} missing files (of {})'.format(
|
|
324
|
-
len(files_missing_on_disk),len(all_relative_paths_in_inventory)))
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
#%% For all the files we have on disk, see which are and aren't in the inventory files
|
|
328
|
-
|
|
329
|
-
# There aren't any capital-P .PNG files, but if I don't include .PNG
|
|
330
|
-
# in this list, I'll look at this in a year and wonder whether I forgot
|
|
331
|
-
# to include it.
|
|
332
|
-
image_extensions = set(['.JPG','.jpg','.PNG','.png'])
|
|
333
|
-
|
|
334
|
-
images_not_in_inventory = []
|
|
335
|
-
n_images_in_inventoried_projects = 0
|
|
336
|
-
|
|
337
|
-
# fn = all_files_relative[0]
|
|
338
|
-
for fn in tqdm(all_files_relative):
|
|
339
|
-
|
|
340
|
-
if os.path.splitext(fn)[1] not in image_extensions:
|
|
341
|
-
continue
|
|
342
|
-
project_code = file_to_project_code(fn)
|
|
343
|
-
if project_code in project_codes_with_no_reports:
|
|
344
|
-
# print('Skipping project {}'.format(project_code))
|
|
345
|
-
continue
|
|
346
|
-
n_images_in_inventoried_projects += 1
|
|
347
|
-
if fn not in all_relative_paths_in_inventory:
|
|
348
|
-
images_not_in_inventory.append(fn)
|
|
349
|
-
|
|
350
|
-
print('\n{} images on disk are not in inventory (of {} in eligible projects)'.format(
|
|
351
|
-
len(images_not_in_inventory),n_images_in_inventoried_projects))
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
#%% Map captures to images, and vice-versa
|
|
355
|
-
|
|
356
|
-
capture_id_to_images = defaultdict(list)
|
|
357
|
-
image_to_capture_id = {}
|
|
358
|
-
|
|
359
|
-
# report_file = all_report_files[0]
|
|
360
|
-
for report_file in tqdm(all_report_files):
|
|
361
|
-
|
|
362
|
-
inventory_file = report_file.replace('.csv','_image_inventory.csv')
|
|
363
|
-
inventory_df = read_cached_csv_file(inventory_file)
|
|
364
|
-
|
|
365
|
-
project_folder = file_to_project_folder(inventory_file)
|
|
366
|
-
|
|
367
|
-
# row = inventory_df.iloc[0]
|
|
368
|
-
for i_row,row in inventory_df.iterrows():
|
|
369
|
-
|
|
370
|
-
capture_id = row['capture_id']
|
|
371
|
-
image_file_relative = os.path.join(project_folder,row['image_path_rel'])
|
|
372
|
-
capture_id_to_images[capture_id].append(image_file_relative)
|
|
373
|
-
assert image_file_relative not in image_to_capture_id
|
|
374
|
-
image_to_capture_id[image_file_relative] = capture_id
|
|
375
|
-
|
|
376
|
-
# ...for each row (one image per row)
|
|
377
|
-
|
|
378
|
-
# ...for each report file
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
#%% Map captures to species (just species for now, we'll go back and get other metadata later)
|
|
382
|
-
|
|
383
|
-
capture_id_to_species = defaultdict(list)
|
|
384
|
-
|
|
385
|
-
for project_code in tqdm(project_codes):
|
|
386
|
-
|
|
387
|
-
report_files = project_code_to_report_files[project_code]
|
|
388
|
-
|
|
389
|
-
for report_file in report_files:
|
|
390
|
-
|
|
391
|
-
report_df = read_cached_csv_file(report_file)
|
|
392
|
-
|
|
393
|
-
for i_row,row in report_df.iterrows():
|
|
394
|
-
|
|
395
|
-
capture_id = row['capture_id']
|
|
396
|
-
species = row['question__species']
|
|
397
|
-
capture_id_to_species[capture_id].append(species)
|
|
398
|
-
|
|
399
|
-
# ...for each row
|
|
400
|
-
|
|
401
|
-
# ...for each report file in this project
|
|
402
|
-
|
|
403
|
-
# ...for each project
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
#%% Take a look at the annotations "0" and "1"
|
|
407
|
-
|
|
408
|
-
captures_0 = []
|
|
409
|
-
captures_1 = []
|
|
410
|
-
captures_1_alone = []
|
|
411
|
-
captures_1_with_species = []
|
|
412
|
-
|
|
413
|
-
for capture_id in tqdm(capture_id_to_species):
|
|
414
|
-
|
|
415
|
-
species_this_capture_id = capture_id_to_species[capture_id]
|
|
416
|
-
|
|
417
|
-
# Multiple rows may be present for a capture, but they should be unique
|
|
418
|
-
assert len(species_this_capture_id) == len(set(species_this_capture_id))
|
|
419
|
-
|
|
420
|
-
if '0' in species_this_capture_id:
|
|
421
|
-
captures_0.append(capture_id)
|
|
422
|
-
# '0' should always appear alone
|
|
423
|
-
assert len(species_this_capture_id) == 1
|
|
424
|
-
|
|
425
|
-
if '1' in species_this_capture_id:
|
|
426
|
-
captures_1.append(capture_id)
|
|
427
|
-
assert '0' not in species_this_capture_id
|
|
428
|
-
# '1' should never appear alone
|
|
429
|
-
# assert len(species_this_capture_id) > 1
|
|
430
|
-
if len(species_this_capture_id) == 1:
|
|
431
|
-
captures_1_alone.append(capture_id)
|
|
432
|
-
else:
|
|
433
|
-
captures_1_with_species.append(capture_id)
|
|
434
|
-
|
|
435
|
-
# ...for each capture ID
|
|
436
|
-
|
|
437
|
-
print('')
|
|
438
|
-
print('Number of captures with "0" as the species (always appears alone): {}'.format(len(captures_0)))
|
|
439
|
-
print('Number of captures with "1" as the species: {}'.format(len(captures_1)))
|
|
440
|
-
print('Number of captures with "1" as the species, with no other species: {}'.format(
|
|
441
|
-
len(captures_1_alone)))
|
|
442
|
-
print('Number of captures with "1" as the species, with other species: {}'.format(
|
|
443
|
-
len(captures_1_with_species)))
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
#%% Sample some of those captures with mysterious "0" and "1" annotations
|
|
447
|
-
|
|
448
|
-
random.seed(0)
|
|
449
|
-
n_to_sample = 500
|
|
450
|
-
captures_0_samples = random.sample(captures_0,n_to_sample)
|
|
451
|
-
captures_1_samples = random.sample(captures_1,n_to_sample)
|
|
452
|
-
|
|
453
|
-
capture_0_sample_output_folder = os.path.join(output_base,'capture_0_samples')
|
|
454
|
-
capture_1_sample_output_folder = os.path.join(output_base,'capture_1_samples')
|
|
455
|
-
os.makedirs(capture_0_sample_output_folder,exist_ok=True)
|
|
456
|
-
os.makedirs(capture_1_sample_output_folder,exist_ok=True)
|
|
457
|
-
|
|
458
|
-
def copy_sampled_captures(sampled_captures,sample_capture_output_folder):
|
|
459
|
-
|
|
460
|
-
for capture_id in tqdm(sampled_captures):
|
|
461
|
-
images_this_capture = capture_id_to_images[capture_id]
|
|
462
|
-
for fn in images_this_capture:
|
|
463
|
-
# assert fn in all_files_relative_set
|
|
464
|
-
if fn not in all_files_relative_set:
|
|
465
|
-
print('Warning: missing file {}'.format(fn))
|
|
466
|
-
continue
|
|
467
|
-
source_image = os.path.join(input_base,fn)
|
|
468
|
-
target_image = os.path.join(sample_capture_output_folder,os.path.basename(fn))
|
|
469
|
-
shutil.copyfile(source_image,target_image)
|
|
470
|
-
# ....for each image
|
|
471
|
-
# ...for each capture
|
|
472
|
-
|
|
473
|
-
copy_sampled_captures(captures_0_samples,capture_0_sample_output_folder)
|
|
474
|
-
copy_sampled_captures(captures_1_samples,capture_1_sample_output_folder)
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
#%% Find images that MD thinks contain people
|
|
478
|
-
|
|
479
|
-
md_results_folder = os.path.expanduser(
|
|
480
|
-
'~/postprocessing/snapshot-safari/snapshot-safari-2023-04-21-v5a.0.0/json_subsets')
|
|
481
|
-
md_results_files = os.listdir(md_results_folder)
|
|
482
|
-
|
|
483
|
-
md_human_detection_threshold = 0.2
|
|
484
|
-
md_vehicle_detection_threshold = 0.2
|
|
485
|
-
|
|
486
|
-
# We'll make sure this is actually correct for all the files we load
|
|
487
|
-
md_human_category = '2'
|
|
488
|
-
md_vehicle_category = '3'
|
|
489
|
-
|
|
490
|
-
md_human_images = set()
|
|
491
|
-
md_vehicle_images = set()
|
|
492
|
-
|
|
493
|
-
# project_code = project_codes[0]
|
|
494
|
-
for project_code in project_codes:
|
|
495
|
-
|
|
496
|
-
print('Finding human images for {}'.format(project_code))
|
|
497
|
-
|
|
498
|
-
project_folder = project_code_to_project_folder[project_code]
|
|
499
|
-
|
|
500
|
-
md_results_file = [fn for fn in md_results_files if project_code in fn]
|
|
501
|
-
assert len(md_results_file) == 1
|
|
502
|
-
md_results_file = os.path.join(md_results_folder,md_results_file[0])
|
|
503
|
-
|
|
504
|
-
with open(md_results_file,'r') as f:
|
|
505
|
-
md_results = json.load(f)
|
|
506
|
-
assert md_results['detection_categories'][md_human_category] == 'person'
|
|
507
|
-
assert md_results['detection_categories'][md_vehicle_category] == 'vehicle'
|
|
508
|
-
|
|
509
|
-
# im = md_results['images'][0]
|
|
510
|
-
for im in tqdm(md_results['images']):
|
|
511
|
-
|
|
512
|
-
if 'detections' not in im:
|
|
513
|
-
continue
|
|
514
|
-
|
|
515
|
-
# MD results files are each relative to their own projects, we want
|
|
516
|
-
# filenames to be relative to the base of the drive
|
|
517
|
-
fn = os.path.join(project_folder,im['file'])
|
|
518
|
-
for det in im['detections']:
|
|
519
|
-
if det['category'] == md_human_category and \
|
|
520
|
-
det['conf'] >= md_human_detection_threshold:
|
|
521
|
-
md_human_images.add(fn)
|
|
522
|
-
if det['category'] == md_vehicle_category and \
|
|
523
|
-
det['conf'] >= md_vehicle_detection_threshold:
|
|
524
|
-
md_vehicle_images.add(fn)
|
|
525
|
-
|
|
526
|
-
# ...for each detection
|
|
527
|
-
|
|
528
|
-
# ...for each image
|
|
529
|
-
|
|
530
|
-
# ...for each project
|
|
531
|
-
|
|
532
|
-
print('MD found {} human images, {} vehicle images'.format(
|
|
533
|
-
len(md_human_images),len(md_vehicle_images)))
|
|
534
|
-
|
|
535
|
-
md_human_or_vehicle_images = \
|
|
536
|
-
set(md_human_images).union(set(md_vehicle_images))
|
|
537
|
-
|
|
538
|
-
# next(iter(md_human_or_vehicle_images))
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
#%% Find images where the ground truth says humans or vehicles are present
|
|
542
|
-
|
|
543
|
-
human_species_id = 'human'
|
|
544
|
-
vehicle_species_id = 'humanvehicle'
|
|
545
|
-
|
|
546
|
-
gt_human_capture_ids = set()
|
|
547
|
-
gt_vehicle_capture_ids = set()
|
|
548
|
-
|
|
549
|
-
for capture_id in capture_id_to_species:
|
|
550
|
-
|
|
551
|
-
species_this_capture_id = capture_id_to_species[capture_id]
|
|
552
|
-
|
|
553
|
-
for species in species_this_capture_id:
|
|
554
|
-
if species == human_species_id:
|
|
555
|
-
gt_human_capture_ids.add(capture_id)
|
|
556
|
-
elif species == vehicle_species_id:
|
|
557
|
-
gt_vehicle_capture_ids.add(capture_id)
|
|
558
|
-
|
|
559
|
-
# ...for each capture ID
|
|
560
|
-
|
|
561
|
-
gt_human_images = []
|
|
562
|
-
gt_vehicle_images = []
|
|
563
|
-
|
|
564
|
-
for capture_id in gt_human_capture_ids:
|
|
565
|
-
images_this_capture_id = capture_id_to_images[capture_id]
|
|
566
|
-
gt_human_images.extend(images_this_capture_id)
|
|
567
|
-
for capture_id in gt_vehicle_capture_ids:
|
|
568
|
-
images_this_capture_id = capture_id_to_images[capture_id]
|
|
569
|
-
gt_vehicle_images.extend(images_this_capture_id)
|
|
570
|
-
|
|
571
|
-
print('Ground truth includes {} human images ({} captures), {} vehicle images ({} captures)'.format(
|
|
572
|
-
len(gt_human_images),len(gt_human_capture_ids),
|
|
573
|
-
len(gt_vehicle_images),len(gt_vehicle_capture_ids)))
|
|
574
|
-
|
|
575
|
-
ground_truth_human_or_vehicle_images = \
|
|
576
|
-
set(gt_human_images).union(set(gt_vehicle_images))
|
|
577
|
-
|
|
578
|
-
# next(iter(ground_truth_human_or_vehicle_images))
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
#%% Find mismatches
|
|
582
|
-
|
|
583
|
-
gt_missing_human_images = []
|
|
584
|
-
gt_missing_vehicle_images = []
|
|
585
|
-
|
|
586
|
-
for fn in md_human_images:
|
|
587
|
-
if fn not in ground_truth_human_or_vehicle_images:
|
|
588
|
-
gt_missing_human_images.append(fn)
|
|
589
|
-
|
|
590
|
-
for fn in md_vehicle_images:
|
|
591
|
-
if fn not in ground_truth_human_or_vehicle_images:
|
|
592
|
-
gt_missing_vehicle_images.append(fn)
|
|
593
|
-
|
|
594
|
-
print('Of {} images where MD found a human, {} are not in the ground truth'.format(
|
|
595
|
-
len(md_human_images),len(gt_missing_human_images)))
|
|
596
|
-
|
|
597
|
-
print('Of {} images where MD found a vehicle, {} are not in the ground truth'.format(
|
|
598
|
-
len(md_vehicle_images),len(gt_missing_vehicle_images)))
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
#%% Sample mismatches
|
|
602
|
-
|
|
603
|
-
random.seed(0)
|
|
604
|
-
n_to_sample = 1000
|
|
605
|
-
sampled_human_mismatches = random.sample(gt_missing_human_images,n_to_sample)
|
|
606
|
-
sampled_vehicle_mismatches = random.sample(gt_missing_vehicle_images,n_to_sample)
|
|
607
|
-
|
|
608
|
-
human_mismatch_output_folder = os.path.join(output_base,'mismatches_human')
|
|
609
|
-
vehicle_mismatch_output_folder = os.path.join(output_base,'mismatches_vehicle')
|
|
610
|
-
os.makedirs(human_mismatch_output_folder,exist_ok=True)
|
|
611
|
-
os.makedirs(vehicle_mismatch_output_folder,exist_ok=True)
|
|
612
|
-
|
|
613
|
-
def copy_sampled_images(sampled_images,sampled_images_output_folder):
|
|
614
|
-
|
|
615
|
-
for fn in tqdm(sampled_images):
|
|
616
|
-
if fn not in all_files_relative_set:
|
|
617
|
-
print('Warning: missing file {}'.format(fn))
|
|
618
|
-
continue
|
|
619
|
-
source_image = os.path.join(input_base,fn)
|
|
620
|
-
target_image = os.path.join(sampled_images_output_folder,os.path.basename(fn))
|
|
621
|
-
shutil.copyfile(source_image,target_image)
|
|
622
|
-
|
|
623
|
-
copy_sampled_images(sampled_human_mismatches,human_mismatch_output_folder)
|
|
624
|
-
copy_sampled_images(sampled_vehicle_mismatches,vehicle_mismatch_output_folder)
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
#%% See what's up with some of the mismatches
|
|
628
|
-
|
|
629
|
-
filename_base_to_filename = {}
|
|
630
|
-
|
|
631
|
-
from md_utils.path_utils import is_image_file
|
|
632
|
-
|
|
633
|
-
# fn = all_files_relative[0]
|
|
634
|
-
for fn in tqdm(all_files_relative):
|
|
635
|
-
|
|
636
|
-
if not is_image_file(fn):
|
|
637
|
-
continue
|
|
638
|
-
if 'Indiv_Recognition' in fn:
|
|
639
|
-
continue
|
|
640
|
-
bn = os.path.basename(fn)
|
|
641
|
-
assert bn not in filename_base_to_filename
|
|
642
|
-
filename_base_to_filename[bn] = fn
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
if False:
|
|
646
|
-
|
|
647
|
-
bn = 'TSW_S2_KA02_R3_IMAG0002.JPG'
|
|
648
|
-
fn = filename_base_to_filename[bn]
|
|
649
|
-
capture_id = image_to_capture_id[fn]
|
|
650
|
-
species = capture_id_to_species[capture_id]
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
#%% Look at the distribution of labels for the mismatched images
|
|
654
|
-
|
|
655
|
-
gt_missing_images = set(gt_missing_human_images).union(set(gt_missing_vehicle_images))
|
|
656
|
-
|
|
657
|
-
missing_image_species_to_count = defaultdict(int)
|
|
658
|
-
|
|
659
|
-
for fn in gt_missing_images:
|
|
660
|
-
if fn not in image_to_capture_id:
|
|
661
|
-
continue
|
|
662
|
-
capture_id = image_to_capture_id[fn]
|
|
663
|
-
species = capture_id_to_species[capture_id]
|
|
664
|
-
for s in species:
|
|
665
|
-
missing_image_species_to_count[s] += 1
|
|
1
|
+
"""
|
|
2
|
+
|
|
3
|
+
snapshot_safari_importer_reprise.py
|
|
4
|
+
|
|
5
|
+
This is a 2023 update to snapshot_safari_importer.py. We do a bunch of things now that
|
|
6
|
+
we didn't do the last time we imported Snapshot data (like updating the big taxonomy)
|
|
7
|
+
file, and we skip a bunch of things now that we used to do (like generating massive
|
|
8
|
+
zipfiles). So, new year, new importer.
|
|
9
|
+
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
#%% Constants and imports
|
|
13
|
+
|
|
14
|
+
import os
|
|
15
|
+
import glob
|
|
16
|
+
import json
|
|
17
|
+
import shutil
|
|
18
|
+
import random
|
|
19
|
+
|
|
20
|
+
import pandas as pd
|
|
21
|
+
|
|
22
|
+
from tqdm import tqdm
|
|
23
|
+
from collections import defaultdict
|
|
24
|
+
|
|
25
|
+
from md_utils import path_utils
|
|
26
|
+
|
|
27
|
+
input_base = '/media/user/Elements'
|
|
28
|
+
output_base = os.path.expanduser('~/data/snapshot-safari-metadata')
|
|
29
|
+
file_list_cache_file = os.path.join(output_base,'file_list.json')
|
|
30
|
+
|
|
31
|
+
assert os.path.isdir(input_base)
|
|
32
|
+
os.makedirs(output_base,exist_ok=True)
|
|
33
|
+
|
|
34
|
+
# We're going to copy all the .csv files to a faster location
|
|
35
|
+
annotation_cache_dir = os.path.join(output_base,'csv_files')
|
|
36
|
+
os.makedirs(annotation_cache_dir,exist_ok=True)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
#%% List files
|
|
40
|
+
|
|
41
|
+
# Do a one-time enumeration of the entire drive; this will take a long time,
|
|
42
|
+
# but will save a lot of hassle later.
|
|
43
|
+
|
|
44
|
+
if os.path.isfile(file_list_cache_file):
|
|
45
|
+
print('Loading file list from {}'.format(file_list_cache_file))
|
|
46
|
+
with open(file_list_cache_file,'r') as f:
|
|
47
|
+
all_files = json.load(f)
|
|
48
|
+
else:
|
|
49
|
+
all_files = glob.glob(os.path.join(input_base,'**','*.*'),recursive=True)
|
|
50
|
+
all_files = [fn for fn in all_files if '$RECYCLE.BIN' not in fn]
|
|
51
|
+
all_files = [fn for fn in all_files if 'System Volume Information' not in fn]
|
|
52
|
+
print('Enumerated {} files'.format(len(all_files)))
|
|
53
|
+
with open(file_list_cache_file,'w') as f:
|
|
54
|
+
json.dump(all_files,f,indent=1)
|
|
55
|
+
print('Wrote file list to {}'.format(file_list_cache_file))
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
#%% Create derived lists
|
|
59
|
+
|
|
60
|
+
# Takes about 60 seconds
|
|
61
|
+
|
|
62
|
+
all_files_relative = [os.path.relpath(fn,input_base) for fn in all_files]
|
|
63
|
+
all_files_relative = [fn.replace('\\','/') for fn in all_files_relative]
|
|
64
|
+
all_files_relative_set = set(all_files_relative)
|
|
65
|
+
|
|
66
|
+
# CSV files are one of:
|
|
67
|
+
#
|
|
68
|
+
# _report_lila.csv (species/count/etc. for each capture)
|
|
69
|
+
# _report_lila_image_inventory.csv (maps captures to images)
|
|
70
|
+
# _report_lila_overview.csv (distribution of species)
|
|
71
|
+
csv_files = [fn for fn in all_files_relative if fn.endswith('.csv')]
|
|
72
|
+
|
|
73
|
+
all_image_files = path_utils.find_image_strings(all_files_relative)
|
|
74
|
+
|
|
75
|
+
print('Found a total of {} files, {} of which are images'.format(
|
|
76
|
+
len(all_files_relative),len(all_image_files)))
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
#%% Copy all csv files to the annotation cache folder
|
|
80
|
+
|
|
81
|
+
# fn = csv_files[0]
|
|
82
|
+
for fn in csv_files:
|
|
83
|
+
target_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
|
|
84
|
+
source_file = os.path.join(input_base,fn)
|
|
85
|
+
shutil.copyfile(source_file,target_file)
|
|
86
|
+
|
|
87
|
+
def read_cached_csv_file(fn):
|
|
88
|
+
"""
|
|
89
|
+
Later cells will ask to read a .csv file from the original hard drive;
|
|
90
|
+
read from the annotation cache instead.
|
|
91
|
+
"""
|
|
92
|
+
|
|
93
|
+
cached_csv_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
|
|
94
|
+
df = pd.read_csv(cached_csv_file)
|
|
95
|
+
return df
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
#%% List project folders
|
|
99
|
+
|
|
100
|
+
# There are two formats for project folder names:
|
|
101
|
+
#
|
|
102
|
+
# APN
|
|
103
|
+
# Snapshot Cameo/DEB
|
|
104
|
+
project_code_to_project_folder = {}
|
|
105
|
+
|
|
106
|
+
folders = os.listdir(input_base)
|
|
107
|
+
folders = [fn for fn in folders if (not fn.startswith('$') and \
|
|
108
|
+
not 'System Volume' in fn)]
|
|
109
|
+
|
|
110
|
+
for fn in folders:
|
|
111
|
+
if len(fn) == 3:
|
|
112
|
+
assert fn not in project_code_to_project_folder
|
|
113
|
+
project_code_to_project_folder[fn] = fn
|
|
114
|
+
else:
|
|
115
|
+
assert 'Snapshot' in fn
|
|
116
|
+
subfolders = os.listdir('/'.join([input_base,fn]))
|
|
117
|
+
for subfn in subfolders:
|
|
118
|
+
assert len(subfn) == 3
|
|
119
|
+
assert subfn not in project_code_to_project_folder
|
|
120
|
+
project_code_to_project_folder[subfn] = '/'.join([fn,subfn])
|
|
121
|
+
|
|
122
|
+
project_folder_to_project_code = {v: k for k, v in project_code_to_project_folder.items()}
|
|
123
|
+
project_codes = sorted(list(project_code_to_project_folder.keys()))
|
|
124
|
+
project_folders = sorted(list(project_code_to_project_folder.values()))
|
|
125
|
+
|
|
126
|
+
def file_to_project_folder(fn):
|
|
127
|
+
"""
|
|
128
|
+
For a given filename relative to the drive root, return the corresponding
|
|
129
|
+
project folder (also relative to the drive root).
|
|
130
|
+
"""
|
|
131
|
+
|
|
132
|
+
tokens = fn.split('/')
|
|
133
|
+
if len(tokens[0]) == 3:
|
|
134
|
+
project_folder = tokens[0]
|
|
135
|
+
else:
|
|
136
|
+
assert 'Snapshot' in tokens[0]
|
|
137
|
+
project_folder = '/'.join(tokens[0:2])
|
|
138
|
+
assert project_folder in project_folders
|
|
139
|
+
return project_folder
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def file_to_project_code(fn):
|
|
143
|
+
"""
|
|
144
|
+
For a given filename relative to the drive root, return the corresponding
|
|
145
|
+
three-letter project code (e.g. "CDB").
|
|
146
|
+
"""
|
|
147
|
+
|
|
148
|
+
return project_folder_to_project_code[file_to_project_folder(fn)]
|
|
149
|
+
|
|
150
|
+
assert file_to_project_folder(
|
|
151
|
+
'APN/APN_S2/DW/DW_R5/APN_S2_DW_R5_IMAG0003.JPG') == 'APN'
|
|
152
|
+
assert file_to_project_folder(
|
|
153
|
+
'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
|
|
154
|
+
'Snapshot South Africa/BLO'
|
|
155
|
+
assert file_to_project_code(
|
|
156
|
+
'Snapshot South Africa/BLO/BLO_S1/B05/B05_R1/BLO_S1_B05_R1_IMAG0003.JPG') == \
|
|
157
|
+
'BLO'
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
#%% Map report and inventory files to codes
|
|
161
|
+
|
|
162
|
+
# Maps a three-letter project code to a list of per-season _report_lila.csv files
|
|
163
|
+
#
|
|
164
|
+
# E.g.:
|
|
165
|
+
#
|
|
166
|
+
# 'DHP': ['Snapshot South Africa/DHP/LILA_Reports/DHP_S1_report_lila.csv',
|
|
167
|
+
# 'Snapshot South Africa/DHP/LILA_Reports/DHP_S2_report_lila.csv',
|
|
168
|
+
# 'Snapshot South Africa/DHP/LILA_Reports/DHP_S3_report_lila.csv']
|
|
169
|
+
#
|
|
170
|
+
project_code_to_report_files = defaultdict(list)
|
|
171
|
+
|
|
172
|
+
# fn = csv_files[0]
|
|
173
|
+
for fn in csv_files:
|
|
174
|
+
if 'report_lila.csv' not in fn:
|
|
175
|
+
continue
|
|
176
|
+
project_code = project_folder_to_project_code[file_to_project_folder(fn)]
|
|
177
|
+
project_code_to_report_files[project_code].append(fn)
|
|
178
|
+
|
|
179
|
+
project_codes_with_no_reports = set()
|
|
180
|
+
|
|
181
|
+
for project_code in project_code_to_project_folder.keys():
|
|
182
|
+
if project_code not in project_code_to_report_files:
|
|
183
|
+
project_codes_with_no_reports.add(project_code)
|
|
184
|
+
print('Warning: no report files available for {}'.format(project_code))
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
#%% Make sure that every report has a corresponding inventory file
|
|
188
|
+
|
|
189
|
+
all_report_files = [item for sublist in project_code_to_report_files.values() \
|
|
190
|
+
for item in sublist]
|
|
191
|
+
|
|
192
|
+
for fn in all_report_files:
|
|
193
|
+
inventory_file = fn.replace('.csv','_image_inventory.csv')
|
|
194
|
+
assert inventory_file in csv_files
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
#%% Count species based on overview and report files
|
|
198
|
+
|
|
199
|
+
# The overview and report files should produce the same counts; we'll verify this
|
|
200
|
+
# in the next cell.
|
|
201
|
+
|
|
202
|
+
species_to_count_overview = defaultdict(int)
|
|
203
|
+
species_to_count_report = defaultdict(int)
|
|
204
|
+
|
|
205
|
+
for report_file in all_report_files:
|
|
206
|
+
|
|
207
|
+
overview_file = report_file.replace('.csv','_overview.csv')
|
|
208
|
+
|
|
209
|
+
df = read_cached_csv_file(overview_file)
|
|
210
|
+
|
|
211
|
+
for i_row,row in df.iterrows():
|
|
212
|
+
|
|
213
|
+
if row['question'] == 'question__species':
|
|
214
|
+
|
|
215
|
+
assert isinstance(row['answer'],str)
|
|
216
|
+
assert isinstance(row['count'],int)
|
|
217
|
+
species = row['answer']
|
|
218
|
+
|
|
219
|
+
if len(species) < 3:
|
|
220
|
+
assert species == '0' or species == '1'
|
|
221
|
+
|
|
222
|
+
species_to_count_overview[species] += row['count']
|
|
223
|
+
|
|
224
|
+
# ...for each capture in the overview file
|
|
225
|
+
|
|
226
|
+
df = read_cached_csv_file(report_file)
|
|
227
|
+
|
|
228
|
+
for i_row,row in df.iterrows():
|
|
229
|
+
|
|
230
|
+
species = row['question__species']
|
|
231
|
+
assert isinstance(species,str)
|
|
232
|
+
|
|
233
|
+
# Ignore results from the blank/non-blank workflow
|
|
234
|
+
if len(species) < 3:
|
|
235
|
+
assert species == '0' or species == '1'
|
|
236
|
+
species_to_count_report[species] += 1
|
|
237
|
+
|
|
238
|
+
# ...for each capture in the report file
|
|
239
|
+
|
|
240
|
+
# ...for each report file
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
#%% Print counts
|
|
244
|
+
|
|
245
|
+
species_to_count_overview_sorted = \
|
|
246
|
+
{k: v for k, v in sorted(species_to_count_overview.items(),
|
|
247
|
+
key=lambda item: item[1], reverse=True)}
|
|
248
|
+
species_to_count_report_sorted = \
|
|
249
|
+
{k: v for k, v in sorted(species_to_count_report.items(),
|
|
250
|
+
key=lambda item: item[1], reverse=True)}
|
|
251
|
+
|
|
252
|
+
string_count = 0
|
|
253
|
+
non_blank_count = 0
|
|
254
|
+
|
|
255
|
+
for species in species_to_count_overview_sorted.keys():
|
|
256
|
+
|
|
257
|
+
# The overview and report files should produce the same counts
|
|
258
|
+
assert species_to_count_overview_sorted[species] == \
|
|
259
|
+
species_to_count_report[species]
|
|
260
|
+
count = species_to_count_overview_sorted[species]
|
|
261
|
+
if species not in ('0','1'):
|
|
262
|
+
string_count += count
|
|
263
|
+
if species != 'blank':
|
|
264
|
+
non_blank_count += count
|
|
265
|
+
|
|
266
|
+
print('{}{}'.format(species.ljust(25),count))
|
|
267
|
+
|
|
268
|
+
n_images = len(all_files)
|
|
269
|
+
n_sequences = sum(species_to_count_overview_sorted.values())
|
|
270
|
+
|
|
271
|
+
print('\n{} total images\n{} total sequences'.format(n_images,n_sequences))
|
|
272
|
+
|
|
273
|
+
print('\nString count: {}'.format(string_count))
|
|
274
|
+
print('Non-blank count: {}'.format(non_blank_count))
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
#%% Make sure that capture IDs in the reports/inventory files match
|
|
278
|
+
|
|
279
|
+
# ...and confirm that (almost) all the images in the inventory tables are
|
|
280
|
+
# present on disk.
|
|
281
|
+
|
|
282
|
+
all_relative_paths_in_inventory = set()
|
|
283
|
+
files_missing_on_disk = []
|
|
284
|
+
|
|
285
|
+
for report_file in all_report_files:
|
|
286
|
+
|
|
287
|
+
project_base = file_to_project_folder(report_file)
|
|
288
|
+
inventory_file = report_file.replace('.csv','_image_inventory.csv')
|
|
289
|
+
|
|
290
|
+
inventory_df = read_cached_csv_file(inventory_file)
|
|
291
|
+
report_df = read_cached_csv_file(report_file)
|
|
292
|
+
|
|
293
|
+
capture_ids_in_report = set()
|
|
294
|
+
for i_row,row in report_df.iterrows():
|
|
295
|
+
capture_ids_in_report.add(row['capture_id'])
|
|
296
|
+
|
|
297
|
+
capture_ids_in_inventory = set()
|
|
298
|
+
for i_row,row in inventory_df.iterrows():
|
|
299
|
+
|
|
300
|
+
capture_ids_in_inventory.add(row['capture_id'])
|
|
301
|
+
image_path_relative = project_base + '/' + row['image_path_rel']
|
|
302
|
+
|
|
303
|
+
# assert image_path_relative in all_files_relative_set
|
|
304
|
+
if image_path_relative not in all_files_relative_set:
|
|
305
|
+
|
|
306
|
+
# Make sure this isn't just a case issue
|
|
307
|
+
assert image_path_relative.replace('.JPG','.jpg') \
|
|
308
|
+
not in all_files_relative_set
|
|
309
|
+
assert image_path_relative.replace('.jpg','.JPG') \
|
|
310
|
+
not in all_files_relative_set
|
|
311
|
+
files_missing_on_disk.append(image_path_relative)
|
|
312
|
+
|
|
313
|
+
assert image_path_relative not in all_relative_paths_in_inventory
|
|
314
|
+
all_relative_paths_in_inventory.add(image_path_relative)
|
|
315
|
+
|
|
316
|
+
# Make sure the set of capture IDs appearing in this report is
|
|
317
|
+
# the same as the set of capture IDs appearing in the corresponding
|
|
318
|
+
# inventory file.
|
|
319
|
+
assert capture_ids_in_report == capture_ids_in_inventory
|
|
320
|
+
|
|
321
|
+
# ...for each report file
|
|
322
|
+
|
|
323
|
+
print('\n{} missing files (of {})'.format(
|
|
324
|
+
len(files_missing_on_disk),len(all_relative_paths_in_inventory)))
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
#%% For all the files we have on disk, see which are and aren't in the inventory files
|
|
328
|
+
|
|
329
|
+
# There aren't any capital-P .PNG files, but if I don't include .PNG
|
|
330
|
+
# in this list, I'll look at this in a year and wonder whether I forgot
|
|
331
|
+
# to include it.
|
|
332
|
+
image_extensions = set(['.JPG','.jpg','.PNG','.png'])
|
|
333
|
+
|
|
334
|
+
images_not_in_inventory = []
|
|
335
|
+
n_images_in_inventoried_projects = 0
|
|
336
|
+
|
|
337
|
+
# fn = all_files_relative[0]
|
|
338
|
+
for fn in tqdm(all_files_relative):
|
|
339
|
+
|
|
340
|
+
if os.path.splitext(fn)[1] not in image_extensions:
|
|
341
|
+
continue
|
|
342
|
+
project_code = file_to_project_code(fn)
|
|
343
|
+
if project_code in project_codes_with_no_reports:
|
|
344
|
+
# print('Skipping project {}'.format(project_code))
|
|
345
|
+
continue
|
|
346
|
+
n_images_in_inventoried_projects += 1
|
|
347
|
+
if fn not in all_relative_paths_in_inventory:
|
|
348
|
+
images_not_in_inventory.append(fn)
|
|
349
|
+
|
|
350
|
+
print('\n{} images on disk are not in inventory (of {} in eligible projects)'.format(
|
|
351
|
+
len(images_not_in_inventory),n_images_in_inventoried_projects))
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
#%% Map captures to images, and vice-versa
|
|
355
|
+
|
|
356
|
+
capture_id_to_images = defaultdict(list)
|
|
357
|
+
image_to_capture_id = {}
|
|
358
|
+
|
|
359
|
+
# report_file = all_report_files[0]
|
|
360
|
+
for report_file in tqdm(all_report_files):
|
|
361
|
+
|
|
362
|
+
inventory_file = report_file.replace('.csv','_image_inventory.csv')
|
|
363
|
+
inventory_df = read_cached_csv_file(inventory_file)
|
|
364
|
+
|
|
365
|
+
project_folder = file_to_project_folder(inventory_file)
|
|
366
|
+
|
|
367
|
+
# row = inventory_df.iloc[0]
|
|
368
|
+
for i_row,row in inventory_df.iterrows():
|
|
369
|
+
|
|
370
|
+
capture_id = row['capture_id']
|
|
371
|
+
image_file_relative = os.path.join(project_folder,row['image_path_rel'])
|
|
372
|
+
capture_id_to_images[capture_id].append(image_file_relative)
|
|
373
|
+
assert image_file_relative not in image_to_capture_id
|
|
374
|
+
image_to_capture_id[image_file_relative] = capture_id
|
|
375
|
+
|
|
376
|
+
# ...for each row (one image per row)
|
|
377
|
+
|
|
378
|
+
# ...for each report file
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
#%% Map captures to species (just species for now, we'll go back and get other metadata later)
|
|
382
|
+
|
|
383
|
+
capture_id_to_species = defaultdict(list)
|
|
384
|
+
|
|
385
|
+
for project_code in tqdm(project_codes):
|
|
386
|
+
|
|
387
|
+
report_files = project_code_to_report_files[project_code]
|
|
388
|
+
|
|
389
|
+
for report_file in report_files:
|
|
390
|
+
|
|
391
|
+
report_df = read_cached_csv_file(report_file)
|
|
392
|
+
|
|
393
|
+
for i_row,row in report_df.iterrows():
|
|
394
|
+
|
|
395
|
+
capture_id = row['capture_id']
|
|
396
|
+
species = row['question__species']
|
|
397
|
+
capture_id_to_species[capture_id].append(species)
|
|
398
|
+
|
|
399
|
+
# ...for each row
|
|
400
|
+
|
|
401
|
+
# ...for each report file in this project
|
|
402
|
+
|
|
403
|
+
# ...for each project
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
#%% Take a look at the annotations "0" and "1"
|
|
407
|
+
|
|
408
|
+
captures_0 = []
|
|
409
|
+
captures_1 = []
|
|
410
|
+
captures_1_alone = []
|
|
411
|
+
captures_1_with_species = []
|
|
412
|
+
|
|
413
|
+
for capture_id in tqdm(capture_id_to_species):
|
|
414
|
+
|
|
415
|
+
species_this_capture_id = capture_id_to_species[capture_id]
|
|
416
|
+
|
|
417
|
+
# Multiple rows may be present for a capture, but they should be unique
|
|
418
|
+
assert len(species_this_capture_id) == len(set(species_this_capture_id))
|
|
419
|
+
|
|
420
|
+
if '0' in species_this_capture_id:
|
|
421
|
+
captures_0.append(capture_id)
|
|
422
|
+
# '0' should always appear alone
|
|
423
|
+
assert len(species_this_capture_id) == 1
|
|
424
|
+
|
|
425
|
+
if '1' in species_this_capture_id:
|
|
426
|
+
captures_1.append(capture_id)
|
|
427
|
+
assert '0' not in species_this_capture_id
|
|
428
|
+
# '1' should never appear alone
|
|
429
|
+
# assert len(species_this_capture_id) > 1
|
|
430
|
+
if len(species_this_capture_id) == 1:
|
|
431
|
+
captures_1_alone.append(capture_id)
|
|
432
|
+
else:
|
|
433
|
+
captures_1_with_species.append(capture_id)
|
|
434
|
+
|
|
435
|
+
# ...for each capture ID
|
|
436
|
+
|
|
437
|
+
print('')
|
|
438
|
+
print('Number of captures with "0" as the species (always appears alone): {}'.format(len(captures_0)))
|
|
439
|
+
print('Number of captures with "1" as the species: {}'.format(len(captures_1)))
|
|
440
|
+
print('Number of captures with "1" as the species, with no other species: {}'.format(
|
|
441
|
+
len(captures_1_alone)))
|
|
442
|
+
print('Number of captures with "1" as the species, with other species: {}'.format(
|
|
443
|
+
len(captures_1_with_species)))
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+


#%% Sample some of those captures with mysterious "0" and "1" annotations

random.seed(0)
n_to_sample = 500
captures_0_samples = random.sample(captures_0,n_to_sample)
captures_1_samples = random.sample(captures_1,n_to_sample)

capture_0_sample_output_folder = os.path.join(output_base,'capture_0_samples')
capture_1_sample_output_folder = os.path.join(output_base,'capture_1_samples')
os.makedirs(capture_0_sample_output_folder,exist_ok=True)
os.makedirs(capture_1_sample_output_folder,exist_ok=True)

def copy_sampled_captures(sampled_captures,sample_capture_output_folder):

    for capture_id in tqdm(sampled_captures):
        images_this_capture = capture_id_to_images[capture_id]
        for fn in images_this_capture:
            # assert fn in all_files_relative_set
            if fn not in all_files_relative_set:
                print('Warning: missing file {}'.format(fn))
                continue
            source_image = os.path.join(input_base,fn)
            target_image = os.path.join(sample_capture_output_folder,os.path.basename(fn))
            shutil.copyfile(source_image,target_image)
        # ...for each image
    # ...for each capture

copy_sampled_captures(captures_0_samples,capture_0_sample_output_folder)
copy_sampled_captures(captures_1_samples,capture_1_sample_output_folder)
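
# Note that copying to os.path.basename(fn) can silently overwrite output
# files if two sampled images share a basename across folders; a
# collision-resistant variant (a sketch, not used here) could flatten the
# relative path into the target name instead:
#
# target_image = os.path.join(sample_capture_output_folder,fn.replace('/','#'))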


#%% Find images that MD thinks contain people

md_results_folder = os.path.expanduser(
    '~/postprocessing/snapshot-safari/snapshot-safari-2023-04-21-v5a.0.0/json_subsets')
md_results_files = os.listdir(md_results_folder)

md_human_detection_threshold = 0.2
md_vehicle_detection_threshold = 0.2

# We'll make sure this is actually correct for all the files we load
md_human_category = '2'
md_vehicle_category = '3'

md_human_images = set()
md_vehicle_images = set()

# project_code = project_codes[0]
for project_code in project_codes:

    print('Finding human images for {}'.format(project_code))

    project_folder = project_code_to_project_folder[project_code]

    md_results_file = [fn for fn in md_results_files if project_code in fn]
    assert len(md_results_file) == 1
    md_results_file = os.path.join(md_results_folder,md_results_file[0])

    with open(md_results_file,'r') as f:
        md_results = json.load(f)
    assert md_results['detection_categories'][md_human_category] == 'person'
    assert md_results['detection_categories'][md_vehicle_category] == 'vehicle'

    # im = md_results['images'][0]
    for im in tqdm(md_results['images']):

        if 'detections' not in im:
            continue

        # MD results files are each relative to their own project folder; we
        # want filenames to be relative to the base of the drive
        fn = os.path.join(project_folder,im['file'])
        for det in im['detections']:
            if det['category'] == md_human_category and \
               det['conf'] >= md_human_detection_threshold:
                md_human_images.add(fn)
            if det['category'] == md_vehicle_category and \
               det['conf'] >= md_vehicle_detection_threshold:
                md_vehicle_images.add(fn)

        # ...for each detection

    # ...for each image

# ...for each project

print('MD found {} human images, {} vehicle images'.format(
    len(md_human_images),len(md_vehicle_images)))

md_human_or_vehicle_images = \
    set(md_human_images).union(set(md_vehicle_images))

# next(iter(md_human_or_vehicle_images))
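
# For reference, the loop above assumes the standard MD batch output format,
# roughly like this (values illustrative; 'bbox' is [x,y,w,h], normalized to
# [0,1]):
#
# md_results['images'][0] == {
#     'file': 'site/IMG_0001.JPG',
#     'detections': [ {'category': '2', 'conf': 0.95, 'bbox': [0.1,0.2,0.3,0.4]} ]
# }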


#%% Find images where the ground truth says humans or vehicles are present

human_species_id = 'human'
vehicle_species_id = 'humanvehicle'

gt_human_capture_ids = set()
gt_vehicle_capture_ids = set()

for capture_id in capture_id_to_species:

    species_this_capture_id = capture_id_to_species[capture_id]

    for species in species_this_capture_id:
        if species == human_species_id:
            gt_human_capture_ids.add(capture_id)
        elif species == vehicle_species_id:
            gt_vehicle_capture_ids.add(capture_id)

# ...for each capture ID

gt_human_images = []
gt_vehicle_images = []

for capture_id in gt_human_capture_ids:
    images_this_capture_id = capture_id_to_images[capture_id]
    gt_human_images.extend(images_this_capture_id)
for capture_id in gt_vehicle_capture_ids:
    images_this_capture_id = capture_id_to_images[capture_id]
    gt_vehicle_images.extend(images_this_capture_id)

print('Ground truth includes {} human images ({} captures), {} vehicle images ({} captures)'.format(
    len(gt_human_images),len(gt_human_capture_ids),
    len(gt_vehicle_images),len(gt_vehicle_capture_ids)))

ground_truth_human_or_vehicle_images = \
    set(gt_human_images).union(set(gt_vehicle_images))

# next(iter(ground_truth_human_or_vehicle_images))
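
# The next cell checks the precision direction (MD-positive, not in the ground
# truth); the converse check (ground-truth human images MD missed) would look
# something like:
#
# md_missed_human_images = \
#     [fn for fn in gt_human_images if fn not in md_human_or_vehicle_images]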


#%% Find mismatches

gt_missing_human_images = []
gt_missing_vehicle_images = []

for fn in md_human_images:
    if fn not in ground_truth_human_or_vehicle_images:
        gt_missing_human_images.append(fn)

for fn in md_vehicle_images:
    if fn not in ground_truth_human_or_vehicle_images:
        gt_missing_vehicle_images.append(fn)

print('Of {} images where MD found a human, {} are not in the ground truth'.format(
    len(md_human_images),len(gt_missing_human_images)))

print('Of {} images where MD found a vehicle, {} are not in the ground truth'.format(
    len(md_vehicle_images),len(gt_missing_vehicle_images)))
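
# Treating the ground truth as correct, these counts also give a rough
# precision proxy for MD at this threshold, e.g.:
#
# human_precision_proxy = \
#     1.0 - (len(gt_missing_human_images) / max(1,len(md_human_images)))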


#%% Sample mismatches

random.seed(0)
n_to_sample = 1000
sampled_human_mismatches = random.sample(gt_missing_human_images,n_to_sample)
sampled_vehicle_mismatches = random.sample(gt_missing_vehicle_images,n_to_sample)

human_mismatch_output_folder = os.path.join(output_base,'mismatches_human')
vehicle_mismatch_output_folder = os.path.join(output_base,'mismatches_vehicle')
os.makedirs(human_mismatch_output_folder,exist_ok=True)
os.makedirs(vehicle_mismatch_output_folder,exist_ok=True)

def copy_sampled_images(sampled_images,sampled_images_output_folder):

    for fn in tqdm(sampled_images):
        if fn not in all_files_relative_set:
            print('Warning: missing file {}'.format(fn))
            continue
        source_image = os.path.join(input_base,fn)
        target_image = os.path.join(sampled_images_output_folder,os.path.basename(fn))
        shutil.copyfile(source_image,target_image)

copy_sampled_images(sampled_human_mismatches,human_mismatch_output_folder)
copy_sampled_images(sampled_vehicle_mismatches,vehicle_mismatch_output_folder)
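
# Caveat: random.sample() raises ValueError when the population is smaller
# than n_to_sample; a defensive variant of the sampling above would be:
#
# sampled_human_mismatches = random.sample(
#     gt_missing_human_images,min(n_to_sample,len(gt_missing_human_images)))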


#%% See what's up with some of the mismatches

filename_base_to_filename = {}

from md_utils.path_utils import is_image_file

# fn = all_files_relative[0]
for fn in tqdm(all_files_relative):

    if not is_image_file(fn):
        continue
    if 'Indiv_Recognition' in fn:
        continue
    bn = os.path.basename(fn)
    assert bn not in filename_base_to_filename
    filename_base_to_filename[bn] = fn


if False:

    bn = 'TSW_S2_KA02_R3_IMAG0002.JPG'
    fn = filename_base_to_filename[bn]
    capture_id = image_to_capture_id[fn]
    species = capture_id_to_species[capture_id]
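
# The assert in the loop above encodes the assumption that image basenames are
# globally unique (outside the excluded 'Indiv_Recognition' folders); if that
# ever fails, keying a defaultdict(list) on basename would keep all candidates:
#
# filename_base_to_filenames = defaultdict(list)
# filename_base_to_filenames[os.path.basename(fn)].append(fn)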


#%% Look at the distribution of labels for the mismatched images

gt_missing_images = set(gt_missing_human_images).union(set(gt_missing_vehicle_images))

missing_image_species_to_count = defaultdict(int)

for fn in gt_missing_images:
    if fn not in image_to_capture_id:
        continue
    capture_id = image_to_capture_id[fn]
    species = capture_id_to_species[capture_id]
    for s in species:
        missing_image_species_to_count[s] += 1
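
# To view the resulting distribution, sorted by frequency, e.g.:
#
# for s,count in sorted(missing_image_species_to_count.items(),
#                       key=lambda kv: kv[1],reverse=True):
#     print('{}: {}'.format(s,count))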