megadetector 5.0.10__py3-none-any.whl → 5.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic. Click here for more details.
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
- megadetector-5.0.11.dist-info/RECORD +5 -0
- megadetector-5.0.11.dist-info/top_level.txt +1 -0
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -439
- api/batch_processing/api_core/server.py +0 -294
- api/batch_processing/api_core/server_api_config.py +0 -98
- api/batch_processing/api_core/server_app_config.py +0 -55
- api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- api/batch_processing/api_core/server_job_status_table.py +0 -152
- api/batch_processing/api_core/server_orchestration.py +0 -360
- api/batch_processing/api_core/server_utils.py +0 -92
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
- api/batch_processing/data_preparation/manage_video_batch.py +0 -327
- api/batch_processing/integration/digiKam/setup.py +0 -6
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
- api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
- api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +0 -64
- api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
- api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
- api/batch_processing/postprocessing/compare_batch_results.py +0 -958
- api/batch_processing/postprocessing/convert_output_format.py +0 -397
- api/batch_processing/postprocessing/load_api_results.py +0 -195
- api/batch_processing/postprocessing/md_to_coco.py +0 -310
- api/batch_processing/postprocessing/md_to_labelme.py +0 -330
- api/batch_processing/postprocessing/merge_detections.py +0 -401
- api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
- api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
- api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
- api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
- api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
- api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
- api/synchronous/api_core/animal_detection_api/config.py +0 -35
- api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
- api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
- api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
- api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
- api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
- api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
- api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
- api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
- api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
- api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
- api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
- api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
- api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
- api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
- api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
- api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
- api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
- api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
- api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
- api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
- api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +0 -110
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +0 -108
- classification/analyze_failed_images.py +0 -227
- classification/cache_batchapi_outputs.py +0 -198
- classification/create_classification_dataset.py +0 -627
- classification/crop_detections.py +0 -516
- classification/csv_to_json.py +0 -226
- classification/detect_and_crop.py +0 -855
- classification/efficientnet/__init__.py +0 -9
- classification/efficientnet/model.py +0 -415
- classification/efficientnet/utils.py +0 -610
- classification/evaluate_model.py +0 -520
- classification/identify_mislabeled_candidates.py +0 -152
- classification/json_to_azcopy_list.py +0 -63
- classification/json_validator.py +0 -695
- classification/map_classification_categories.py +0 -276
- classification/merge_classification_detection_output.py +0 -506
- classification/prepare_classification_script.py +0 -194
- classification/prepare_classification_script_mc.py +0 -228
- classification/run_classifier.py +0 -286
- classification/save_mislabeled.py +0 -110
- classification/train_classifier.py +0 -825
- classification/train_classifier_tf.py +0 -724
- classification/train_utils.py +0 -322
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +0 -34
- data_management/camtrap_dp_to_coco.py +0 -238
- data_management/cct_json_utils.py +0 -395
- data_management/cct_to_md.py +0 -176
- data_management/cct_to_wi.py +0 -289
- data_management/coco_to_labelme.py +0 -272
- data_management/coco_to_yolo.py +0 -662
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +0 -33
- data_management/databases/combine_coco_camera_traps_files.py +0 -206
- data_management/databases/integrity_check_json_db.py +0 -477
- data_management/databases/subset_json_db.py +0 -115
- data_management/generate_crops_from_cct.py +0 -149
- data_management/get_image_sizes.py +0 -188
- data_management/importers/add_nacti_sizes.py +0 -52
- data_management/importers/add_timestamps_to_icct.py +0 -79
- data_management/importers/animl_results_to_md_results.py +0 -158
- data_management/importers/auckland_doc_test_to_json.py +0 -372
- data_management/importers/auckland_doc_to_json.py +0 -200
- data_management/importers/awc_to_json.py +0 -189
- data_management/importers/bellevue_to_json.py +0 -273
- data_management/importers/cacophony-thermal-importer.py +0 -796
- data_management/importers/carrizo_shrubfree_2018.py +0 -268
- data_management/importers/carrizo_trail_cam_2017.py +0 -287
- data_management/importers/cct_field_adjustments.py +0 -57
- data_management/importers/channel_islands_to_cct.py +0 -913
- data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- data_management/importers/eMammal/eMammal_helpers.py +0 -249
- data_management/importers/eMammal/make_eMammal_json.py +0 -223
- data_management/importers/ena24_to_json.py +0 -275
- data_management/importers/filenames_to_json.py +0 -385
- data_management/importers/helena_to_cct.py +0 -282
- data_management/importers/idaho-camera-traps.py +0 -1407
- data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- data_management/importers/jb_csv_to_json.py +0 -150
- data_management/importers/mcgill_to_json.py +0 -250
- data_management/importers/missouri_to_json.py +0 -489
- data_management/importers/nacti_fieldname_adjustments.py +0 -79
- data_management/importers/noaa_seals_2019.py +0 -181
- data_management/importers/pc_to_json.py +0 -365
- data_management/importers/plot_wni_giraffes.py +0 -123
- data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
- data_management/importers/prepare_zsl_imerit.py +0 -131
- data_management/importers/rspb_to_json.py +0 -356
- data_management/importers/save_the_elephants_survey_A.py +0 -320
- data_management/importers/save_the_elephants_survey_B.py +0 -332
- data_management/importers/snapshot_safari_importer.py +0 -758
- data_management/importers/snapshot_safari_importer_reprise.py +0 -665
- data_management/importers/snapshot_serengeti_lila.py +0 -1067
- data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- data_management/importers/sulross_get_exif.py +0 -65
- data_management/importers/timelapse_csv_set_to_json.py +0 -490
- data_management/importers/ubc_to_json.py +0 -399
- data_management/importers/umn_to_json.py +0 -507
- data_management/importers/wellington_to_json.py +0 -263
- data_management/importers/wi_to_json.py +0 -441
- data_management/importers/zamba_results_to_md_results.py +0 -181
- data_management/labelme_to_coco.py +0 -548
- data_management/labelme_to_yolo.py +0 -272
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +0 -97
- data_management/lila/add_locations_to_nacti.py +0 -147
- data_management/lila/create_lila_blank_set.py +0 -557
- data_management/lila/create_lila_test_set.py +0 -151
- data_management/lila/create_links_to_md_results_files.py +0 -106
- data_management/lila/download_lila_subset.py +0 -177
- data_management/lila/generate_lila_per_image_labels.py +0 -515
- data_management/lila/get_lila_annotation_counts.py +0 -170
- data_management/lila/get_lila_image_counts.py +0 -111
- data_management/lila/lila_common.py +0 -300
- data_management/lila/test_lila_metadata_urls.py +0 -132
- data_management/ocr_tools.py +0 -874
- data_management/read_exif.py +0 -681
- data_management/remap_coco_categories.py +0 -84
- data_management/remove_exif.py +0 -66
- data_management/resize_coco_dataset.py +0 -189
- data_management/wi_download_csv_to_coco.py +0 -246
- data_management/yolo_output_to_md_output.py +0 -441
- data_management/yolo_to_coco.py +0 -676
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/detector_training/model_main_tf2.py +0 -114
- detection/process_video.py +0 -703
- detection/pytorch_detector.py +0 -337
- detection/run_detector.py +0 -779
- detection/run_detector_batch.py +0 -1219
- detection/run_inference_with_yolov5_val.py +0 -917
- detection/run_tiled_inference.py +0 -935
- detection/tf_detector.py +0 -188
- detection/video_utils.py +0 -606
- docs/source/conf.py +0 -43
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +0 -174
- md_utils/ct_utils.py +0 -612
- md_utils/directory_listing.py +0 -246
- md_utils/md_tests.py +0 -968
- md_utils/path_utils.py +0 -1044
- md_utils/process_utils.py +0 -157
- md_utils/sas_blob_utils.py +0 -509
- md_utils/split_locations_into_train_val.py +0 -228
- md_utils/string_utils.py +0 -92
- md_utils/url_utils.py +0 -323
- md_utils/write_html_image_list.py +0 -225
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +0 -293
- md_visualization/render_images_with_thumbnails.py +0 -275
- md_visualization/visualization_utils.py +0 -1537
- md_visualization/visualize_db.py +0 -551
- md_visualization/visualize_detector_output.py +0 -406
- megadetector-5.0.10.dist-info/RECORD +0 -224
- megadetector-5.0.10.dist-info/top_level.txt +0 -8
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
- taxonomy_mapping/map_new_lila_datasets.py +0 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
- taxonomy_mapping/preview_lila_taxonomy.py +0 -591
- taxonomy_mapping/retrieve_sample_image.py +0 -71
- taxonomy_mapping/simple_image_download.py +0 -218
- taxonomy_mapping/species_lookup.py +0 -834
- taxonomy_mapping/taxonomy_csv_checker.py +0 -159
- taxonomy_mapping/taxonomy_graph.py +0 -346
- taxonomy_mapping/validate_lila_category_mappings.py +0 -83
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
|
#
# make_full_SS_json.py
#
# Create a COCO-camera-traps .json file for Snapshot Serengeti data from
# the original .csv files provided on Dryad.
#
# This was used for "version 1.0" of the public Snapshot Serengeti archive; it's no
# longer used as of v2.0 (early 2020). See snapshot_serengeti_lila.py for the updated
# Snapshot Safari preparation process.
#

#%% Imports and constants

import csv
import json
import uuid
import datetime

output_file = '/datadrive/snapshotserengeti/databases/SnapshotSerengeti_multiple_classes.json'
csv_file_name = '/datadrive/snapshotserengeti/databases/consensus_data.csv'
all_image_file = '/datadrive/snapshotserengeti/databases/all_images.csv'


#%% Read annotation .csv file, format into a dictionary mapping field names to data arrays

data = []
with open(csv_file_name,'r') as f:
    reader = csv.reader(f, dialect='excel')
    for row in reader:
        data.append(row)

# First row is the header; each remaining row is one (event, species) annotation
data_fields = data[0]

# Maps capture event ID --> list of {field name: value} dicts, one per species
data_dicts = {}
for event in data[1:]:
    data_dicts.setdefault(event[0], []).append(
        {data_fields[i]: event[i] for i in range(len(data_fields))})

# Count the number of capture events with multiple species
mult_species = sum(1 for anns in data_dicts.values() if len(anns) > 1)


#%% Read image .csv file, format into a dictionary mapping images to capture events

with open(all_image_file,'r') as f:
    reader = csv.reader(f, dialect='excel')
    next(reader)  # skip header row
    im_name_to_cap_id = {row[1]: row[0] for row in reader}

total_ims = len(im_name_to_cap_id)
total_seqs = len(data_dicts)
print('Percent seqs with mult species: ', float(mult_species)/float(total_seqs))


#%% Create CCT-style .json

images = []
annotations = []
categories = []

# Maps capture event ID --> sorted list of image file names in that event
capture_ims = {i: [] for i in im_name_to_cap_id.values()}
for im_id in im_name_to_cap_id:
    capture_ims[im_name_to_cap_id[im_id]].append(im_id)

# Maps image file name --> zero-based frame number within its capture event
im_to_seq_num = {im: None for im in im_name_to_cap_id}
for event in capture_ims:
    capture_ims[event] = sorted(capture_ims[event])
    for seq_count, im in enumerate(capture_ims[event]):
        im_to_seq_num[im] = seq_count

# Category 0 is reserved for 'empty'; new species get consecutive IDs
cat_to_id = {'empty': 0}
cat_count = 1
seasons = []

for im_id in im_name_to_cap_id:

    im = {}
    im['id'] = im_id.split('.')[0]
    im['file_name'] = im_id

    # Image file names look like season/location/...
    im['location'] = im_id.split('/')[1]
    im['season'] = im_id.split('/')[0]
    if im['season'] not in seasons:
        seasons.append(im['season'])
    im['seq_id'] = im_name_to_cap_id[im_id]
    im['frame_num'] = im_to_seq_num[im_id]
    im['seq_num_frames'] = len(capture_ims[im['seq_id']])

    if im_name_to_cap_id[im_id] in data_dicts:
        # One annotation per species present in this capture event
        im_data_per_ann = data_dicts[im_name_to_cap_id[im_id]]
        for im_data in im_data_per_ann:
            im['datetime'] = im_data['DateTime']
            if im_data['Species'] not in cat_to_id:
                cat_to_id[im_data['Species']] = cat_count
                cat_count += 1
            ann = {}
            ann['id'] = str(uuid.uuid1())
            ann['image_id'] = im['id']
            ann['category_id'] = cat_to_id[im_data['Species']]
            annotations.append(ann)
    else:
        # No consensus data for this capture event; mark the image as empty
        ann = {}
        ann['id'] = str(uuid.uuid1())
        ann['image_id'] = im['id']
        ann['category_id'] = 0
        annotations.append(ann)

    # still need image width and height
    images.append(im)

# ...for each image

print(seasons)

for cat in cat_to_id:
    new_cat = {}
    new_cat['id'] = cat_to_id[cat]
    new_cat['name'] = cat
    categories.append(new_cat)


#%% Write output files

json_data = {}
json_data['images'] = images
json_data['annotations'] = annotations
json_data['categories'] = categories
info = {}
info['year'] = 2018
info['version'] = 1
info['description'] = 'COCO style Snapshot Serengeti database'
info['contributor'] = 'SMB'
info['date_created'] = str(datetime.date.today())
json_data['info'] = info

# Context manager so the output file is closed/flushed (the original passed an
# anonymous open() handle to json.dump and never closed it)
with open(output_file,'w') as f:
    json.dump(json_data, f)

print(images[0])
print(annotations[0])
#
# make_per_season_SS_json.py
#
# Create a COCO-camera-traps .json file for each Snapshot Serengeti season from
# the original .csv files provided on Dryad.
#
# This was used for "version 1.0" of the public Snapshot Serengeti archive; it's no
# longer used as of v2.0 (early 2020). See snapshot_serengeti_lila.py for the updated
# Snapshot Safari preparation process.
#

#%% Imports and constants

import csv
import json
import uuid
import datetime

output_file_folder = 'C:/Users/t-sabeer/Documents/databases/'
csv_file_name = 'D:/consensus_data.csv'


#%% Read annotation .csv file, format into a dictionary mapping field names to data arrays

data = []
with open(csv_file_name,'r') as f:
    reader = csv.reader(f, dialect='excel')
    for row in reader:
        data.append(row)

# First row is the header; each remaining row is one (event, species) annotation
data_fields = data[0]

# Maps capture event ID --> {field name: value}.  NOTE(review): events with
# multiple species rows keep only the last row here; presumably intentional for
# this single-class export — confirm against make_full_SS_json.py, which keeps all rows.
data_dicts = {}
for event in data[1:]:
    data_dicts[event[0]] = {data_fields[i]: event[i] for i in range(len(data_fields))}


#%% Read image .csv file, format into a dictionary mapping images to capture events

all_image_file = 'D:/all_images.csv'
with open(all_image_file,'r') as f:
    reader = csv.reader(f, dialect='excel')
    next(reader)  # skip header row
    im_name_to_cap_id = {row[1]: row[0] for row in reader}


#%% Create CCT-style .json

images = []
annotations = []
categories = []

# Maps capture event ID --> sorted list of image file names in that event
capture_ims = {i: [] for i in im_name_to_cap_id.values()}
for im_id in im_name_to_cap_id:
    capture_ims[im_name_to_cap_id[im_id]].append(im_id)

# Maps image file name --> zero-based frame number within its capture event
im_to_seq_num = {im: None for im in im_name_to_cap_id}
for event in capture_ims:
    capture_ims[event] = sorted(capture_ims[event])
    for seq_count, im in enumerate(capture_ims[event]):
        im_to_seq_num[im] = seq_count

# Category 0 is reserved for 'empty'; new species get consecutive IDs
cat_to_id = {'empty': 0}
cat_count = 1
seasons = []

for im_id in im_name_to_cap_id:
    im = {}
    im['id'] = im_id.split('.')[0]
    im['file_name'] = im_id

    # Image file names look like season/location/...
    im['location'] = im_id.split('/')[1]
    im['season'] = im_id.split('/')[0]
    im['seq_id'] = im_name_to_cap_id[im_id]
    im['frame_num'] = im_to_seq_num[im_id]
    im['seq_num_frames'] = len(capture_ims[im['seq_id']])
    if im['season'] not in seasons:
        seasons.append(im['season'])

    ann = {}
    ann['id'] = str(uuid.uuid1())
    ann['image_id'] = im['id']

    if im_name_to_cap_id[im_id] in data_dicts:
        im_data = data_dicts[im_name_to_cap_id[im_id]]
        im['datetime'] = im_data['DateTime']
        if im_data['Species'] not in cat_to_id:
            cat_to_id[im_data['Species']] = cat_count
            cat_count += 1
        ann['category_id'] = cat_to_id[im_data['Species']]
    else:
        # No consensus data for this capture event; mark the image as empty
        ann['category_id'] = 0

    #still need image width and height
    images.append(im)
    annotations.append(ann)

# ...for each image ID

for cat in cat_to_id:
    new_cat = {}
    new_cat['id'] = cat_to_id[cat]
    new_cat['name'] = cat
    categories.append(new_cat)


#%% Write output files

# Full (all-seasons) database
output_file = output_file_folder + 'SnapshotSerengeti.json'
json_data = {}
json_data['images'] = images
json_data['annotations'] = annotations
json_data['categories'] = categories
info = {}
info['year'] = 2018
info['version'] = 1
info['description'] = 'COCO style Snapshot Serengeti database'
info['contributor'] = 'SMB'
info['date_created'] = str(datetime.date.today())
json_data['info'] = info
# Context manager so each output file is closed/flushed (the original passed an
# anonymous open() handle to json.dump and never closed it)
with open(output_file,'w') as f:
    json.dump(json_data, f)

# One database per season
for season in seasons:

    output_file = output_file_folder + season + '.json'
    inSeason = {im['id']: False for im in images}
    for im in images:
        if im['season'] == season:
            inSeason[im['id']] = True
    new_ims = [im for im in images if inSeason[im['id']]]
    new_anns = [ann for ann in annotations if inSeason[ann['image_id']]]

    json_data = {}
    json_data['images'] = new_ims
    json_data['annotations'] = new_anns
    json_data['categories'] = categories
    info = {}
    info['year'] = 2018
    info['version'] = 1
    info['description'] = 'COCO style Snapshot Serengeti database. season ' + season
    info['contributor'] = 'SMB'
    info['date_created'] = str(datetime.date.today())
    json_data['info'] = info
    with open(output_file,'w') as f:
        json.dump(json_data, f)

    print('Season ' + season)
    print(str(len(new_ims)) + ' images')
    print(str(len(new_anns)) + ' annotations')

# ...for each season
"""

sulross_get_exif.py

For the Sul Ross dataset, species information was stored in XMP metadata; pull
all that metadata out to .json.

"""

import os
import json
from tqdm import tqdm

import exiftool

image_ids_path = '/home/beaver/cameratraps/data/sulross/20190522_image_ids.json'
data_dir = '/home/beaver/cameratraps/mnt/sulross'

# Where results (and periodic checkpoints) are written
output_path = '/home/beaver/cameratraps/data/sulross/20190522_metadata.json'


def _save_results(image_id_to_metadata):
    """Write the accumulated image ID --> metadata dict to output_path."""

    with open(output_path, 'w') as f:
        json.dump(image_id_to_metadata, f, indent=1)


def get_metadata():
    """
    Read the list of image IDs from image_ids_path, pull the
    XMP:HierarchicalSubject value for each image via exiftool (in batches),
    and write the results to output_path, checkpointing every ~10k images.
    """

    image_ids = json.load(open(image_ids_path))

    # Maps image ID --> XMP:HierarchicalSubject value
    image_id_to_metadata = {}

    # exiftool can process a batch of images at a time, but bottleneck is blobfuse reading the images
    batch_size = 20

    num_images_processed = 0

    with exiftool.ExifTool() as et:

        for i in tqdm(range(0, len(image_ids), batch_size)):

            batch_ids = image_ids[i: i + batch_size]
            batch_paths = [os.path.join(data_dir, image_id) for image_id in batch_ids]

            try:
                metadatas = et.get_metadata_batch(batch_paths)

                # 'image_id' rather than 'id', so we don't shadow the builtin
                for image_id, metadata in zip(batch_ids, metadatas):
                    image_id_to_metadata[image_id] = metadata['XMP:HierarchicalSubject']
            except Exception as e:
                # Best-effort: skip batches that fail metadata extraction
                print('Exception! {}'.format(e))
                continue

            num_images_processed += batch_size

            # Periodic progress report
            if num_images_processed % 1000 == 0:
                print('Finished processing {} images; image ID {}'.format(
                    num_images_processed, image_ids[num_images_processed - 1]))
                # Show the most recent metadata value as a sanity check; guarded
                # because every batch so far may have failed (the original
                # indexed with a loop variable leaked from the zip loop, which
                # is unbound if the first batches all raised)
                if image_id_to_metadata:
                    print(image_id_to_metadata[image_id])
                print()

            # checkpoint
            if num_images_processed % 10000 == 0:
                print('Saving results so far...')
                _save_results(image_id_to_metadata)

    print('Length of meta data read: ', len(image_id_to_metadata))
    _save_results(image_id_to_metadata)
    print('Results saved. Done!')


if __name__ == '__main__':
    get_metadata()