megadetector 5.0.10__py3-none-any.whl → 5.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of megadetector has been flagged as potentially problematic. See the registry's advisory page for this release for details.
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
- megadetector-5.0.11.dist-info/RECORD +5 -0
- megadetector-5.0.11.dist-info/top_level.txt +1 -0
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -439
- api/batch_processing/api_core/server.py +0 -294
- api/batch_processing/api_core/server_api_config.py +0 -98
- api/batch_processing/api_core/server_app_config.py +0 -55
- api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- api/batch_processing/api_core/server_job_status_table.py +0 -152
- api/batch_processing/api_core/server_orchestration.py +0 -360
- api/batch_processing/api_core/server_utils.py +0 -92
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
- api/batch_processing/data_preparation/manage_video_batch.py +0 -327
- api/batch_processing/integration/digiKam/setup.py +0 -6
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
- api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
- api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +0 -64
- api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
- api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
- api/batch_processing/postprocessing/compare_batch_results.py +0 -958
- api/batch_processing/postprocessing/convert_output_format.py +0 -397
- api/batch_processing/postprocessing/load_api_results.py +0 -195
- api/batch_processing/postprocessing/md_to_coco.py +0 -310
- api/batch_processing/postprocessing/md_to_labelme.py +0 -330
- api/batch_processing/postprocessing/merge_detections.py +0 -401
- api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
- api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
- api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
- api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
- api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
- api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
- api/synchronous/api_core/animal_detection_api/config.py +0 -35
- api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
- api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
- api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
- api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
- api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
- api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
- api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
- api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
- api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
- api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
- api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
- api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
- api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
- api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
- api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
- api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
- api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
- api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
- api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
- api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
- api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
- api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +0 -110
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +0 -108
- classification/analyze_failed_images.py +0 -227
- classification/cache_batchapi_outputs.py +0 -198
- classification/create_classification_dataset.py +0 -627
- classification/crop_detections.py +0 -516
- classification/csv_to_json.py +0 -226
- classification/detect_and_crop.py +0 -855
- classification/efficientnet/__init__.py +0 -9
- classification/efficientnet/model.py +0 -415
- classification/efficientnet/utils.py +0 -610
- classification/evaluate_model.py +0 -520
- classification/identify_mislabeled_candidates.py +0 -152
- classification/json_to_azcopy_list.py +0 -63
- classification/json_validator.py +0 -695
- classification/map_classification_categories.py +0 -276
- classification/merge_classification_detection_output.py +0 -506
- classification/prepare_classification_script.py +0 -194
- classification/prepare_classification_script_mc.py +0 -228
- classification/run_classifier.py +0 -286
- classification/save_mislabeled.py +0 -110
- classification/train_classifier.py +0 -825
- classification/train_classifier_tf.py +0 -724
- classification/train_utils.py +0 -322
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +0 -34
- data_management/camtrap_dp_to_coco.py +0 -238
- data_management/cct_json_utils.py +0 -395
- data_management/cct_to_md.py +0 -176
- data_management/cct_to_wi.py +0 -289
- data_management/coco_to_labelme.py +0 -272
- data_management/coco_to_yolo.py +0 -662
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +0 -33
- data_management/databases/combine_coco_camera_traps_files.py +0 -206
- data_management/databases/integrity_check_json_db.py +0 -477
- data_management/databases/subset_json_db.py +0 -115
- data_management/generate_crops_from_cct.py +0 -149
- data_management/get_image_sizes.py +0 -188
- data_management/importers/add_nacti_sizes.py +0 -52
- data_management/importers/add_timestamps_to_icct.py +0 -79
- data_management/importers/animl_results_to_md_results.py +0 -158
- data_management/importers/auckland_doc_test_to_json.py +0 -372
- data_management/importers/auckland_doc_to_json.py +0 -200
- data_management/importers/awc_to_json.py +0 -189
- data_management/importers/bellevue_to_json.py +0 -273
- data_management/importers/cacophony-thermal-importer.py +0 -796
- data_management/importers/carrizo_shrubfree_2018.py +0 -268
- data_management/importers/carrizo_trail_cam_2017.py +0 -287
- data_management/importers/cct_field_adjustments.py +0 -57
- data_management/importers/channel_islands_to_cct.py +0 -913
- data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- data_management/importers/eMammal/eMammal_helpers.py +0 -249
- data_management/importers/eMammal/make_eMammal_json.py +0 -223
- data_management/importers/ena24_to_json.py +0 -275
- data_management/importers/filenames_to_json.py +0 -385
- data_management/importers/helena_to_cct.py +0 -282
- data_management/importers/idaho-camera-traps.py +0 -1407
- data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- data_management/importers/jb_csv_to_json.py +0 -150
- data_management/importers/mcgill_to_json.py +0 -250
- data_management/importers/missouri_to_json.py +0 -489
- data_management/importers/nacti_fieldname_adjustments.py +0 -79
- data_management/importers/noaa_seals_2019.py +0 -181
- data_management/importers/pc_to_json.py +0 -365
- data_management/importers/plot_wni_giraffes.py +0 -123
- data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
- data_management/importers/prepare_zsl_imerit.py +0 -131
- data_management/importers/rspb_to_json.py +0 -356
- data_management/importers/save_the_elephants_survey_A.py +0 -320
- data_management/importers/save_the_elephants_survey_B.py +0 -332
- data_management/importers/snapshot_safari_importer.py +0 -758
- data_management/importers/snapshot_safari_importer_reprise.py +0 -665
- data_management/importers/snapshot_serengeti_lila.py +0 -1067
- data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- data_management/importers/sulross_get_exif.py +0 -65
- data_management/importers/timelapse_csv_set_to_json.py +0 -490
- data_management/importers/ubc_to_json.py +0 -399
- data_management/importers/umn_to_json.py +0 -507
- data_management/importers/wellington_to_json.py +0 -263
- data_management/importers/wi_to_json.py +0 -441
- data_management/importers/zamba_results_to_md_results.py +0 -181
- data_management/labelme_to_coco.py +0 -548
- data_management/labelme_to_yolo.py +0 -272
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +0 -97
- data_management/lila/add_locations_to_nacti.py +0 -147
- data_management/lila/create_lila_blank_set.py +0 -557
- data_management/lila/create_lila_test_set.py +0 -151
- data_management/lila/create_links_to_md_results_files.py +0 -106
- data_management/lila/download_lila_subset.py +0 -177
- data_management/lila/generate_lila_per_image_labels.py +0 -515
- data_management/lila/get_lila_annotation_counts.py +0 -170
- data_management/lila/get_lila_image_counts.py +0 -111
- data_management/lila/lila_common.py +0 -300
- data_management/lila/test_lila_metadata_urls.py +0 -132
- data_management/ocr_tools.py +0 -874
- data_management/read_exif.py +0 -681
- data_management/remap_coco_categories.py +0 -84
- data_management/remove_exif.py +0 -66
- data_management/resize_coco_dataset.py +0 -189
- data_management/wi_download_csv_to_coco.py +0 -246
- data_management/yolo_output_to_md_output.py +0 -441
- data_management/yolo_to_coco.py +0 -676
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/detector_training/model_main_tf2.py +0 -114
- detection/process_video.py +0 -703
- detection/pytorch_detector.py +0 -337
- detection/run_detector.py +0 -779
- detection/run_detector_batch.py +0 -1219
- detection/run_inference_with_yolov5_val.py +0 -917
- detection/run_tiled_inference.py +0 -935
- detection/tf_detector.py +0 -188
- detection/video_utils.py +0 -606
- docs/source/conf.py +0 -43
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +0 -174
- md_utils/ct_utils.py +0 -612
- md_utils/directory_listing.py +0 -246
- md_utils/md_tests.py +0 -968
- md_utils/path_utils.py +0 -1044
- md_utils/process_utils.py +0 -157
- md_utils/sas_blob_utils.py +0 -509
- md_utils/split_locations_into_train_val.py +0 -228
- md_utils/string_utils.py +0 -92
- md_utils/url_utils.py +0 -323
- md_utils/write_html_image_list.py +0 -225
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +0 -293
- md_visualization/render_images_with_thumbnails.py +0 -275
- md_visualization/visualization_utils.py +0 -1537
- md_visualization/visualize_db.py +0 -551
- md_visualization/visualize_detector_output.py +0 -406
- megadetector-5.0.10.dist-info/RECORD +0 -224
- megadetector-5.0.10.dist-info/top_level.txt +0 -8
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
- taxonomy_mapping/map_new_lila_datasets.py +0 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
- taxonomy_mapping/preview_lila_taxonomy.py +0 -591
- taxonomy_mapping/retrieve_sample_image.py +0 -71
- taxonomy_mapping/simple_image_download.py +0 -218
- taxonomy_mapping/species_lookup.py +0 -834
- taxonomy_mapping/taxonomy_csv_checker.py +0 -159
- taxonomy_mapping/taxonomy_graph.py +0 -346
- taxonomy_mapping/validate_lila_category_mappings.py +0 -83
- {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
|
"""

idfg_iwildcam_lila_prep.py

Adds class labels (from the private test .csv) to the iWildCam 2019 IDFG
test set, in preparation for release on LILA.

This version works with the public iWildCam release images.

This is an interactive, cell-delimited (#%%) script, intended to be run one
cell at a time; it is not a module.  "Take one" starts from the iWildCam
.json files, "take two" starts from pre-iWildCam .json files created from
IDFG .csv files.

"""

#%% ############ Take one, from iWildCam .json files ############

#%% Imports and constants

import uuid
import json
import os
from tqdm import tqdm

base_folder = r'h:\iWildCam_2019_IDFG'
input_json = os.path.join(base_folder,'iWildCam_2019_IDFG_info.json')
input_csv = os.path.join(base_folder,'IDFG_eval_public_v_private.csv')
output_json = os.path.join(base_folder,'idaho_camera_traps.json')

assert os.path.isfile(input_json)
assert os.path.isfile(input_csv)


#%% Read input files

with open(input_json,'r') as f:
    input_data = json.load(f)

with open(input_csv,'r') as f:
    private_csv_lines = f.readlines()

private_csv_lines = [s.strip() for s in private_csv_lines]

# Remove the header line
assert private_csv_lines[0] == 'Id,Category,Usage'
private_csv_lines = private_csv_lines[1:]

print('Read {} annotations for {} images'.format(len(private_csv_lines),len(input_data['images'])))

# This data set has exactly one label per image
assert len(private_csv_lines) == len(input_data['images'])
n_images = len(input_data['images'])


#%% Parse annotations

# Maps each image ID to a single integer category ID (one label per image)
image_id_to_category_id = {}

for line in tqdm(private_csv_lines):

    # Lines look like:
    #
    # b005e5b2-2c0b-11e9-bcad-06f1011196c4,1,Private

    tokens = line.split(',')
    assert len(tokens) == 3
    assert tokens[2] in ['Private','Public']
    image_id_to_category_id[tokens[0]] = int(tokens[1])

# This also verifies that no image ID appeared twice in the .csv; a duplicate
# would have silently overwritten an earlier entry in the dict.
assert len(image_id_to_category_id) == n_images


#%% Minor cleanup re: images

for im in tqdm(input_data['images']):
    # Strip the iWildCam folder prefix from each filename
    im['file_name'] = im['file_name'].replace('iWildCam_IDFG_images/','')
    # Location IDs are strings in CCT files
    assert isinstance(im['location'],int)
    im['location'] = str(im['location'])


#%% Create annotations

annotations = []

for image_id in tqdm(image_id_to_category_id):
    category_id = image_id_to_category_id[image_id]
    ann = {}
    # uuid1 gives us a practically-unique annotation ID
    ann['id'] = str(uuid.uuid1())
    ann['image_id'] = image_id
    ann['category_id'] = category_id
    annotations.append(ann)


#%% Prepare info

info = input_data['info']
info['contributor'] = 'Images acquired by the Idaho Department of Fish and Game, dataset curated by Sara Beery'
info['description'] = 'Idaho Camera traps'
info['version'] = '2021.07.19'


#%% Minor adjustments to categories

input_categories = input_data['categories']

category_id_to_name = {cat['id']:cat['name'] for cat in input_categories}
category_name_to_id = {cat['name']:cat['id'] for cat in input_categories}
assert category_id_to_name[0] == 'empty'

# Count annotations per category name, so we can drop unused categories
category_names_to_counts = {}
for category in input_categories:
    category_names_to_counts[category['name']] = 0

for ann in annotations:
    category_name = category_id_to_name[ann['category_id']]
    category_names_to_counts[category_name] += 1

categories = []

for category_name in category_names_to_counts:

    count = category_names_to_counts[category_name]

    # Remove unused categories
    if count == 0:
        continue

    category_id = category_name_to_id[category_name]

    # Name adjustments
    if category_name == 'prongs':
        category_name = 'pronghorn'

    categories.append({'id':category_id,'name':category_name})


#%% Create output

output_data = {}
output_data['images'] = input_data['images']
output_data['annotations'] = annotations
output_data['categories'] = categories
output_data['info'] = info


#%% Write output

with open(output_json,'w') as f:
    json.dump(output_data,f,indent=2)


#%% Validate .json file

from data_management.databases import integrity_check_json_db

options = integrity_check_json_db.IntegrityCheckOptions()
options.baseDir = os.path.join(base_folder,'images'); assert os.path.isdir(options.baseDir)
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False

_, _, _ = integrity_check_json_db.integrity_check_json_db(output_json, options)


#%% Preview labels

from md_visualization import visualize_db

viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 100
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True

# viz_options.classes_to_exclude = ['test']
html_output_file, _ = visualize_db.visualize_db(db_path=output_json,
                                                output_dir=os.path.join(
                                                    base_folder,'preview'),
                                                image_base_dir=os.path.join(base_folder,'images'),
                                                options=viz_options)

# os.startfile is Windows-only, which is consistent with the Windows drive
# letters used throughout this script
os.startfile(html_output_file)


#%% ############ Take two, from pre-iWildCam .json files created from IDFG .csv files ############

#%% Imports and constants

import json
import os

base_folder = r'h:\idaho-camera-traps'
input_json_sl = os.path.join(base_folder,'iWildCam_IDFG.json')
input_json = os.path.join(base_folder,'iWildCam_IDFG_ml.json')
output_json = os.path.join(base_folder,'idaho_camera_traps.json')
remote_image_base_dir = r'z:\idfg'

# Either the multi-line file or the single-line source it is generated from
# must exist.  (Asserting only on input_json here would make the
# "one-time line break addition" cell below unreachable.)
assert os.path.isfile(input_json) or os.path.isfile(input_json_sl)


#%% One-time line break addition

# The source .json was written on a single line; re-write it with
# indentation so it's easier to diff and inspect.
if not os.path.isfile(input_json):

    sl_json = input_json_sl
    ml_json = input_json

    with open(sl_json,'r') as f:
        d = json.load(f)
    with open(ml_json,'w') as f:
        json.dump(d,f,indent=2)


#%% Read input files

with open(input_json,'r') as f:
    input_data = json.load(f)

print('Read {} annotations for {} images'.format(len(input_data['annotations']),len(input_data['images'])))

n_images = len(input_data['images'])


#%% Prepare info

info = {}
info['contributor'] = 'Images acquired by the Idaho Department of Fish and Game, dataset curated by Sara Beery'
info['description'] = 'Idaho Camera traps'
info['version'] = '2021.07.19'


#%% Minor adjustments to categories

input_categories = input_data['categories']
output_categories = []

for c in input_categories:
    category_name = c['name']
    category_id = c['id']
    # Name adjustments
    if category_name == 'prong':
        category_name = 'pronghorn'
    category_name = category_name.lower()
    output_categories.append({'name':category_name,'id':category_id})


#%% Minor adjustments to annotations

# Annotation IDs are strings in CCT files
for ann in input_data['annotations']:
    ann['id'] = str(ann['id'])


#%% Create output

output_data = {}
output_data['images'] = input_data['images']
output_data['annotations'] = input_data['annotations']
output_data['categories'] = output_categories
output_data['info'] = info


#%% Write output

with open(output_json,'w') as f:
    json.dump(output_data,f,indent=2)


#%% Validate .json file

from data_management.databases import integrity_check_json_db

options = integrity_check_json_db.IntegrityCheckOptions()
options.baseDir = remote_image_base_dir
options.bCheckImageSizes = False
options.bCheckImageExistence = False
options.bFindUnusedImages = False

_, _, _ = integrity_check_json_db.integrity_check_json_db(output_json, options)


#%% Preview labels

from md_visualization import visualize_db

viz_options = visualize_db.DbVizOptions()
viz_options.num_to_visualize = 100
viz_options.trim_to_images_with_bboxes = False
viz_options.add_search_links = False
viz_options.sort_by_filename = False
viz_options.parallelize_rendering = True
viz_options.include_filename_links = True

# viz_options.classes_to_exclude = ['test']
html_output_file, _ = visualize_db.visualize_db(db_path=output_json,
                                                output_dir=os.path.join(
                                                    base_folder,'preview'),
                                                image_base_dir=remote_image_base_dir,
                                                options=viz_options)

os.startfile(html_output_file)
"""

jb_csv_to_json.py

Convert a particular .csv file to CCT format. Images were not available at
the time I wrote this script, so this is much shorter than other scripts
in this folder.

This is an interactive, cell-delimited (#%%) script; run it one cell at a
time.

"""

#%% Constants and environment

import pandas as pd
import uuid
import json

input_metadata_file = r'd:\temp\pre_bounding_box.csv'
output_file = r'd:\temp\pre_bounding_box.json'
filename_col = 'filename'
label_col = 'category'


#%% Read source data

input_metadata = pd.read_csv(input_metadata_file)

print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
      len(input_metadata)))


#%% Confirm filename uniqueness (this data set has one label per image)

imageFilenames = input_metadata[filename_col]

duplicateRows = []
filenamesToRows = {}

# Build up a map from filenames to a list of rows, recording duplicates as
# we go; a duplicate filename would mean an image had more than one label.
for iFile,fn in enumerate(imageFilenames):

    if (fn in filenamesToRows):
        duplicateRows.append(iFile)
        filenamesToRows[fn].append(iFile)
    else:
        filenamesToRows[fn] = [iFile]

assert(len(duplicateRows) == 0)


#%% Create CCT dictionaries

images = []
annotations = []

# Map categories to integer IDs (that's what COCO likes); 'empty' is
# reserved as category 0 by CCT convention.
nextCategoryID = 1
categories = []
categoryNamesToCategories = {}

cat = {}
cat['name'] = 'empty'
cat['id'] = 0
categories.append(cat)
categoryNamesToCategories['empty'] = cat

# For each image
#
# Because in practice images are 1:1 with annotations in this data set,
# this is also a loop over annotations.

# imageName = imageFilenames[0]
for imageName in imageFilenames:

    rows = filenamesToRows[imageName]

    # As per above, this is convenient and appears to be true; asserting to be safe
    assert(len(rows) == 1)
    iRow = rows[0]

    row = input_metadata.iloc[iRow]

    im = {}
    # Filenames look like "290716114012001a1116.jpg"
    im['id'] = imageName.split('.')[0]
    im['file_name'] = imageName
    im['seq_id'] = '-1'

    images.append(im)

    categoryName = row[label_col].lower()

    # Have we seen this category before?
    if categoryName in categoryNamesToCategories:
        categoryID = categoryNamesToCategories[categoryName]['id']
    else:
        cat = {}
        categoryID = nextCategoryID
        cat['name'] = categoryName
        cat['id'] = nextCategoryID
        categories.append(cat)
        categoryNamesToCategories[categoryName] = cat
        nextCategoryID += 1

    # Create an annotation
    ann = {}

    # The Internet tells me this guarantees uniqueness to a reasonable extent, even
    # beyond the sheer improbability of collisions.
    ann['id'] = str(uuid.uuid1())
    ann['image_id'] = im['id']
    ann['category_id'] = categoryID

    annotations.append(ann)

# ...for each image

print('Finished creating dictionaries')


#%% Create info struct

info = {}
info['year'] = 2019
info['version'] = 1
info['description'] = 'COCO style database'
info['secondary_contributor'] = 'Converted to COCO .json by Dan Morris'
info['contributor'] = ''


#%% Write output

json_data = {}
json_data['images'] = images
json_data['annotations'] = annotations
json_data['categories'] = categories
json_data['info'] = info

# Use a context manager so the output file is flushed and closed; the
# previous version passed a bare open() to json.dump and never closed it.
with open(output_file,'w') as f:
    json.dump(json_data, f, indent=4)

print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
    len(images),len(annotations),len(categories)))


#%% Validate

from data_management.databases import integrity_check_json_db

options = integrity_check_json_db.IntegrityCheckOptions()
# NOTE(review): the sibling script idfg_iwildcam_lila_prep.py unpacks three
# return values from integrity_check_json_db(); confirm which return arity
# the current version of that function uses.
sortedCategories,data = integrity_check_json_db.integrity_check_json_db(output_file, options)
@@ -1,250 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
|
|
3
|
-
mcgill_to_json.py
|
|
4
|
-
|
|
5
|
-
Convert the .csv file provided for the McGill test data set to a
|
|
6
|
-
COCO-camera-traps .json file
|
|
7
|
-
|
|
8
|
-
"""
|
|
9
|
-
|
|
10
|
-
#%% Constants and environment
|
|
11
|
-
|
|
12
|
-
import pandas as pd
|
|
13
|
-
import os
|
|
14
|
-
import glob
|
|
15
|
-
import json
|
|
16
|
-
import uuid
|
|
17
|
-
import time
|
|
18
|
-
import ntpath
|
|
19
|
-
import humanfriendly
|
|
20
|
-
import PIL
|
|
21
|
-
import math
|
|
22
|
-
|
|
23
|
-
baseDir = r'D:\wildlife_data\mcgill_test'
|
|
24
|
-
input_metadata_file = os.path.join(baseDir, 'dan_500_photos_metadata.csv')
|
|
25
|
-
output_file = os.path.join(baseDir, 'mcgill_test.json')
|
|
26
|
-
image_directory = baseDir
|
|
27
|
-
|
|
28
|
-
assert(os.path.isdir(image_directory))
|
|
29
|
-
assert(os.path.isfile(input_metadata_file))
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
#%% Read source data
|
|
33
|
-
|
|
34
|
-
input_metadata = pd.read_csv(input_metadata_file)
|
|
35
|
-
|
|
36
|
-
print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
|
|
37
|
-
len(input_metadata)))
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
#%% Map filenames to rows, verify image existence
|
|
41
|
-
|
|
42
|
-
# Create an additional column for concatenated filenames
|
|
43
|
-
input_metadata['relative_path'] = ''
|
|
44
|
-
input_metadata['full_path'] = ''
|
|
45
|
-
|
|
46
|
-
startTime = time.time()
|
|
47
|
-
|
|
48
|
-
# Maps relative filenames to rows
|
|
49
|
-
filenamesToRows = {}
|
|
50
|
-
|
|
51
|
-
duplicateRows = []
|
|
52
|
-
|
|
53
|
-
# Build up a map from filenames to a list of rows, checking image existence as we go
|
|
54
|
-
# row = input_metadata.iloc[0]
|
|
55
|
-
for iFile,row in input_metadata.iterrows():
|
|
56
|
-
|
|
57
|
-
relativePath = os.path.join(row['site'],row['date_range'],str(row['camera']),
|
|
58
|
-
str(row['folder']),row['filename'])
|
|
59
|
-
fullPath = os.path.join(baseDir,relativePath)
|
|
60
|
-
|
|
61
|
-
if (relativePath in filenamesToRows):
|
|
62
|
-
duplicateRows.append(iFile)
|
|
63
|
-
filenamesToRows[relativePath].append(iFile)
|
|
64
|
-
else:
|
|
65
|
-
filenamesToRows[relativePath] = [iFile]
|
|
66
|
-
assert(os.path.isfile(fullPath))
|
|
67
|
-
|
|
68
|
-
row['relative_path'] = relativePath
|
|
69
|
-
row['full_path'] = fullPath
|
|
70
|
-
|
|
71
|
-
input_metadata.iloc[iFile] = row
|
|
72
|
-
|
|
73
|
-
elapsed = time.time() - startTime
|
|
74
|
-
print('Finished verifying image existence in {}, found {} filenames with multiple labels'.format(
|
|
75
|
-
humanfriendly.format_timespan(elapsed),len(duplicateRows)))
|
|
76
|
-
|
|
77
|
-
# I didn't expect this to be true a priori, but it appears to be true, and
|
|
78
|
-
# it saves us the trouble of checking consistency across multiple occurrences
|
|
79
|
-
# of an image.
|
|
80
|
-
assert(len(duplicateRows) == 0)
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
#%% Check for images that aren't included in the metadata file
|
|
84
|
-
|
|
85
|
-
# Enumerate all images
|
|
86
|
-
imageFullPaths = glob.glob(os.path.join(image_directory,'**/*.JPG'), recursive=True)
|
|
87
|
-
|
|
88
|
-
for iImage,imagePath in enumerate(imageFullPaths):
|
|
89
|
-
|
|
90
|
-
imageRelPath = ntpath.relpath(imagePath, image_directory)
|
|
91
|
-
assert(imageRelPath in filenamesToRows)
|
|
92
|
-
|
|
93
|
-
print('Finished checking {} images to make sure they\'re in the metadata'.format(
|
|
94
|
-
len(imageFullPaths)))
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
#%% Create CCT dictionaries
|
|
98
|
-
|
|
99
|
-
# Also gets image sizes, so this takes ~6 minutes
|
|
100
|
-
#
|
|
101
|
-
# Implicitly checks images for overt corruptness, i.e. by not crashing.
|
|
102
|
-
|
|
103
|
-
images = []
|
|
104
|
-
annotations = []
|
|
105
|
-
categories = []
|
|
106
|
-
|
|
107
|
-
emptyCategory = {}
|
|
108
|
-
emptyCategory['id'] = 0
|
|
109
|
-
emptyCategory['name'] = 'empty'
|
|
110
|
-
emptyCategory['latin'] = 'empty'
|
|
111
|
-
emptyCategory['count'] = 0
|
|
112
|
-
categories.append(emptyCategory)
|
|
113
|
-
|
|
114
|
-
# Map categories to integer IDs (that's what COCO likes)
|
|
115
|
-
nextCategoryID = 1
|
|
116
|
-
labelToCategory = {'empty':emptyCategory}
|
|
117
|
-
|
|
118
|
-
# For each image
|
|
119
|
-
#
|
|
120
|
-
# Because in practice images are 1:1 with annotations in this data set,
|
|
121
|
-
# this is also a loop over annotations.
|
|
122
|
-
|
|
123
|
-
startTime = time.time()
|
|
124
|
-
|
|
125
|
-
# row = input_metadata.iloc[0]
|
|
126
|
-
for iFile,row in input_metadata.iterrows():
|
|
127
|
-
|
|
128
|
-
relPath = row['relative_path'].replace('\\','/')
|
|
129
|
-
im = {}
|
|
130
|
-
# Filenames look like "290716114012001a1116.jpg"
|
|
131
|
-
im['id'] = relPath.replace('/','_').replace(' ','_')
|
|
132
|
-
|
|
133
|
-
im['file_name'] = relPath
|
|
134
|
-
|
|
135
|
-
im['seq_id'] = -1
|
|
136
|
-
im['frame_num'] = -1
|
|
137
|
-
|
|
138
|
-
# In the form "001a"
|
|
139
|
-
im['site']= row['site']
|
|
140
|
-
|
|
141
|
-
# Can be in the form '111' or 's46'
|
|
142
|
-
im['camera'] = row['camera']
|
|
143
|
-
|
|
144
|
-
# In the form "7/29/2016 11:40"
|
|
145
|
-
im['datetime'] = row['timestamp']
|
|
146
|
-
|
|
147
|
-
otherFields = ['motion','temp_F','n_present','n_waterhole','n_contact','notes']
|
|
148
|
-
|
|
149
|
-
for s in otherFields:
|
|
150
|
-
im[s] = row[s]
|
|
151
|
-
|
|
152
|
-
# Check image height and width
|
|
153
|
-
fullPath = row['full_path']
|
|
154
|
-
assert(os.path.isfile(fullPath))
|
|
155
|
-
pilImage = PIL.Image.open(fullPath)
|
|
156
|
-
width, height = pilImage.size
|
|
157
|
-
im['width'] = width
|
|
158
|
-
im['height'] = height
|
|
159
|
-
|
|
160
|
-
images.append(im)
|
|
161
|
-
|
|
162
|
-
label = row['species']
|
|
163
|
-
if not isinstance(label,str):
|
|
164
|
-
# NaN is the only thing we should see that's not a string
|
|
165
|
-
assert math.isnan(label)
|
|
166
|
-
label = 'empty'
|
|
167
|
-
else:
|
|
168
|
-
label = label.lower()
|
|
169
|
-
|
|
170
|
-
latin = row['binomial']
|
|
171
|
-
if not isinstance(latin,str):
|
|
172
|
-
# NaN is the only thing we should see that's not a string
|
|
173
|
-
assert math.isnan(latin)
|
|
174
|
-
latin = 'empty'
|
|
175
|
-
else:
|
|
176
|
-
latin = latin.lower()
|
|
177
|
-
|
|
178
|
-
if label == 'empty':
|
|
179
|
-
if latin != 'empty':
|
|
180
|
-
latin = 'empty'
|
|
181
|
-
|
|
182
|
-
if label == 'unknown':
|
|
183
|
-
if latin != 'unknown':
|
|
184
|
-
latin = 'unknown'
|
|
185
|
-
|
|
186
|
-
if label not in labelToCategory:
|
|
187
|
-
print('Adding category {} ({})'.format(label,latin))
|
|
188
|
-
category = {}
|
|
189
|
-
categoryID = nextCategoryID
|
|
190
|
-
category['id'] = categoryID
|
|
191
|
-
nextCategoryID += 1
|
|
192
|
-
category['name'] = label
|
|
193
|
-
category['latin'] = latin
|
|
194
|
-
category['count'] = 1
|
|
195
|
-
labelToCategory[label] = category
|
|
196
|
-
categories.append(category)
|
|
197
|
-
else:
|
|
198
|
-
category = labelToCategory[label]
|
|
199
|
-
category['count'] = category['count'] + 1
|
|
200
|
-
categoryID = category['id']
|
|
201
|
-
|
|
202
|
-
# Create an annotation
|
|
203
|
-
ann = {}
|
|
204
|
-
|
|
205
|
-
# The Internet tells me this guarantees uniqueness to a reasonable extent, even
|
|
206
|
-
# beyond the sheer improbability of collisions.
|
|
207
|
-
ann['id'] = str(uuid.uuid1())
|
|
208
|
-
ann['image_id'] = im['id']
|
|
209
|
-
ann['category_id'] = categoryID
|
|
210
|
-
|
|
211
|
-
annotations.append(ann)
|
|
212
|
-
|
|
213
|
-
# ...for each image
|
|
214
|
-
|
|
215
|
-
# Convert categories to a CCT-style dictionary
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
for category in categories:
|
|
219
|
-
print('Category {}, count {}'.format(category['name'],category['count']))
|
|
220
|
-
|
|
221
|
-
elapsed = time.time() - startTime
|
|
222
|
-
print('Finished creating CCT dictionaries in {}'.format(
|
|
223
|
-
humanfriendly.format_timespan(elapsed)))
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
#%% Create info struct
|
|
227
|
-
|
|
228
|
-
info = {}
|
|
229
|
-
info['year'] = 2019
|
|
230
|
-
info['version'] = 1
|
|
231
|
-
info['description'] = 'COCO style database'
|
|
232
|
-
info['secondary_contributor'] = 'Converted to COCO .json by Dan Morris'
|
|
233
|
-
info['contributor'] = 'McGill University'
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
#%% Write output
|
|
237
|
-
|
|
238
|
-
json_data = {}
|
|
239
|
-
json_data['images'] = images
|
|
240
|
-
json_data['annotations'] = annotations
|
|
241
|
-
json_data['categories'] = categories
|
|
242
|
-
json_data['info'] = info
|
|
243
|
-
json.dump(json_data, open(output_file,'w'), indent=4)
|
|
244
|
-
|
|
245
|
-
print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
|
|
246
|
-
len(images),len(annotations),len(categories)))
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|