megadetector 5.0.11__py3-none-any.whl → 5.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector has been flagged as possibly problematic; see the package registry's advisory for details.
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +439 -0
- megadetector/api/batch_processing/api_core/server.py +294 -0
- megadetector/api/batch_processing/api_core/server_api_config.py +98 -0
- megadetector/api/batch_processing/api_core/server_app_config.py +55 -0
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +220 -0
- megadetector/api/batch_processing/api_core/server_job_status_table.py +152 -0
- megadetector/api/batch_processing/api_core/server_orchestration.py +360 -0
- megadetector/api/batch_processing/api_core/server_utils.py +92 -0
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +46 -0
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +152 -0
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +126 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +152 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -0
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +35 -0
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +110 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +627 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +855 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +610 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +699 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +506 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +34 -0
- megadetector/data_management/camtrap_dp_to_coco.py +239 -0
- megadetector/data_management/cct_json_utils.py +395 -0
- megadetector/data_management/cct_to_md.py +176 -0
- megadetector/data_management/cct_to_wi.py +289 -0
- megadetector/data_management/coco_to_labelme.py +272 -0
- megadetector/data_management/coco_to_yolo.py +662 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +33 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +206 -0
- megadetector/data_management/databases/integrity_check_json_db.py +477 -0
- megadetector/data_management/databases/subset_json_db.py +115 -0
- megadetector/data_management/generate_crops_from_cct.py +149 -0
- megadetector/data_management/get_image_sizes.py +189 -0
- megadetector/data_management/importers/add_nacti_sizes.py +52 -0
- megadetector/data_management/importers/add_timestamps_to_icct.py +79 -0
- megadetector/data_management/importers/animl_results_to_md_results.py +158 -0
- megadetector/data_management/importers/auckland_doc_test_to_json.py +373 -0
- megadetector/data_management/importers/auckland_doc_to_json.py +201 -0
- megadetector/data_management/importers/awc_to_json.py +191 -0
- megadetector/data_management/importers/bellevue_to_json.py +273 -0
- megadetector/data_management/importers/cacophony-thermal-importer.py +796 -0
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +269 -0
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +289 -0
- megadetector/data_management/importers/cct_field_adjustments.py +58 -0
- megadetector/data_management/importers/channel_islands_to_cct.py +913 -0
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +180 -0
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +249 -0
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +223 -0
- megadetector/data_management/importers/ena24_to_json.py +276 -0
- megadetector/data_management/importers/filenames_to_json.py +386 -0
- megadetector/data_management/importers/helena_to_cct.py +283 -0
- megadetector/data_management/importers/idaho-camera-traps.py +1407 -0
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +294 -0
- megadetector/data_management/importers/jb_csv_to_json.py +150 -0
- megadetector/data_management/importers/mcgill_to_json.py +250 -0
- megadetector/data_management/importers/missouri_to_json.py +490 -0
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +79 -0
- megadetector/data_management/importers/noaa_seals_2019.py +181 -0
- megadetector/data_management/importers/pc_to_json.py +365 -0
- megadetector/data_management/importers/plot_wni_giraffes.py +123 -0
- megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -0
- megadetector/data_management/importers/prepare_zsl_imerit.py +131 -0
- megadetector/data_management/importers/rspb_to_json.py +356 -0
- megadetector/data_management/importers/save_the_elephants_survey_A.py +320 -0
- megadetector/data_management/importers/save_the_elephants_survey_B.py +329 -0
- megadetector/data_management/importers/snapshot_safari_importer.py +758 -0
- megadetector/data_management/importers/snapshot_safari_importer_reprise.py +665 -0
- megadetector/data_management/importers/snapshot_serengeti_lila.py +1067 -0
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +150 -0
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +153 -0
- megadetector/data_management/importers/sulross_get_exif.py +65 -0
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +490 -0
- megadetector/data_management/importers/ubc_to_json.py +399 -0
- megadetector/data_management/importers/umn_to_json.py +507 -0
- megadetector/data_management/importers/wellington_to_json.py +263 -0
- megadetector/data_management/importers/wi_to_json.py +442 -0
- megadetector/data_management/importers/zamba_results_to_md_results.py +181 -0
- megadetector/data_management/labelme_to_coco.py +547 -0
- megadetector/data_management/labelme_to_yolo.py +272 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +97 -0
- megadetector/data_management/lila/add_locations_to_nacti.py +147 -0
- megadetector/data_management/lila/create_lila_blank_set.py +558 -0
- megadetector/data_management/lila/create_lila_test_set.py +152 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +178 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +516 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +170 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +300 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +132 -0
- megadetector/data_management/ocr_tools.py +874 -0
- megadetector/data_management/read_exif.py +681 -0
- megadetector/data_management/remap_coco_categories.py +84 -0
- megadetector/data_management/remove_exif.py +66 -0
- megadetector/data_management/resize_coco_dataset.py +189 -0
- megadetector/data_management/wi_download_csv_to_coco.py +246 -0
- megadetector/data_management/yolo_output_to_md_output.py +441 -0
- megadetector/data_management/yolo_to_coco.py +676 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/detector_training/__init__.py +0 -0
- megadetector/detection/detector_training/model_main_tf2.py +114 -0
- megadetector/detection/process_video.py +702 -0
- megadetector/detection/pytorch_detector.py +341 -0
- megadetector/detection/run_detector.py +779 -0
- megadetector/detection/run_detector_batch.py +1219 -0
- megadetector/detection/run_inference_with_yolov5_val.py +917 -0
- megadetector/detection/run_tiled_inference.py +934 -0
- megadetector/detection/tf_detector.py +189 -0
- megadetector/detection/video_utils.py +606 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +64 -0
- megadetector/postprocessing/categorize_detections_by_size.py +163 -0
- megadetector/postprocessing/combine_api_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +958 -0
- megadetector/postprocessing/convert_output_format.py +396 -0
- megadetector/postprocessing/load_api_results.py +195 -0
- megadetector/postprocessing/md_to_coco.py +310 -0
- megadetector/postprocessing/md_to_labelme.py +330 -0
- megadetector/postprocessing/merge_detections.py +401 -0
- megadetector/postprocessing/postprocess_batch_results.py +1902 -0
- megadetector/postprocessing/remap_detection_categories.py +170 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +660 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +211 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +83 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1631 -0
- megadetector/postprocessing/separate_detections_into_folders.py +730 -0
- megadetector/postprocessing/subset_json_detector_output.py +696 -0
- megadetector/postprocessing/top_folders_to_bottom.py +223 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +150 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +590 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +219 -0
- megadetector/taxonomy_mapping/species_lookup.py +834 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/azure_utils.py +178 -0
- megadetector/utils/ct_utils.py +612 -0
- megadetector/utils/directory_listing.py +246 -0
- megadetector/utils/md_tests.py +968 -0
- megadetector/utils/path_utils.py +1044 -0
- megadetector/utils/process_utils.py +157 -0
- megadetector/utils/sas_blob_utils.py +509 -0
- megadetector/utils/split_locations_into_train_val.py +228 -0
- megadetector/utils/string_utils.py +92 -0
- megadetector/utils/url_utils.py +323 -0
- megadetector/utils/write_html_image_list.py +225 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +293 -0
- megadetector/visualization/render_images_with_thumbnails.py +275 -0
- megadetector/visualization/visualization_utils.py +1536 -0
- megadetector/visualization/visualize_db.py +550 -0
- megadetector/visualization/visualize_detector_output.py +405 -0
- {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/METADATA +1 -1
- megadetector-5.0.12.dist-info/RECORD +199 -0
- megadetector-5.0.12.dist-info/top_level.txt +1 -0
- megadetector-5.0.11.dist-info/RECORD +0 -5
- megadetector-5.0.11.dist-info/top_level.txt +0 -1
- {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/LICENSE +0 -0
- {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
"""
|
|
2
|
+
|
|
3
|
+
ena24_to_json_2017.py
|
|
4
|
+
|
|
5
|
+
Convert the ENA24 data set to a COCO-camera-traps .json file
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
#%% Constants and environment
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import json
|
|
13
|
+
import uuid
|
|
14
|
+
import time
|
|
15
|
+
import humanfriendly
|
|
16
|
+
import numpy as np
|
|
17
|
+
import shutil
|
|
18
|
+
import zipfile
|
|
19
|
+
|
|
20
|
+
from PIL import Image
|
|
21
|
+
from tqdm import tqdm
|
|
22
|
+
|
|
23
|
+
base_directory = r'e:\wildlife_data\ena24'
|
|
24
|
+
output_file = os.path.join(base_directory,'ena24.json')
|
|
25
|
+
image_directory = os.path.join(base_directory,'images')
|
|
26
|
+
label_directory = os.path.join(base_directory,'labels')
|
|
27
|
+
|
|
28
|
+
assert(os.path.isdir(label_directory))
|
|
29
|
+
assert(os.path.isdir(image_directory))
|
|
30
|
+
|
|
31
|
+
# Temporary folders for human and non-human images
|
|
32
|
+
human_dir = os.path.join(base_directory, 'human')
|
|
33
|
+
non_human_dir = os.path.join(base_directory, 'non-human')
|
|
34
|
+
|
|
35
|
+
human_zipfile = os.path.join(base_directory, 'ena24_humans.zip')
|
|
36
|
+
non_human_zipfile = os.path.join(base_directory, 'ena24.zip')
|
|
37
|
+
|
|
38
|
+
# Clean existing output folders/zipfiles
|
|
39
|
+
if os.path.isdir(human_dir):
|
|
40
|
+
shutil.rmtree(human_dir)
|
|
41
|
+
if os.path.isdir(non_human_dir):
|
|
42
|
+
shutil.rmtree(non_human_dir)
|
|
43
|
+
|
|
44
|
+
if os.path.isfile(human_zipfile):
|
|
45
|
+
os.remove(human_zipfile)
|
|
46
|
+
if os.path.isfile(human_zipfile):
|
|
47
|
+
os.remove(non_human_zipfile)
|
|
48
|
+
|
|
49
|
+
os.makedirs(human_dir,exist_ok=True)
|
|
50
|
+
os.makedirs(non_human_dir,exist_ok=True)
|
|
51
|
+
|
|
52
|
+
labels = ['White_Tailed_Deer', 'Dog', 'Bobcat', 'Red Fox', 'Horse',
|
|
53
|
+
'Domestic Cat', 'American Black Bear', 'Eastern Cottontail', 'Grey Fox', 'Coyote',
|
|
54
|
+
'Eastern Fox Squirrel', 'Eastern Gray Squirrel', 'Vehicle', 'Eastern Chipmunk', 'Wild Turkey',
|
|
55
|
+
'Northern Raccoon', 'Striped Skunk', 'Woodchuck', 'Virginia Opossum', 'Human',
|
|
56
|
+
'Bird', 'American Crow', 'Chicken']
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
#%% Support functions
|
|
60
|
+
|
|
61
|
+
def zipdir(path, zipfilename, basepath=None):
|
|
62
|
+
"""
|
|
63
|
+
Zip everything in [path] into [zipfilename], with paths in the zipfile relative to [basepath]
|
|
64
|
+
"""
|
|
65
|
+
ziph = zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_STORED)
|
|
66
|
+
|
|
67
|
+
for root, dirs, files in os.walk(path):
|
|
68
|
+
for file in files:
|
|
69
|
+
src = os.path.join(root, file)
|
|
70
|
+
if basepath is None:
|
|
71
|
+
dst = file
|
|
72
|
+
else:
|
|
73
|
+
dst = os.path.relpath(src,basepath)
|
|
74
|
+
ziph.write(src, dst, zipfile.ZIP_STORED)
|
|
75
|
+
|
|
76
|
+
ziph.close()
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
#%% Read source data
|
|
80
|
+
|
|
81
|
+
image_list = os.listdir(label_directory)
|
|
82
|
+
print('Enumerated {} label files'.format(len(image_list)))
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
#%% Map filenames to rows, verify image existence
|
|
86
|
+
|
|
87
|
+
startTime = time.time()
|
|
88
|
+
|
|
89
|
+
# Build up a map from filenames to a list of rows, checking image existence as we go
|
|
90
|
+
for filename in image_list:
|
|
91
|
+
imagePath = os.path.join(image_directory, "{}.jpg".format(filename.split(".")[0]))
|
|
92
|
+
assert(os.path.isfile(imagePath))
|
|
93
|
+
|
|
94
|
+
elapsed = time.time() - startTime
|
|
95
|
+
print('Finished verifying image existence for {} files in {}'.format(
|
|
96
|
+
len(image_list),humanfriendly.format_timespan(elapsed)))
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
#%% Create CCT dictionaries
|
|
100
|
+
|
|
101
|
+
# Also gets image sizes, so this takes ~6 minutes
|
|
102
|
+
#
|
|
103
|
+
# Implicitly checks images for overt corruptness, i.e. by not crashing.
|
|
104
|
+
|
|
105
|
+
images = []
|
|
106
|
+
annotations = []
|
|
107
|
+
|
|
108
|
+
# Map categories to integer IDs (that's what COCO likes)
|
|
109
|
+
nextCategoryID = 0
|
|
110
|
+
categoriesToCategoryId = {}
|
|
111
|
+
categoriesToCounts = {}
|
|
112
|
+
|
|
113
|
+
# For each image
|
|
114
|
+
#
|
|
115
|
+
# Because in practice images are 1:1 with annotations in this data set,
|
|
116
|
+
# this is also a loop over annotations.
|
|
117
|
+
|
|
118
|
+
startTime = time.time()
|
|
119
|
+
for filename in tqdm(image_list):
|
|
120
|
+
|
|
121
|
+
contains_human = False
|
|
122
|
+
im = {}
|
|
123
|
+
im['id'] = filename.split('.')[0]
|
|
124
|
+
fn = "{}.jpg".format(filename.split('.')[0])
|
|
125
|
+
im['file_name'] = fn
|
|
126
|
+
|
|
127
|
+
# Check image height and width
|
|
128
|
+
imagePath = os.path.join(image_directory, fn)
|
|
129
|
+
assert(os.path.isfile(imagePath))
|
|
130
|
+
pilImage = Image.open(imagePath)
|
|
131
|
+
width, height = pilImage.size
|
|
132
|
+
im['width'] = width
|
|
133
|
+
im['height'] = height
|
|
134
|
+
|
|
135
|
+
images.append(im)
|
|
136
|
+
|
|
137
|
+
label_path = os.path.join(label_directory, filename)
|
|
138
|
+
file_data = open(label_path, 'r').read()
|
|
139
|
+
row = file_data.split()
|
|
140
|
+
category = labels[int(row[0])-1]
|
|
141
|
+
|
|
142
|
+
rows = np.loadtxt(label_path)
|
|
143
|
+
|
|
144
|
+
# Each row is category, [box coordinates]
|
|
145
|
+
|
|
146
|
+
# If there's just one row, loadtxt reads it as a 1d array; make it a 2d array
|
|
147
|
+
# with one row
|
|
148
|
+
if len(rows.shape)==1:
|
|
149
|
+
rows = rows.reshape(1,-5)
|
|
150
|
+
|
|
151
|
+
assert (len(rows.shape)==2 and rows.shape[1] == 5)
|
|
152
|
+
|
|
153
|
+
categories_this_image = set()
|
|
154
|
+
|
|
155
|
+
# Each row is a bounding box
|
|
156
|
+
for row in rows:
|
|
157
|
+
|
|
158
|
+
i_category = int(row[0])-1
|
|
159
|
+
category = labels[i_category]
|
|
160
|
+
if category == 'Human':
|
|
161
|
+
contains_human = True
|
|
162
|
+
categories_this_image.add(category)
|
|
163
|
+
|
|
164
|
+
# Have we seen this category before?
|
|
165
|
+
if category in categoriesToCategoryId:
|
|
166
|
+
categoryID = categoriesToCategoryId[category]
|
|
167
|
+
categoriesToCounts[category] += 1
|
|
168
|
+
else:
|
|
169
|
+
categoryID = nextCategoryID
|
|
170
|
+
categoriesToCategoryId[category] = categoryID
|
|
171
|
+
categoriesToCounts[category] = 0
|
|
172
|
+
nextCategoryID += 1
|
|
173
|
+
|
|
174
|
+
# Create an annotation
|
|
175
|
+
ann = {}
|
|
176
|
+
|
|
177
|
+
ann['id'] = str(uuid.uuid1())
|
|
178
|
+
ann['image_id'] = im['id']
|
|
179
|
+
ann['category_id'] = categoryID
|
|
180
|
+
ann['bbox'] = [row[1]*width, row[2]*height, row[3]*width, row[4]*height]
|
|
181
|
+
annotations.append(ann)
|
|
182
|
+
|
|
183
|
+
# ...for each bounding box
|
|
184
|
+
|
|
185
|
+
# This was here for debugging; nearly every instance is Human+Horse, Human+Vehicle,
|
|
186
|
+
# or Human+Dog, but there is one Rabbit+Opossium, and a few Deer+Chicken!
|
|
187
|
+
if False:
|
|
188
|
+
if len(categories_this_image) > 1:
|
|
189
|
+
print('Image {} has multiple categories: '.format(filename),end='')
|
|
190
|
+
for c in categories_this_image:
|
|
191
|
+
print(c, end=',')
|
|
192
|
+
print('')
|
|
193
|
+
|
|
194
|
+
if contains_human:
|
|
195
|
+
shutil.copy(imagePath, os.path.join(base_directory, human_dir))
|
|
196
|
+
else:
|
|
197
|
+
shutil.copy(imagePath, os.path.join(base_directory, non_human_dir))
|
|
198
|
+
|
|
199
|
+
# ...for each image
|
|
200
|
+
|
|
201
|
+
# Convert categories to a CCT-style dictionary
|
|
202
|
+
|
|
203
|
+
categories = []
|
|
204
|
+
|
|
205
|
+
for category in categoriesToCounts:
|
|
206
|
+
print('Category {}, count {}'.format(category, categoriesToCounts[category]))
|
|
207
|
+
categoryID = categoriesToCategoryId[category]
|
|
208
|
+
cat = {}
|
|
209
|
+
cat['name'] = category
|
|
210
|
+
cat['id'] = categoryID
|
|
211
|
+
categories.append(cat)
|
|
212
|
+
|
|
213
|
+
elapsed = time.time() - startTime
|
|
214
|
+
print('Finished creating CCT dictionaries in {}'.format(
|
|
215
|
+
humanfriendly.format_timespan(elapsed)))
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
#%% Create info struct
|
|
219
|
+
|
|
220
|
+
info = {}
|
|
221
|
+
info['year'] = 2016
|
|
222
|
+
info['version'] = 1
|
|
223
|
+
info['description'] = ''
|
|
224
|
+
info['secondary_contributor'] = 'Converted to COCO .json by Vardhan Duvvuri'
|
|
225
|
+
info['contributor'] = 'University of Missouri'
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
#%% Write output
|
|
229
|
+
|
|
230
|
+
json_data = {}
|
|
231
|
+
json_data['images'] = images
|
|
232
|
+
json_data['annotations'] = annotations
|
|
233
|
+
json_data['categories'] = categories
|
|
234
|
+
json_data['info'] = info
|
|
235
|
+
json.dump(json_data, open(output_file, 'w'), indent=2)
|
|
236
|
+
|
|
237
|
+
print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
|
|
238
|
+
len(images),len(annotations),len(categories)))
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
#%% Create ZIP files for human and non human
|
|
242
|
+
|
|
243
|
+
zipdir(human_dir,human_zipfile)
|
|
244
|
+
zipdir(non_human_dir,non_human_zipfile)
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
#%% Validate output
|
|
248
|
+
|
|
249
|
+
from megadetector.data_management.databases import integrity_check_json_db
|
|
250
|
+
|
|
251
|
+
fn = output_file
|
|
252
|
+
options = integrity_check_json_db.IntegrityCheckOptions()
|
|
253
|
+
options.baseDir = image_directory
|
|
254
|
+
options.bCheckImageSizes = False
|
|
255
|
+
options.bCheckImageExistence = True
|
|
256
|
+
options.bFindUnusedImages = True
|
|
257
|
+
|
|
258
|
+
sortedCategories, data = integrity_check_json_db.integrity_check_json_db(fn,options)
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
#%% Preview labels
|
|
262
|
+
|
|
263
|
+
from megadetector.visualization import visualize_db
|
|
264
|
+
from megadetector.data_management.databases import integrity_check_json_db
|
|
265
|
+
|
|
266
|
+
viz_options = visualize_db.DbVizOptions()
|
|
267
|
+
viz_options.num_to_visualize = None
|
|
268
|
+
viz_options.trim_to_images_with_bboxes = False
|
|
269
|
+
viz_options.add_search_links = True
|
|
270
|
+
viz_options.sort_by_filename = False
|
|
271
|
+
viz_options.parallelize_rendering = True
|
|
272
|
+
html_output_file,image_db = visualize_db.visualize_db(db_path=output_file,
|
|
273
|
+
output_dir=os.path.join(base_directory,'preview'),
|
|
274
|
+
image_base_dir=image_directory,
|
|
275
|
+
options=viz_options)
|
|
276
|
+
os.startfile(html_output_file)
|
|
@@ -0,0 +1,386 @@
|
|
|
1
|
+
"""
|
|
2
|
+
|
|
3
|
+
filenames_to_json.py
|
|
4
|
+
|
|
5
|
+
Take a directory of images in which species labels are encoded by folder
|
|
6
|
+
names, and produces a COCO-style .json file
|
|
7
|
+
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
#%% Constants and imports
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import io
|
|
14
|
+
import os
|
|
15
|
+
import uuid
|
|
16
|
+
import csv
|
|
17
|
+
import warnings
|
|
18
|
+
import datetime
|
|
19
|
+
|
|
20
|
+
from PIL import Image
|
|
21
|
+
|
|
22
|
+
from megadetector.utils.path_utils import find_images
|
|
23
|
+
|
|
24
|
+
# ignoring all "PIL cannot read EXIF metainfo for the images" warnings
|
|
25
|
+
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
|
|
26
|
+
# Metadata Warning, tag 256 had too many entries: 42, expected 1
|
|
27
|
+
warnings.filterwarnings("ignore", "Metadata warning", UserWarning)
|
|
28
|
+
|
|
29
|
+
# Filenames will be stored in the output .json relative to this base dir
|
|
30
|
+
baseDir = r'D:\wildlife_data\bellevue_camera_traps\bellevue_camera_traps.19.06.02.1320'
|
|
31
|
+
outputJsonFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320.json')
|
|
32
|
+
outputCsvFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320.csv')
|
|
33
|
+
|
|
34
|
+
# rawClassListFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320_classes.csv')
|
|
35
|
+
# classMappingsFilename = os.path.join(baseDir,'bellevue_camera_traps.19.06.02.1320_class_mapping.csv')
|
|
36
|
+
outputEncoding = 'utf-8'
|
|
37
|
+
|
|
38
|
+
classMappings = {'transitional':'unlabeled','moving':'unlabeled','setup':'unlabeled','blurry':'unlabeled','transitional':'unlabeled','junk':'unlabeled','unknown':'unlabeled'}
|
|
39
|
+
|
|
40
|
+
bLoadFileListIfAvailable = True
|
|
41
|
+
|
|
42
|
+
info = {}
|
|
43
|
+
info['year'] = 2019
|
|
44
|
+
info['version'] = '1.0'
|
|
45
|
+
info['description'] = 'Bellevue Camera Traps'
|
|
46
|
+
info['contributor'] = 'Dan Morris'
|
|
47
|
+
info['date_created'] = str(datetime.date.today())
|
|
48
|
+
|
|
49
|
+
maxFiles = -1
|
|
50
|
+
bReadImageSizes = False
|
|
51
|
+
bUseExternalRemappingTable = False
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
#%% Enumerate files, read image sizes
|
|
55
|
+
|
|
56
|
+
# Each element will be a list of relative path/full path/width/height
|
|
57
|
+
fileInfo = []
|
|
58
|
+
nonImages = []
|
|
59
|
+
nFiles = 0
|
|
60
|
+
|
|
61
|
+
if bLoadFileListIfAvailable and os.path.isfile(outputCsvFilename):
|
|
62
|
+
|
|
63
|
+
print('Loading file list from {}'.format(outputCsvFilename))
|
|
64
|
+
|
|
65
|
+
with open(outputCsvFilename,'r') as f:
|
|
66
|
+
reader = csv.reader(f)
|
|
67
|
+
csvInfo = list(list(item) for item in csv.reader(f, delimiter=','))
|
|
68
|
+
|
|
69
|
+
for iRow in range(len(csvInfo)):
|
|
70
|
+
csvInfo[iRow][2] = int(csvInfo[iRow][2])
|
|
71
|
+
csvInfo[iRow][3] = int(csvInfo[iRow][3])
|
|
72
|
+
|
|
73
|
+
fileInfo = csvInfo
|
|
74
|
+
|
|
75
|
+
print('Finished reading list of {} files'.format(len(fileInfo)))
|
|
76
|
+
|
|
77
|
+
else:
|
|
78
|
+
|
|
79
|
+
print('Enumerating files from {} to {}'.format(baseDir,outputCsvFilename))
|
|
80
|
+
|
|
81
|
+
image_files = find_images(baseDir,bRecursive=True)
|
|
82
|
+
print('Enumerated {} images'.format(len(image_files)))
|
|
83
|
+
|
|
84
|
+
with io.open(outputCsvFilename, "w", encoding=outputEncoding) as outputFileHandle:
|
|
85
|
+
|
|
86
|
+
for fname in image_files:
|
|
87
|
+
|
|
88
|
+
nFiles = nFiles + 1
|
|
89
|
+
if maxFiles >= 0 and nFiles > maxFiles:
|
|
90
|
+
print('Warning: early break at {} files'.format(maxFiles))
|
|
91
|
+
break
|
|
92
|
+
|
|
93
|
+
fullPath = fname
|
|
94
|
+
relativePath = os.path.relpath(fullPath,baseDir)
|
|
95
|
+
|
|
96
|
+
if maxFiles >= 0:
|
|
97
|
+
print(relativePath)
|
|
98
|
+
|
|
99
|
+
h = -1
|
|
100
|
+
w = -1
|
|
101
|
+
|
|
102
|
+
if bReadImageSizes:
|
|
103
|
+
|
|
104
|
+
# Read the image
|
|
105
|
+
try:
|
|
106
|
+
|
|
107
|
+
im = Image.open(fullPath)
|
|
108
|
+
h = im.height
|
|
109
|
+
w = im.width
|
|
110
|
+
|
|
111
|
+
except:
|
|
112
|
+
# Corrupt or not an image
|
|
113
|
+
nonImages.append(fullPath)
|
|
114
|
+
continue
|
|
115
|
+
|
|
116
|
+
# Store file info
|
|
117
|
+
imageInfo = [relativePath, fullPath, w, h]
|
|
118
|
+
fileInfo.append(imageInfo)
|
|
119
|
+
|
|
120
|
+
# Write to output file
|
|
121
|
+
outputFileHandle.write('"' + relativePath + '"' + ',' +
|
|
122
|
+
'"' + fullPath + '"' + ',' +
|
|
123
|
+
str(w) + ',' + str(h) + '\n')
|
|
124
|
+
|
|
125
|
+
# ...for each image file
|
|
126
|
+
|
|
127
|
+
# ...csv file output
|
|
128
|
+
|
|
129
|
+
print("Finished writing {} file names to {}".format(nFiles,outputCsvFilename))
|
|
130
|
+
|
|
131
|
+
# ...if the file list is/isn't available
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
#%% Enumerate classes
|
|
135
|
+
|
|
136
|
+
# Maps classes to counts
|
|
137
|
+
classList = {}
|
|
138
|
+
|
|
139
|
+
for iRow,row in enumerate(fileInfo):
|
|
140
|
+
|
|
141
|
+
fullPath = row[0]
|
|
142
|
+
className = os.path.split(os.path.dirname(fullPath))[1]
|
|
143
|
+
className = className.lower().strip()
|
|
144
|
+
if className in classList:
|
|
145
|
+
classList[className] += 1
|
|
146
|
+
else:
|
|
147
|
+
classList[className] = 1
|
|
148
|
+
row.append(className)
|
|
149
|
+
|
|
150
|
+
classNames = list(classList.keys())
|
|
151
|
+
|
|
152
|
+
# We like 'empty' to be class 0
|
|
153
|
+
if 'empty' in classNames:
|
|
154
|
+
classNames.remove('empty')
|
|
155
|
+
classNames.insert(0,'empty')
|
|
156
|
+
|
|
157
|
+
print('Finished enumerating {} classes'.format(len(classList)))
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
#%% Assemble dictionaries
|
|
161
|
+
|
|
162
|
+
images = []
|
|
163
|
+
annotations = []
|
|
164
|
+
categories = []
|
|
165
|
+
|
|
166
|
+
categoryNameToId = {}
|
|
167
|
+
idToCategory = {}
|
|
168
|
+
imageIdToImage = {}
|
|
169
|
+
|
|
170
|
+
nextId = 0
|
|
171
|
+
|
|
172
|
+
for categoryName in classNames:
|
|
173
|
+
|
|
174
|
+
catId = nextId
|
|
175
|
+
nextId += 1
|
|
176
|
+
categoryNameToId[categoryName] = catId
|
|
177
|
+
newCat = {}
|
|
178
|
+
newCat['id'] = categoryNameToId[categoryName]
|
|
179
|
+
newCat['name'] = categoryName
|
|
180
|
+
newCat['count'] = 0
|
|
181
|
+
categories.append(newCat)
|
|
182
|
+
idToCategory[catId] = newCat
|
|
183
|
+
|
|
184
|
+
# ...for each category
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
# Each element is a list of relative path/full path/width/height/className
|
|
188
|
+
|
|
189
|
+
for iRow,row in enumerate(fileInfo):
|
|
190
|
+
|
|
191
|
+
relativePath = row[0]
|
|
192
|
+
w = row[2]
|
|
193
|
+
h = row[3]
|
|
194
|
+
className = row[4]
|
|
195
|
+
|
|
196
|
+
assert className in categoryNameToId
|
|
197
|
+
categoryId = categoryNameToId[className]
|
|
198
|
+
|
|
199
|
+
im = {}
|
|
200
|
+
im['id'] = str(uuid.uuid1())
|
|
201
|
+
im['file_name'] = relativePath
|
|
202
|
+
im['height'] = h
|
|
203
|
+
im['width'] = w
|
|
204
|
+
images.append(im)
|
|
205
|
+
imageIdToImage[im['id']] = im
|
|
206
|
+
|
|
207
|
+
ann = {}
|
|
208
|
+
ann['id'] = str(uuid.uuid1())
|
|
209
|
+
ann['image_id'] = im['id']
|
|
210
|
+
ann['category_id'] = categoryId
|
|
211
|
+
annotations.append(ann)
|
|
212
|
+
|
|
213
|
+
cat = idToCategory[categoryId]
|
|
214
|
+
cat['count'] += 1
|
|
215
|
+
|
|
216
|
+
# ...for each image
|
|
217
|
+
|
|
218
|
+
oldNameToOldId = categoryNameToId
|
|
219
|
+
originalCategories = categories
|
|
220
|
+
|
|
221
|
+
print('Finished assembling dictionaries')
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
#%% External class mapping

# Optionally remap class names via a user-edited .csv: write out the raw
# class table, then (if present) read the edited table back as a
# source-class -> target-class mapping.
if bUseExternalRemappingTable:

    # When the external table drives the mapping, no in-code mapping
    # should have been supplied.
    assert classMappings is None


    #%% Write raw class table

    # cat = categories[0]
    if os.path.isfile(rawClassListFilename):

        print('Not over-writing raw class table')

    else:

        with io.open(rawClassListFilename, "w", encoding=outputEncoding) as classListFileHandle:
            for cat in categories:
                catId = cat['id']
                categoryName = cat['name']
                categoryCount = cat['count']
                classListFileHandle.write(str(catId) + ',"' + categoryName + '",' + str(categoryCount) + '\n')

        print('Finished writing raw class table')


    #%% Read the mapped class table

    classMappings = {}

    if os.path.isfile(classMappingsFilename):

        print('Loading file list from {}'.format(classMappingsFilename))

        # NOTE(review): opened without an explicit encoding, unlike the raw
        # table written above with outputEncoding -- confirm this matches
        # how the edited table is saved.
        with open(classMappingsFilename,'r') as f:
            # The original also created an unused csv.reader on this handle
            # before building mappingInfo; that dead reader is removed here.
            mappingInfo = [list(item) for item in csv.reader(f, delimiter=',')]

        for mapping in mappingInfo:

            assert len(mapping) == 4

            # id, source, count, target
            sourceClass = mapping[1]
            targetClass = mapping[3]
            assert sourceClass not in classMappings
            classMappings[sourceClass] = targetClass

        print('Finished reading list of {} class mappings'.format(len(mappingInfo)))

    else:

        #%% Make classMappings contain *all* classes, not just remapped classes

        # No edited table yet: fall back to an identity mapping for every class.
        # cat = categories[0]
        for cat in categories:
            if cat['name'] not in classMappings:
                classMappings[cat['name']] = cat['name']
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
#%% Create new class list

# Rebuild the category tables in the remapped (target) class space, and
# record how original category IDs translate to new ones.
categories = []
categoryNameToId = {}
oldIdToNewId = {}

# Start at 1, explicitly assign 0 to "empty"
nextCategoryId = 1

for sourceClass, targetClass in classMappings.items():

    if targetClass in categoryNameToId:

        categoryId = categoryNameToId[targetClass]

    else:

        # First time we've seen this target class: mint an ID and a record
        if targetClass == 'empty':
            categoryId = 0
        else:
            categoryId = nextCategoryId
            nextCategoryId = nextCategoryId + 1

        categoryNameToId[targetClass] = categoryId
        newCat = {'id': categoryId, 'name': targetClass, 'count': 0}

        # Keep "empty" at the head of the category list
        if targetClass == 'empty':
            categories.insert(0,newCat)
        else:
            categories.append(newCat)

    # One-off issue with character encoding
    # NOTE(review): as rendered here both literals are identical, so this is
    # a no-op; the original presumably compared a mis-encoded variant of
    # 'human' -- confirm against the source file.
    if sourceClass == 'human':
        sourceClass = 'human'

    assert sourceClass in oldNameToOldId
    oldId = oldNameToOldId[sourceClass]
    oldIdToNewId[oldId] = categoryId

categoryIdToCat = {}
for cat in categories:
    categoryIdToCat[cat['id']] = cat

print('Mapped {} original classes to {} new classes'.format(len(originalCategories),len(categories)))
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
#%% Re-map annotations

# Rewrite every annotation's category ID from the original numbering to
# the post-mapping numbering.
# ann = annotations[0]
for ann in annotations:

    ann['category_id'] = oldIdToNewId[ann['category_id']]
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
#%% Write output .json

# Assemble the four top-level CCT tables and serialize.
data = {}
data['info'] = info
data['images'] = images
data['annotations'] = annotations
data['categories'] = categories

# Use a context manager so the output file is flushed and closed even if
# serialization fails (the original passed an anonymous open() handle to
# json.dump and left closing to the garbage collector).
with open(outputJsonFilename,'w') as f:
    json.dump(data, f, indent=4)

print('Finished writing json to {}'.format(outputJsonFilename))
|
|
351
|
+
|
|
352
|
+
|
|
353
|
+
#%% Utilities

# Interactive scratch cells, deliberately disabled for normal runs.
if False:

    #%%
    # Find images with a particular tag
    className = 'hum'
    matches = []
    assert className in categoryNameToId
    catId = categoryNameToId[className]
    for ann in annotations:
        if ann['category_id'] == catId:
            imageId = ann['image_id']
            im = imageIdToImage[imageId]
            matches.append(im['file_name'])
    print('Found {} matches'.format(len(matches)))

    # NOTE: os.startfile() is Windows-only
    os.startfile(os.path.join(baseDir,matches[0]))


    #%% Randomly sample annotations

    import random
    nAnnotations = len(annotations)
    # random.randint() is inclusive at BOTH ends, so the upper bound must be
    # nAnnotations-1 (the original used randint(0,nAnnotations), which could
    # raise IndexError on the line below)
    iAnn = random.randint(0,nAnnotations-1)
    ann = annotations[iAnn]
    catId = ann['category_id']
    imageId = ann['image_id']
    im = imageIdToImage[imageId]
    fn = os.path.join(baseDir,im['file_name'])
    cat = categoryIdToCat[catId]
    className = cat['name']
    print('This should be a {}'.format(className))
    os.startfile(fn)
|