megadetector 5.0.27__py3-none-any.whl → 5.0.29__py3-none-any.whl
This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +232 -223
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +341 -338
- megadetector/detection/pytorch_detector.py +308 -266
- megadetector/detection/run_detector.py +186 -166
- megadetector/detection/run_detector_batch.py +366 -364
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +312 -253
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +291 -283
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +808 -311
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +220 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -0
- megadetector/postprocessing/load_api_results.py +25 -22
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -302
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1019 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1511 -406
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +73 -60
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2868
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +424 -404
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +126 -98
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.27.dist-info/RECORD +0 -208
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
--- a/megadetector/data_management/importers/helena_to_cct.py
+++ /dev/null
@@ -1,283 +0,0 @@
-"""
-
-helena_to_cct.py
-
-Convert the Helena Detections data set to a COCO-camera-traps .json file
-
-"""
-
-#%% Constants and environment
-
-import os
-import json
-import uuid
-import time
-import humanfriendly
-import numpy as np
-
-import pandas as pd
-
-from PIL import Image
-from datetime import datetime
-
-from megadetector.utils.path_utils import find_images
-
-base_directory = r'/mnt/blobfuse/wildlifeblobssc/'
-output_directory = r'/home/gramener'
-output_json_file = os.path.join(output_directory,'rspb.json')
-input_metadata_file = os.path.join(base_directory, 'StHelena_Detections.xlsx')
-image_directory = os.path.join(base_directory, 'StHELENA_images/')
-mapping_df = ''
-filename_col = 'image_name'
-load_width_and_height = True
-annotation_fields_to_copy = ['Fortnight', 'Detector', 'datetime', 'site']
-
-assert(os.path.isdir(image_directory))
-
-# This is one time process
-#%% Create Filenames and timestamps mapping CSV
-
-image_full_paths = find_images(image_directory, bRecursive=True)
-csv_file = os.path.join(output_directory, "mapping_names.csv")
-if not os.path.exists(csv_file):
-    map_list = []
-    for img_ in image_full_paths:
-        try:
-            date_cr = Image.open(img_)._getexif()[306]
-            _tmp = {}
-            # import pdb;pdb.set_trace()
-            img_path = img_.replace(image_directory, "")
-            img_folder = img_path.split("/")[0]
-            site = img_path.split("/")[1]
-            detector = img_path.split("/")[2]
-            _tmp["image_name"] = img_path
-            _tmp["Fortnight"] = img_folder.replace("Fortnight", "")
-            _tmp["site"] = site
-            _tmp["Detector"] = detector
-            _tmp["datetime"] = "-".join(date_cr.split(":")[:-1])
-            map_list.append(_tmp)
-        except Exception as e:
-            print(e)
-            print(img_)
-    mapping_df = pd.DataFrame(map_list)
-    mapping_df.to_csv(csv_file, index=False)
-else:
-    mapping_df = pd.read_csv(csv_file)
-
-#%% To create CCT JSON for RSPB dataset
-
-#%% Read source data
-input_metadata = pd.read_excel(input_metadata_file)
-
-print('Read {} columns and {} rows from metadata file'.format(len(input_metadata.columns),
-                                                               len(input_metadata)))
-
-# Original Excel file had timestamp in different columns
-input_metadata['datetime'] = input_metadata[['DATUM', 'Hour', 'Mins']].apply(lambda x: '{0} {1}-{2}'.format(datetime.strftime(x[0], '%Y-%m-%d'),"{0:0=2d}".format(x[1]),"{0:0=2d}".format(x[2])), axis = 1)
-input_metadata['Detector'] = "Detector"+input_metadata['Detector'].astype('str')
-result = pd.merge(input_metadata, mapping_df, how='left', on=['datetime', "Fortnight", "site", "Detector"])
-
-
-#%% Map filenames to rows, verify image existence
-
-start_time = time.time()
-filenames_to_rows = {}
-image_filenames = result[filename_col]
-image_filenames = list(set(image_filenames))
-
-missing_files = []
-duplicate_rows = []
-
-# Build up a map from filenames to a list of rows, checking image existence as we go
-for iFile, fn in enumerate(image_filenames):
-    try:
-        if fn == 'nan' or type(fn) == float:
-            pass
-        else:
-            if (fn in filenames_to_rows):
-                duplicate_rows.append(iFile)
-                filenames_to_rows[fn].append(iFile)
-            else:
-                filenames_to_rows[fn] = [iFile]
-            image_path = os.path.join(image_directory, fn)
-            if not os.path.isfile(image_path):
-                missing_files.append(fn)
-    except Exception as e:
-        pass
-
-elapsed = time.time() - start_time
-
-print('Finished verifying image existence in {}, found {} missing files (of {})'.format(
-    humanfriendly.format_timespan(elapsed),
-    len(missing_files),len(image_filenames)))
-
-#%% Skipping this check because one image has multiple species
-# assert len(duplicate_rows) == 0
-
-#%% Check for images that aren't included in the metadata file
-
-images_missing_from_metadata = []
-
-for iImage, image_path in enumerate(image_full_paths):
-
-    relative_path = os.path.relpath(image_path, image_directory)
-    if relative_path not in filenames_to_rows:
-        images_missing_from_metadata.append(relative_path)
-
-print('{} of {} files are not in metadata'.format(len(images_missing_from_metadata),len(image_full_paths)))
-
-#%% Create CCT dictionaries
-
-images = []
-annotations = []
-
-# Map categories to integer IDs
-#
-# The category '0' is reserved for 'empty'
-
-categories_to_category_id = {}
-categories_to_counts = {}
-categories_to_category_id['empty'] = 0
-categories_to_counts['empty'] = 0
-
-next_category_id = 1
-
-# For each image
-
-start_time = time.time()
-for image_name in image_filenames:
-
-    if type(image_name) != str:
-        continue
-
-    image_path = os.path.join(image_directory, image_name)
-    # Don't include images that don't exist on disk
-    if not os.path.isfile(image_path):
-        continue
-
-    im = {}
-    im['id'] = image_name.split('.')[0]
-    im['file_name'] = image_name
-
-    if load_width_and_height:
-        pilImage = Image.open(image_path)
-        width, height = pilImage.size
-        im['width'] = width
-        im['height'] = height
-    else:
-        im['width'] = -1
-        im['height'] = -1
-
-    images.append(im)
-
-    rows = filenames_to_rows[image_name]
-
-    # Some filenames will match to multiple rows
-    # assert(len(rows) == 1)
-
-    # iRow = rows[0]
-    for iRow in rows:
-        row = result.iloc[iRow]
-
-        category = row['Species']
-
-        # Have we seen this category before?
-        if category in categories_to_category_id:
-            categoryID = categories_to_category_id[category]
-            categories_to_counts[category] += 1
-        else:
-            categoryID = next_category_id
-            categories_to_category_id[category] = categoryID
-            categories_to_counts[category] = 1
-            next_category_id += 1
-
-        # Create an annotation
-        ann = {}
-
-        # The Internet tells me this guarantees uniqueness to a reasonable extent, even
-        # beyond the sheer improbability of collisions.
-        ann['id'] = str(uuid.uuid1())
-        ann['image_id'] = im['id']
-        ann['category_id'] = categoryID
-        # ann['datetime'] = row['datetime']
-        # ann['site'] = row['site']
-
-        for fieldname in annotation_fields_to_copy:
-            ann[fieldname] = row[fieldname]
-            if ann[fieldname] is np.nan:
-                ann[fieldname] = ''
-            ann[fieldname] = str(ann[fieldname])
-
-        annotations.append(ann)
-
-# ...for each image
-
-# Convert categories to a CCT-style dictionary
-categories = []
-
-for category in categories_to_counts:
-    print('Category {}, count {}'.format(
-        category, categories_to_counts[category]))
-    categoryID = categories_to_category_id[category]
-    cat = {}
-    cat['name'] = category
-    cat['id'] = categoryID
-    categories.append(cat)
-
-elapsed = time.time() - start_time
-print('Finished creating CCT dictionaries in {}'.format(
-    humanfriendly.format_timespan(elapsed)))
-
-
-#%% Create info struct
-
-info = {}
-info['year'] = 2012
-info['version'] = 1
-info['description'] = 'RSPB Dataset'
-info['contributor'] = 'Helena Detection'
-
-
-#%% Write output
-
-json_data = {}
-json_data['images'] = images
-json_data['annotations'] = annotations
-json_data['categories'] = categories
-json_data['info'] = info
-json.dump(json_data, open(output_json_file, 'w'), indent=4)
-
-print('Finished writing .json file with {} images, {} annotations, and {} categories'.format(
-    len(images), len(annotations), len(categories)))
-
-#%% Validate output
-
-from megadetector.data_management.databases import integrity_check_json_db
-
-options = integrity_check_json_db.IntegrityCheckOptions()
-options.baseDir = image_directory
-options.bCheckImageSizes = False
-options.bCheckImageExistence = False
-options.bFindUnusedImages = False
-data = integrity_check_json_db.integrity_check_json_db(output_json_file,options)
-
-
-#%% Preview labels
-
-from megadetector.visualization import visualize_db
-from megadetector.data_management.databases import integrity_check_json_db
-
-viz_options = visualize_db.DbVizOptions()
-viz_options.num_to_visualize = None
-viz_options.trim_to_images_with_bboxes = False
-viz_options.add_search_links = True
-viz_options.sort_by_filename = False
-viz_options.parallelize_rendering = True
-viz_options.classes_to_exclude = ['empty']
-html_output_file,image_db = visualize_db.visualize_db(db_path=output_json_file,
-                                                      output_dir=os.path.join(
-                                                          output_directory, 'RSPB/preview'),
-                                                      image_base_dir=image_directory,
-                                                      options=viz_options)
-os.startfile(html_output_file)
-
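For context, the removed helena_to_cct.py script above assembled a COCO Camera Traps (.json) structure before writing it to disk. The snippet below is a minimal illustrative sketch of that structure, not code from the package: field names follow the removed script, while every value (paths, species names, timestamps, dimensions) is made up for illustration.

import json
import uuid

# One image record; the script used the filename (minus extension) as the image ID
image = {
    'id': 'Fortnight1/SiteA/Detector1/IMG_0001',
    'file_name': 'Fortnight1/SiteA/Detector1/IMG_0001.JPG',
    'width': 2048,
    'height': 1536,
}

# One annotation record; the script generated annotation IDs with uuid1() and
# copied Fortnight/Detector/datetime/site from the metadata spreadsheet as strings
annotation = {
    'id': str(uuid.uuid1()),
    'image_id': image['id'],
    'category_id': 1,
    'Fortnight': '1',
    'Detector': 'Detector1',
    'datetime': '2012-06-01 07-30',
    'site': 'SiteA',
}

# Top-level CCT dictionary: category 0 is reserved for 'empty', and the 'info'
# block matches the constants hard-coded in the removed script
cct = {
    'images': [image],
    'annotations': [annotation],
    'categories': [{'name': 'empty', 'id': 0}, {'name': 'example_species', 'id': 1}],
    'info': {'year': 2012, 'version': 1,
             'description': 'RSPB Dataset', 'contributor': 'Helena Detection'},
}

print(json.dumps(cct, indent=4))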