megadetector 5.0.28__py3-none-any.whl → 10.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their public registry, and is provided for informational purposes only.
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/efficientnet/model.py +8 -8
- megadetector/classification/efficientnet/utils.py +6 -5
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +26 -26
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -2
- megadetector/data_management/camtrap_dp_to_coco.py +79 -46
- megadetector/data_management/cct_json_utils.py +103 -103
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +210 -193
- megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
- megadetector/data_management/databases/integrity_check_json_db.py +228 -200
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +88 -39
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +133 -125
- megadetector/data_management/labelme_to_yolo.py +159 -73
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
- megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +73 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
- megadetector/data_management/mewc_to_md.py +344 -340
- megadetector/data_management/ocr_tools.py +262 -255
- megadetector/data_management/read_exif.py +249 -227
- megadetector/data_management/remap_coco_categories.py +90 -28
- megadetector/data_management/remove_exif.py +81 -21
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +588 -120
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +248 -122
- megadetector/data_management/yolo_to_coco.py +333 -191
- megadetector/detection/change_detection.py +832 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +358 -278
- megadetector/detection/run_detector.py +399 -186
- megadetector/detection/run_detector_batch.py +404 -377
- megadetector/detection/run_inference_with_yolov5_val.py +340 -327
- megadetector/detection/run_tiled_inference.py +257 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +332 -295
- megadetector/postprocessing/add_max_conf.py +19 -11
- megadetector/postprocessing/categorize_detections_by_size.py +45 -45
- megadetector/postprocessing/classification_postprocessing.py +468 -433
- megadetector/postprocessing/combine_batch_outputs.py +23 -23
- megadetector/postprocessing/compare_batch_results.py +590 -525
- megadetector/postprocessing/convert_output_format.py +106 -102
- megadetector/postprocessing/create_crop_folder.py +347 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +48 -27
- megadetector/postprocessing/md_to_coco.py +133 -102
- megadetector/postprocessing/md_to_labelme.py +107 -90
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +92 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -301
- megadetector/postprocessing/remap_detection_categories.py +91 -38
- megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +156 -74
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/ct_utils.py +1049 -211
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +632 -529
- megadetector/utils/path_utils.py +1520 -431
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/split_locations_into_train_val.py +62 -62
- megadetector/utils/string_utils.py +148 -27
- megadetector/utils/url_utils.py +489 -176
- megadetector/utils/wi_utils.py +2658 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +34 -30
- megadetector/visualization/render_images_with_thumbnails.py +39 -74
- megadetector/visualization/visualization_utils.py +487 -435
- megadetector/visualization/visualize_db.py +232 -198
- megadetector/visualization/visualize_detector_output.py +82 -76
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
- megadetector-10.0.0.dist-info/RECORD +139 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
- megadetector/api/batch_processing/api_core/server.py +0 -294
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
- megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
- megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
- megadetector/api/batch_processing/api_core/server_utils.py +0 -88
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector/utils/azure_utils.py +0 -178
- megadetector/utils/sas_blob_utils.py +0 -509
- megadetector-5.0.28.dist-info/RECORD +0 -209
- /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
--- a/megadetector/api/synchronous/api_core/tests/load_test.py
+++ /dev/null
@@ -1,110 +0,0 @@
-
-import os
-import json
-import io
-import random
-import requests
-
-from PIL import Image
-from multiprocessing import Pool
-from datetime import datetime
-from requests_toolbelt import MultipartEncoder
-from requests_toolbelt.multipart import decoder
-
-
-ip_address = '100.100.200.200'
-port = 5050
-
-base_url = 'http://{}:{}/v1/camera-trap/sync/'.format(ip_address, port)
-
-
-def call_api(args):
-    start = datetime.now()
-
-    index, url, params, data, headers = args['index'],args['url'], args['params'], args['data'], args['headers']
-    print('calling api: {} starttime: {}'.format(index, start))
-
-    response = requests.post(url, params=params, data=data, headers=headers)
-    elapsed_time = datetime.now() - start
-    print('\napi {} status code: {}, elapsed time in seconds {}'.format(index, response.status_code, elapsed_time.total_seconds()))
-
-    get_detections(response)
-    return response
-
-def get_detections(response):
-    results = decoder.MultipartDecoder.from_response(response)
-    text_results = {}
-    images = {}
-    for part in results.parts:
-        # part is a BodyPart object with b'Content-Type' and b'Content-Disposition'; the latter includes 'name' and 'filename' info
-        headers = {}
-        for k, v in part.headers.items():
-            headers[k.decode(part.encoding)] = v.decode(part.encoding)
-
-        if headers.get('Content-Type', None) == 'application/json':
-            text_result = json.loads(part.content.decode())
-
-            print(text_result)
-
-
-def test_load(num_requests, params, max_images=1):
-    requests = []
-
-    # read the images anew for each request
-    index = 0
-    for i in range(num_requests):
-        index += 1
-        files = {}
-        sample_input_dir = '../../../api/synchronous/sample_input/test_images'
-
-        image_files = os.listdir(sample_input_dir)
-        random.shuffle(image_files)
-
-        num_images = 0
-        for i, image_name in enumerate(image_files):
-            if not image_name.lower().endswith('.jpg'):
-                continue
-
-            if num_images >= max_images:
-                break
-            else:
-                num_images += 1
-
-            img_path = os.path.join(sample_input_dir, image_name)
-            with open(img_path, 'rb') as f:
-                content = f.read()
-            files[image_name] = (image_name, content, 'image/jpeg')
-
-        m = MultipartEncoder(fields=files)
-        args = {
-            'index': index,
-            'url': base_url + 'detect',
-            'params': params,
-            'data': m,
-            'headers': {'Content-Type': m.content_type}
-        }
-        requests.append(args)
-
-    print('starting', num_requests, 'threads...')
-    # images are read and included in each request by the time we call the API in map()
-    with Pool(num_requests) as pool:
-        results = pool.map(call_api, requests)
-
-    return results
-
-
-if __name__ == "__main__":
-    params = {
-        'min_confidence': 0.05,
-        'min_rendering_confidence': 0.2,
-        'render': True
-    }
-
-    num_requests = 10
-    max_images = 1
-
-    start = datetime.now()
-    responses = test_load(num_requests, params, max_images=max_images)
-    end = datetime.now()
-    total_time = end - start
-    print('Total time for {} requests: {}'.format(num_requests, total_time))
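
For context, the deleted load test drove the synchronous API's detect endpoint with multipart-encoded JPEGs. Below is a minimal single-request sketch of the same call pattern, assuming a reachable service; the host, port, and image path are placeholders, while the endpoint and parameter names are taken from the deleted code.

import requests
from requests_toolbelt import MultipartEncoder

base_url = 'http://localhost:5050/v1/camera-trap/sync/'  # placeholder host/port
params = {'min_confidence': 0.05, 'min_rendering_confidence': 0.2, 'render': True}

# Read one image and attach it as a multipart field, as the deleted test did
with open('test.jpg', 'rb') as f:  # placeholder image path
    content = f.read()
m = MultipartEncoder(fields={'test.jpg': ('test.jpg', content, 'image/jpeg')})

response = requests.post(base_url + 'detect', params=params, data=m,
                         headers={'Content-Type': m.content_type})
print(response.status_code)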
--- a/megadetector/data_management/importers/add_nacti_sizes.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-
-add_nacti_sizes.py
-
-NACTI bounding box metadata was posted before we included width and height as semi-standard
-fields; pull size information from the main metadata file and add to the bbox file.
-
-"""
-
-#%% Constants and environment
-
-import json
-from tqdm import tqdm
-
-input_file = 'G:/temp/nacti_metadata.json'
-input_bbox_file = 'G:/temp/nacti_20200401_bboxes.json'
-output_bbox_file = 'G:/temp/nacti_20230920_bboxes.json'
-
-
-#%% Read .json files
-
-with open(input_file,'r') as f:
-    input_data = json.load(f)
-
-with open(input_bbox_file,'r') as f:
-    input_bbox_data = json.load(f)
-
-print('Finished reading .json data')
-
-
-#%% Map image names to width and height
-
-filename_to_size = {}
-for im in tqdm(input_data['images']):
-    filename_to_size[im['file_name']] = (im['width'],im['height'])
-
-
-#%% Add to output data
-
-for im in tqdm(input_bbox_data['images']):
-    size = filename_to_size[im['file_name']]
-    im['width'] = size[0]
-    im['height'] = size[1]
-
-
-#%% Write output
-
-output_bbox_data = input_bbox_data
-output_bbox_data['version'] = '2023-09-20'
-
-with open(output_bbox_file,'w') as f:
-    json.dump(output_bbox_data,f,indent=1)
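
The deleted script above is a simple key join: build a file_name → (width, height) map from the primary COCO metadata, then stamp those fields onto the bbox file's image records. A self-contained sketch of that join, with made-up values for illustration:

metadata = {'images': [{'file_name': 'a.jpg', 'width': 2048, 'height': 1536}]}
bbox_data = {'images': [{'file_name': 'a.jpg'}]}

filename_to_size = {im['file_name']: (im['width'], im['height'])
                    for im in metadata['images']}

for im in bbox_data['images']:
    # Raises KeyError if an image is missing from the metadata, as the original would
    w, h = filename_to_size[im['file_name']]
    im['width'] = w
    im['height'] = h

assert bbox_data['images'][0]['width'] == 2048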
--- a/megadetector/data_management/importers/add_timestamps_to_icct.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""
-
-add_timestamps_to_icct.py
-
-The Island Conservation Camera Traps dataset was originally posted without timestamps
-in either .json metadata or EXIF metadata. We pulled timestamps out using ocr_tools.py;
-this script adds those timestamps into the .json metadata.
-
-"""
-
-#%% Imports and constants
-
-import json
-
-ocr_results_file = r'g:\temp\ocr_results.2023.10.31.07.37.54.json'
-input_metadata_file = r'd:\lila\islandconservationcameratraps\island_conservation.json'
-output_metadata_file = r'g:\temp\island_conservation_camera_traps_1.02.json'
-ocr_results_file_base = 'g:/temp/island_conservation_camera_traps/'
-assert ocr_results_file_base.endswith('/')
-
-
-#%% Read input metadata
-
-with open(input_metadata_file,'r') as f:
-    input_metadata = json.load(f)
-
-assert input_metadata['info']['version'] == '1.01'
-
-# im = input_metadata['images'][0]
-for im in input_metadata['images']:
-    assert 'datetime' not in im
-
-
-#%% Read OCR results
-
-with open(ocr_results_file,'r') as f:
-    abs_filename_to_ocr_results = json.load(f)
-
-relative_filename_to_ocr_results = {}
-
-for fn_abs in abs_filename_to_ocr_results:
-    assert ocr_results_file_base in fn_abs
-    fn_relative = fn_abs.replace(ocr_results_file_base,'')
-    relative_filename_to_ocr_results[fn_relative] = abs_filename_to_ocr_results[fn_abs]
-
-
-#%% Add datetimes to metadata
-
-images_not_in_datetime_results = []
-images_with_failed_datetimes = []
-
-for i_image,im in enumerate(input_metadata['images']):
-    if im['file_name'] not in relative_filename_to_ocr_results:
-        images_not_in_datetime_results.append(im)
-        im['datetime'] = None
-        continue
-    ocr_results = relative_filename_to_ocr_results[im['file_name']]
-    if ocr_results['datetime'] is None:
-        images_with_failed_datetimes.append(im)
-        im['datetime'] = None
-        continue
-    im['datetime'] = ocr_results['datetime']
-
-print('{} of {} images were not in datetime results'.format(
-    len(images_not_in_datetime_results),len(input_metadata['images'])))
-
-print('{} of {} images had failed datetime results'.format(
-    len(images_with_failed_datetimes),len(input_metadata['images'])))
-
-for im in input_metadata['images']:
-    assert 'datetime' in im
-
-
-#%% Write output
-
-input_metadata['info']['version'] = '1.02'
-
-with open(output_metadata_file,'w') as f:
-    json.dump(input_metadata,f,indent=1)
--- a/megadetector/data_management/importers/animl_results_to_md_results.py
+++ /dev/null
@@ -1,158 +0,0 @@
-"""
-
-animl_results_to_md_results.py
-
-Convert a .csv file produced by the Animl package:
-
-https://github.com/conservationtechlab/animl-py
-
-...to an MD results file suitable for import into Timelapse.
-
-Columns are expected to be:
-
-file
-category (MD category identifiers: 1==animal, 2==person, 3==vehicle)
-detection_conf
-bbox1,bbox2,bbox3,bbox4
-class
-classification_conf
-
-"""
-
-#%% Imports and constants
-
-import pandas as pd
-import json
-
-# It's a little icky to hard-code this here rather than importing from elsewhere
-# in the MD repo, but it seemed silly to take a dependency on lots of MD code
-# just for this, so, hard-coding.
-detection_category_id_to_name = {'1':'animal','2':'person','3':'vehicle'}
-
-
-#%% Main function
-
-def animl_results_to_md_results(input_file,output_file=None):
-    """
-    Converts the .csv file [input_file] to the MD-formatted .json file [output_file].
-
-    If [output_file] is None, '.json' will be appended to the input file.
-    """
-
-    if output_file is None:
-        output_file = input_file + '.json'
-
-    df = pd.read_csv(input_file)
-
-    expected_columns = ('file','category','detection_conf',
-                        'bbox1','bbox2','bbox3','bbox4','class','classification_conf')
-
-    for s in expected_columns:
-        assert s in df.columns,\
-            'Expected column {} not found'.format(s)
-
-    classification_category_name_to_id = {}
-    filename_to_results = {}
-
-    # i_row = 0; row = df.iloc[i_row]
-    for i_row,row in df.iterrows():
-
-        # Is this the first detection we've seen for this file?
-        if row['file'] in filename_to_results:
-            im = filename_to_results[row['file']]
-        else:
-            im = {}
-            im['detections'] = []
-            im['file'] = row['file']
-            filename_to_results[im['file']] = im
-
-        assert isinstance(row['category'],int),'Invalid category identifier in row {}'.format(im['file'])
-        detection_category_id = str(row['category'])
-        assert detection_category_id in detection_category_id_to_name,\
-            'Unrecognized detection category ID {}'.format(detection_category_id)
-
-        detection = {}
-        detection['category'] = detection_category_id
-        detection['conf'] = row['detection_conf']
-        bbox = [row['bbox1'],row['bbox2'],row['bbox3'],row['bbox4']]
-        detection['bbox'] = bbox
-        classification_category_name = row['class']
-
-        # Have we seen this classification category before?
-        if classification_category_name in classification_category_name_to_id:
-            classification_category_id = \
-                classification_category_name_to_id[classification_category_name]
-        else:
-            classification_category_id = str(len(classification_category_name_to_id))
-            classification_category_name_to_id[classification_category_name] = \
-                classification_category_id
-
-        classifications = [[classification_category_id,row['classification_conf']]]
-        detection['classifications'] = classifications
-
-        im['detections'].append(detection)
-
-    # ...for each row
-
-    info = {}
-    info['format_version'] = '1.3'
-    info['detector'] = 'Animl'
-    info['classifier'] = 'Animl'
-
-    results = {}
-    results['info'] = info
-    results['detection_categories'] = detection_category_id_to_name
-    results['classification_categories'] = \
-        {v: k for k, v in classification_category_name_to_id.items()}
-    results['images'] = list(filename_to_results.values())
-
-    with open(output_file,'w') as f:
-        json.dump(results,f,indent=1)
-
-# ...animl_results_to_md_results(...)
-
-
-#%% Interactive driver
-
-if False:
-
-    pass
-
-    #%%
-
-    input_file = r"G:\temp\animl-runs\animl-runs\Coati_v2\manifest.csv"
-    output_file = None
-    animl_results_to_md_results(input_file,output_file)
-
-
-#%% Command-line driver
-
-import sys,argparse
-
-def main():
-
-    parser = argparse.ArgumentParser(
-        description='Convert an Animl-formatted .csv results file to an MD-formatted .json results file')
-
-    parser.add_argument(
-        'input_file',
-        type=str,
-        help='input .csv file')
-
-    parser.add_argument(
-        '--output_file',
-        type=str,
-        default=None,
-        help='output .json file (defaults to input file appended with ".json")')
-
-    if len(sys.argv[1:]) == 0:
-        parser.print_help()
-        parser.exit()
-
-    args = parser.parse_args()
-
-    animl_results_to_md_results(args.input_file,args.output_file)
-
-if __name__ == '__main__':
-    main()
-
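
For reference, the MD-formatted output that the deleted converter wrote has the following shape; this is a minimal sketch with made-up file names, categories, and confidence values, but the keys are taken directly from the code above.

example_output = {
    'info': {'format_version': '1.3', 'detector': 'Animl', 'classifier': 'Animl'},
    'detection_categories': {'1': 'animal', '2': 'person', '3': 'vehicle'},
    'classification_categories': {'0': 'coati'},  # built up from the 'class' column
    'images': [
        {
            'file': 'img0001.jpg',
            'detections': [
                {
                    'category': '1',
                    'conf': 0.97,
                    'bbox': [0.1, 0.2, 0.3, 0.4],
                    'classifications': [['0', 0.88]]
                }
            ]
        }
    ]
}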