megadetector 5.0.8__py3-none-any.whl → 5.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of megadetector might be problematic.
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -1
- api/batch_processing/api_core/server_job_status_table.py +0 -1
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -1
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -1
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +65 -65
- api/batch_processing/data_preparation/manage_video_batch.py +8 -8
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -1
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -1
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +12 -12
- api/batch_processing/postprocessing/categorize_detections_by_size.py +32 -14
- api/batch_processing/postprocessing/combine_api_outputs.py +68 -54
- api/batch_processing/postprocessing/compare_batch_results.py +113 -43
- api/batch_processing/postprocessing/convert_output_format.py +41 -16
- api/batch_processing/postprocessing/load_api_results.py +16 -17
- api/batch_processing/postprocessing/md_to_coco.py +31 -21
- api/batch_processing/postprocessing/md_to_labelme.py +52 -22
- api/batch_processing/postprocessing/merge_detections.py +14 -14
- api/batch_processing/postprocessing/postprocess_batch_results.py +246 -174
- api/batch_processing/postprocessing/remap_detection_categories.py +32 -25
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +60 -27
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +53 -44
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +25 -14
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +242 -158
- api/batch_processing/postprocessing/separate_detections_into_folders.py +159 -114
- api/batch_processing/postprocessing/subset_json_detector_output.py +146 -169
- api/batch_processing/postprocessing/top_folders_to_bottom.py +77 -43
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -2
- api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -268
- api/synchronous/api_core/animal_detection_api/config.py +35 -35
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +109 -109
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +21 -24
- classification/analyze_failed_images.py +11 -13
- classification/cache_batchapi_outputs.py +51 -51
- classification/create_classification_dataset.py +69 -68
- classification/crop_detections.py +54 -53
- classification/csv_to_json.py +97 -100
- classification/detect_and_crop.py +105 -105
- classification/evaluate_model.py +43 -42
- classification/identify_mislabeled_candidates.py +47 -46
- classification/json_to_azcopy_list.py +10 -10
- classification/json_validator.py +72 -71
- classification/map_classification_categories.py +44 -43
- classification/merge_classification_detection_output.py +68 -68
- classification/prepare_classification_script.py +157 -154
- classification/prepare_classification_script_mc.py +228 -228
- classification/run_classifier.py +27 -26
- classification/save_mislabeled.py +30 -30
- classification/train_classifier.py +20 -20
- classification/train_classifier_tf.py +21 -22
- classification/train_utils.py +10 -10
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +18 -31
- data_management/camtrap_dp_to_coco.py +238 -0
- data_management/cct_json_utils.py +102 -59
- data_management/cct_to_md.py +176 -158
- data_management/cct_to_wi.py +247 -219
- data_management/coco_to_labelme.py +272 -263
- data_management/coco_to_yolo.py +79 -58
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +20 -16
- data_management/databases/combine_coco_camera_traps_files.py +35 -31
- data_management/databases/integrity_check_json_db.py +62 -24
- data_management/databases/subset_json_db.py +24 -15
- data_management/generate_crops_from_cct.py +27 -45
- data_management/get_image_sizes.py +188 -162
- data_management/importers/add_nacti_sizes.py +8 -8
- data_management/importers/add_timestamps_to_icct.py +78 -78
- data_management/importers/animl_results_to_md_results.py +158 -158
- data_management/importers/auckland_doc_test_to_json.py +9 -9
- data_management/importers/auckland_doc_to_json.py +8 -8
- data_management/importers/awc_to_json.py +7 -7
- data_management/importers/bellevue_to_json.py +15 -15
- data_management/importers/cacophony-thermal-importer.py +13 -13
- data_management/importers/carrizo_shrubfree_2018.py +8 -8
- data_management/importers/carrizo_trail_cam_2017.py +8 -8
- data_management/importers/cct_field_adjustments.py +9 -9
- data_management/importers/channel_islands_to_cct.py +10 -10
- data_management/importers/eMammal/copy_and_unzip_emammal.py +1 -0
- data_management/importers/ena24_to_json.py +7 -7
- data_management/importers/filenames_to_json.py +8 -8
- data_management/importers/helena_to_cct.py +7 -7
- data_management/importers/idaho-camera-traps.py +7 -7
- data_management/importers/idfg_iwildcam_lila_prep.py +10 -10
- data_management/importers/jb_csv_to_json.py +9 -9
- data_management/importers/mcgill_to_json.py +8 -8
- data_management/importers/missouri_to_json.py +18 -18
- data_management/importers/nacti_fieldname_adjustments.py +10 -10
- data_management/importers/noaa_seals_2019.py +7 -7
- data_management/importers/pc_to_json.py +7 -7
- data_management/importers/plot_wni_giraffes.py +7 -7
- data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -359
- data_management/importers/prepare_zsl_imerit.py +7 -7
- data_management/importers/rspb_to_json.py +8 -8
- data_management/importers/save_the_elephants_survey_A.py +8 -8
- data_management/importers/save_the_elephants_survey_B.py +9 -9
- data_management/importers/snapshot_safari_importer.py +26 -26
- data_management/importers/snapshot_safari_importer_reprise.py +665 -665
- data_management/importers/snapshot_serengeti_lila.py +14 -14
- data_management/importers/sulross_get_exif.py +8 -9
- data_management/importers/timelapse_csv_set_to_json.py +11 -11
- data_management/importers/ubc_to_json.py +13 -13
- data_management/importers/umn_to_json.py +7 -7
- data_management/importers/wellington_to_json.py +8 -8
- data_management/importers/wi_to_json.py +9 -9
- data_management/importers/zamba_results_to_md_results.py +181 -181
- data_management/labelme_to_coco.py +65 -24
- data_management/labelme_to_yolo.py +8 -8
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +9 -9
- data_management/lila/add_locations_to_nacti.py +147 -147
- data_management/lila/create_lila_blank_set.py +13 -13
- data_management/lila/create_lila_test_set.py +8 -8
- data_management/lila/create_links_to_md_results_files.py +106 -106
- data_management/lila/download_lila_subset.py +44 -110
- data_management/lila/generate_lila_per_image_labels.py +55 -42
- data_management/lila/get_lila_annotation_counts.py +18 -15
- data_management/lila/get_lila_image_counts.py +11 -11
- data_management/lila/lila_common.py +96 -33
- data_management/lila/test_lila_metadata_urls.py +132 -116
- data_management/ocr_tools.py +173 -128
- data_management/read_exif.py +110 -97
- data_management/remap_coco_categories.py +83 -83
- data_management/remove_exif.py +58 -62
- data_management/resize_coco_dataset.py +30 -23
- data_management/wi_download_csv_to_coco.py +246 -239
- data_management/yolo_output_to_md_output.py +86 -73
- data_management/yolo_to_coco.py +300 -60
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/process_video.py +85 -33
- detection/pytorch_detector.py +43 -25
- detection/run_detector.py +157 -72
- detection/run_detector_batch.py +179 -113
- detection/run_inference_with_yolov5_val.py +108 -48
- detection/run_tiled_inference.py +111 -40
- detection/tf_detector.py +51 -29
- detection/video_utils.py +606 -521
- docs/source/conf.py +43 -0
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +9 -9
- md_utils/ct_utils.py +228 -68
- md_utils/directory_listing.py +59 -64
- md_utils/md_tests.py +968 -871
- md_utils/path_utils.py +460 -134
- md_utils/process_utils.py +157 -133
- md_utils/sas_blob_utils.py +20 -20
- md_utils/split_locations_into_train_val.py +45 -32
- md_utils/string_utils.py +33 -10
- md_utils/url_utils.py +176 -60
- md_utils/write_html_image_list.py +40 -33
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +102 -109
- md_visualization/render_images_with_thumbnails.py +34 -34
- md_visualization/visualization_utils.py +597 -291
- md_visualization/visualize_db.py +76 -48
- md_visualization/visualize_detector_output.py +61 -42
- {megadetector-5.0.8.dist-info → megadetector-5.0.9.dist-info}/METADATA +13 -7
- megadetector-5.0.9.dist-info/RECORD +224 -0
- {megadetector-5.0.8.dist-info → megadetector-5.0.9.dist-info}/top_level.txt +1 -0
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +342 -335
- taxonomy_mapping/map_new_lila_datasets.py +154 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -134
- taxonomy_mapping/preview_lila_taxonomy.py +591 -591
- taxonomy_mapping/retrieve_sample_image.py +12 -12
- taxonomy_mapping/simple_image_download.py +11 -11
- taxonomy_mapping/species_lookup.py +10 -10
- taxonomy_mapping/taxonomy_csv_checker.py +18 -18
- taxonomy_mapping/taxonomy_graph.py +47 -47
- taxonomy_mapping/validate_lila_category_mappings.py +83 -76
- data_management/cct_json_to_filename_json.py +0 -89
- data_management/cct_to_csv.py +0 -140
- data_management/databases/remove_corrupted_images_from_db.py +0 -191
- detection/detector_training/copy_checkpoints.py +0 -43
- megadetector-5.0.8.dist-info/RECORD +0 -205
- {megadetector-5.0.8.dist-info → megadetector-5.0.9.dist-info}/LICENSE +0 -0
- {megadetector-5.0.8.dist-info → megadetector-5.0.9.dist-info}/WHEEL +0 -0
api/synchronous/api_core/animal_detection_api/api_frontend.py

@@ -1,268 +1,266 @@

The removed and added versions of this file are textually identical as extracted here (268 lines before, 266 after), so the change appears to be whitespace-only. The 5.0.9 contents:

#
# api_frontend.py
#
# Defines the Flask app, which takes requests (one or more images) from
# remote callers and pushes the images onto the shared Redis queue, to be processed
# by the main service in api_backend.py .
#

#%% Imports

import os
import json
import time
import uuid
import redis
import shutil
import argparse
import traceback

from io import BytesIO
from flask import Flask, Response, jsonify, make_response, request
from requests_toolbelt.multipart.encoder import MultipartEncoder

import md_visualization.visualization_utils as vis_utils
import config


#%% Initialization

app = Flask(__name__)
db = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)


#%% Support functions

def _make_error_object(error_code, error_message):

    # Make a dict that the request_processing_function can return to the endpoint
    # function to notify it of an error
    return {
        'error_message': error_message,
        'error_code': error_code
    }


def _make_error_response(error_code, error_message):

    return make_response(jsonify({'error': error_message}), error_code)


def has_access(request):

    if not os.path.exists(config.API_KEYS_FILE):
        return True
    else:
        if not request.headers.get('key'):
            print('Key header not available')
            return False
        else:
            API_key = request.headers.get('key').strip().lower()
            with open(config.API_KEYS_FILE, "r") as f:
                for line in f:
                    valid_key = line.strip().lower()
                    if valid_key == API_key:
                        return True

    return False


def check_posted_data(request):

    files = request.files
    params = request.args

    # Verify that the content uploaded is not too big
    #
    # request.content_length is the length of the total payload
    content_length = request.content_length
    if not content_length:
        return _make_error_object(411, 'No image(s) were sent, or content length cannot be determined.')
    if content_length > config.MAX_CONTENT_LENGTH_IN_MB * 1024 * 1024:
        return _make_error_object(413, ('Payload size {:.2f} MB exceeds the maximum allowed of {} MB. '
                                        'Please upload fewer or more compressed images.').format(
                                        content_length / (1024 * 1024), config.MAX_CONTENT_LENGTH_IN_MB))

    render_boxes = True if params.get('render', '').lower() == 'true' else False

    if 'min_confidence' in params:
        return_confidence_threshold = float(params['min_confidence'])
        print('runserver, post_detect_sync, user specified detection confidence: ', return_confidence_threshold)
        if return_confidence_threshold < 0.0 or return_confidence_threshold > 1.0:
            return _make_error_object(400, 'Detection confidence threshold {} is invalid, should be between 0.0 and 1.0.'.format(
                return_confidence_threshold))
    else:
        return_confidence_threshold = config.DEFAULT_CONFIDENCE_THRESHOLD

    if 'min_rendering_confidence' in params:
        rendering_confidence_threshold = float(params['min_rendering_confidence'])
        print('runserver, post_detect_sync, user specified rendering confidence: ', rendering_confidence_threshold)
        if rendering_confidence_threshold < 0.0 or rendering_confidence_threshold > 1.0:
            return _make_error_object(400, 'Rendering confidence threshold {} is invalid, should be between 0.0 and 1.0.'.format(
                rendering_confidence_threshold))
    else:
        rendering_confidence_threshold = config.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD

    # Verify that the number of images is acceptable
    num_images = sum([1 if file.content_type in config.IMAGE_CONTENT_TYPES else 0 for file in files.values()])
    print('runserver, post_detect_sync, number of images received: ', num_images)

    if num_images > config.MAX_IMAGES_ACCEPTED:
        return _make_error_object(413, 'Too many images. Maximum number of images that can be processed in one call is {}.'.format(config.MAX_IMAGES_ACCEPTED))
    elif num_images == 0:
        return _make_error_object(400, 'No image(s) of accepted types (image/jpeg, image/png, application/octet-stream) received.')

    return {
        'render_boxes': render_boxes,
        'return_confidence_threshold': return_confidence_threshold,
        'rendering_confidence_threshold': rendering_confidence_threshold
    }

# ...def check_posted_data(request)


#%% Main loop

@app.route(config.API_PREFIX + '/detect', methods = ['POST'])
def detect_sync():

    if not has_access(request):
        print('Access denied, please provide a valid API key')
        return _make_error_response(403, 'Access denied, please provide a valid API key')

    # Check whether the request_processing_function had an error
    post_data = check_posted_data(request)
    if post_data.get('error_code', None) is not None:
        return _make_error_response(post_data.get('error_code'), post_data.get('error_message'))

    render_boxes = post_data.get('render_boxes')
    return_confidence_threshold = post_data.get('return_confidence_threshold')
    rendering_confidence_threshold = post_data.get('rendering_confidence_threshold')

    redis_id = str(uuid.uuid4())
    d = {'id': redis_id, 'render_boxes': render_boxes, 'return_confidence_threshold': return_confidence_threshold}
    temp_direc = os.path.join(config.TEMP_FOLDER, redis_id)

    try:

        try:
            # Write images to temporary files
            #
            # TODO: read from memory rather than using intermediate files
            os.makedirs(temp_direc,exist_ok=True)
            for name, file in request.files.items():
                if file.content_type in config.IMAGE_CONTENT_TYPES:
                    filename = request.files[name].filename
                    image_path = os.path.join(temp_direc, filename)
                    print('Saving image {} to {}'.format(name,image_path))
                    file.save(image_path)
                    assert os.path.isfile(image_path),'Error creating file {}'.format(image_path)

        except Exception as e:
            return _make_error_object(500, 'Error saving images: ' + str(e))

        # Submit the image(s) for processing by api_backend.py, who is waiting on this queue
        db.rpush(config.REDIS_QUEUE_NAME, json.dumps(d))

        while True:

            # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
            result = db.get(redis_id)

            if result:

                result = json.loads(result.decode())
                print('Processing result {}'.format(str(result)))

                if result['status'] == 200:
                    detections = result['detections']
                    db.delete(redis_id)

                else:
                    db.delete(redis_id)
                    print('Detection error: ' + str(result))
                    return _make_error_response(500, 'Detection error: ' + str(result))

                try:
                    print('detect_sync: postprocessing and sending images back...')
                    fields = {
                        'detection_result': ('detection_result', json.dumps(detections), 'application/json'),
                    }

                    if render_boxes and result['status'] == 200:

                        print('Rendering images')

                        for image_name, detections in detections.items():

                            #image = Image.open(os.path.join(temp_direc, image_name))
                            image = open(f'{temp_direc}/{image_name}', "rb")
                            image = vis_utils.load_image(image)
                            width, height = image.size

                            _detections = []
                            for d in detections:
                                y1,x1,y2,x2 = d[0:4]
                                width = x2 - x1
                                height = y2 - y1
                                bbox = [x1,y1,width,height]
                                _detections.append({'bbox': bbox, 'conf': d[4], 'category': d[5]})

                            vis_utils.render_detection_bounding_boxes(_detections, image,
                                confidence_threshold=rendering_confidence_threshold)

                            output_img_stream = BytesIO()
                            image.save(output_img_stream, format='jpeg')
                            output_img_stream.seek(0)
                            fields[image_name] = (image_name, output_img_stream, 'image/jpeg')
                        print('Done rendering images')

                    m = MultipartEncoder(fields=fields)
                    return Response(m.to_string(), mimetype=m.content_type)

                except Exception as e:

                    print(traceback.format_exc())
                    print('Error returning result or rendering the detection boxes: ' + str(e))

                finally:

                    try:
                        print('Removing temporary files')
                        shutil.rmtree(temp_direc)
                    except Exception as e:
                        print('Error removing temporary folder {}: {}'.format(temp_direc,str(e)))

            else:
                time.sleep(0.005)

            # ...if we do/don't have a request available on the queue

        # ...while(True)

    except Exception as e:

        print(traceback.format_exc())
        return _make_error_object(500, 'Error processing images: ' + str(e))

# ...def detect_sync()


#%% Command-line driver

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='api frontend')

    # use --non-docker if you are testing without Docker
    #
    # python api_frontend.py --non-docker
    parser.add_argument('--non-docker', action="store_true", default=False)
    args = parser.parse_args()

    if args.non_docker:
        app.run(host='0.0.0.0', port=5050)
    else:
        app.run()
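For orientation, a minimal sketch of how a caller might exercise this `/detect` endpoint follows; it is not part of the package. It assumes the frontend is running locally in `--non-docker` mode on port 5050, that `camera1.jpg` is a local test image, and that the `key` header is only needed when `allowed_keys.txt` exists on the server; the output filename `rendered_output.jpg` is likewise just an illustration.

```python
# Hypothetical client for the synchronous detection endpoint shown above.
import json
import requests
from requests_toolbelt.multipart import decoder

# config.API_PREFIX + '/detect', assuming a local --non-docker run on port 5050
url = 'http://localhost:5050/v1/camera-trap/sync/detect'

with open('camera1.jpg', 'rb') as f:
    response = requests.post(
        url,
        params={'render': 'true', 'min_confidence': '0.1', 'min_rendering_confidence': '0.3'},
        files={'camera1.jpg': ('camera1.jpg', f, 'image/jpeg')},
        headers={'key': 'my-api-key'},  # only checked if allowed_keys.txt exists server-side
        timeout=120)

response.raise_for_status()

# The response is multipart: a JSON part named 'detection_result', plus
# (when render=true) one image/jpeg part per rendered image.
for part in decoder.MultipartDecoder.from_response(response).parts:
    disposition = part.headers[b'Content-Disposition'].decode()
    if 'detection_result' in disposition:
        print(json.loads(part.text))
    else:
        with open('rendered_output.jpg', 'wb') as out:
            out.write(part.content)
```

The 'detection_result' part always carries the JSON detections; rendered JPEGs are only included when `render=true` is passed.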
api/synchronous/api_core/animal_detection_api/config.py

@@ -1,35 +1,35 @@

The removed and added versions of this file are also textually identical as extracted here (35 lines each); the change is not visible in this view and is likely whitespace-only. The 5.0.9 contents:

## Camera trap real-time API configuration

REDIS_HOST = 'localhost'

REDIS_PORT = 6379

# Full path to the temporary folder for image storage, only meaningful
# within the Docker container
TEMP_FOLDER = '/app/temp'

REDIS_QUEUE_NAME = 'camera-trap-queue'

# Upper limit on total content length (all images and parameters)
MAX_CONTENT_LENGTH_IN_MB = 5 * 8 # 5MB per image * number of images allowed

MAX_IMAGES_ACCEPTED = 8

IMAGE_CONTENT_TYPES = ['image/png', 'application/octet-stream', 'image/jpeg']

DETECTOR_MODEL_PATH = '/app/animal_detection_api/model/md_v5a.0.0.pt'

DETECTOR_MODEL_VERSION = 'v5a.0.0'

# Minimum confidence threshold for detections
DEFAULT_CONFIDENCE_THRESHOLD = 0.01

# Minimum confidence threshold for showing a bounding box on the output image
DEFAULT_RENDERING_CONFIDENCE_THRESHOLD = 0.2

API_PREFIX = '/v1/camera-trap/sync'

API_KEYS_FILE = 'allowed_keys.txt'

# Use this when testing without Docker
DETECTOR_MODEL_PATH_DEBUG = 'model/md_v5a.0.0.pt'
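api_backend.py itself is not part of this diff, so the sketch below is only an illustration of the queue contract that api_frontend.py and this config imply: a worker pops a job from REDIS_QUEUE_NAME, finds the saved images under TEMP_FOLDER/<job id>, and stores a JSON result under the job id with a 'status' field and per-image detection lists of the form [y1, x1, y2, x2, confidence, category]. The `fake_detector` function is a placeholder, not the package's actual detector API.

```python
# Illustrative worker sketch only; the real worker is api_backend.py, not shown here.
import os
import json
import redis

import config

db = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)

def fake_detector(image_path):
    # Placeholder for the real MegaDetector inference call
    return [[0.1, 0.1, 0.9, 0.9, 0.85, '1']]

while True:
    # Block until api_frontend.py pushes a job onto the shared queue
    _, payload = db.blpop(config.REDIS_QUEUE_NAME)
    job = json.loads(payload)
    temp_direc = os.path.join(config.TEMP_FOLDER, job['id'])

    detections = {}
    for image_name in os.listdir(temp_direc):
        detections[image_name] = fake_detector(os.path.join(temp_direc, image_name))

    # The frontend polls db.get(job['id']) for this result
    db.set(job['id'], json.dumps({'status': 200, 'detections': detections}))
```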