megadetector 5.0.29__py3-none-any.whl → 10.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of megadetector has been flagged as possibly problematic.
- megadetector/classification/efficientnet/model.py +8 -8
- megadetector/classification/efficientnet/utils.py +6 -5
- megadetector/classification/prepare_classification_script_mc.py +3 -3
- megadetector/data_management/annotations/annotation_constants.py +0 -1
- megadetector/data_management/camtrap_dp_to_coco.py +34 -1
- megadetector/data_management/cct_json_utils.py +2 -2
- megadetector/data_management/coco_to_yolo.py +22 -5
- megadetector/data_management/databases/add_width_and_height_to_db.py +85 -12
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +2 -2
- megadetector/data_management/databases/integrity_check_json_db.py +29 -15
- megadetector/data_management/generate_crops_from_cct.py +50 -1
- megadetector/data_management/labelme_to_coco.py +4 -2
- megadetector/data_management/labelme_to_yolo.py +82 -2
- megadetector/data_management/lila/generate_lila_per_image_labels.py +276 -18
- megadetector/data_management/lila/get_lila_annotation_counts.py +5 -3
- megadetector/data_management/lila/lila_common.py +3 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +15 -5
- megadetector/data_management/mewc_to_md.py +5 -0
- megadetector/data_management/ocr_tools.py +4 -3
- megadetector/data_management/read_exif.py +20 -5
- megadetector/data_management/remap_coco_categories.py +66 -4
- megadetector/data_management/remove_exif.py +50 -1
- megadetector/data_management/rename_images.py +3 -3
- megadetector/data_management/resize_coco_dataset.py +563 -95
- megadetector/data_management/yolo_output_to_md_output.py +131 -2
- megadetector/data_management/yolo_to_coco.py +140 -5
- megadetector/detection/change_detection.py +4 -3
- megadetector/detection/pytorch_detector.py +60 -22
- megadetector/detection/run_detector.py +225 -25
- megadetector/detection/run_detector_batch.py +42 -16
- megadetector/detection/run_inference_with_yolov5_val.py +12 -2
- megadetector/detection/run_tiled_inference.py +1 -0
- megadetector/detection/video_utils.py +53 -24
- megadetector/postprocessing/add_max_conf.py +4 -0
- megadetector/postprocessing/categorize_detections_by_size.py +1 -1
- megadetector/postprocessing/classification_postprocessing.py +55 -20
- megadetector/postprocessing/combine_batch_outputs.py +3 -2
- megadetector/postprocessing/compare_batch_results.py +64 -10
- megadetector/postprocessing/convert_output_format.py +12 -8
- megadetector/postprocessing/create_crop_folder.py +137 -10
- megadetector/postprocessing/load_api_results.py +26 -8
- megadetector/postprocessing/md_to_coco.py +4 -4
- megadetector/postprocessing/md_to_labelme.py +18 -7
- megadetector/postprocessing/merge_detections.py +5 -0
- megadetector/postprocessing/postprocess_batch_results.py +6 -3
- megadetector/postprocessing/remap_detection_categories.py +55 -2
- megadetector/postprocessing/render_detection_confusion_matrix.py +9 -6
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +2 -2
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +3 -4
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +40 -19
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +1 -1
- megadetector/taxonomy_mapping/species_lookup.py +123 -41
- megadetector/utils/ct_utils.py +133 -113
- megadetector/utils/md_tests.py +93 -13
- megadetector/utils/path_utils.py +137 -107
- megadetector/utils/split_locations_into_train_val.py +2 -2
- megadetector/utils/string_utils.py +7 -7
- megadetector/utils/url_utils.py +81 -58
- megadetector/utils/wi_utils.py +46 -17
- megadetector/visualization/plot_utils.py +13 -9
- megadetector/visualization/render_images_with_thumbnails.py +2 -1
- megadetector/visualization/visualization_utils.py +94 -46
- megadetector/visualization/visualize_db.py +36 -9
- megadetector/visualization/visualize_detector_output.py +4 -4
- {megadetector-5.0.29.dist-info → megadetector-10.0.0.dist-info}/METADATA +135 -135
- megadetector-10.0.0.dist-info/RECORD +139 -0
- {megadetector-5.0.29.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.29.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +0 -438
- megadetector/api/batch_processing/api_core/server.py +0 -294
- megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
- megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
- megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
- megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
- megadetector/api/batch_processing/api_core/server_utils.py +0 -88
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +0 -109
- megadetector/utils/azure_utils.py +0 -178
- megadetector/utils/sas_blob_utils.py +0 -513
- megadetector-5.0.29.dist-info/RECORD +0 -163
- /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
- {megadetector-5.0.29.dist-info → megadetector-10.0.0.dist-info}/WHEEL +0 -0
megadetector/api/batch_processing/api_support/summarize_daily_activity.py

@@ -1,152 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
-
-"""
-This script can be run in a separate process to monitor all instances of the batch API.
-It sends a digest of submissions within the past day to a Teams channel webhook.
-
-It requires the environment variables TEAMS_WEBHOOK, COSMOS_ENDPOINT and COSMOS_READ_KEY to be set.
-"""
-
-import time
-import os
-import json
-from datetime import datetime, timezone, timedelta
-from collections import defaultdict
-
-import requests
-from azure.cosmos.cosmos_client import CosmosClient
-
-
-# Cosmos DB `batch-api-jobs` table for job status
-COSMOS_ENDPOINT = os.environ['COSMOS_ENDPOINT']
-COSMOS_READ_KEY = os.environ['COSMOS_READ_KEY']
-
-TEAMS_WEBHOOK = os.environ['TEAMS_WEBHOOK']
-
-
-def send_message():
-    cosmos_client = CosmosClient(COSMOS_ENDPOINT, credential=COSMOS_READ_KEY)
-    db_client = cosmos_client.get_database_client('camera-trap')
-    db_jobs_client = db_client.get_container_client('batch_api_jobs')
-
-    yesterday = datetime.now(timezone.utc).date() - timedelta(days=1)
-
-    query = f'''
-    SELECT *
-    FROM job
-    WHERE job.job_submission_time >= "{yesterday.isoformat()}T00:00:00Z"
-    '''
-
-    result_iterable = db_jobs_client.query_items(query=query,
-                                                 enable_cross_partition_query=True)
-
-    # aggregate the number of images, country and organization names info from each job
-    # submitted during yesterday (UTC time)
-    instance_num_images = defaultdict(lambda: defaultdict(int))
-    instance_countries = defaultdict(set)
-    instance_orgs = defaultdict(set)
-
-    total_images_received = 0
-
-    for job in result_iterable:
-        api_instance = job['api_instance']
-        status = job['status']
-        call_params = job['call_params']
-
-        if status['request_status'] == 'completed':
-            instance_num_images[api_instance]['num_images_completed'] += status.get('num_images', 0)
-        instance_num_images[api_instance]['num_images_total'] += status.get('num_images', 0)
-        total_images_received += status.get('num_images', 0)
-
-        instance_countries[api_instance].add(call_params.get('country', 'unknown'))
-        instance_orgs[api_instance].add(call_params.get('organization_name', 'unknown'))
-
-    print(f'send_message, number of images received yesterday: {total_images_received}')
-
-    if total_images_received < 1:
-        print('send_message, no images submitted yesterday, not sending a summary')
-        print('')
-        return
-
-    # create the card
-    sections = []
-
-    for instance_name, num_images in instance_num_images.items():
-        entry = {
-            'activityTitle': f'API instance: {instance_name}',
-            'facts': [
-                {
-                    'name': 'Total images',
-                    'value': '{:,}'.format(num_images['num_images_total'])
-                },
-                {
-                    'name': 'Images completed',
-                    'value': '{:,}'.format(num_images['num_images_completed'])
-                },
-                {
-                    'name': 'Countries',
-                    'value': ', '.join(sorted(list(instance_countries[instance_name])))
-                },
-                {
-                    'name': 'Organizations',
-                    'value': ', '.join(sorted(list(instance_orgs[instance_name])))
-                }
-            ]
-        }
-        sections.append(entry)
-
-    card = {
-        '@type': 'MessageCard',
-        '@context': 'http://schema.org/extensions',
-        'themeColor': 'ffcdb2',
-        'summary': 'Digest of batch API activities over the past 24 hours',
-        'title': f'Camera traps batch API activities on {yesterday.strftime("%b %d, %Y")}',
-        'sections': sections,
-        'potentialAction': [
-            {
-                '@type': 'OpenUri',
-                'name': 'View Batch account in Azure Portal',
-                'targets': [
-                    {
-                        'os': 'default',
-                        'uri': 'https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/74d91980-e5b4-4fd9-adb6-263b8f90ec5b/resourcegroups/camera_trap_api_rg/providers/Microsoft.Batch/batchAccounts/cameratrapssc/accountOverview'
-                    }
-                ]
-            }
-        ]
-    }
-
-    response = requests.post(TEAMS_WEBHOOK, data=json.dumps(card))
-    print(f'send_message, card to send:')
-    print(json.dumps(card, indent=4))
-    print(f'send_message, sent summary to webhook, response code: {response.status_code}')
-    print('')
-
-
-def main():  # noqa
-    """
-    Wake up at 5 minutes past midnight UTC to send a summary of yesterday's activities if there were any.
-    Then goes in a loop to wake up and send a summary every 24 hours.
-    """
-    current = datetime.utcnow()
-    future = current.replace(day=current.day, hour=0, minute=5, second=0, microsecond=0) + timedelta(
-        days=1)  # current has been modified
-
-    current = datetime.utcnow()
-    duration = future - current
-
-    duration_hours = duration.seconds / (60 * 60)
-    print(f'Current time: {current}')
-    print(f'Will wake up at {future}, in {duration_hours} hours')
-    print('')
-
-    time.sleep(duration.seconds)
-
-    while True:
-        print(f'Woke up at {datetime.utcnow()}')
-        send_message()
-        time.sleep(24 * 60 * 60)
-
-if __name__ == '__main__':
-    main()
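For context, the removed script's reporting mechanism was simply an HTTP POST of a "MessageCard" JSON payload to a Teams incoming-webhook URL. Below is a minimal sketch of that pattern, assuming a TEAMS_WEBHOOK environment variable as the removed script required; the helper name, card title, and fact values are placeholders, not values from the diff.

```python
# Minimal sketch: posting a MessageCard to a Teams incoming webhook, the same
# mechanism the removed summarize_daily_activity.py used. Placeholder values.
import json
import os

import requests


def post_teams_card(webhook_url, title, facts):
    """Post a simple MessageCard with a single facts section to a Teams webhook."""
    card = {
        '@type': 'MessageCard',
        '@context': 'http://schema.org/extensions',
        'summary': title,
        'title': title,
        'sections': [{'facts': [{'name': k, 'value': v} for k, v in facts.items()]}],
    }
    # Teams expects the card as a JSON body; the status code indicates acceptance
    response = requests.post(webhook_url, data=json.dumps(card))
    return response.status_code


if __name__ == '__main__':
    status = post_teams_card(os.environ['TEAMS_WEBHOOK'],
                             'Daily digest (example)',
                             {'Total images': '1,000'})
    print(f'Webhook response code: {status}')
```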
File without changes
File without changes
File without changes
megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py

@@ -1,151 +0,0 @@
-#
-# api_backend.py
-#
-# Defines the model execution service, which pulls requests (one or more images)
-# from the shared Redis queue, and runs them through the TF model.
-#
-
-#%% Imports
-
-import os
-import json
-import time
-import redis
-import argparse
-import PIL
-
-from detection.run_detector import load_detector
-from utils.ct_utils import convert_xywh_to_xyxy as convert_to_tf_coords
-import config
-import visualization.visualization_utils as vis_utils
-
-#%% Initialization
-
-db = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)
-current_directory = os.path.dirname(os.path.realpath(__file__))
-
-
-#%% Main loop
-
-def detect_process():
-
-    while True:
-
-        # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
-        serialized_entry = db.lpop(config.REDIS_QUEUE_NAME)
-        all_detection_results = []
-        inference_time_detector = []
-
-        if serialized_entry:
-
-            entry = json.loads(serialized_entry)
-            id = entry['id']
-            print('Processing images from request id:', id)
-            return_confidence_threshold = entry['return_confidence_threshold']
-
-            try:
-
-                temp_direc = os.path.join(config.TEMP_FOLDER,id)
-                assert os.path.isdir(temp_direc), 'Could not find temporary folder {}'.format(temp_direc)
-
-                for filename in os.listdir(temp_direc):
-
-                    image_path = f'{temp_direc}/{filename}'
-                    print('Reading image from {}'.format(image_path))
-                    image = open(image_path, 'rb')
-                    image = vis_utils.load_image(image)
-
-                    start_time = time.time()
-                    result = detector.generate_detections_one_image(image, filename, detection_threshold=config.DEFAULT_CONFIDENCE_THRESHOLD)
-                    all_detection_results.append(result)
-
-                    elapsed = time.time() - start_time
-                    inference_time_detector.append(elapsed)
-
-            except Exception as e:
-
-                print('Detection error: ' + str(e))
-
-                db.set(entry['id'], json.dumps({
-                    'status': 500,
-                    'error': 'Detection error: ' + str(e)
-                }))
-
-                continue
-
-            # Filter the detections by the confidence threshold
-            #
-            # Each result is [ymin, xmin, ymax, xmax, confidence, category]
-            #
-            # Coordinates are relative, with the origin in the upper-left
-            detections = {}
-
-            try:
-
-                for result in all_detection_results:
-
-                    image_name = result['file']
-                    _detections = result.get('detections', None)
-                    detections[image_name] = []
-
-                    if _detections is None:
-                        continue
-
-                    for d in _detections:
-                        if d['conf'] > return_confidence_threshold:
-                            res = convert_to_tf_coords(d['bbox'])
-                            res.append(d['conf'])
-                            res.append(int(d['category']))
-                            detections[image_name].append(res)
-
-                db.set(entry['id'], json.dumps({
-                    'status': 200,
-                    'detections': detections,
-                    'inference_time_detector': inference_time_detector
-                }))
-
-            except Exception as e:
-                print('Error consolidating the detection boxes: ' + str(e))
-
-                db.set(entry['id'], json.dumps({
-                    'status': 500,
-                    'error': 'Error consolidating the detection boxes:' + str(e)
-                }))
-
-        # ...if serialized_entry
-
-        else:
-            time.sleep(0.005)
-
-    # ...while(True)
-
-# ...def detect_process()
-
-
-#%% Command-line driver
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser(description='api backend')
-
-    # use --non-docker if you are testing without Docker
-    #
-    # python api_frontend.py --non-docker
-    parser.add_argument('--non-docker', action='store_true', default=False)
-    args = parser.parse_args()
-
-    if args.non_docker:
-        model_path = config.DETECTOR_MODEL_PATH_DEBUG
-    else:
-        model_path = config.DETECTOR_MODEL_PATH
-
-    detector = load_detector(model_path)
-
-    # run detections on a test image to load the model
-    print('Running initial detection to load model...')
-    test_image = PIL.Image.new(mode="RGB", size=(200, 200))
-    result = detector.generate_detections_one_image(test_image, "test_image", detection_threshold=config.DEFAULT_CONFIDENCE_THRESHOLD)
-    print(result)
-    print('\n')
-
-    detect_process()
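The removed worker polled the queue with lpop and a 5 ms sleep; its own TODO comment suggests converting to a blocking read. Below is a minimal sketch of that blocking-read variant, reusing the host, port, and queue name from the deleted config.py; the handler body is a placeholder, not MegaDetector code.

```python
# Sketch of the blocking-read variant suggested by the removed code's TODO:
# redis blpop blocks until an entry is available, eliminating the sleep loop.
import json

import redis

db = redis.StrictRedis(host='localhost', port=6379)


def detect_process_blocking(queue_name='camera-trap-queue'):
    while True:
        # blpop returns a (queue_name, payload) tuple once an entry arrives,
        # or None if the timeout (in seconds) expires first
        item = db.blpop(queue_name, timeout=30)
        if item is None:
            continue
        entry = json.loads(item[1])
        print('Processing request id:', entry['id'])
        # ...run the detector and db.set() the result, as the removed code did
```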
megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py

@@ -1,263 +0,0 @@
-#
-# api_frontend.py
-#
-# Defines the Flask app, which takes requests (one or more images) from
-# remote callers and pushes the images onto the shared Redis queue, to be processed
-# by the main service in api_backend.py .
-#
-
-#%% Imports
-
-import os
-import json
-import time
-import uuid
-import redis
-import shutil
-import argparse
-import traceback
-
-from io import BytesIO
-from flask import Flask, Response, jsonify, make_response, request
-from requests_toolbelt.multipart.encoder import MultipartEncoder
-
-import visualization.visualization_utils as vis_utils
-import config
-
-
-#%% Initialization
-
-app = Flask(__name__)
-db = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)
-
-
-#%% Support functions
-
-def _make_error_object(error_code, error_message):
-
-    # Make a dict that the request_processing_function can return to the endpoint
-    # function to notify it of an error
-    return {
-        'error_message': error_message,
-        'error_code': error_code
-    }
-
-
-def _make_error_response(error_code, error_message):
-
-    return make_response(jsonify({'error': error_message}), error_code)
-
-
-def has_access(request):
-
-    if not os.path.exists(config.API_KEYS_FILE):
-        return True
-    else:
-        if not request.headers.get('key'):
-            print('Key header not available')
-            return False
-        else:
-            API_key = request.headers.get('key').strip().lower()
-            with open(config.API_KEYS_FILE, "r") as f:
-                for line in f:
-                    valid_key = line.strip().lower()
-                    if valid_key == API_key:
-                        return True
-
-    return False
-
-
-def check_posted_data(request):
-
-    files = request.files
-    params = request.args
-
-    # Verify that the content uploaded is not too big
-    #
-    # request.content_length is the length of the total payload
-    content_length = request.content_length
-    if not content_length:
-        return _make_error_object(411, 'No image(s) were sent, or content length cannot be determined.')
-    if content_length > config.MAX_CONTENT_LENGTH_IN_MB * 1024 * 1024:
-        return _make_error_object(413, ('Payload size {:.2f} MB exceeds the maximum allowed of {} MB. '
-                                        'Please upload fewer or more compressed images.').format(
-            content_length / (1024 * 1024), config.MAX_CONTENT_LENGTH_IN_MB))
-
-    render_boxes = True if params.get('render', '').lower() == 'true' else False
-
-    if 'min_confidence' in params:
-        return_confidence_threshold = float(params['min_confidence'])
-        print('runserver, post_detect_sync, user specified detection confidence: ', return_confidence_threshold)
-        if return_confidence_threshold < 0.0 or return_confidence_threshold > 1.0:
-            return _make_error_object(400, 'Detection confidence threshold {} is invalid, should be between 0.0 and 1.0.'.format(
-                return_confidence_threshold))
-    else:
-        return_confidence_threshold = config.DEFAULT_CONFIDENCE_THRESHOLD
-
-    if 'min_rendering_confidence' in params:
-        rendering_confidence_threshold = float(params['min_rendering_confidence'])
-        print('runserver, post_detect_sync, user specified rendering confidence: ', rendering_confidence_threshold)
-        if rendering_confidence_threshold < 0.0 or rendering_confidence_threshold > 1.0:
-            return _make_error_object(400, 'Rendering confidence threshold {} is invalid, should be between 0.0 and 1.0.'.format(
-                rendering_confidence_threshold))
-    else:
-        rendering_confidence_threshold = config.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD
-
-    # Verify that the number of images is acceptable
-    num_images = sum([1 if file.content_type in config.IMAGE_CONTENT_TYPES else 0 for file in files.values()])
-    print('runserver, post_detect_sync, number of images received: ', num_images)
-
-    if num_images > config.MAX_IMAGES_ACCEPTED:
-        return _make_error_object(413, 'Too many images. Maximum number of images that can be processed in one call is {}.'.format(config.MAX_IMAGES_ACCEPTED))
-    elif num_images == 0:
-        return _make_error_object(400, 'No image(s) of accepted types (image/jpeg, image/png, application/octet-stream) received.')
-
-    return {
-        'render_boxes': render_boxes,
-        'return_confidence_threshold': return_confidence_threshold,
-        'rendering_confidence_threshold': rendering_confidence_threshold
-    }
-
-# ...def check_posted_data(request)
-
-
-#%% Main loop
-
-@app.route(config.API_PREFIX + '/detect', methods = ['POST'])
-def detect_sync():
-
-    if not has_access(request):
-        print('Access denied, please provide a valid API key')
-        return _make_error_response(403, 'Access denied, please provide a valid API key')
-
-    # Check whether the request_processing_function had an error
-    post_data = check_posted_data(request)
-    if post_data.get('error_code', None) is not None:
-        return _make_error_response(post_data.get('error_code'), post_data.get('error_message'))
-
-    render_boxes = post_data.get('render_boxes')
-    return_confidence_threshold = post_data.get('return_confidence_threshold')
-    rendering_confidence_threshold = post_data.get('rendering_confidence_threshold')
-
-    redis_id = str(uuid.uuid4())
-    d = {'id': redis_id, 'render_boxes': render_boxes, 'return_confidence_threshold': return_confidence_threshold}
-    temp_direc = os.path.join(config.TEMP_FOLDER, redis_id)
-
-    try:
-
-        try:
-            # Write images to temporary files
-            os.makedirs(temp_direc,exist_ok=True)
-            for name, file in request.files.items():
-                if file.content_type in config.IMAGE_CONTENT_TYPES:
-                    filename = request.files[name].filename
-                    image_path = os.path.join(temp_direc, filename)
-                    print('Saving image {} to {}'.format(name,image_path))
-                    file.save(image_path)
-                    assert os.path.isfile(image_path),'Error creating file {}'.format(image_path)
-
-        except Exception as e:
-            return _make_error_object(500, 'Error saving images: ' + str(e))
-
-        # Submit the image(s) for processing by api_backend.py, who is waiting on this queue
-        db.rpush(config.REDIS_QUEUE_NAME, json.dumps(d))
-
-        while True:
-
-            result = db.get(redis_id)
-
-            if result:
-
-                result = json.loads(result.decode())
-                print('Processing result {}'.format(str(result)))
-
-                if result['status'] == 200:
-                    detections = result['detections']
-                    db.delete(redis_id)
-
-                else:
-                    db.delete(redis_id)
-                    print('Detection error: ' + str(result))
-                    return _make_error_response(500, 'Detection error: ' + str(result))
-
-                try:
-                    print('detect_sync: postprocessing and sending images back...')
-                    fields = {
-                        'detection_result': ('detection_result', json.dumps(detections), 'application/json'),
-                    }
-
-                    if render_boxes and result['status'] == 200:
-
-                        print('Rendering images')
-
-                        for image_name, detections in detections.items():
-
-                            #image = Image.open(os.path.join(temp_direc, image_name))
-                            image = open(f'{temp_direc}/{image_name}', "rb")
-                            image = vis_utils.load_image(image)
-                            width, height = image.size
-
-                            _detections = []
-                            for d in detections:
-                                y1,x1,y2,x2 = d[0:4]
-                                width = x2 - x1
-                                height = y2 - y1
-                                bbox = [x1,y1,width,height]
-                                _detections.append({'bbox': bbox, 'conf': d[4], 'category': d[5]})
-
-                            vis_utils.render_detection_bounding_boxes(_detections, image,
-                                confidence_threshold=rendering_confidence_threshold)
-
-                            output_img_stream = BytesIO()
-                            image.save(output_img_stream, format='jpeg')
-                            output_img_stream.seek(0)
-                            fields[image_name] = (image_name, output_img_stream, 'image/jpeg')
-                        print('Done rendering images')
-
-                    m = MultipartEncoder(fields=fields)
-                    return Response(m.to_string(), mimetype=m.content_type)
-
-                except Exception as e:
-
-                    print(traceback.format_exc())
-                    print('Error returning result or rendering the detection boxes: ' + str(e))
-
-                finally:
-
-                    try:
-                        print('Removing temporary files')
-                        shutil.rmtree(temp_direc)
-                    except Exception as e:
-                        print('Error removing temporary folder {}: {}'.format(temp_direc,str(e)))
-
-            else:
-                time.sleep(0.005)
-
-            # ...if we do/don't have a request available on the queue
-
-        # ...while(True)
-
-    except Exception as e:
-
-        print(traceback.format_exc())
-        return _make_error_object(500, 'Error processing images: ' + str(e))
-
-# ...def detect_sync()
-
-
-#%% Command-line driver
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser(description='api frontend')
-
-    # use --non-docker if you are testing without Docker
-    #
-    # python api_frontend.py --non-docker
-    parser.add_argument('--non-docker', action="store_true", default=False)
-    args = parser.parse_args()
-
-    if args.non_docker:
-        app.run(host='0.0.0.0', port=5050)
-    else:
-        app.run()
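For context, a client called this removed endpoint by POSTing multipart image files to API_PREFIX + '/detect', with thresholds passed as query parameters and the API key in a 'key' header. Below is a minimal sketch of such a call, assuming a locally running instance on port 5050 (the host, file name, and key value are placeholders, not values from the diff):

```python
# Sketch of a client call against the removed synchronous API, based on the
# endpoint path, query parameters, and 'key' header defined in the deleted code.
import requests

url = 'http://localhost:5050/v1/camera-trap/sync/detect'  # API_PREFIX + '/detect'
params = {'min_confidence': '0.8', 'render': 'true'}
headers = {'key': 'my-api-key'}  # placeholder; checked against allowed_keys.txt

with open('camera_trap_image.jpg', 'rb') as f:
    files = {'image1': ('camera_trap_image.jpg', f, 'image/jpeg')}
    r = requests.post(url, params=params, headers=headers, files=files)

# The response is a multipart body whose 'detection_result' part is JSON;
# parsing it requires a multipart decoder (e.g. requests_toolbelt).
print(r.status_code, r.headers.get('content-type'))
```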
megadetector/api/synchronous/api_core/animal_detection_api/config.py

@@ -1,35 +0,0 @@
-## Camera trap real-time API configuration
-
-REDIS_HOST = 'localhost'
-
-REDIS_PORT = 6379
-
-# Full path to the temporary folder for image storage, only meaningful
-# within the Docker container
-TEMP_FOLDER = '/app/temp'
-
-REDIS_QUEUE_NAME = 'camera-trap-queue'
-
-# Upper limit on total content length (all images and parameters)
-MAX_CONTENT_LENGTH_IN_MB = 5 * 8 # 5MB per image * number of images allowed
-
-MAX_IMAGES_ACCEPTED = 8
-
-IMAGE_CONTENT_TYPES = ['image/png', 'application/octet-stream', 'image/jpeg']
-
-DETECTOR_MODEL_PATH = '/app/animal_detection_api/model/md_v5a.0.0.pt'
-
-DETECTOR_MODEL_VERSION = 'v5a.0.0'
-
-# Minimum confidence threshold for detections
-DEFAULT_CONFIDENCE_THRESHOLD = 0.01
-
-# Minimum confidence threshold for showing a bounding box on the output image
-DEFAULT_RENDERING_CONFIDENCE_THRESHOLD = 0.2
-
-API_PREFIX = '/v1/camera-trap/sync'
-
-API_KEYS_FILE = 'allowed_keys.txt'
-
-# Use this when testing without Docker
-DETECTOR_MODEL_PATH_DEBUG = 'model/md_v5a.0.0.pt'
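One detail worth noting in the removed config: MAX_CONTENT_LENGTH_IN_MB is derived as 5 MB per image times the 8-image limit, so the frontend rejected any request whose total payload exceeded 40 MB. A minimal sketch of a client-side pre-check using the same arithmetic (the helper and example sizes are illustrative, not part of the package):

```python
# Pre-check total upload size against the 40 MB cap the removed frontend
# enforced (MAX_CONTENT_LENGTH_IN_MB = 5 * 8).
MAX_CONTENT_LENGTH_IN_MB = 5 * 8  # 5 MB per image * 8 images allowed
MAX_BYTES = MAX_CONTENT_LENGTH_IN_MB * 1024 * 1024  # 41,943,040 bytes

def payload_fits(image_sizes_bytes):
    """Return True if the combined upload stays within the server's cap."""
    return sum(image_sizes_bytes) <= MAX_BYTES

# e.g. eight 5 MB images exactly fill the cap
print(payload_fits([5 * 1024 * 1024] * 8))  # True
```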
File without changes