megadetector 5.0.11__py3-none-any.whl → 5.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic. Click here for more details.
- megadetector/api/__init__.py +0 -0
- megadetector/api/batch_processing/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
- megadetector/api/batch_processing/api_core/batch_service/score.py +439 -0
- megadetector/api/batch_processing/api_core/server.py +294 -0
- megadetector/api/batch_processing/api_core/server_api_config.py +98 -0
- megadetector/api/batch_processing/api_core/server_app_config.py +55 -0
- megadetector/api/batch_processing/api_core/server_batch_job_manager.py +220 -0
- megadetector/api/batch_processing/api_core/server_job_status_table.py +152 -0
- megadetector/api/batch_processing/api_core/server_orchestration.py +360 -0
- megadetector/api/batch_processing/api_core/server_utils.py +92 -0
- megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +46 -0
- megadetector/api/batch_processing/api_support/__init__.py +0 -0
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +152 -0
- megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
- megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +126 -0
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
- megadetector/api/synchronous/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +152 -0
- megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -0
- megadetector/api/synchronous/api_core/animal_detection_api/config.py +35 -0
- megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
- megadetector/api/synchronous/api_core/tests/load_test.py +110 -0
- megadetector/classification/__init__.py +0 -0
- megadetector/classification/aggregate_classifier_probs.py +108 -0
- megadetector/classification/analyze_failed_images.py +227 -0
- megadetector/classification/cache_batchapi_outputs.py +198 -0
- megadetector/classification/create_classification_dataset.py +627 -0
- megadetector/classification/crop_detections.py +516 -0
- megadetector/classification/csv_to_json.py +226 -0
- megadetector/classification/detect_and_crop.py +855 -0
- megadetector/classification/efficientnet/__init__.py +9 -0
- megadetector/classification/efficientnet/model.py +415 -0
- megadetector/classification/efficientnet/utils.py +610 -0
- megadetector/classification/evaluate_model.py +520 -0
- megadetector/classification/identify_mislabeled_candidates.py +152 -0
- megadetector/classification/json_to_azcopy_list.py +63 -0
- megadetector/classification/json_validator.py +699 -0
- megadetector/classification/map_classification_categories.py +276 -0
- megadetector/classification/merge_classification_detection_output.py +506 -0
- megadetector/classification/prepare_classification_script.py +194 -0
- megadetector/classification/prepare_classification_script_mc.py +228 -0
- megadetector/classification/run_classifier.py +287 -0
- megadetector/classification/save_mislabeled.py +110 -0
- megadetector/classification/train_classifier.py +827 -0
- megadetector/classification/train_classifier_tf.py +725 -0
- megadetector/classification/train_utils.py +323 -0
- megadetector/data_management/__init__.py +0 -0
- megadetector/data_management/annotations/__init__.py +0 -0
- megadetector/data_management/annotations/annotation_constants.py +34 -0
- megadetector/data_management/camtrap_dp_to_coco.py +239 -0
- megadetector/data_management/cct_json_utils.py +395 -0
- megadetector/data_management/cct_to_md.py +176 -0
- megadetector/data_management/cct_to_wi.py +289 -0
- megadetector/data_management/coco_to_labelme.py +272 -0
- megadetector/data_management/coco_to_yolo.py +662 -0
- megadetector/data_management/databases/__init__.py +0 -0
- megadetector/data_management/databases/add_width_and_height_to_db.py +33 -0
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +206 -0
- megadetector/data_management/databases/integrity_check_json_db.py +477 -0
- megadetector/data_management/databases/subset_json_db.py +115 -0
- megadetector/data_management/generate_crops_from_cct.py +149 -0
- megadetector/data_management/get_image_sizes.py +189 -0
- megadetector/data_management/importers/add_nacti_sizes.py +52 -0
- megadetector/data_management/importers/add_timestamps_to_icct.py +79 -0
- megadetector/data_management/importers/animl_results_to_md_results.py +158 -0
- megadetector/data_management/importers/auckland_doc_test_to_json.py +373 -0
- megadetector/data_management/importers/auckland_doc_to_json.py +201 -0
- megadetector/data_management/importers/awc_to_json.py +191 -0
- megadetector/data_management/importers/bellevue_to_json.py +273 -0
- megadetector/data_management/importers/cacophony-thermal-importer.py +796 -0
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +269 -0
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +289 -0
- megadetector/data_management/importers/cct_field_adjustments.py +58 -0
- megadetector/data_management/importers/channel_islands_to_cct.py +913 -0
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +180 -0
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +249 -0
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +223 -0
- megadetector/data_management/importers/ena24_to_json.py +276 -0
- megadetector/data_management/importers/filenames_to_json.py +386 -0
- megadetector/data_management/importers/helena_to_cct.py +283 -0
- megadetector/data_management/importers/idaho-camera-traps.py +1407 -0
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +294 -0
- megadetector/data_management/importers/jb_csv_to_json.py +150 -0
- megadetector/data_management/importers/mcgill_to_json.py +250 -0
- megadetector/data_management/importers/missouri_to_json.py +490 -0
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +79 -0
- megadetector/data_management/importers/noaa_seals_2019.py +181 -0
- megadetector/data_management/importers/pc_to_json.py +365 -0
- megadetector/data_management/importers/plot_wni_giraffes.py +123 -0
- megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -0
- megadetector/data_management/importers/prepare_zsl_imerit.py +131 -0
- megadetector/data_management/importers/rspb_to_json.py +356 -0
- megadetector/data_management/importers/save_the_elephants_survey_A.py +320 -0
- megadetector/data_management/importers/save_the_elephants_survey_B.py +329 -0
- megadetector/data_management/importers/snapshot_safari_importer.py +758 -0
- megadetector/data_management/importers/snapshot_safari_importer_reprise.py +665 -0
- megadetector/data_management/importers/snapshot_serengeti_lila.py +1067 -0
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +150 -0
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +153 -0
- megadetector/data_management/importers/sulross_get_exif.py +65 -0
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +490 -0
- megadetector/data_management/importers/ubc_to_json.py +399 -0
- megadetector/data_management/importers/umn_to_json.py +507 -0
- megadetector/data_management/importers/wellington_to_json.py +263 -0
- megadetector/data_management/importers/wi_to_json.py +442 -0
- megadetector/data_management/importers/zamba_results_to_md_results.py +181 -0
- megadetector/data_management/labelme_to_coco.py +547 -0
- megadetector/data_management/labelme_to_yolo.py +272 -0
- megadetector/data_management/lila/__init__.py +0 -0
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +97 -0
- megadetector/data_management/lila/add_locations_to_nacti.py +147 -0
- megadetector/data_management/lila/create_lila_blank_set.py +558 -0
- megadetector/data_management/lila/create_lila_test_set.py +152 -0
- megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
- megadetector/data_management/lila/download_lila_subset.py +178 -0
- megadetector/data_management/lila/generate_lila_per_image_labels.py +516 -0
- megadetector/data_management/lila/get_lila_annotation_counts.py +170 -0
- megadetector/data_management/lila/get_lila_image_counts.py +112 -0
- megadetector/data_management/lila/lila_common.py +300 -0
- megadetector/data_management/lila/test_lila_metadata_urls.py +132 -0
- megadetector/data_management/ocr_tools.py +874 -0
- megadetector/data_management/read_exif.py +681 -0
- megadetector/data_management/remap_coco_categories.py +84 -0
- megadetector/data_management/remove_exif.py +66 -0
- megadetector/data_management/resize_coco_dataset.py +189 -0
- megadetector/data_management/wi_download_csv_to_coco.py +246 -0
- megadetector/data_management/yolo_output_to_md_output.py +441 -0
- megadetector/data_management/yolo_to_coco.py +676 -0
- megadetector/detection/__init__.py +0 -0
- megadetector/detection/detector_training/__init__.py +0 -0
- megadetector/detection/detector_training/model_main_tf2.py +114 -0
- megadetector/detection/process_video.py +702 -0
- megadetector/detection/pytorch_detector.py +341 -0
- megadetector/detection/run_detector.py +779 -0
- megadetector/detection/run_detector_batch.py +1219 -0
- megadetector/detection/run_inference_with_yolov5_val.py +917 -0
- megadetector/detection/run_tiled_inference.py +934 -0
- megadetector/detection/tf_detector.py +189 -0
- megadetector/detection/video_utils.py +606 -0
- megadetector/postprocessing/__init__.py +0 -0
- megadetector/postprocessing/add_max_conf.py +64 -0
- megadetector/postprocessing/categorize_detections_by_size.py +163 -0
- megadetector/postprocessing/combine_api_outputs.py +249 -0
- megadetector/postprocessing/compare_batch_results.py +958 -0
- megadetector/postprocessing/convert_output_format.py +396 -0
- megadetector/postprocessing/load_api_results.py +195 -0
- megadetector/postprocessing/md_to_coco.py +310 -0
- megadetector/postprocessing/md_to_labelme.py +330 -0
- megadetector/postprocessing/merge_detections.py +401 -0
- megadetector/postprocessing/postprocess_batch_results.py +1902 -0
- megadetector/postprocessing/remap_detection_categories.py +170 -0
- megadetector/postprocessing/render_detection_confusion_matrix.py +660 -0
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +211 -0
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +83 -0
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1631 -0
- megadetector/postprocessing/separate_detections_into_folders.py +730 -0
- megadetector/postprocessing/subset_json_detector_output.py +696 -0
- megadetector/postprocessing/top_folders_to_bottom.py +223 -0
- megadetector/taxonomy_mapping/__init__.py +0 -0
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +150 -0
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -0
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +590 -0
- megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
- megadetector/taxonomy_mapping/simple_image_download.py +219 -0
- megadetector/taxonomy_mapping/species_lookup.py +834 -0
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
- megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
- megadetector/utils/__init__.py +0 -0
- megadetector/utils/azure_utils.py +178 -0
- megadetector/utils/ct_utils.py +612 -0
- megadetector/utils/directory_listing.py +246 -0
- megadetector/utils/md_tests.py +968 -0
- megadetector/utils/path_utils.py +1044 -0
- megadetector/utils/process_utils.py +157 -0
- megadetector/utils/sas_blob_utils.py +509 -0
- megadetector/utils/split_locations_into_train_val.py +228 -0
- megadetector/utils/string_utils.py +92 -0
- megadetector/utils/url_utils.py +323 -0
- megadetector/utils/write_html_image_list.py +225 -0
- megadetector/visualization/__init__.py +0 -0
- megadetector/visualization/plot_utils.py +293 -0
- megadetector/visualization/render_images_with_thumbnails.py +275 -0
- megadetector/visualization/visualization_utils.py +1536 -0
- megadetector/visualization/visualize_db.py +550 -0
- megadetector/visualization/visualize_detector_output.py +405 -0
- {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/METADATA +1 -1
- megadetector-5.0.12.dist-info/RECORD +199 -0
- megadetector-5.0.12.dist-info/top_level.txt +1 -0
- megadetector-5.0.11.dist-info/RECORD +0 -5
- megadetector-5.0.11.dist-info/top_level.txt +0 -1
- {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/LICENSE +0 -0
- {megadetector-5.0.11.dist-info → megadetector-5.0.12.dist-info}/WHEEL +0 -0
megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
#
|
|
2
|
+
# Test script for pushing annotations to the eMammal db
|
|
3
|
+
#
|
|
4
|
+
|
|
5
|
+
#%% Imports
|
|
6
|
+
|
|
7
|
+
import sys
|
|
8
|
+
import json
|
|
9
|
+
import argparse
|
|
10
|
+
import pymysql
|
|
11
|
+
import config as cfg
|
|
12
|
+
|
|
13
|
+
from tqdm import tqdm
|
|
14
|
+
from enum import Enum
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
#%% Database functions
|
|
18
|
+
|
|
19
|
+
class Categories(Enum):
    """MegaDetector detection category IDs, as they appear (as strings) in the
    'category' field of each detection in the MD output .json file."""

    animal = 1
    person = 2
    vehicle = 3
|
|
24
|
+
# Module-level connection to the eMammal MySQL database, created at import
# time with credentials read from the local config module (imported as cfg).
#
# NOTE(review): connecting at import time means this module cannot even be
# imported without a reachable database.
mysql_connection = pymysql.connect( host=cfg.host,
                                    user=cfg.username,
                                    passwd=cfg.password,
                                    db=cfg.database,
                                    port=cfg.port)
|
30
|
+
def update_data(sql):
    """Execute a single non-SELECT SQL statement on the shared connection.

    Note that this does not commit; the caller is responsible for calling
    mysql_connection.commit().

    Args:
        sql (str): the SQL statement to execute
    """

    with mysql_connection.cursor() as db_cursor:
        db_cursor.execute(sql)
|
34
|
+
def get_records_all(sql):
    """Execute a SELECT statement and fetch all resulting rows.

    Args:
        sql (str): the query to execute

    Returns:
        tuple of tuples: all rows returned by the query
    """

    with mysql_connection.cursor() as cursor:
        # Removed a redundant self-assignment (sql = sql) that was a no-op
        cursor.execute(sql)
        rows = cursor.fetchall()
        return rows
|
|
41
|
+
def format_data_print_deployments(rows):
    """Print a 1-based numbered menu of deployments and collect the entries.

    Args:
        rows (iterable): deployment rows, where row[0] and row[1] are the two
            fields displayed (presumably deployment id and name; confirm
            against the deployment table schema)

    Returns:
        list of tuple: (menu_number, row[0], row[1]) for each row, with
        menu_number starting at 1
    """

    result = []
    # Idiom fix: enumerate(..., start=1) replaces a manually-incremented counter
    for count, row in enumerate(rows, start=1):
        print("{}. {}-{}".format(str(count), row[0],row[1]))
        result.append((count, row[0], row[1]))

    return result
|
51
|
+
|
|
52
|
+
#%% Command-line driver
|
|
53
|
+
|
|
54
|
+
def main():
    """Command-line driver.

    Prompts the user to pick a deployment, loads a MegaDetector results .json
    file, and for each image pushes an annotation update into the eMammal
    database.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', type=str, help='Input .json filename')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    print("Enter the number of the deployment:")

    rows = get_records_all(''' select * from deployment ''')
    deployments = format_data_print_deployments(rows)
    print("\n")
    deployment_choice = input()

    # Bug fix: the printed menu is 1-based but the list is 0-based, so the
    # user's choice previously selected the *next* deployment; subtract 1.
    deployment_id = deployments[int(deployment_choice) - 1][1]

    print(deployment_id)

    # TODO: check project ID ?
    sql = ''' SELECT emammal_project_taxa_id FROM wild_id.emammal_project_taxa
        where species in ("No Animal", "Unknown Animal", "Homo sapiens", "Vehicle") '''

    emammal_categories = get_records_all(sql)

    with open(args.input_file) as f:
        data = json.load(f)

    images = data['images']
    emammal_category = 0

    for index, im in tqdm(enumerate(images), total=len(images)):

        fn = im['file']

        if len(im['detections']) <= 0:

            image_type_id = 2

            # No-animal category
            #
            # Bug fix: this line previously overwrote the emammal_categories
            # list itself (emammal_categories = emammal_categories[0]),
            # corrupting every subsequent iteration; assign the scalar
            # category instead.
            emammal_category = emammal_categories[0]

        else:

            # Take the highest-confidence detection for this image
            max_conf = im['max_detection_conf']
            detection = [k for k in im['detections'] if k['conf'] == max_conf]
            category = int(detection[0]['category'])

            # Bug fix: 'category' is an int, so compare against the enum
            # member's .value; comparing an int to an Enum member is always
            # False, which made every image fall through with image_type_id 5
            # and an unset category.
            if category == Categories.animal.value:
                image_type_id = 1
                emammal_category = emammal_categories[1]
            else:
                image_type_id = 5
                if category == Categories.person.value:
                    emammal_category = emammal_categories[2]
                elif category == Categories.vehicle.value:
                    emammal_category = emammal_categories[3]

        # NOTE(review): project_taxa_id is hard-coded to 4 here, and
        # image_type_id/emammal_category are computed above but never used in
        # this statement; presumably placeholders in this test script --
        # confirm intended behavior.
        sql = """ UPDATE wild_id.emammal_sequence_annotation,
            wild_id.image,
            wild_id.image_sequence,
            wild_id.deployment
            SET wild_id.emammal_sequence_annotation.project_taxa_id = 4
            WHERE wild_id.image.image_sequence_id = wild_id.emammal_sequence_annotation.sequence_id
            AND wild_id.image_sequence.deployment_id = wild_id.deployment.deployment_id
            AND wild_id.image.raw_name = '{}' """.format(fn)

        print(sql)
        update_data(sql)

    # Commit all per-image updates (commit granularity in the mangled original
    # is ambiguous; the final database state is the same either way)
    mysql_connection.commit()
|
124
|
+
# Script entry point
if __name__ == '__main__':
    main()
126
|
+
|
megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
#%% Imports
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import sys
|
|
5
|
+
import argparse
|
|
6
|
+
import json
|
|
7
|
+
import shutil
|
|
8
|
+
|
|
9
|
+
from tqdm import tqdm
|
|
10
|
+
|
|
11
|
+
#%% Main function
|
|
12
|
+
|
|
13
|
+
def main():
    """Trim a MegaDetector results file to its first 1000 images, write the
    trimmed file to output.json, and copy the corresponding image files from a
    hard-coded source folder into select_images_folder (preserving relative
    paths).
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', type=str, help='Input .json filename')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    with open(args.input_file) as f:
        data = json.load(f)

    # Keep only the first 1000 images, and write the trimmed results out
    images = data['images'][:1000]
    data['images'] = images
    with open('output.json', 'w') as f:
        json.dump(data, f, indent=1)

    select_images_folder = "select_images_folder"

    # Start from an empty output folder
    if os.path.exists(select_images_folder):
        shutil.rmtree(select_images_folder, ignore_errors=True)

    os.mkdir(select_images_folder)

    # NOTE(review): Windows-style separators and a hard-coded source folder;
    # this is a one-off test script tied to a specific dataset
    source = os.getcwd() + "\\SWWLF2019_R1_GMU1_F_9\\"
    destination = os.getcwd() + "\\" + select_images_folder + "\\"

    # Cleanup: removed an unused enumerate() index, an unused 'dest' variable
    # bound to copyfile's return value, and a block of commented-out code
    for im in tqdm(images, total=len(images)):
        fn = im['file']
        fn = fn.replace('/', '\\')
        print(os.path.dirname(fn))
        directory = destination + os.path.dirname(fn)
        if not os.path.exists(directory):
            os.makedirs(directory)
        shutil.copyfile(source + fn, destination + fn)
|
54
|
+
# Script entry point
if __name__ == '__main__':
    main()
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
#
|
|
2
|
+
# api_backend.py
|
|
3
|
+
#
|
|
4
|
+
# Defines the model execution service, which pulls requests (one or more images)
|
|
5
|
+
# from the shared Redis queue, and runs them through the TF model.
|
|
6
|
+
#
|
|
7
|
+
|
|
8
|
+
#%% Imports
|
|
9
|
+
|
|
10
|
+
import os
|
|
11
|
+
import json
|
|
12
|
+
import time
|
|
13
|
+
import redis
|
|
14
|
+
import argparse
|
|
15
|
+
import PIL
|
|
16
|
+
|
|
17
|
+
from io import BytesIO
|
|
18
|
+
|
|
19
|
+
from detection.run_detector import load_detector, convert_to_tf_coords
|
|
20
|
+
import config
|
|
21
|
+
import visualization.visualization_utils as vis_utils
|
|
22
|
+
|
|
23
|
+
#%% Initialization
|
|
24
|
+
|
|
25
|
+
# Connection to the shared Redis queue; api_frontend.py pushes requests onto
# this queue, and this backend pops them off in detect_process().
db = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)

# Folder containing this script (not referenced elsewhere in the visible
# code; kept for parity)
current_directory = os.path.dirname(os.path.realpath(__file__))
28
|
+
|
|
29
|
+
#%% Main loop
|
|
30
|
+
|
|
31
|
+
def detect_process():
    """Main worker loop for the model-execution service.

    Repeatedly pops one request entry off the shared Redis queue.  Each entry
    refers to a temporary folder of image files previously written by
    api_frontend.py; every image in the folder is run through the detector,
    detections above the caller's confidence threshold are collected, and a
    status/result payload is written back to Redis under the request id.

    Runs forever; never returns.  Relies on module-level globals: db (Redis
    connection) and detector (loaded in the __main__ block).
    """

    while True:

        # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
        serialized_entry = db.lpop(config.REDIS_QUEUE_NAME)
        all_detection_results = []
        inference_time_detector = []

        if serialized_entry:

            entry = json.loads(serialized_entry)

            # Renamed local from 'id' to avoid shadowing the builtin
            request_id = entry['id']
            print('Processing images from request id:', request_id)
            return_confidence_threshold = entry['return_confidence_threshold']

            try:

                temp_direc = os.path.join(config.TEMP_FOLDER, request_id)
                assert os.path.isdir(temp_direc), 'Could not find temporary folder {}'.format(temp_direc)

                for filename in os.listdir(temp_direc):

                    # Bug fix: the path was previously built from a literal
                    # name instead of the file being iterated over, so every
                    # iteration read the same (nonexistent) path
                    image_path = os.path.join(temp_direc, filename)
                    print('Reading image from {}'.format(image_path))

                    # Bug fix: the file handle was previously opened and never
                    # closed; read the bytes into memory so the handle can be
                    # closed before (possibly lazy) image decoding
                    with open(image_path, 'rb') as image_file:
                        image_buffer = BytesIO(image_file.read())
                    image = vis_utils.load_image(image_buffer)

                    start_time = time.time()
                    result = detector.generate_detections_one_image(
                        image, filename,
                        detection_threshold=config.DEFAULT_CONFIDENCE_THRESHOLD)
                    all_detection_results.append(result)

                    elapsed = time.time() - start_time
                    inference_time_detector.append(elapsed)

            except Exception as e:

                print('Detection error: ' + str(e))

                db.set(entry['id'], json.dumps({
                    'status': 500,
                    'error': 'Detection error: ' + str(e)
                }))

                continue

            # Filter the detections by the confidence threshold
            #
            # Each result is [ymin, xmin, ymax, xmax, confidence, category]
            #
            # Coordinates are relative, with the origin in the upper-left
            detections = {}

            try:

                for result in all_detection_results:

                    image_name = result['file']
                    _detections = result.get('detections', None)
                    detections[image_name] = []

                    if _detections is None:
                        continue

                    for d in _detections:
                        if d['conf'] > return_confidence_threshold:
                            res = convert_to_tf_coords(d['bbox'])
                            res.append(d['conf'])
                            res.append(int(d['category']))
                            detections[image_name].append(res)

                db.set(entry['id'], json.dumps({
                    'status': 200,
                    'detections': detections,
                    'inference_time_detector': inference_time_detector
                }))

            except Exception as e:

                print('Error consolidating the detection boxes: ' + str(e))

                db.set(entry['id'], json.dumps({
                    'status': 500,
                    'error': 'Error consolidating the detection boxes:' + str(e)
                }))

        # ...if serialized_entry

        else:
            time.sleep(0.005)

    # ...while(True)

# ...def detect_process()
|
124
|
+
|
|
125
|
+
|
|
126
|
+
#%% Command-line driver
|
|
127
|
+
|
|
128
|
+
# Script entry point: load the detector model, warm it up on a dummy image,
# then enter the worker loop.
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='api backend')

    # use --non-docker if you are testing without Docker
    #
    # python api_frontend.py --non-docker
    parser.add_argument('--non-docker', action='store_true', default=False)
    args = parser.parse_args()

    # Outside Docker, the model lives at a debug path configured in config.py
    if args.non_docker:
        model_path = config.DETECTOR_MODEL_PATH_DEBUG
    else:
        model_path = config.DETECTOR_MODEL_PATH

    # Module-level global consumed by detect_process()
    detector = load_detector(model_path)

    # run detections on a test image to load the model
    print('Running initial detection to load model...')
    test_image = PIL.Image.new(mode="RGB", size=(200, 200))
    result = detector.generate_detections_one_image(test_image, "test_image", detection_threshold=config.DEFAULT_CONFIDENCE_THRESHOLD)
    print(result)
    print('\n')

    # Blocks forever, servicing the Redis queue
    detect_process()
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
#
|
|
2
|
+
# api_frontend.py
|
|
3
|
+
#
|
|
4
|
+
# Defines the Flask app, which takes requests (one or more images) from
|
|
5
|
+
# remote callers and pushes the images onto the shared Redis queue, to be processed
|
|
6
|
+
# by the main service in api_backend.py .
|
|
7
|
+
#
|
|
8
|
+
|
|
9
|
+
#%% Imports
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import json
|
|
13
|
+
import time
|
|
14
|
+
import uuid
|
|
15
|
+
import redis
|
|
16
|
+
import shutil
|
|
17
|
+
import argparse
|
|
18
|
+
import traceback
|
|
19
|
+
|
|
20
|
+
from io import BytesIO
|
|
21
|
+
from flask import Flask, Response, jsonify, make_response, request
|
|
22
|
+
from requests_toolbelt.multipart.encoder import MultipartEncoder
|
|
23
|
+
|
|
24
|
+
import visualization.visualization_utils as vis_utils
|
|
25
|
+
import config
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
#%% Initialization
|
|
29
|
+
|
|
30
|
+
# Flask application serving the synchronous detection API
app = Flask(__name__)

# Connection to the Redis queue shared with api_backend.py; requests are
# pushed here and results are read back by request id
db = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)
33
|
+
|
|
34
|
+
#%% Support functions
|
|
35
|
+
|
|
36
|
+
def _make_error_object(error_code, error_message):
|
|
37
|
+
|
|
38
|
+
# Make a dict that the request_processing_function can return to the endpoint
|
|
39
|
+
# function to notify it of an error
|
|
40
|
+
return {
|
|
41
|
+
'error_message': error_message,
|
|
42
|
+
'error_code': error_code
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _make_error_response(error_code, error_message):
    """Build a Flask error response carrying {'error': error_message} as JSON
    with the given HTTP status code."""

    payload = jsonify({'error': error_message})
    return make_response(payload, error_code)
|
50
|
+
|
|
51
|
+
def has_access(request):
    """Check whether the caller is allowed to use the API.

    If no API keys file is deployed, authentication is disabled and every
    request is allowed.  Otherwise the caller must supply a 'key' header that
    matches (case-insensitively, whitespace-stripped) one line of the keys
    file.

    Args:
        request: the incoming flask request

    Returns:
        bool: True if the request should be allowed
    """

    # No key file on disk -> authentication disabled
    if not os.path.exists(config.API_KEYS_FILE):
        return True

    supplied_key = request.headers.get('key')
    if not supplied_key:
        print('Key header not available')
        return False

    supplied_key = supplied_key.strip().lower()

    with open(config.API_KEYS_FILE, "r") as f:
        return any(line.strip().lower() == supplied_key for line in f)
|
69
|
+
|
|
70
|
+
def check_posted_data(request):
    """Validate an incoming /detect request.

    Checks the total payload size, parses and range-checks the confidence
    thresholds, and counts the images of accepted content types.

    Args:
        request: the incoming flask request

    Returns:
        dict: on success, keys 'render_boxes', 'return_confidence_threshold',
        and 'rendering_confidence_threshold'; on failure, an error object with
        keys 'error_code' and 'error_message' (see _make_error_object)
    """

    files = request.files
    params = request.args

    # Verify that the content uploaded is not too big
    #
    # request.content_length is the length of the total payload
    content_length = request.content_length
    if not content_length:
        return _make_error_object(411, 'No image(s) were sent, or content length cannot be determined.')
    if content_length > config.MAX_CONTENT_LENGTH_IN_MB * 1024 * 1024:
        return _make_error_object(413, ('Payload size {:.2f} MB exceeds the maximum allowed of {} MB. '
                                        'Please upload fewer or more compressed images.').format(
            content_length / (1024 * 1024), config.MAX_CONTENT_LENGTH_IN_MB))

    # Idiom fix: direct boolean expression instead of "True if ... else False"
    render_boxes = params.get('render', '').lower() == 'true'

    if 'min_confidence' in params:
        return_confidence_threshold = float(params['min_confidence'])
        print('runserver, post_detect_sync, user specified detection confidence: ', return_confidence_threshold)
        if return_confidence_threshold < 0.0 or return_confidence_threshold > 1.0:
            return _make_error_object(400, 'Detection confidence threshold {} is invalid, should be between 0.0 and 1.0.'.format(
                return_confidence_threshold))
    else:
        return_confidence_threshold = config.DEFAULT_CONFIDENCE_THRESHOLD

    if 'min_rendering_confidence' in params:
        rendering_confidence_threshold = float(params['min_rendering_confidence'])
        print('runserver, post_detect_sync, user specified rendering confidence: ', rendering_confidence_threshold)
        if rendering_confidence_threshold < 0.0 or rendering_confidence_threshold > 1.0:
            return _make_error_object(400, 'Rendering confidence threshold {} is invalid, should be between 0.0 and 1.0.'.format(
                rendering_confidence_threshold))
    else:
        rendering_confidence_threshold = config.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD

    # Verify that the number of images is acceptable
    #
    # Idiom fix: generator expression instead of summing a list of 1s and 0s
    num_images = sum(1 for file in files.values() if file.content_type in config.IMAGE_CONTENT_TYPES)
    print('runserver, post_detect_sync, number of images received: ', num_images)

    if num_images > config.MAX_IMAGES_ACCEPTED:
        return _make_error_object(413, 'Too many images. Maximum number of images that can be processed in one call is {}.'.format(config.MAX_IMAGES_ACCEPTED))
    elif num_images == 0:
        return _make_error_object(400, 'No image(s) of accepted types (image/jpeg, image/png, application/octet-stream) received.')

    return {
        'render_boxes': render_boxes,
        'return_confidence_threshold': return_confidence_threshold,
        'rendering_confidence_threshold': rendering_confidence_threshold
    }

# ...def check_posted_data(request)
|
122
|
+
|
|
123
|
+
|
|
124
|
+
#%% Main loop
|
|
125
|
+
|
|
126
|
+
@app.route(config.API_PREFIX + '/detect', methods = ['POST'])
def detect_sync():
    """Synchronous detection endpoint.

    Validates the API key and the posted form data/images, writes the images
    to a per-request temporary folder, enqueues a job for api_backend.py via
    Redis, then polls Redis for the result.  On success, returns a multipart
    response containing the detection results as JSON and (optionally) the
    images with bounding boxes rendered on them.

    Returns a Flask Response (multipart/form-data) on success, or an error
    response on failure.
    """

    if not has_access(request):
        print('Access denied, please provide a valid API key')
        return _make_error_response(403, 'Access denied, please provide a valid API key')

    # Check whether the request_processing_function had an error
    post_data = check_posted_data(request)
    if post_data.get('error_code', None) is not None:
        return _make_error_response(post_data.get('error_code'), post_data.get('error_message'))

    render_boxes = post_data.get('render_boxes')
    return_confidence_threshold = post_data.get('return_confidence_threshold')
    rendering_confidence_threshold = post_data.get('rendering_confidence_threshold')

    # Unique ID used both as the Redis key for the result and as the name of
    # this request's temporary image folder
    redis_id = str(uuid.uuid4())
    job = {'id': redis_id, 'render_boxes': render_boxes,
           'return_confidence_threshold': return_confidence_threshold}
    temp_direc = os.path.join(config.TEMP_FOLDER, redis_id)

    try:

        try:
            # Write images to temporary files
            #
            # TODO: read from memory rather than using intermediate files
            os.makedirs(temp_direc,exist_ok=True)
            for name, file in request.files.items():
                if file.content_type in config.IMAGE_CONTENT_TYPES:
                    filename = request.files[name].filename
                    image_path = os.path.join(temp_direc, filename)
                    print('Saving image {} to {}'.format(name,image_path))
                    file.save(image_path)
                    assert os.path.isfile(image_path),'Error creating file {}'.format(image_path)

        except Exception as e:
            # NOTE(review): other error paths in this handler use
            # _make_error_response; confirm _make_error_object is also valid
            # as a Flask return value here.
            return _make_error_object(500, 'Error saving images: ' + str(e))

        # Submit the image(s) for processing by api_backend.py, who is waiting on this queue
        db.rpush(config.REDIS_QUEUE_NAME, json.dumps(job))

        while True:

            # TODO: convert to a blocking read and eliminate the sleep() statement in this loop
            result = db.get(redis_id)

            if result:

                result = json.loads(result.decode())
                print('Processing result {}'.format(str(result)))

                if result['status'] == 200:
                    detections = result['detections']
                    db.delete(redis_id)
                else:
                    db.delete(redis_id)
                    print('Detection error: ' + str(result))
                    return _make_error_response(500, 'Detection error: ' + str(result))

                try:
                    print('detect_sync: postprocessing and sending images back...')
                    fields = {
                        'detection_result': ('detection_result', json.dumps(detections), 'application/json'),
                    }

                    # result['status'] is necessarily 200 here (the error
                    # branch above returned), so only render_boxes matters
                    if render_boxes:

                        print('Rendering images')

                        # Fix: don't shadow the [detections] dict (or the
                        # queue payload) with the per-image loop variables
                        for image_name, image_detections in detections.items():

                            # Read the image fully into memory so the file
                            # handle is closed promptly (the original leaked
                            # an open file handle here)
                            with open(os.path.join(temp_direc, image_name), 'rb') as image_file:
                                image = vis_utils.load_image(BytesIO(image_file.read()))

                            # Convert [y1,x1,y2,x2,conf,category] rows into the
                            # {'bbox':[x,y,w,h],...} format the renderer expects
                            _detections = []
                            for det in image_detections:
                                y1,x1,y2,x2 = det[0:4]
                                box_w = x2 - x1
                                box_h = y2 - y1
                                _detections.append({'bbox': [x1,y1,box_w,box_h],
                                                    'conf': det[4],
                                                    'category': det[5]})

                            vis_utils.render_detection_bounding_boxes(_detections, image,
                                confidence_threshold=rendering_confidence_threshold)

                            output_img_stream = BytesIO()
                            image.save(output_img_stream, format='jpeg')
                            output_img_stream.seek(0)
                            fields[image_name] = (image_name, output_img_stream, 'image/jpeg')

                        print('Done rendering images')

                    m = MultipartEncoder(fields=fields)
                    return Response(m.to_string(), mimetype=m.content_type)

                except Exception as e:
                    print(traceback.format_exc())
                    print('Error returning result or rendering the detection boxes: ' + str(e))
                    # Fix: the original fell through here and kept polling a
                    # Redis key that was already deleted, hanging the request
                    # forever; fail explicitly instead
                    return _make_error_response(500,
                        'Error returning result or rendering the detection boxes: ' + str(e))

                finally:
                    # Always clean up this request's temporary images,
                    # best-effort
                    try:
                        print('Removing temporary files')
                        shutil.rmtree(temp_direc)
                    except Exception as e:
                        print('Error removing temporary folder {}: {}'.format(temp_direc,str(e)))

            else:
                time.sleep(0.005)

            # ...if we do/don't have a request available on the queue

        # ...while(True)

    except Exception as e:

        print(traceback.format_exc())
        return _make_error_object(500, 'Error processing images: ' + str(e))

# ...def detect_sync()
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
#%% Command-line driver
|
|
252
|
+
|
|
253
|
+
if __name__ == '__main__':

    # --non-docker runs the Flask development server directly, for testing
    # outside the container:
    #
    # python api_frontend.py --non-docker
    arg_parser = argparse.ArgumentParser(description='api frontend')
    arg_parser.add_argument('--non-docker', action="store_true", default=False)
    options = arg_parser.parse_args()

    if options.non_docker:
        # Local testing: bind to all interfaces on a fixed port
        app.run(host='0.0.0.0', port=5050)
    else:
        # Inside Docker: use Flask's defaults (presumably the container's
        # network configuration handles exposure — confirm against Dockerfile)
        app.run()
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
## Camera trap real-time API configuration

# Redis connection used to pass requests between the API front end and the
# detection backend
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_QUEUE_NAME = 'camera-trap-queue'

# Full path to the temporary folder for image storage, only meaningful
# within the Docker container
TEMP_FOLDER = '/app/temp'

# Request size limits: upper limit on total content length (all images and
# parameters), assuming ~5MB per image times the number of images allowed
MAX_CONTENT_LENGTH_IN_MB = 5 * 8
MAX_IMAGES_ACCEPTED = 8

# Content types treated as images in incoming multipart requests
IMAGE_CONTENT_TYPES = ['image/png', 'application/octet-stream', 'image/jpeg']

# MegaDetector model used by the backend (in-container path)
DETECTOR_MODEL_PATH = '/app/animal_detection_api/model/md_v5a.0.0.pt'
DETECTOR_MODEL_VERSION = 'v5a.0.0'

# Use this when testing without Docker
DETECTOR_MODEL_PATH_DEBUG = 'model/md_v5a.0.0.pt'

# Minimum confidence threshold for detections
DEFAULT_CONFIDENCE_THRESHOLD = 0.01

# Minimum confidence threshold for showing a bounding box on the output image
DEFAULT_RENDERING_CONFIDENCE_THRESHOLD = 0.2

# URL prefix for all endpoints exposed by this API
API_PREFIX = '/v1/camera-trap/sync'

# File listing the allowed API keys
API_KEYS_FILE = 'allowed_keys.txt'
|
|
File without changes
|