megadetector-5.0.7-py3-none-any.whl → megadetector-5.0.9-py3-none-any.whl
This diff shows the contents of publicly available package versions as released to one of the supported registries, and is provided for informational purposes only.
Potentially problematic release: this version of megadetector might be problematic.
- api/__init__.py +0 -0
- api/batch_processing/__init__.py +0 -0
- api/batch_processing/api_core/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/__init__.py +0 -0
- api/batch_processing/api_core/batch_service/score.py +0 -1
- api/batch_processing/api_core/server_job_status_table.py +0 -1
- api/batch_processing/api_core_support/__init__.py +0 -0
- api/batch_processing/api_core_support/aggregate_results_manually.py +0 -1
- api/batch_processing/api_support/__init__.py +0 -0
- api/batch_processing/api_support/summarize_daily_activity.py +0 -1
- api/batch_processing/data_preparation/__init__.py +0 -0
- api/batch_processing/data_preparation/manage_local_batch.py +93 -79
- api/batch_processing/data_preparation/manage_video_batch.py +8 -8
- api/batch_processing/integration/digiKam/xmp_integration.py +0 -1
- api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -1
- api/batch_processing/postprocessing/__init__.py +0 -0
- api/batch_processing/postprocessing/add_max_conf.py +12 -12
- api/batch_processing/postprocessing/categorize_detections_by_size.py +32 -14
- api/batch_processing/postprocessing/combine_api_outputs.py +69 -55
- api/batch_processing/postprocessing/compare_batch_results.py +114 -44
- api/batch_processing/postprocessing/convert_output_format.py +62 -19
- api/batch_processing/postprocessing/load_api_results.py +17 -20
- api/batch_processing/postprocessing/md_to_coco.py +31 -21
- api/batch_processing/postprocessing/md_to_labelme.py +165 -68
- api/batch_processing/postprocessing/merge_detections.py +40 -15
- api/batch_processing/postprocessing/postprocess_batch_results.py +270 -186
- api/batch_processing/postprocessing/remap_detection_categories.py +170 -0
- api/batch_processing/postprocessing/render_detection_confusion_matrix.py +75 -39
- api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +53 -44
- api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +25 -14
- api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +244 -160
- api/batch_processing/postprocessing/separate_detections_into_folders.py +159 -114
- api/batch_processing/postprocessing/subset_json_detector_output.py +146 -169
- api/batch_processing/postprocessing/top_folders_to_bottom.py +77 -43
- api/synchronous/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
- api/synchronous/api_core/animal_detection_api/api_backend.py +0 -2
- api/synchronous/api_core/animal_detection_api/api_frontend.py +266 -268
- api/synchronous/api_core/animal_detection_api/config.py +35 -35
- api/synchronous/api_core/tests/__init__.py +0 -0
- api/synchronous/api_core/tests/load_test.py +109 -109
- classification/__init__.py +0 -0
- classification/aggregate_classifier_probs.py +21 -24
- classification/analyze_failed_images.py +11 -13
- classification/cache_batchapi_outputs.py +51 -51
- classification/create_classification_dataset.py +69 -68
- classification/crop_detections.py +54 -53
- classification/csv_to_json.py +97 -100
- classification/detect_and_crop.py +105 -105
- classification/evaluate_model.py +43 -42
- classification/identify_mislabeled_candidates.py +47 -46
- classification/json_to_azcopy_list.py +10 -10
- classification/json_validator.py +72 -71
- classification/map_classification_categories.py +44 -43
- classification/merge_classification_detection_output.py +68 -68
- classification/prepare_classification_script.py +157 -154
- classification/prepare_classification_script_mc.py +228 -228
- classification/run_classifier.py +27 -26
- classification/save_mislabeled.py +30 -30
- classification/train_classifier.py +20 -20
- classification/train_classifier_tf.py +21 -22
- classification/train_utils.py +10 -10
- data_management/__init__.py +0 -0
- data_management/annotations/__init__.py +0 -0
- data_management/annotations/annotation_constants.py +18 -31
- data_management/camtrap_dp_to_coco.py +238 -0
- data_management/cct_json_utils.py +107 -59
- data_management/cct_to_md.py +176 -158
- data_management/cct_to_wi.py +247 -219
- data_management/coco_to_labelme.py +272 -0
- data_management/coco_to_yolo.py +86 -62
- data_management/databases/__init__.py +0 -0
- data_management/databases/add_width_and_height_to_db.py +20 -16
- data_management/databases/combine_coco_camera_traps_files.py +35 -31
- data_management/databases/integrity_check_json_db.py +130 -83
- data_management/databases/subset_json_db.py +25 -16
- data_management/generate_crops_from_cct.py +27 -45
- data_management/get_image_sizes.py +188 -144
- data_management/importers/add_nacti_sizes.py +8 -8
- data_management/importers/add_timestamps_to_icct.py +78 -78
- data_management/importers/animl_results_to_md_results.py +158 -160
- data_management/importers/auckland_doc_test_to_json.py +9 -9
- data_management/importers/auckland_doc_to_json.py +8 -8
- data_management/importers/awc_to_json.py +7 -7
- data_management/importers/bellevue_to_json.py +15 -15
- data_management/importers/cacophony-thermal-importer.py +13 -13
- data_management/importers/carrizo_shrubfree_2018.py +8 -8
- data_management/importers/carrizo_trail_cam_2017.py +8 -8
- data_management/importers/cct_field_adjustments.py +9 -9
- data_management/importers/channel_islands_to_cct.py +10 -10
- data_management/importers/eMammal/copy_and_unzip_emammal.py +1 -0
- data_management/importers/ena24_to_json.py +7 -7
- data_management/importers/filenames_to_json.py +8 -8
- data_management/importers/helena_to_cct.py +7 -7
- data_management/importers/idaho-camera-traps.py +7 -7
- data_management/importers/idfg_iwildcam_lila_prep.py +10 -10
- data_management/importers/jb_csv_to_json.py +9 -9
- data_management/importers/mcgill_to_json.py +8 -8
- data_management/importers/missouri_to_json.py +18 -18
- data_management/importers/nacti_fieldname_adjustments.py +10 -10
- data_management/importers/noaa_seals_2019.py +8 -8
- data_management/importers/pc_to_json.py +7 -7
- data_management/importers/plot_wni_giraffes.py +7 -7
- data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -359
- data_management/importers/prepare_zsl_imerit.py +7 -7
- data_management/importers/rspb_to_json.py +8 -8
- data_management/importers/save_the_elephants_survey_A.py +8 -8
- data_management/importers/save_the_elephants_survey_B.py +9 -9
- data_management/importers/snapshot_safari_importer.py +26 -26
- data_management/importers/snapshot_safari_importer_reprise.py +665 -665
- data_management/importers/snapshot_serengeti_lila.py +14 -14
- data_management/importers/sulross_get_exif.py +8 -9
- data_management/importers/timelapse_csv_set_to_json.py +11 -11
- data_management/importers/ubc_to_json.py +13 -13
- data_management/importers/umn_to_json.py +7 -7
- data_management/importers/wellington_to_json.py +8 -8
- data_management/importers/wi_to_json.py +9 -9
- data_management/importers/zamba_results_to_md_results.py +181 -181
- data_management/labelme_to_coco.py +309 -159
- data_management/labelme_to_yolo.py +103 -60
- data_management/lila/__init__.py +0 -0
- data_management/lila/add_locations_to_island_camera_traps.py +9 -9
- data_management/lila/add_locations_to_nacti.py +147 -147
- data_management/lila/create_lila_blank_set.py +114 -31
- data_management/lila/create_lila_test_set.py +8 -8
- data_management/lila/create_links_to_md_results_files.py +106 -106
- data_management/lila/download_lila_subset.py +92 -90
- data_management/lila/generate_lila_per_image_labels.py +56 -43
- data_management/lila/get_lila_annotation_counts.py +18 -15
- data_management/lila/get_lila_image_counts.py +11 -11
- data_management/lila/lila_common.py +103 -70
- data_management/lila/test_lila_metadata_urls.py +132 -116
- data_management/ocr_tools.py +173 -128
- data_management/read_exif.py +161 -99
- data_management/remap_coco_categories.py +84 -0
- data_management/remove_exif.py +58 -62
- data_management/resize_coco_dataset.py +32 -44
- data_management/wi_download_csv_to_coco.py +246 -0
- data_management/yolo_output_to_md_output.py +86 -73
- data_management/yolo_to_coco.py +535 -95
- detection/__init__.py +0 -0
- detection/detector_training/__init__.py +0 -0
- detection/process_video.py +85 -33
- detection/pytorch_detector.py +43 -25
- detection/run_detector.py +157 -72
- detection/run_detector_batch.py +189 -114
- detection/run_inference_with_yolov5_val.py +118 -51
- detection/run_tiled_inference.py +113 -42
- detection/tf_detector.py +51 -28
- detection/video_utils.py +606 -521
- docs/source/conf.py +43 -0
- md_utils/__init__.py +0 -0
- md_utils/azure_utils.py +9 -9
- md_utils/ct_utils.py +249 -70
- md_utils/directory_listing.py +59 -64
- md_utils/md_tests.py +968 -862
- md_utils/path_utils.py +655 -155
- md_utils/process_utils.py +157 -133
- md_utils/sas_blob_utils.py +20 -20
- md_utils/split_locations_into_train_val.py +45 -32
- md_utils/string_utils.py +33 -10
- md_utils/url_utils.py +208 -27
- md_utils/write_html_image_list.py +51 -35
- md_visualization/__init__.py +0 -0
- md_visualization/plot_utils.py +102 -109
- md_visualization/render_images_with_thumbnails.py +34 -34
- md_visualization/visualization_utils.py +908 -311
- md_visualization/visualize_db.py +109 -58
- md_visualization/visualize_detector_output.py +61 -42
- {megadetector-5.0.7.dist-info → megadetector-5.0.9.dist-info}/METADATA +21 -17
- megadetector-5.0.9.dist-info/RECORD +224 -0
- {megadetector-5.0.7.dist-info → megadetector-5.0.9.dist-info}/WHEEL +1 -1
- {megadetector-5.0.7.dist-info → megadetector-5.0.9.dist-info}/top_level.txt +1 -0
- taxonomy_mapping/__init__.py +0 -0
- taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +342 -335
- taxonomy_mapping/map_new_lila_datasets.py +154 -154
- taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -134
- taxonomy_mapping/preview_lila_taxonomy.py +591 -591
- taxonomy_mapping/retrieve_sample_image.py +12 -12
- taxonomy_mapping/simple_image_download.py +11 -11
- taxonomy_mapping/species_lookup.py +10 -10
- taxonomy_mapping/taxonomy_csv_checker.py +18 -18
- taxonomy_mapping/taxonomy_graph.py +47 -47
- taxonomy_mapping/validate_lila_category_mappings.py +83 -76
- data_management/cct_json_to_filename_json.py +0 -89
- data_management/cct_to_csv.py +0 -140
- data_management/databases/remove_corrupted_images_from_db.py +0 -191
- detection/detector_training/copy_checkpoints.py +0 -43
- md_visualization/visualize_megadb.py +0 -183
- megadetector-5.0.7.dist-info/RECORD +0 -202
- {megadetector-5.0.7.dist-info → megadetector-5.0.9.dist-info}/LICENSE +0 -0
md_visualization/visualize_db.py
CHANGED
```diff
@@ -1,101 +1,135 @@
-
-
-
-
-
-
-
-
+"""
+
+visualize_db.py
+
+Outputs an HTML page visualizing annotations (class labels and/or bounding boxes)
+on a sample of images in a database in the COCO Camera Traps format.
+
+"""
 
 #%% Imports
 
 import argparse
 import inspect
+import random
 import json
 import math
 import os
 import sys
 import time
+
+import pandas as pd
+import numpy as np
+
+import humanfriendly
+
 from itertools import compress
 from multiprocessing.pool import ThreadPool
 from multiprocessing.pool import Pool
-
-import pandas as pd
 from tqdm import tqdm
-import humanfriendly
 
 from md_utils.write_html_image_list import write_html_image_list
+from data_management.cct_json_utils import IndexedJsonDb
 
 import md_visualization.visualization_utils as vis_utils
-from data_management.cct_json_utils import IndexedJsonDb
 
 
 #%% Settings
 
 class DbVizOptions:
+    """
+    Parameters controlling the behavior of visualize_db().
+    """
 
-
+    #: Number of images to sample from the database, or None to visualize all images
     num_to_visualize = None
 
-
-
-
-    viz_size = (
-
-
-
-
-
-
-
+    #: Target size for rendering; set either dimension to -1 to preserve aspect ratio.
+    #:
+    #: If viz_size is None or (-1,-1), the original image size is used.
+    viz_size = (800, -1)
+
+    #: HTML rendering options; see write_html_image_list for details
+    #:
+    #:The most relevant option one might want to set here is:
+    #:
+    #: htmlOptions['maxFiguresPerHtmlFile']
+    #:
+    #: ...which can be used to paginate previews to a number of images that will load well
+    #: in a browser (5000 is a reasonable limit).
     htmlOptions = write_html_image_list()
 
+    #: Whether to sort images by filename (True) or randomly (False)
     sort_by_filename = True
+
+    #: Only show images that contain bounding boxes
     trim_to_images_with_bboxes = False
 
-
+    #: Random seed to use for sampling images
+    random_seed = 0
 
-
+    #: Should we include Web search links for each category name?
     add_search_links = False
 
-
+    #: Should each thumbnail image link back to the original image?
     include_image_links = False
 
-
+    #: Should there be a text link back to each original image?
     include_filename_links = False
 
+    #: Line width in pixels
     box_thickness = 4
+
+    #: Number of pixels to expand each bounding box
     box_expansion = 0
 
-
-
+    #: Only include images that contain annotations with these class names (not IDs)
+    #:
+    #: Mutually exclusive with classes_to_exclude
     classes_to_include = None
 
-
+    #: Exclude images that contain annotations with these class names (not IDs)
+    #:
+    #: Mutually exclusive with classes_to_include
+    classes_to_exclude = None
+
+    #: Special tag used to say "show me all images with multiple categories"
+    #:
+    #: :meta private:
     multiple_categories_tag = '*multiple*'
 
-
-
+    #: We sometimes flatten image directories by replacing a path separator with
+    #: another character. Leave blank for the typical case where this isn't necessary.
     pathsep_replacement = '' # '~'
 
-
-
+    #: Parallelize rendering across multiple workers
+    parallelize_rendering = False
 
-
-
-    # flip this with a warning, since I intend to support it in the future.
+    #: In theory, whether to parallelize with threads (True) or processes (False), but
+    #: process-based parallelization in this function is currently unsupported
     parallelize_rendering_with_threads = True
-    parallelize_rendering = False
 
-
+    #: Number of workers to use for parallelization; ignored if parallelize_rendering
+    #: is False
+    parallelize_rendering_n_cores = 25
+
+    #: Should we show absolute (True) or relative (False) paths for each image?
     show_full_paths = False
 
+    #: Set to False to skip existing images
+    force_rendering = True
+
+    #: Enable additionald debug console output
+    verbose = False
+
 
 #%% Helper functions
 
-
-
-
+def _image_filename_to_path(image_file_name, image_base_dir, pathsep_replacement=''):
+    """
+    Translates the file name in an image entry in the json database to a path, possibly doing
+    some manipulation of path separators.
+    """
 
     if len(pathsep_replacement) > 0:
         image_file_name = os.path.normpath(image_file_name).replace(os.pathsep,pathsep_replacement)
@@ -106,14 +140,16 @@ def image_filename_to_path(image_file_name, image_base_dir, pathsep_replacement=
 
 def visualize_db(db_path, output_dir, image_base_dir, options=None):
     """
-    Writes images and html to output_dir to visualize the annotations in
-    db_path.
+    Writes images and html to output_dir to visualize the annotations in a .json file.
 
-
+    Args:
+        db_path (str or dict): the .json filename to load, or a previously-loaded database
+        image_base_dir (str): the folder where the images live; filenames in [db_path] should
+            be relative to this folder.
+        options (DbVizOptions, optional): See DbVizOptions for details
 
-    Returns
-
-    return htmlOutputFile,image_db
+    Returns:
+        tuple: A length-two tuple containing (the html filename) and (the loaded database).
     """
 
     if options is None:
@@ -241,7 +277,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
             img_path = image_base_dir + img_relative_path
         else:
             img_path = os.path.join(image_base_dir,
-
+                _image_filename_to_path(img_relative_path, image_base_dir))
 
         annos_i = df_anno.loc[df_anno['image_id'] == img_id, :] # all annotations on this image
 
@@ -272,7 +308,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
             categoryName = label_map[categoryID]
             if options.add_search_links:
                 categoryName = categoryName.replace('"','')
-                categoryName = '<a href="https://www.
+                categoryName = '<a href="https://www.google.com/search?tbm=isch&q={}">{}</a>'.format(
                     categoryName,categoryName)
             imageCategories.add(categoryName)
 
@@ -317,14 +353,22 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         if options.include_filename_links:
             filename_text = '<a href="{}">{}</a>'.format(img_path,filename_text)
 
+        flagString = ''
+
+        def isnan(x):
+            return (isinstance(x,float) and np.isnan(x))
+
+        if ('flags' in img) and (not isnan(img['flags'])):
+            flagString = ', flags: {}'.format(str(img['flags']))
+
         # We're adding html for an image before we render it, so it's possible this image will
         # fail to render. For applications where this script is being used to debua a database
         # (the common case?), this is useful behavior, for other applications, this is annoying.
         image_dict = \
         {
             'filename': '{}/{}'.format('rendered_images', file_name),
-            'title': '{}<br/>{}, num boxes: {}, {}class labels: {}{}'.format(
-                filename_text, img_id, len(bboxes), frameString, imageClasses, labelLevelString),
+            'title': '{}<br/>{}, num boxes: {}, {}class labels: {}{}{}'.format(
+                filename_text, img_id, len(bboxes), frameString, imageClasses, labelLevelString, flagString),
             'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;' + \
                 'text-align:left;margin-top:20;margin-bottom:5'
         }
@@ -341,7 +385,13 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         bboxes = rendering_info['bboxes']
         bboxClasses = rendering_info['boxClasses']
         output_file_name = rendering_info['output_file_name']
+        output_full_path = os.path.join(output_dir, 'rendered_images', output_file_name)
 
+        if (os.path.isfile(output_full_path)) and (not options.force_rendering):
+            if options.verbose:
+                print('Skipping existing image {}'.format(output_full_path))
+            return True
+
         if not img_path.startswith('http'):
             if not os.path.exists(img_path):
                 print('Image {} cannot be found'.format(img_path))
@@ -356,7 +406,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
                 image = vis_utils.resize_image(original_image, options.viz_size[0],
                                                options.viz_size[1])
             except Exception as e:
-                print('Image {} failed to open
+                print('Image {} failed to open, error: {}'.format(img_path, e))
                 return False
 
         vis_utils.render_db_bounding_boxes(boxes=bboxes, classes=bboxClasses,
@@ -365,7 +415,8 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
                                            thickness=options.box_thickness,
                                            expansion=options.box_expansion)
 
-        image.save(
+        image.save(output_full_path)
+
         return True
 
     # ...def render_image_info
@@ -408,6 +459,8 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
 
     if options.sort_by_filename:
         images_html = sorted(images_html, key=lambda x: x['filename'])
+    else:
+        random.shuffle(images_html)
 
     htmlOutputFile = os.path.join(output_dir, 'index.html')
 
@@ -477,9 +530,7 @@ def main():
 
     visualize_db(options.db_path,options.output_dir,options.image_base_dir,options)
 
-
-if __name__ == '__main__':
-
+if __name__ == '__main__':
     main()
 
 
```
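For orientation, here is a minimal usage sketch of the reworked options class and `visualize_db()` shown in the diff above. The database, image, and output paths are placeholders, and only fields that appear in the diff are set.

```python
# Hypothetical usage sketch for visualize_db() as of 5.0.9; the .json, image,
# and output paths below are placeholders, not files shipped with the package.
from md_visualization.visualize_db import DbVizOptions, visualize_db

options = DbVizOptions()
options.num_to_visualize = 500          # sample 500 images (None = all images)
options.viz_size = (800, -1)            # 800 px wide, preserve aspect ratio
options.sort_by_filename = False        # False now shuffles the HTML index randomly
options.parallelize_rendering = True    # render with multiple workers...
options.parallelize_rendering_with_threads = True  # ...using threads
options.force_rendering = False         # new in 5.0.9: skip already-rendered images
options.verbose = True                  # new in 5.0.9: extra console output

# Returns (html filename, loaded database) per the updated docstring
html_file, image_db = visualize_db(db_path='/data/camera_traps.json',
                                   output_dir='/tmp/db_preview',
                                   image_base_dir='/data/images',
                                   options=options)
print('Wrote preview to {}'.format(html_file))
```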
md_visualization/visualize_detector_output.py
CHANGED

```diff
@@ -1,11 +1,11 @@
-
-
-
-
-
-
-
-
+"""
+
+visualize_detector_output.py
+
+Render images with bounding boxes annotated on them to a folder, based on a
+detector output result file (.json), optionally writing an HTML index file.
+
+"""
 
 #%% Imports
 
@@ -38,11 +38,14 @@ DEFAULT_DETECTOR_LABEL_MAP = {
 
 #%% Support functions
 
-def
+def _render_image(entry,
                   detector_label_map,classification_label_map,
                   confidence_threshold,classification_confidence_threshold,
                   render_detections_only,preserve_path_structure,out_dir,images_dir,
                   output_image_width):
+    """
+    Internal function for rendering a single image.
+    """
 
     rendering_result = {'failed_image':False,'missing_image':False,
                         'skipped_image':False,'annotated_image_path':None,
@@ -97,38 +100,55 @@ def render_image(entry,
 
 #%% Main function
 
-def visualize_detector_output(detector_output_path
-                              out_dir
-                              images_dir
-                              confidence_threshold
-                              sample
-                              output_image_width
-                              random_seed
-                              render_detections_only
-                              classification_confidence_threshold
-                              html_output_file
-                              html_output_options
-                              preserve_path_structure
-                              parallelize_rendering
-                              parallelize_rendering_n_cores
-                              parallelize_rendering_with_threads
+def visualize_detector_output(detector_output_path,
+                              out_dir,
+                              images_dir,
+                              confidence_threshold=0.15,
+                              sample=-1,
+                              output_image_width=700,
+                              random_seed=None,
+                              render_detections_only=False,
+                              classification_confidence_threshold=0.1,
+                              html_output_file=None,
+                              html_output_options=None,
+                              preserve_path_structure=False,
+                              parallelize_rendering=False,
+                              parallelize_rendering_n_cores=10,
+                              parallelize_rendering_with_threads=True):
 
     """
-
+    Draws bounding boxes on images given the output of a detector.
 
     Args:
-        detector_output_path
-        out_dir
-        images_dir
-
-
-
-        output_image_width
-            set to -1 to use original image width
-        random_seed
-        render_detections_only
-
-
+        detector_output_path (str): path to detector output .json file
+        out_dir (str): path to directory for saving annotated images
+        images_dir (str): folder where the images live; filenames in
+            [detector_output_path] should be relative to [image_dir]
+        confidence_threshold (float, optional): threshold above which detections will be rendered
+        sample (int, optional): maximum number of images to render, -1 for all
+        output_image_width (int, optional): width in pixels to resize images for display,
+            preserving aspect ration; set to -1 to use original image width
+        random_seed (int, optional): seed to use for choosing images when sample != -1
+        render_detections_only (bool): only render images with above-threshold detections
+        classification_confidence_threshold (float, optional): only show classifications
+            above this threshold; does not impact whether images are rendered, only whether
+            classification labels (not detection categories) are displayed
+        html_output_file (str, optional): output path for an HTML index file (not written
+            if None)
+        html_output_options (dict, optional): HTML formatting options; see write_html_image_list
+            for details
+        preserve_path_structure (bool, optional): if False (default), writes images to unique
+            names in a flat structure in the output folder; if True, preserves relative paths
+            within the output folder
+        parallelize_rendering (bool, optional): whether to use concurrent workers for rendering
+        parallelize_rendering_n_cores (int, optional): number of concurrent workers to use
+            (ignored if parallelize_rendering is False)
+        parallelize_rendering_with_threads (bool, optional): determines whether we use
+            threads (True) or processes (False) for parallelization (ignored if parallelize_rendering
+            is False)
+
+    Returns:
+        list: list of paths to annotated images
     """
 
     assert os.path.exists(detector_output_path), \
@@ -209,7 +229,7 @@ def visualize_detector_output(detector_output_path: str,
         print('Rendering images with {} {}'.format(parallelize_rendering_n_cores,
                                                    worker_string))
         rendering_results = list(tqdm(pool.imap(
-            partial(
+            partial(_render_image,detector_label_map=detector_label_map,
                     classification_label_map=classification_label_map,
                     confidence_threshold=confidence_threshold,
                     classification_confidence_threshold=classification_confidence_threshold,
@@ -224,7 +244,7 @@ def visualize_detector_output(detector_output_path: str,
 
         for entry in tqdm(images):
 
-            rendering_result =
+            rendering_result = _render_image(entry,detector_label_map,classification_label_map,
                                              confidence_threshold,classification_confidence_threshold,
                                              render_detections_only,preserve_path_structure,out_dir,
                                              images_dir,output_image_width)
@@ -269,9 +289,8 @@ def visualize_detector_output(detector_output_path: str,
 
 #%% Command-line driver
 
-def main()
-
-
+def main():
+
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description='Annotate the bounding boxes predicted by a detector above '
```
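Similarly, a minimal sketch of calling `visualize_detector_output()` with the keyword defaults that are now explicit in 5.0.9; the input and output paths are placeholders.

```python
# Hypothetical usage sketch for visualize_detector_output(); the results file,
# image folder, and output paths are placeholders. Per the diff, the function
# asserts that detector_output_path exists and returns a list of rendered paths.
from md_visualization.visualize_detector_output import visualize_detector_output

annotated_images = visualize_detector_output(
    detector_output_path='/data/md_results.json',   # MegaDetector output .json
    out_dir='/tmp/detection_preview',
    images_dir='/data/images',
    confidence_threshold=0.15,         # default detection threshold
    sample=1000,                       # render at most 1000 images (-1 = all)
    render_detections_only=True,       # skip images with no above-threshold detections
    html_output_file='/tmp/detection_preview/index.html',
    parallelize_rendering=True,
    parallelize_rendering_n_cores=10)

print('Rendered {} images'.format(len(annotated_images)))
```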
{megadetector-5.0.7.dist-info → megadetector-5.0.9.dist-info}/METADATA
CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: megadetector
-Version: 5.0.
+Version: 5.0.9
 Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
 Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
 Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
@@ -25,6 +25,7 @@ License: MIT License
 SOFTWARE.
 
 Project-URL: Homepage, https://github.com/agentmorris/MegaDetector
+Project-URL: Documentation, https://megadetector.readthedocs.io
 Project-URL: Bug Reports, https://github.com/agentmorris/MegaDetector/issues
 Project-URL: Source, https://github.com/agentmorris/MegaDetector
 Keywords: camera traps,conservation,wildlife,ai
@@ -51,16 +52,16 @@ Requires-Dist: ultralytics-yolov5 ==0.1.1
 
 # MegaDetector
 
-This package is a pip-installable version of the support/inference code for [MegaDetector](https://github.com/agentmorris/MegaDetector), an object detection model that helps conservation biologists spend less time doing boring things with camera trap images.
+This package is a pip-installable version of the support/inference code for [MegaDetector](https://github.com/agentmorris/MegaDetector), an object detection model that helps conservation biologists spend less time doing boring things with camera trap images. Complete documentation for this Python package is available at <megadetector.readthedocs.io>.
 
-If you want to learn more about what MegaDetector is all about, head over to the [MegaDetector repo](https://github.com/agentmorris/MegaDetector).
+If you aren't looking for the Python package specificaly, and you just want to learn more about what MegaDetector is all about, head over to the [MegaDetector repo](https://github.com/agentmorris/MegaDetector).
 
 
 ## Reasons you probably aren't looking for this package
 
 ### If you are an ecologist...
 
-If you are an ecologist looking to use MegaDetector to help you get through your camera trap images, you probably don't want this package. We recommend starting with our "[Getting started with MegaDetector](https://github.com/agentmorris/MegaDetector/blob/main/
+If you are an ecologist looking to use MegaDetector to help you get through your camera trap images, you probably don't want this package. We recommend starting with our "[Getting started with MegaDetector](https://github.com/agentmorris/MegaDetector/blob/main/getting-started.md)" page, then digging in to the [MegaDetector User Guide](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md), which will walk you through the process of using MegaDetector. That journey will <i>not</i> involve this Python package.
 
 ### If you are a computer-vision-y type...
 
@@ -80,9 +81,14 @@ To install:
 
 MegaDetector model weights aren't downloaded at pip-install time, but they will be (optionally) automatically downloaded the first time you run the model.
 
-
+## Package reference
 
-
+See <megadetector.readthedocs.io>.
+
+
+## Examples of things you can do with this package
+
+### Run MegaDetector on one image and count the number of detections
 
 ```
 from md_utils import url_utils
@@ -95,18 +101,16 @@ temporary_filename = url_utils.download_url(image_url)
 
 image = vis_utils.load_image(temporary_filename)
 
-# This will automatically download MDv5a
-# you can also specify a filename explicitly, or set the $MDV5A
-# environment variable to point to the model file.
+# This will automatically download MDv5a; you can also specify a filename.
 model = run_detector.load_detector('MDV5A')
 
 result = model.generate_detections_one_image(image)
 
 detections_above_threshold = [d for d in result['detections'] if d['conf'] > 0.2]
-print('Found {}
+print('Found {} detections above threshold'.format(len(detections_above_threshold)))
 ```
 
-
+### Run MegaDetector on a folder of images
 
 ```
 from detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
@@ -120,14 +124,14 @@ output_file = os.path.expanduser('~/megadetector_output_test.json')
 # Recursively find images
 image_file_names = path_utils.find_images(image_folder,recursive=True)
 
-# This will automatically download MDv5a
-# you can also specify a filename explicitly, or set the $MDV5A
-# environment variable to point to the model file.
+# This will automatically download MDv5a; you can also specify a filename.
 results = load_and_run_detector_batch('MDV5A', image_file_names)
 
-# Write results
-
-
+# Write results to a format that Timelapse and other downstream tools like.
+write_results_to_file(results,
+                      output_file,
+                      relative_path_base=image_folder,
+                      detector_file=detector_filename)
 ```
 
 ## Contact
````
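The README hunks above show the folder-processing example only in fragments, so here is a consolidated sketch. The `path_utils` import and the `image_folder` and `detector_filename` values are not visible in the diff and are filled in as assumptions; the remaining calls are taken directly from the README content shown above.

```python
# Consolidated sketch of the README's "run MegaDetector on a folder" example;
# image_folder and detector_filename are assumed values, not from the diff.
import os
from detection.run_detector_batch import load_and_run_detector_batch, write_results_to_file
from md_utils import path_utils

image_folder = os.path.expanduser('~/camera_trap_images')            # placeholder folder
output_file = os.path.expanduser('~/megadetector_output_test.json')  # from the hunk header
detector_filename = 'MDV5A'   # assumed; triggers an automatic MDv5a download

# Recursively find images
image_file_names = path_utils.find_images(image_folder, recursive=True)

# This will automatically download MDv5a; you can also specify a filename.
results = load_and_run_detector_batch(detector_filename, image_file_names)

# Write results to a format that Timelapse and other downstream tools like.
write_results_to_file(results,
                      output_file,
                      relative_path_base=image_folder,
                      detector_file=detector_filename)
```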