megadetector 5.0.27__py3-none-any.whl → 5.0.29__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of megadetector might be problematic.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +232 -223
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +341 -338
- megadetector/detection/pytorch_detector.py +308 -266
- megadetector/detection/run_detector.py +186 -166
- megadetector/detection/run_detector_batch.py +366 -364
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +312 -253
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +291 -283
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +808 -311
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +220 -147
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -0
- megadetector/postprocessing/load_api_results.py +25 -22
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +319 -302
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1019 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1511 -406
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +73 -60
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2868
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +424 -404
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +126 -98
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.27.dist-info/RECORD +0 -208
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
Diff of megadetector/detection/run_detector.py (+186 -166):

@@ -2,15 +2,15 @@
 
 run_detector.py
 
-Module to run an animal detection model on images. The main function in this script also renders
+Module to run an animal detection model on images. The main function in this script also renders
 the predicted bounding boxes on images and saves the resulting images (with bounding boxes).
 
 **This script is not a good way to process lots of images**. It does not produce a useful
 output format, and it does not facilitate checkpointing the results so if it crashes you
-would have to start from scratch. **If you want to run a detector on lots of images, you should
+would have to start from scratch. **If you want to run a detector on lots of images, you should
 check out run_detector_batch.py**.
 
-That said, this script (run_detector.py) is a good way to test our detector on a handful of images
+That said, this script (run_detector.py) is a good way to test our detector on a handful of images
 and get super-satisfying, graphical results.
 
 If you would like to *not* use the GPU on the machine, set the environment

@@ -29,6 +29,7 @@ import os
 import statistics
 import sys
 import time
+import json
 import warnings
 import tempfile
 

@@ -147,7 +148,7 @@ known_models = {
         'model_type':'yolov5',
         'normalized_typical_inference_speed':1.0
     },
-
+
     # Fake values for testing
     'v1000.0.0-redwood':
     {

@@ -179,7 +180,7 @@ DEFAULT_BOX_EXPANSION = 0
 DEFAULT_LABEL_FONT_SIZE = 16
 DETECTION_FILENAME_INSERT = '_detections'
 
-# Approximate inference speeds (in images per second) for MDv5 based on
+# Approximate inference speeds (in images per second) for MDv5 based on
 # benchmarks, only used for reporting very coarse expectations about inference time.
 device_token_to_mdv5_inference_speed = {
     '4090':17.6,

@@ -191,9 +192,9 @@ device_token_to_mdv5_inference_speed = {
     # is around 3.5x faster than MDv4.
     'V100':2.79*3.5,
     '2080':2.3*3.5,
-    '2060':1.6*3.5
+    '2060':1.6*3.5
 }
-
+
 
 #%% Utility functions
 

@@ -201,21 +202,21 @@ def get_detector_metadata_from_version_string(detector_version):
     """
     Given a MegaDetector version string (e.g. "v4.1.0"), returns the metadata for
     the model. Used for writing standard defaults to batch output files.
-
+
     Args:
         detector_version (str): a detection version string, e.g. "v4.1.0", which you
             can extract from a filename using get_detector_version_from_filename()
-
+
     Returns:
         dict: metadata for this model, suitable for writing to a MD output file
     """
-
+
     if detector_version not in known_models:
         print('Warning: no metadata for unknown detector version {}'.format(detector_version))
         default_detector_metadata = {
             'megadetector_version':'unknown',
-            'typical_detection_threshold':0.
-            'conservative_detection_threshold':0.
+            'typical_detection_threshold':0.2,
+            'conservative_detection_threshold':0.1
         }
         return default_detector_metadata
     else:
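
For context, the hunk above fills in the fallback metadata returned for unrecognized version strings (typical threshold 0.2, conservative 0.1). A minimal usage sketch, not part of the diff, assuming only the public module path:

# Illustrative only: fallback metadata for a version string that is not in known_models
from megadetector.detection.run_detector import get_detector_metadata_from_version_string

metadata = get_detector_metadata_from_version_string('v999.0.0-hypothetical')
# Per the new defaults above, this prints a warning and returns:
# {'megadetector_version': 'unknown',
#  'typical_detection_threshold': 0.2,
#  'conservative_detection_threshold': 0.1}
print(metadata)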
@@ -228,31 +229,32 @@ def get_detector_version_from_filename(detector_filename,
                                        accept_first_match=True,
                                        verbose=False):
     r"""
-    Gets the canonical version number string of a detector from the model filename.
-
+    Gets the canonical version number string of a detector from the model filename.
+
     [detector_filename] will almost always end with one of the following:
-
+
     * megadetector_v2.pb
     * megadetector_v3.pb
-    * megadetector_v4.1 (not
+    * megadetector_v4.1 (not produced by run_detector_batch.py, only found in output files from
+      the deprecated Azure Batch API)
     * md_v4.1.0.pb
     * md_v5a.0.0.pt
     * md_v5b.0.0.pt
-
-    This function identifies the version number as "v2.0.0", "v3.0.0", "v4.1.0",
-    "v4.1.0", "v5a.0.0", and "v5b.0.0", respectively. See known_models for the list
+
+    This function identifies the version number as "v2.0.0", "v3.0.0", "v4.1.0",
+    "v4.1.0", "v5a.0.0", and "v5b.0.0", respectively. See known_models for the list
     of valid version numbers.
-
+
     Args:
         detector_filename (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
-        accept_first_match (bool, optional): if multiple candidates match the filename, choose the
+        accept_first_match (bool, optional): if multiple candidates match the filename, choose the
            first one, otherwise returns the string "multiple"
         verbose (bool, optional): enable additional debug output
-
+
     Returns:
         str: a detector version string, e.g. "v5a.0.0", or "multiple" if I'm confused
     """
-
+
     fn = os.path.basename(detector_filename).lower()
     matches = []
     for s in model_string_to_model_version.keys():

@@ -267,117 +269,118 @@ def get_detector_version_from_filename(detector_filename,
         if verbose:
             print('Warning: multiple MegaDetector versions for model file {}:'.format(detector_filename))
             for s in matches:
-                print(s)
+                print(s)
         return 'multiple'
     else:
         return model_string_to_model_version[matches[0]]
-
+
 
 def get_detector_version_from_model_file(detector_filename,verbose=False):
     """
-    Gets the canonical detection version from a model file, preferably by reading it
+    Gets the canonical detection version from a model file, preferably by reading it
     from the file itself, otherwise based on the filename.
-
+
     Args:
-        detector_filename (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
+        detector_filename (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
         verbose (bool, optional): enable additional debug output
-
+
     Returns:
         str: a canonical detector version string, e.g. "v5a.0.0", or "unknown"
     """
-
+
     # Try to extract a version string from the filename
     version_string_based_on_filename = get_detector_version_from_filename(
         detector_filename, verbose=verbose)
     if version_string_based_on_filename == 'unknown':
         version_string_based_on_filename = None
-
-    # Try to extract a version string from the file itself; currently this is only
+
+    # Try to extract a version string from the file itself; currently this is only
     # a thing for PyTorch models
-
+
     version_string_based_on_model_file = None
-
+
     if detector_filename.endswith('.pt') or detector_filename.endswith('.zip'):
-
+
         from megadetector.detection.pytorch_detector import \
             read_metadata_from_megadetector_model_file
         metadata = read_metadata_from_megadetector_model_file(detector_filename,verbose=verbose)
-
+
         if metadata is not None and isinstance(metadata,dict):
-
+
             if 'metadata_format_version' not in metadata or \
                 not isinstance(metadata['metadata_format_version'],float):
-
+
                 print(f'Warning: I found a metadata file in detector file {detector_filename}, '+\
                       'but it doesn\'t have a valid format version number')
-
+
             elif 'model_version_string' not in metadata or \
                 not isinstance(metadata['model_version_string'],str):
-
+
                 print(f'Warning: I found a metadata file in detector file {detector_filename}, '+\
                       'but it doesn\'t have a format model version string')
-
+
             else:
-
+
                 version_string_based_on_model_file = metadata['model_version_string']
-
+
                 if version_string_based_on_model_file not in known_models:
-                    print('Warning: unknown model version
-                        version_string_based_on_model_file,detector_filename))
-
+                    print('Warning: unknown model version:\n\n{}\n\n...specified in file:\n\n{}'.format(
+                        version_string_based_on_model_file,os.path.basename(detector_filename)))
+
         # ...if there's metadata in this file
-
+
     # ...if this looks like a PyTorch file
-
+
     # If we got versions strings from the filename *and* the model file...
     if (version_string_based_on_filename is not None) and \
        (version_string_based_on_model_file is not None):
 
         if version_string_based_on_filename != version_string_based_on_model_file:
-            print(
-
+            print(
+                'Warning: model version string in file:\n\n{}\n\n...is:\n\n{}\n\n...but the filename implies:\n\n{}'.format(
+                os.path.basename(detector_filename),
                 version_string_based_on_model_file,
                 version_string_based_on_filename))
-
+
         return version_string_based_on_model_file
-
+
     # If we got version string from neither the filename nor the model file...
     if (version_string_based_on_filename is None) and \
        (version_string_based_on_model_file is None):
-
+
         print('Warning: could not determine model version string for model file {}'.format(
             detector_filename))
         return None
-
+
     elif version_string_based_on_filename is not None:
-
+
         return version_string_based_on_filename
-
+
     else:
-
+
         assert version_string_based_on_model_file is not None
         return version_string_based_on_model_file
-
+
 # ...def get_detector_version_from_model_file(...)
 
-
+
 def estimate_md_images_per_second(model_file, device_name=None):
     r"""
-    Estimates how fast MegaDetector will run on a particular device, based on benchmarks.
-    Defaults to querying the current device. Returns None if no data is available for the current
-    card/model. Estimates only available for a small handful of GPUs. Uses an absurdly simple
+    Estimates how fast MegaDetector will run on a particular device, based on benchmarks.
+    Defaults to querying the current device. Returns None if no data is available for the current
+    card/model. Estimates only available for a small handful of GPUs. Uses an absurdly simple
     lookup approach, e.g. if the string "4090" appears in the device name, congratulations,
     you have an RTX 4090.
-
+
     Args:
         model_file (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
         device_name (str, optional): device name, e.g. blah-blah-4090-blah-blah
-
+
     Returns:
         float: the approximate number of images this model version can process on this
             device per second
     """
-
+
     if device_name is None:
         try:
             import torch
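
The function reworked above prefers a version string embedded in a .pt file's metadata and falls back to the filename, warning if the two disagree. A minimal sketch, not part of the diff, using a hypothetical local path:

# Illustrative only: resolve a canonical MegaDetector version string from a model file
from megadetector.detection.run_detector import get_detector_version_from_model_file

model_file = 'c:/models/md_v5a.0.0.pt'  # hypothetical local path
version = get_detector_version_from_model_file(model_file, verbose=True)
print(version)  # e.g. 'v5a.0.0'; None if neither the file metadata nor the filename helps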
@@ -385,50 +388,56 @@ def estimate_md_images_per_second(model_file, device_name=None):
         except Exception as e:
             print('Error querying device name: {}'.format(e))
             return None
-
+
     # About how fast is this model compared to MDv5?
     model_version = get_detector_version_from_model_file(model_file)
-
+
     if model_version not in known_models.keys():
         print('Could not estimate inference speed: error determining model version for model file {}'.format(
             model_file))
         return None
-
+
     model_info = known_models[model_version]
-
+
     if 'normalized_typical_inference_speed' not in model_info or \
         model_info['normalized_typical_inference_speed'] is None:
         print('No speed ratio available for model type {}'.format(model_version))
         return None
-
+
     normalized_inference_speed = model_info['normalized_typical_inference_speed']
-
+
     # About how fast would MDv5 run on this device?
     mdv5_inference_speed = None
     for device_token in device_token_to_mdv5_inference_speed.keys():
         if device_token in device_name:
             mdv5_inference_speed = device_token_to_mdv5_inference_speed[device_token]
             break
-
+
     if mdv5_inference_speed is None:
         print('No baseline speed estimate available for device {}'.format(device_name))
         return None
-
+
     return normalized_inference_speed * mdv5_inference_speed
-
-
+
+
 def get_typical_confidence_threshold_from_results(results):
     """
     Given the .json data loaded from a MD results file, returns a typical confidence
     threshold based on the detector version.
-
+
     Args:
-        results (dict): a dict of MD results, as it would be loaded from a MD results .json
-
+        results (dict or str): a dict of MD results, as it would be loaded from a MD results .json
+            file, or a .json filename
+
     Returns:
         float: a sensible default threshold for this model
     """
-
+
+    # Load results if necessary
+    if isinstance(results,str):
+        with open(results,'r') as f:
+            results = json.load(f)
+
     if 'detector_metadata' in results['info'] and \
         'typical_detection_threshold' in results['info']['detector_metadata']:
         default_threshold = results['info']['detector_metadata']['typical_detection_threshold']

@@ -443,31 +452,31 @@ def get_typical_confidence_threshold_from_results(results):
         detector_metadata = get_detector_metadata_from_version_string(detector_version)
         default_threshold = detector_metadata['typical_detection_threshold']
 
-    return default_threshold
+    return default_threshold
+
 
-
 def is_gpu_available(model_file):
     r"""
     Determines whether a GPU is available, importing PyTorch or TF depending on the extension
-    of model_file. Does not actually load model_file, just uses that to determine how to check
+    of model_file. Does not actually load model_file, just uses that to determine how to check
     for GPU availability (PT vs. TF).
-
+
     Args:
         model_file (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
-
+
     Returns:
         bool: whether a GPU is available
     """
-
+
     if model_file.endswith('.pb'):
         import tensorflow.compat.v1 as tf
         gpu_available = tf.test.is_gpu_available()
         print('TensorFlow version:', tf.__version__)
-        print('tf.test.is_gpu_available:', gpu_available)
+        print('tf.test.is_gpu_available:', gpu_available)
         return gpu_available
     if not model_file.endswith('.pt'):
         print('Warning: could not determine environment from model file name, assuming PyTorch')
-
+
     import torch
     gpu_available = torch.cuda.is_available()
     print('PyTorch reports {} available CUDA devices'.format(torch.cuda.device_count()))
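
The change to get_typical_confidence_threshold_from_results above means it now accepts either a loaded results dict or the path to a results .json file. A minimal sketch, not part of the diff, using a hypothetical results path:

# Illustrative only: both call styles return the same threshold as of this change
import json
from megadetector.detection.run_detector import get_typical_confidence_threshold_from_results

results_file = 'c:/temp/md_results.json'  # hypothetical MD output file

threshold_from_path = get_typical_confidence_threshold_from_results(results_file)

with open(results_file, 'r') as f:
    threshold_from_dict = get_typical_confidence_threshold_from_results(json.load(f))

assert threshold_from_path == threshold_from_dict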
@@ -480,16 +489,16 @@ def is_gpu_available(model_file):
     except AttributeError:
         pass
     return gpu_available
-
 
-
-
-
+
+def load_detector(model_file,
+                  force_cpu=False,
+                  force_model_download=False,
                   detector_options=None,
                   verbose=False):
     r"""
     Loads a TF or PT detector, depending on the extension of model_file.
-
+
     Args:
         model_file (str): model filename (e.g. c:/x/z/md_v5a.0.0.pt) or known model
             name (e.g. "MDV5A")

@@ -498,21 +507,21 @@ def load_detector(model_file,
         force_model_download (bool, optional): force downloading the model file if
             a named model (e.g. "MDV5A") is supplied, even if the local file already
             exists
-        detector_options (dict, optional): key/value pairs that are interpreted differently
+        detector_options (dict, optional): key/value pairs that are interpreted differently
             by different detectors
         verbose (bool, optional): enable additional debug output
-
+
     Returns:
         object: loaded detector object
     """
-
+
     # Possibly automatically download the model
-    model_file = try_download_known_detector(model_file,
+    model_file = try_download_known_detector(model_file,
                                              force_download=force_model_download)
-
+
     if verbose:
         print('GPU available: {}'.format(is_gpu_available(model_file)))
-
+
     start_time = time.time()
 
     if model_file.endswith('.pb'):

@@ -524,9 +533,9 @@ def load_detector(model_file,
         detector = TFDetector(model_file, detector_options)
 
     elif model_file.endswith('.pt'):
-
+
         from megadetector.detection.pytorch_detector import PTDetector
-
+
         # Prepare options specific to the PTDetector class
         if detector_options is None:
             detector_options = {}

@@ -538,16 +547,16 @@ def load_detector(model_file,
         detector_options['force_cpu'] = force_cpu
         detector_options['use_model_native_classes'] = USE_MODEL_NATIVE_CLASSES
         detector = PTDetector(model_file, detector_options, verbose=verbose)
-
+
     else:
-
+
         raise ValueError('Unrecognized model format: {}'.format(model_file))
-
+
     elapsed = time.time() - start_time
-
+
     if verbose:
         print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))
-
+
     return detector
 
 # ...def load_detector(...)
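
load_detector, shown above, accepts either a local model path or a known model name such as "MDV5A", downloading the file if needed. A minimal sketch, not part of the diff, of loading a detector and scoring a single image (hypothetical image path):

# Illustrative only: load a detector and run it on one image
from PIL import Image
from megadetector.detection import run_detector

detector = run_detector.load_detector('MDV5A', verbose=True)

image = Image.open('c:/temp/demo_images/example.jpg')  # hypothetical image
result = detector.generate_detections_one_image(
    image,
    'example.jpg',
    detection_threshold=run_detector.DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD)
print(result['detections'])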
@@ -555,21 +564,22 @@ def load_detector(model_file,
 
 #%% Main function
 
-def load_and_run_detector(model_file,
+def load_and_run_detector(model_file,
                           image_file_names,
                           output_dir,
                           render_confidence_threshold=DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
-                          crop_images=False,
-                          box_thickness=DEFAULT_BOX_THICKNESS,
+                          crop_images=False,
+                          box_thickness=DEFAULT_BOX_THICKNESS,
                           box_expansion=DEFAULT_BOX_EXPANSION,
                           image_size=None,
                           label_font_size=DEFAULT_LABEL_FONT_SIZE,
                           augment=False,
                           force_model_download=False,
-                          detector_options=None
+                          detector_options=None,
+                          verbose=False):
     r"""
     Loads and runs a detector on target images, and visualizes the results.
-
+
     Args:
         model_file (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt, or a known model
             string, e.g. "MDV5A"

@@ -585,23 +595,27 @@ def load_and_run_detector(model_file,
             if (a) you're using a model other than MegaDetector or (b) you know what you're
             doing
         label_font_size (float, optional): font size to use for displaying class names
-            and confidence values in the rendered images
+            and confidence values in the rendered images
         augment (bool, optional): enable (implementation-specific) image augmentation
         force_model_download (bool, optional): force downloading the model file if
             a named model (e.g. "MDV5A") is supplied, even if the local file already
             exists
-        detector_options (dict, optional): key/value pairs that are interpreted differently
+        detector_options (dict, optional): key/value pairs that are interpreted differently
             by different detectors
     """
-
+
     if len(image_file_names) == 0:
         print('Warning: no files available')
         return
 
     # Possibly automatically download the model
-    model_file = try_download_known_detector(model_file,
+    model_file = try_download_known_detector(model_file,
+                                             force_download=force_model_download,
+                                             verbose=verbose)
 
-    detector = load_detector(model_file,
+    detector = load_detector(model_file,
+                             detector_options=detector_options,
+                             verbose=verbose)
 
     detection_results = []
     time_load = []

@@ -642,7 +656,7 @@ def load_and_run_detector(model_file,
 
         Returns: output file path
         """
-
+
         fn = os.path.basename(fn).lower()
         name, ext = os.path.splitext(fn)
         if crop_index >= 0:

@@ -658,7 +672,7 @@ def load_and_run_detector(model_file,
         return fn
 
     # ...def input_file_to_detection_file()
-
+
     for im_file in tqdm(image_file_names):
 
         try:

@@ -682,7 +696,7 @@ def load_and_run_detector(model_file,
             start_time = time.time()
 
             result = detector.generate_detections_one_image(
-                image,
+                image,
                 im_file,
                 detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
                 image_size=image_size,

@@ -745,21 +759,21 @@ def load_and_run_detector(model_file,
 def _download_model(model_name,force_download=False):
     """
     Downloads one of the known models to local temp space if it hasn't already been downloaded.
-
+
     Args:
         model_name (str): a known model string, e.g. "MDV5A". Returns None if this string is not
             a known model name.
-        force_download (bool, optional): whether to download the model even if the local target
+        force_download (bool, optional): whether to download the model even if the local target
             file already exists
     """
-
-    model_tempdir = os.path.join(tempfile.gettempdir(), 'megadetector_models')
+
+    model_tempdir = os.path.join(tempfile.gettempdir(), 'megadetector_models')
     os.makedirs(model_tempdir,exist_ok=True)
-
+
     # This is a lazy fix to an issue... if multiple users run this script, the
     # "megadetector_models" folder is owned by the first person who creates it, and others
     # can't write to it. I could create uniquely-named folders, but I philosophically prefer
-    # to put all the individual UUID-named folders within a larger folder, so as to be a
+    # to put all the individual UUID-named folders within a larger folder, so as to be a
     # good tempdir citizen. So, the lazy fix is to make this world-writable.
     try:
         os.chmod(model_tempdir,0o777)

@@ -770,7 +784,7 @@ def _download_model(model_name,force_download=False):
         return None
     url = known_models[model_name.lower()]['url']
     destination_filename = os.path.join(model_tempdir,url.split('/')[-1])
-    local_file = download_url(url, destination_filename=destination_filename, progress_updater=None,
+    local_file = download_url(url, destination_filename=destination_filename, progress_updater=None,
                               force_download=force_download, verbose=True)
     print('Model {} available at {}'.format(model_name,local_file))
     return local_file

@@ -781,33 +795,33 @@ def try_download_known_detector(detector_file,force_download=False,verbose=False
     Checks whether detector_file is really the name of a known model, in which case we will
     either read the actual filename from the corresponding environment variable or download
     (if necessary) to local temp space. Otherwise just returns the input string.
-
+
     Args:
         detector_file (str): a known model string (e.g. "MDV5A"), or any other string (in which
             case this function is a no-op)
-        force_download (bool, optional): whether to download the model even if the local target
+        force_download (bool, optional): whether to download the model even if the local target
            file already exists
        verbose (bool, optional): enable additional debug output
-
+
     Returns:
         str: the local filename to which the model was downloaded, or the same string that
             was passed in, if it's not recognized as a well-known model name
     """
-
+
     model_string = detector_file.lower()
-
-    # If this is a short model string (e.g. "MDV5A"), convert to a canonical version
+
+    # If this is a short model string (e.g. "MDV5A"), convert to a canonical version
     # string (e.g. "v5a.0.0")
     if model_string in model_string_to_model_version:
-
+
         if verbose:
             print('Converting short string {} to canonical version string {}'.format(
                 model_string,
                 model_string_to_model_version[model_string]))
         model_string = model_string_to_model_version[model_string]
-
+
     if model_string in known_models:
-
+
         if detector_file in os.environ:
             fn = os.environ[detector_file]
             print('Reading MD location from environment variable {}: {}'.format(

@@ -815,25 +829,25 @@ def try_download_known_detector(detector_file,force_download=False,verbose=False
             detector_file = fn
         else:
             detector_file = _download_model(model_string,force_download=force_download)
-
+
     return detector_file
-
-
-
+
+
+
 
 #%% Command-line driver
 
-def main():
+def main(): # noqa
 
     parser = argparse.ArgumentParser(
         description='Module to run an animal detection model on images')
-
+
     parser.add_argument(
         'detector_file',
         help='Path detector model file (.pb or .pt). Can also be MDV4, MDV5A, or MDV5B to request automatic download.')
-
+
     # Must specify either an image file or a directory
-    group = parser.add_mutually_exclusive_group(required=True)
+    group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument(
         '--image_file',
         type=str,
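
As the hunks above show, try_download_known_detector resolves a named model string either from an environment variable of the same name or by downloading into <tempdir>/megadetector_models. A minimal sketch, not part of the diff:

# Illustrative only: resolve the named model "MDV5A" to a local file path.
# If an MDV5A environment variable is set, that path is used; otherwise the
# model is downloaded to <tempdir>/megadetector_models.
from megadetector.detection.run_detector import try_download_known_detector

local_model_file = try_download_known_detector('MDV5A',
                                               force_download=False,
                                               verbose=True)
print(local_model_file)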
@@ -844,98 +858,103 @@ def main():
         type=str,
         default=None,
         help='Directory to search for images, with optional recursion by adding --recursive')
-
+
     parser.add_argument(
         '--recursive',
         action='store_true',
         help='Recurse into directories, only meaningful if using --image_dir')
-
+
     parser.add_argument(
         '--output_dir',
         type=str,
         default=None,
         help='Directory for output images (defaults to same as input)')
-
+
     parser.add_argument(
         '--image_size',
         type=int,
         default=None,
         help=('Force image resizing to a (square) integer size (not recommended to change this)'))
-
+
     parser.add_argument(
         '--threshold',
         type=float,
         default=DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
-        help=('Confidence threshold between 0 and 1.0; only render' +
+        help=('Confidence threshold between 0 and 1.0; only render' +
              ' boxes above this confidence (defaults to {})'.format(
              DEFAULT_RENDERING_CONFIDENCE_THRESHOLD)))
-
+
     parser.add_argument(
         '--crop',
         default=False,
         action='store_true',
         help=('If set, produces separate output images for each crop, '
              'rather than adding bounding boxes to the original image'))
-
+
     parser.add_argument(
         '--augment',
         default=False,
         action='store_true',
         help=('Enable image augmentation'))
-
+
     parser.add_argument(
         '--box_thickness',
         type=int,
         default=DEFAULT_BOX_THICKNESS,
         help=('Line width (in pixels) for box rendering (defaults to {})'.format(
             DEFAULT_BOX_THICKNESS)))
-
+
     parser.add_argument(
         '--box_expansion',
         type=int,
         default=DEFAULT_BOX_EXPANSION,
         help=('Number of pixels to expand boxes by (defaults to {})'.format(
             DEFAULT_BOX_EXPANSION)))
-
+
     parser.add_argument(
         '--label_font_size',
         type=int,
         default=DEFAULT_LABEL_FONT_SIZE,
         help=('Label font size (defaults to {})'.format(
             DEFAULT_LABEL_FONT_SIZE)))
-
+
     parser.add_argument(
         '--process_likely_output_images',
         action='store_true',
         help=('By default, we skip images that end in {}, because they probably came from this script. '\
             .format(DETECTION_FILENAME_INSERT) + \
             'This option disables that behavior.'))
-
+
     parser.add_argument(
         '--force_model_download',
         action='store_true',
         help=('If a named model (e.g. "MDV5A") is supplied, force a download of that model even if the ' +\
             'local file already exists.'))
 
+    parser.add_argument(
+        '--verbose',
+        action='store_true',
+        help=('Enable additional debug output'))
+
     parser.add_argument(
         '--detector_options',
         nargs='*',
         metavar='KEY=VALUE',
         default='',
         help='Detector-specific options, as a space-separated list of key-value pairs')
-
+
     if len(sys.argv[1:]) == 0:
         parser.print_help()
         parser.exit()
 
     args = parser.parse_args()
     detector_options = parse_kvp_list(args.detector_options)
-
-    # If the specified detector file is really the name of a known model, find
+
+    # If the specified detector file is really the name of a known model, find
     # (and possibly download) that model
     args.detector_file = try_download_known_detector(args.detector_file,
                                                      force_download=args.force_model_download)
-
+
     assert os.path.exists(args.detector_file), 'detector file {} does not exist'.format(
         args.detector_file)
     assert 0.0 < args.threshold <= 1.0, 'Confidence threshold needs to be between 0 and 1'
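
The hunk above adds a --verbose flag that is passed through to load_and_run_detector (see the hunks below). A minimal sketch of the rough programmatic equivalent, not part of the diff, with hypothetical paths:

# Illustrative only: roughly what the CLI does for
#   python -m megadetector.detection.run_detector MDV5A --image_dir c:/temp/demo_images --verbose
from megadetector.utils import path_utils
from megadetector.detection.run_detector import load_and_run_detector

image_file_names = path_utils.find_images('c:/temp/demo_images')  # hypothetical folder
load_and_run_detector(model_file='MDV5A',
                      image_file_names=image_file_names,
                      output_dir='c:/temp/demo_output',
                      verbose=True)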
@@ -954,7 +973,7 @@ def main():
         else:
             image_file_names_valid.append(fn)
     image_file_names = image_file_names_valid
-
+
     print('Running detector on {} images...'.format(len(image_file_names)))
 
     if args.output_dir:

@@ -965,20 +984,21 @@ def main():
         else:
             # but for a single image, args.image_dir is also None
             args.output_dir = os.path.dirname(args.image_file)
-
+
     load_and_run_detector(model_file=args.detector_file,
                           image_file_names=image_file_names,
                           output_dir=args.output_dir,
                           render_confidence_threshold=args.threshold,
                           box_thickness=args.box_thickness,
-                          box_expansion=args.box_expansion,
+                          box_expansion=args.box_expansion,
                           crop_images=args.crop,
                           image_size=args.image_size,
                           label_font_size=args.label_font_size,
                           augment=args.augment,
                           # If --force_model_download was specified, we already handled it
                           force_model_download=False,
-                          detector_options=detector_options
+                          detector_options=detector_options,
+                          verbose=args.verbose)
 
 if __name__ == '__main__':
     main()

@@ -991,19 +1011,19 @@ if False:
     pass
 
     #%% Test model download
-
+
     r"""
     cd i:\models\all_models_in_the_wild
     i:
     python -m http.server 8181
     """
-
+
     model_name = 'redwood'
     try_download_known_detector(model_name,force_download=True,verbose=True)
-
+
 
     #%% Load and run detector
-
+
     model_file = r'c:\temp\models\md_v4.1.0.pb'
     image_file_names = path_utils.find_images(r'c:\temp\demo_images\ssverymini')
     output_dir = r'c:\temp\demo_images\ssverymini'
|