megadetector 5.0.28-py3-none-any.whl → 5.0.29-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of megadetector might be problematic.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +231 -224
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +304 -262
- megadetector/detection/run_detector.py +177 -164
- megadetector/detection/run_detector_batch.py +364 -363
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +256 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +290 -282
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +415 -415
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +219 -146
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +23 -20
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +313 -298
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -66
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1018 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1457 -398
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +61 -61
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +401 -397
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +79 -73
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/METADATA +135 -132
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.28.dist-info/RECORD +0 -209
megadetector/detection/run_detector.py

@@ -2,15 +2,15 @@
 
 run_detector.py
 
-Module to run an animal detection model on images. The main function in this script also renders
+Module to run an animal detection model on images. The main function in this script also renders
 the predicted bounding boxes on images and saves the resulting images (with bounding boxes).
 
 **This script is not a good way to process lots of images**. It does not produce a useful
 output format, and it does not facilitate checkpointing the results so if it crashes you
-would have to start from scratch. **If you want to run a detector on lots of images, you should
+would have to start from scratch. **If you want to run a detector on lots of images, you should
 check out run_detector_batch.py**.
 
-That said, this script (run_detector.py) is a good way to test our detector on a handful of images
+That said, this script (run_detector.py) is a good way to test our detector on a handful of images
 and get super-satisfying, graphical results.
 
 If you would like to *not* use the GPU on the machine, set the environment
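For orientation, here is a minimal sketch of the quick, visual test the docstring above describes, using load_and_run_detector(), whose signature appears later in this diff. The image paths are hypothetical; "MDV5A" is one of the known model names that the script can download automatically.

```python
from megadetector.detection.run_detector import load_and_run_detector

# Render bounding boxes on a handful of images; for large jobs, use run_detector_batch.py instead
load_and_run_detector(model_file='MDV5A',                        # known model name, downloaded on demand
                      image_file_names=['test_images/img1.jpg',  # hypothetical test images
                                        'test_images/img2.jpg'],
                      output_dir='test_images_annotated',
                      render_confidence_threshold=0.2,
                      verbose=True)                              # parameter added in 5.0.29
```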
@@ -148,7 +148,7 @@ known_models = {
         'model_type':'yolov5',
         'normalized_typical_inference_speed':1.0
     },
-
+
     # Fake values for testing
     'v1000.0.0-redwood':
     {
@@ -180,7 +180,7 @@ DEFAULT_BOX_EXPANSION = 0
 DEFAULT_LABEL_FONT_SIZE = 16
 DETECTION_FILENAME_INSERT = '_detections'
 
-# Approximate inference speeds (in images per second) for MDv5 based on
+# Approximate inference speeds (in images per second) for MDv5 based on
 # benchmarks, only used for reporting very coarse expectations about inference time.
 device_token_to_mdv5_inference_speed = {
     '4090':17.6,
@@ -192,9 +192,9 @@ device_token_to_mdv5_inference_speed = {
     # is around 3.5x faster than MDv4.
     'V100':2.79*3.5,
     '2080':2.3*3.5,
-    '2060':1.6*3.5
+    '2060':1.6*3.5
 }
-
+
 
 #%% Utility functions
 
@@ -202,15 +202,15 @@ def get_detector_metadata_from_version_string(detector_version):
     """
     Given a MegaDetector version string (e.g. "v4.1.0"), returns the metadata for
     the model. Used for writing standard defaults to batch output files.
-
+
     Args:
         detector_version (str): a detection version string, e.g. "v4.1.0", which you
            can extract from a filename using get_detector_version_from_filename()
-
+
     Returns:
         dict: metadata for this model, suitable for writing to a MD output file
     """
-
+
     if detector_version not in known_models:
         print('Warning: no metadata for unknown detector version {}'.format(detector_version))
         default_detector_metadata = {
@@ -229,31 +229,32 @@ def get_detector_version_from_filename(detector_filename,
                                        accept_first_match=True,
                                        verbose=False):
     r"""
-    Gets the canonical version number string of a detector from the model filename.
-
+    Gets the canonical version number string of a detector from the model filename.
+
     [detector_filename] will almost always end with one of the following:
-
+
     * megadetector_v2.pb
     * megadetector_v3.pb
-    * megadetector_v4.1 (not
+    * megadetector_v4.1 (not produced by run_detector_batch.py, only found in output files from
+      the deprecated Azure Batch API)
     * md_v4.1.0.pb
     * md_v5a.0.0.pt
     * md_v5b.0.0.pt
-
-    This function identifies the version number as "v2.0.0", "v3.0.0", "v4.1.0",
-    "v4.1.0", "v5a.0.0", and "v5b.0.0", respectively. See known_models for the list
+
+    This function identifies the version number as "v2.0.0", "v3.0.0", "v4.1.0",
+    "v4.1.0", "v5a.0.0", and "v5b.0.0", respectively. See known_models for the list
     of valid version numbers.
-
+
     Args:
         detector_filename (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
-        accept_first_match (bool, optional): if multiple candidates match the filename, choose the
+        accept_first_match (bool, optional): if multiple candidates match the filename, choose the
            first one, otherwise returns the string "multiple"
        verbose (bool, optional): enable additional debug output
-
+
     Returns:
         str: a detector version string, e.g. "v5a.0.0", or "multiple" if I'm confused
     """
-
+
     fn = os.path.basename(detector_filename).lower()
     matches = []
     for s in model_string_to_model_version.keys():
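As a usage sketch of the filename-based lookup documented above (the example path and expected return value are taken directly from the docstring):

```python
from megadetector.detection.run_detector import get_detector_version_from_filename

# The canonical version string is inferred from the model filename
version = get_detector_version_from_filename('c:/x/z/md_v5a.0.0.pt')
print(version)  # 'v5a.0.0' per the docstring; 'multiple' if more than one candidate matches
```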
@@ -268,117 +269,118 @@ def get_detector_version_from_filename(detector_filename,
         if verbose:
             print('Warning: multiple MegaDetector versions for model file {}:'.format(detector_filename))
             for s in matches:
-                print(s)
+                print(s)
         return 'multiple'
     else:
         return model_string_to_model_version[matches[0]]
-
+
 
 def get_detector_version_from_model_file(detector_filename,verbose=False):
     """
-    Gets the canonical detection version from a model file, preferably by reading it
+    Gets the canonical detection version from a model file, preferably by reading it
     from the file itself, otherwise based on the filename.
-
+
     Args:
-        detector_filename (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
+        detector_filename (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
         verbose (bool, optional): enable additional debug output
-
+
     Returns:
         str: a canonical detector version string, e.g. "v5a.0.0", or "unknown"
     """
-
+
     # Try to extract a version string from the filename
     version_string_based_on_filename = get_detector_version_from_filename(
         detector_filename, verbose=verbose)
     if version_string_based_on_filename == 'unknown':
         version_string_based_on_filename = None
-
-    # Try to extract a version string from the file itself; currently this is only
+
+    # Try to extract a version string from the file itself; currently this is only
     # a thing for PyTorch models
-
+
     version_string_based_on_model_file = None
-
+
     if detector_filename.endswith('.pt') or detector_filename.endswith('.zip'):
-
+
         from megadetector.detection.pytorch_detector import \
             read_metadata_from_megadetector_model_file
         metadata = read_metadata_from_megadetector_model_file(detector_filename,verbose=verbose)
-
+
         if metadata is not None and isinstance(metadata,dict):
-
+
             if 'metadata_format_version' not in metadata or \
                 not isinstance(metadata['metadata_format_version'],float):
-
+
                 print(f'Warning: I found a metadata file in detector file {detector_filename}, '+\
                     'but it doesn\'t have a valid format version number')
-
+
             elif 'model_version_string' not in metadata or \
                 not isinstance(metadata['model_version_string'],str):
-
+
                 print(f'Warning: I found a metadata file in detector file {detector_filename}, '+\
                     'but it doesn\'t have a format model version string')
-
+
             else:
-
+
                 version_string_based_on_model_file = metadata['model_version_string']
-
+
                 if version_string_based_on_model_file not in known_models:
-                    print('Warning: unknown model version
-                        version_string_based_on_model_file,detector_filename))
-
+                    print('Warning: unknown model version:\n\n{}\n\n...specified in file:\n\n{}'.format(
+                        version_string_based_on_model_file,os.path.basename(detector_filename)))
+
         # ...if there's metadata in this file
-
+
     # ...if this looks like a PyTorch file
-
+
     # If we got versions strings from the filename *and* the model file...
     if (version_string_based_on_filename is not None) and \
         (version_string_based_on_model_file is not None):
 
         if version_string_based_on_filename != version_string_based_on_model_file:
-            print(
-
+            print(
+                'Warning: model version string in file:\n\n{}\n\n...is:\n\n{}\n\n...but the filename implies:\n\n{}'.format(
+                os.path.basename(detector_filename),
                 version_string_based_on_model_file,
                 version_string_based_on_filename))
-
+
         return version_string_based_on_model_file
-
+
     # If we got version string from neither the filename nor the model file...
     if (version_string_based_on_filename is None) and \
         (version_string_based_on_model_file is None):
-
+
         print('Warning: could not determine model version string for model file {}'.format(
             detector_filename))
         return None
-
+
     elif version_string_based_on_filename is not None:
-
+
         return version_string_based_on_filename
-
+
     else:
-
+
         assert version_string_based_on_model_file is not None
         return version_string_based_on_model_file
-
+
 # ...def get_detector_version_from_model_file(...)
 
-
+
 def estimate_md_images_per_second(model_file, device_name=None):
     r"""
-    Estimates how fast MegaDetector will run on a particular device, based on benchmarks.
-    Defaults to querying the current device. Returns None if no data is available for the current
-    card/model. Estimates only available for a small handful of GPUs. Uses an absurdly simple
+    Estimates how fast MegaDetector will run on a particular device, based on benchmarks.
+    Defaults to querying the current device. Returns None if no data is available for the current
+    card/model. Estimates only available for a small handful of GPUs. Uses an absurdly simple
     lookup approach, e.g. if the string "4090" appears in the device name, congratulations,
     you have an RTX 4090.
-
+
     Args:
         model_file (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
         device_name (str, optional): device name, e.g. blah-blah-4090-blah-blah
-
+
     Returns:
         float: the approximate number of images this model version can process on this
             device per second
     """
-
+
     if device_name is None:
         try:
             import torch
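A rough illustration of the "absurdly simple" substring lookup described in the estimate_md_images_per_second docstring above, using the device_token_to_mdv5_inference_speed table defined earlier in this file (the device name below is hypothetical):

```python
# Hypothetical device name, e.g. as returned by torch.cuda.get_device_name()
device_name = 'NVIDIA GeForce RTX 4090'

# Values copied from device_token_to_mdv5_inference_speed earlier in this diff
device_token_to_mdv5_inference_speed = {'4090': 17.6, 'V100': 2.79*3.5, '2080': 2.3*3.5, '2060': 1.6*3.5}

# "If the string '4090' appears in the device name, congratulations, you have an RTX 4090"
mdv5_speed = next((speed for token, speed in device_token_to_mdv5_inference_speed.items()
                   if token in device_name), None)
print(mdv5_speed)  # 17.6 images per second for this device name; None for unknown devices
```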
@@ -386,51 +388,51 @@ def estimate_md_images_per_second(model_file, device_name=None):
         except Exception as e:
             print('Error querying device name: {}'.format(e))
             return None
-
+
     # About how fast is this model compared to MDv5?
     model_version = get_detector_version_from_model_file(model_file)
-
+
     if model_version not in known_models.keys():
         print('Could not estimate inference speed: error determining model version for model file {}'.format(
             model_file))
         return None
-
+
     model_info = known_models[model_version]
-
+
     if 'normalized_typical_inference_speed' not in model_info or \
         model_info['normalized_typical_inference_speed'] is None:
         print('No speed ratio available for model type {}'.format(model_version))
         return None
-
+
     normalized_inference_speed = model_info['normalized_typical_inference_speed']
-
+
     # About how fast would MDv5 run on this device?
     mdv5_inference_speed = None
     for device_token in device_token_to_mdv5_inference_speed.keys():
         if device_token in device_name:
             mdv5_inference_speed = device_token_to_mdv5_inference_speed[device_token]
             break
-
+
     if mdv5_inference_speed is None:
         print('No baseline speed estimate available for device {}'.format(device_name))
         return None
-
+
     return normalized_inference_speed * mdv5_inference_speed
-
-
+
+
 def get_typical_confidence_threshold_from_results(results):
     """
     Given the .json data loaded from a MD results file, returns a typical confidence
     threshold based on the detector version.
-
+
     Args:
-        results (dict or str): a dict of MD results, as it would be loaded from a MD results .json
+        results (dict or str): a dict of MD results, as it would be loaded from a MD results .json
            file, or a .json filename
-
+
     Returns:
         float: a sensible default threshold for this model
     """
-
+
     # Load results if necessary
     if isinstance(results,str):
         with open(results,'r') as f:
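A small sketch of how the helper above might be called; per its docstring it accepts either a loaded results dict or a .json filename (the filename below is hypothetical):

```python
from megadetector.detection.run_detector import get_typical_confidence_threshold_from_results

threshold = get_typical_confidence_threshold_from_results('md_results.json')  # hypothetical file
print('Typical confidence threshold for this detector version: {}'.format(threshold))
```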
@@ -450,31 +452,31 @@
     detector_metadata = get_detector_metadata_from_version_string(detector_version)
     default_threshold = detector_metadata['typical_detection_threshold']
 
-    return default_threshold
+    return default_threshold
+
 
-
 def is_gpu_available(model_file):
     r"""
     Determines whether a GPU is available, importing PyTorch or TF depending on the extension
-    of model_file. Does not actually load model_file, just uses that to determine how to check
+    of model_file. Does not actually load model_file, just uses that to determine how to check
     for GPU availability (PT vs. TF).
-
+
     Args:
         model_file (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt
-
+
     Returns:
         bool: whether a GPU is available
     """
-
+
     if model_file.endswith('.pb'):
         import tensorflow.compat.v1 as tf
         gpu_available = tf.test.is_gpu_available()
         print('TensorFlow version:', tf.__version__)
-        print('tf.test.is_gpu_available:', gpu_available)
+        print('tf.test.is_gpu_available:', gpu_available)
         return gpu_available
     if not model_file.endswith('.pt'):
         print('Warning: could not determine environment from model file name, assuming PyTorch')
-
+
     import torch
     gpu_available = torch.cuda.is_available()
     print('PyTorch reports {} available CUDA devices'.format(torch.cuda.device_count()))
@@ -487,16 +489,16 @@ def is_gpu_available(model_file):
         except AttributeError:
             pass
     return gpu_available
-
 
-
-
-
+
+def load_detector(model_file,
+                  force_cpu=False,
+                  force_model_download=False,
                   detector_options=None,
                   verbose=False):
     r"""
     Loads a TF or PT detector, depending on the extension of model_file.
-
+
     Args:
         model_file (str): model filename (e.g. c:/x/z/md_v5a.0.0.pt) or known model
             name (e.g. "MDV5A")
@@ -505,21 +507,21 @@ def load_detector,
         force_model_download (bool, optional): force downloading the model file if
             a named model (e.g. "MDV5A") is supplied, even if the local file already
             exists
-        detector_options (dict, optional): key/value pairs that are interpreted differently
+        detector_options (dict, optional): key/value pairs that are interpreted differently
            by different detectors
        verbose (bool, optional): enable additional debug output
-
+
     Returns:
         object: loaded detector object
     """
-
+
     # Possibly automatically download the model
-    model_file = try_download_known_detector(model_file,
+    model_file = try_download_known_detector(model_file,
                                              force_download=force_model_download)
-
+
     if verbose:
         print('GPU available: {}'.format(is_gpu_available(model_file)))
-
+
     start_time = time.time()
 
     if model_file.endswith('.pb'):
@@ -531,9 +533,9 @@
         detector = TFDetector(model_file, detector_options)
 
     elif model_file.endswith('.pt'):
-
+
         from megadetector.detection.pytorch_detector import PTDetector
-
+
         # Prepare options specific to the PTDetector class
         if detector_options is None:
             detector_options = {}
@@ -545,16 +547,16 @@
         detector_options['force_cpu'] = force_cpu
         detector_options['use_model_native_classes'] = USE_MODEL_NATIVE_CLASSES
         detector = PTDetector(model_file, detector_options, verbose=verbose)
-
+
     else:
-
+
         raise ValueError('Unrecognized model format: {}'.format(model_file))
-
+
     elapsed = time.time() - start_time
-
+
     if verbose:
         print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))
-
+
     return detector
 
 # ...def load_detector(...)
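A minimal sketch of loading a detector with the helpers above; "MDV5A" is a known model name that will be downloaded on demand, and the local model path is hypothetical:

```python
from megadetector.detection.run_detector import is_gpu_available, load_detector

# Checks PyTorch or TF depending on the model file extension, without loading the model
print('GPU available: {}'.format(is_gpu_available('md_v5a.0.0.pt')))  # hypothetical local path

# verbose=True prints GPU availability and model load time, per the code above
detector = load_detector('MDV5A', force_cpu=False, verbose=True)
```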
@@ -562,21 +564,22 @@ def load_detector(model_file,
 
 #%% Main function
 
-def load_and_run_detector(model_file,
+def load_and_run_detector(model_file,
                           image_file_names,
                           output_dir,
                           render_confidence_threshold=DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
-                          crop_images=False,
-                          box_thickness=DEFAULT_BOX_THICKNESS,
+                          crop_images=False,
+                          box_thickness=DEFAULT_BOX_THICKNESS,
                           box_expansion=DEFAULT_BOX_EXPANSION,
                           image_size=None,
                           label_font_size=DEFAULT_LABEL_FONT_SIZE,
                           augment=False,
                           force_model_download=False,
-                          detector_options=None):
+                          detector_options=None,
+                          verbose=False):
     r"""
     Loads and runs a detector on target images, and visualizes the results.
-
+
     Args:
         model_file (str): model filename, e.g. c:/x/z/md_v5a.0.0.pt, or a known model
             string, e.g. "MDV5A"
@@ -592,23 +595,27 @@ def load_and_run_detector(model_file,
            if (a) you're using a model other than MegaDetector or (b) you know what you're
            doing
        label_font_size (float, optional): font size to use for displaying class names
-            and confidence values in the rendered images
+            and confidence values in the rendered images
        augment (bool, optional): enable (implementation-specific) image augmentation
        force_model_download (bool, optional): force downloading the model file if
            a named model (e.g. "MDV5A") is supplied, even if the local file already
            exists
-        detector_options (dict, optional): key/value pairs that are interpreted differently
+        detector_options (dict, optional): key/value pairs that are interpreted differently
            by different detectors
     """
-
+
     if len(image_file_names) == 0:
         print('Warning: no files available')
         return
 
     # Possibly automatically download the model
-    model_file = try_download_known_detector(model_file,
+    model_file = try_download_known_detector(model_file,
+                                             force_download=force_model_download,
+                                             verbose=verbose)
 
-    detector = load_detector(model_file,
+    detector = load_detector(model_file,
+                             detector_options=detector_options,
+                             verbose=verbose)
 
     detection_results = []
     time_load = []
@@ -649,7 +656,7 @@ def load_and_run_detector(model_file,
 
         Returns: output file path
         """
-
+
         fn = os.path.basename(fn).lower()
         name, ext = os.path.splitext(fn)
         if crop_index >= 0:
@@ -665,7 +672,7 @@ def load_and_run_detector(model_file,
         return fn
 
     # ...def input_file_to_detection_file()
-
+
     for im_file in tqdm(image_file_names):
 
         try:
@@ -689,7 +696,7 @@ def load_and_run_detector(model_file,
             start_time = time.time()
 
             result = detector.generate_detections_one_image(
-                image,
+                image,
                 im_file,
                 detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
                 image_size=image_size,
@@ -752,21 +759,21 @@ def load_and_run_detector(model_file,
 def _download_model(model_name,force_download=False):
     """
     Downloads one of the known models to local temp space if it hasn't already been downloaded.
-
+
     Args:
         model_name (str): a known model string, e.g. "MDV5A". Returns None if this string is not
            a known model name.
-        force_download (bool, optional): whether to download the model even if the local target
+        force_download (bool, optional): whether to download the model even if the local target
            file already exists
     """
-
-    model_tempdir = os.path.join(tempfile.gettempdir(), 'megadetector_models')
+
+    model_tempdir = os.path.join(tempfile.gettempdir(), 'megadetector_models')
     os.makedirs(model_tempdir,exist_ok=True)
-
+
     # This is a lazy fix to an issue... if multiple users run this script, the
     # "megadetector_models" folder is owned by the first person who creates it, and others
     # can't write to it. I could create uniquely-named folders, but I philosophically prefer
-    # to put all the individual UUID-named folders within a larger folder, so as to be a
+    # to put all the individual UUID-named folders within a larger folder, so as to be a
     # good tempdir citizen. So, the lazy fix is to make this world-writable.
     try:
         os.chmod(model_tempdir,0o777)
@@ -777,7 +784,7 @@ def _download_model(model_name,force_download=False):
         return None
     url = known_models[model_name.lower()]['url']
     destination_filename = os.path.join(model_tempdir,url.split('/')[-1])
-    local_file = download_url(url, destination_filename=destination_filename, progress_updater=None,
+    local_file = download_url(url, destination_filename=destination_filename, progress_updater=None,
                               force_download=force_download, verbose=True)
     print('Model {} available at {}'.format(model_name,local_file))
     return local_file
@@ -788,33 +795,33 @@ def try_download_known_detector(detector_file,force_download=False,verbose=False
     Checks whether detector_file is really the name of a known model, in which case we will
     either read the actual filename from the corresponding environment variable or download
     (if necessary) to local temp space. Otherwise just returns the input string.
-
+
     Args:
         detector_file (str): a known model string (e.g. "MDV5A"), or any other string (in which
            case this function is a no-op)
-        force_download (bool, optional): whether to download the model even if the local target
+        force_download (bool, optional): whether to download the model even if the local target
            file already exists
        verbose (bool, optional): enable additional debug output
-
+
     Returns:
         str: the local filename to which the model was downloaded, or the same string that
            was passed in, if it's not recognized as a well-known model name
     """
-
+
     model_string = detector_file.lower()
-
-    # If this is a short model string (e.g. "MDV5A"), convert to a canonical version
+
+    # If this is a short model string (e.g. "MDV5A"), convert to a canonical version
     # string (e.g. "v5a.0.0")
     if model_string in model_string_to_model_version:
-
+
         if verbose:
             print('Converting short string {} to canonical version string {}'.format(
                 model_string,
                 model_string_to_model_version[model_string]))
         model_string = model_string_to_model_version[model_string]
-
+
     if model_string in known_models:
-
+
         if detector_file in os.environ:
             fn = os.environ[detector_file]
             print('Reading MD location from environment variable {}: {}'.format(
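An illustration of the environment-variable behavior described above: if a variable with the same name as the model string is set, its value is used as the model path instead of downloading (the path below is made up):

```python
import os
from megadetector.detection.run_detector import try_download_known_detector

os.environ['MDV5A'] = '/models/md_v5a.0.0.pt'      # hypothetical local copy of the model
model_file = try_download_known_detector('MDV5A')  # returns '/models/md_v5a.0.0.pt', no download
print(model_file)
```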
@@ -822,25 +829,25 @@ def try_download_known_detector(detector_file,force_download=False,verbose=False
             detector_file = fn
         else:
             detector_file = _download_model(model_string,force_download=force_download)
-
+
     return detector_file
-
-
-
+
+
+
 
 #%% Command-line driver
 
-def main():
+def main(): # noqa
 
     parser = argparse.ArgumentParser(
         description='Module to run an animal detection model on images')
-
+
     parser.add_argument(
         'detector_file',
         help='Path detector model file (.pb or .pt). Can also be MDV4, MDV5A, or MDV5B to request automatic download.')
-
+
     # Must specify either an image file or a directory
-    group = parser.add_mutually_exclusive_group(required=True)
+    group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument(
         '--image_file',
         type=str,
@@ -851,98 +858,103 @@ def main():
         type=str,
         default=None,
         help='Directory to search for images, with optional recursion by adding --recursive')
-
+
     parser.add_argument(
         '--recursive',
         action='store_true',
         help='Recurse into directories, only meaningful if using --image_dir')
-
+
     parser.add_argument(
         '--output_dir',
         type=str,
         default=None,
         help='Directory for output images (defaults to same as input)')
-
+
     parser.add_argument(
         '--image_size',
         type=int,
         default=None,
         help=('Force image resizing to a (square) integer size (not recommended to change this)'))
-
+
     parser.add_argument(
         '--threshold',
         type=float,
         default=DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
-        help=('Confidence threshold between 0 and 1.0; only render' +
+        help=('Confidence threshold between 0 and 1.0; only render' +
              ' boxes above this confidence (defaults to {})'.format(
              DEFAULT_RENDERING_CONFIDENCE_THRESHOLD)))
-
+
     parser.add_argument(
         '--crop',
         default=False,
         action='store_true',
         help=('If set, produces separate output images for each crop, '
              'rather than adding bounding boxes to the original image'))
-
+
     parser.add_argument(
         '--augment',
         default=False,
         action='store_true',
         help=('Enable image augmentation'))
-
+
     parser.add_argument(
         '--box_thickness',
         type=int,
         default=DEFAULT_BOX_THICKNESS,
         help=('Line width (in pixels) for box rendering (defaults to {})'.format(
             DEFAULT_BOX_THICKNESS)))
-
+
     parser.add_argument(
         '--box_expansion',
         type=int,
         default=DEFAULT_BOX_EXPANSION,
         help=('Number of pixels to expand boxes by (defaults to {})'.format(
             DEFAULT_BOX_EXPANSION)))
-
+
     parser.add_argument(
         '--label_font_size',
         type=int,
         default=DEFAULT_LABEL_FONT_SIZE,
         help=('Label font size (defaults to {})'.format(
             DEFAULT_LABEL_FONT_SIZE)))
-
+
     parser.add_argument(
         '--process_likely_output_images',
         action='store_true',
         help=('By default, we skip images that end in {}, because they probably came from this script. '\
             .format(DETECTION_FILENAME_INSERT) + \
             'This option disables that behavior.'))
-
+
     parser.add_argument(
         '--force_model_download',
         action='store_true',
         help=('If a named model (e.g. "MDV5A") is supplied, force a download of that model even if the ' +\
             'local file already exists.'))
 
+    parser.add_argument(
+        '--verbose',
+        action='store_true',
+        help=('Enable additional debug output'))
+
     parser.add_argument(
         '--detector_options',
         nargs='*',
         metavar='KEY=VALUE',
         default='',
         help='Detector-specific options, as a space-separated list of key-value pairs')
-
+
     if len(sys.argv[1:]) == 0:
         parser.print_help()
         parser.exit()
 
     args = parser.parse_args()
     detector_options = parse_kvp_list(args.detector_options)
-
-    # If the specified detector file is really the name of a known model, find
+
+    # If the specified detector file is really the name of a known model, find
     # (and possibly download) that model
     args.detector_file = try_download_known_detector(args.detector_file,
                                                      force_download=args.force_model_download)
-
+
     assert os.path.exists(args.detector_file), 'detector file {} does not exist'.format(
         args.detector_file)
     assert 0.0 < args.threshold <= 1.0, 'Confidence threshold needs to be between 0 and 1'
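One way to invoke the command-line driver with the arguments defined above, including the --verbose flag added in this release (directory names are placeholders):

```
python -m megadetector.detection.run_detector MDV5A \
    --image_dir camera_trap_images --recursive \
    --output_dir camera_trap_images_annotated \
    --threshold 0.2 \
    --verbose
```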
@@ -961,7 +973,7 @@ def main():
         else:
             image_file_names_valid.append(fn)
     image_file_names = image_file_names_valid
-
+
     print('Running detector on {} images...'.format(len(image_file_names)))
 
     if args.output_dir:
@@ -972,20 +984,21 @@ def main():
         else:
             # but for a single image, args.image_dir is also None
             args.output_dir = os.path.dirname(args.image_file)
-
+
     load_and_run_detector(model_file=args.detector_file,
                           image_file_names=image_file_names,
                           output_dir=args.output_dir,
                           render_confidence_threshold=args.threshold,
                           box_thickness=args.box_thickness,
-                          box_expansion=args.box_expansion,
+                          box_expansion=args.box_expansion,
                           crop_images=args.crop,
                           image_size=args.image_size,
                           label_font_size=args.label_font_size,
                           augment=args.augment,
                           # If --force_model_download was specified, we already handled it
                           force_model_download=False,
-                          detector_options=detector_options)
+                          detector_options=detector_options,
+                          verbose=args.verbose)
 
 if __name__ == '__main__':
     main()
@@ -998,19 +1011,19 @@ if False:
     pass
 
     #%% Test model download
-
+
     r"""
     cd i:\models\all_models_in_the_wild
     i:
     python -m http.server 8181
     """
-
+
     model_name = 'redwood'
     try_download_known_detector(model_name,force_download=True,verbose=True)
-
+
 
     #%% Load and run detector
-
+
     model_file = r'c:\temp\models\md_v4.1.0.pb'
     image_file_names = path_utils.find_images(r'c:\temp\demo_images\ssverymini')
     output_dir = r'c:\temp\demo_images\ssverymini'
output_dir = r'c:\temp\demo_images\ssverymini'
|