megadetector 10.0.8.tar.gz → 10.0.9.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic.
- {megadetector-10.0.8/megadetector.egg-info → megadetector-10.0.9}/PKG-INFO +1 -1
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/pytorch_detector.py +3 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_detector.py +1 -2
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_detector_batch.py +30 -15
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_tiled_inference.py +56 -15
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/compare_batch_results.py +48 -28
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/postprocess_batch_results.py +1 -1
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/subset_json_detector_output.py +80 -0
- {megadetector-10.0.8 → megadetector-10.0.9/megadetector.egg-info}/PKG-INFO +1 -1
- {megadetector-10.0.8 → megadetector-10.0.9}/pyproject.toml +3 -3
- {megadetector-10.0.8 → megadetector-10.0.9}/LICENSE +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/README-package.md +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/README.md +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/api/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/digiKam/setup.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/aggregate_classifier_probs.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/analyze_failed_images.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/cache_batchapi_outputs.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/create_classification_dataset.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/crop_detections.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/csv_to_json.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/detect_and_crop.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/efficientnet/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/efficientnet/model.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/efficientnet/utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/evaluate_model.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/identify_mislabeled_candidates.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/json_to_azcopy_list.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/json_validator.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/map_classification_categories.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/merge_classification_detection_output.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/prepare_classification_script.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/prepare_classification_script_mc.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/run_classifier.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/save_mislabeled.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/train_classifier.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/train_classifier_tf.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/classification/train_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/animl_to_md.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/annotations/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/annotations/annotation_constants.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/camtrap_dp_to_coco.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/cct_json_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/cct_to_md.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/cct_to_wi.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/coco_to_labelme.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/coco_to_yolo.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/databases/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/databases/add_width_and_height_to_db.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/databases/combine_coco_camera_traps_files.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/databases/integrity_check_json_db.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/databases/subset_json_db.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/generate_crops_from_cct.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/get_image_sizes.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/labelme_to_coco.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/labelme_to_yolo.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/create_lila_blank_set.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/create_lila_test_set.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/create_links_to_md_results_files.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/download_lila_subset.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/generate_lila_per_image_labels.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/get_lila_annotation_counts.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/get_lila_image_counts.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/lila_common.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/lila/test_lila_metadata_urls.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/mewc_to_md.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/ocr_tools.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/read_exif.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/remap_coco_categories.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/remove_exif.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/rename_images.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/resize_coco_dataset.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/speciesnet_to_md.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/wi_download_csv_to_coco.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/yolo_output_to_md_output.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/yolo_to_coco.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/data_management/zamba_to_md.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/change_detection.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/process_video.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_inference_with_yolov5_val.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_md_and_speciesnet.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/tf_detector.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/video_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/add_max_conf.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/categorize_detections_by_size.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/classification_postprocessing.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/combine_batch_outputs.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/convert_output_format.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/create_crop_folder.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/detector_calibration.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/generate_csv_report.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/load_api_results.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/md_to_coco.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/md_to_labelme.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/md_to_wi.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/merge_detections.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/remap_detection_categories.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/render_detection_confusion_matrix.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/separate_detections_into_folders.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/top_folders_to_bottom.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/validate_batch_results.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/map_new_lila_datasets.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/preview_lila_taxonomy.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/retrieve_sample_image.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/simple_image_download.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/species_lookup.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/taxonomy_csv_checker.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/taxonomy_graph.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/taxonomy_mapping/validate_lila_category_mappings.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/tests/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/tests/test_nms_synthetic.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/ct_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/directory_listing.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/extract_frames_from_video.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/gpu_test.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/md_tests.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/path_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/process_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/split_locations_into_train_val.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/string_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/url_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/wi_platform_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/wi_taxonomy_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/utils/write_html_image_list.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/__init__.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/plot_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/render_images_with_thumbnails.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/visualization_utils.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/visualize_db.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/visualize_detector_output.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector/visualization/visualize_video_output.py +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector.egg-info/SOURCES.txt +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector.egg-info/dependency_links.txt +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector.egg-info/requires.txt +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/megadetector.egg-info/top_level.txt +0 -0
- {megadetector-10.0.8 → megadetector-10.0.9}/setup.cfg +0 -0

{megadetector-10.0.8/megadetector.egg-info → megadetector-10.0.9}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: megadetector
-Version: 10.0.8
+Version: 10.0.9
 Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
 Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
 Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/pytorch_detector.py
RENAMED
@@ -859,6 +859,9 @@ class PTDetector:
         except AttributeError:
             pass
 
+        # AddaxAI depends on this printout, don't remove it
+        print('PTDetector using device {}'.format(str(self.device).lower()))
+
         try:
             self.model = PTDetector._load_model(model_path,
                                                 device=self.device,

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_detector.py
RENAMED
@@ -596,8 +596,7 @@ def load_detector(model_file,
     model_file = try_download_known_detector(model_file,
                                              force_download=force_model_download)
 
-
-    print('GPU available: {}'.format(is_gpu_available(model_file)))
+    print('GPU available: {}'.format(is_gpu_available(model_file)))
 
     start_time = time.time()
 

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_detector_batch.py
RENAMED
@@ -94,20 +94,29 @@ max_queue_size = 10
 # How often should we print progress when using the image queue?
 n_queue_print = 1000
 
-#
+# Only used if --include_exif_data or --include_image_timestamp are supplied
+exif_options = read_exif.ReadExifOptions()
+exif_options.processing_library = 'pil'
+exif_options.byte_handling = 'convert_to_string'
+
+# Only relevant when we're running our test harness; because bugs in batch
+# inference are dependent on batch grouping, we randomize batch grouping
+# during testing to maximize the probability that latent bugs come up
+# eventually.
+randomize_batch_order_during_testing = True
+
+# TODO: it's a little sloppy that the following are module-level globals, but in practice it
 # doesn't really matter, so I'm not in a big rush to move these to options until I do
 # a larger cleanup of all the long argument lists in this module.
-
+
 # Should the consumer loop run on its own process, or here in the main process?
 run_separate_consumer_process = False
-use_threads_for_queue = False
-verbose = False
 
-
-
-exif_options.byte_handling = 'convert_to_string'
+# Should we use threads (rather than processes) for the data loading workers?
+use_threads_for_queue = False
 
-
+# Enable additional debug output
+verbose = False
 
 
 #%% Support functions for multiprocessing
@@ -736,7 +745,9 @@ def _process_batch(image_items_batch,
     try:
 
         batch_detections = \
-            detector.generate_detections_one_batch(valid_images,
+            detector.generate_detections_one_batch(valid_images,
+                                                   valid_image_filenames,
+                                                   verbose=verbose)
 
         assert len(batch_detections) == len(valid_images)
 
@@ -1050,7 +1061,8 @@ def load_and_run_detector_batch(model_file,
                                 detector_options=None,
                                 loader_workers=default_loaders,
                                 preprocess_on_image_queue=default_preprocess_on_image_queue,
-                                batch_size=1
+                                batch_size=1,
+                                verbose_output=False):
     """
     Load a model file and run it on a list of images.
 
@@ -1087,6 +1099,7 @@ def load_and_run_detector_batch(model_file,
         preprocess_on_image_queue (bool, optional): if the image queue is enabled, should it handle
             image loading and preprocessing (True), or just image loading (False)?
         batch_size (int, optional): batch size for GPU processing, automatically set to 1 for CPU processing
+        verbose_output (bool, optional): enable additional debug output
 
     Returns:
         results: list of dicts; each dict represents detections on one image
@@ -1109,6 +1122,11 @@ def load_and_run_detector_batch(model_file,
     if class_mapping_filename is not None:
         _load_custom_class_mapping(class_mapping_filename)
 
+    global verbose
+    if verbose_output:
+        print('Enabling verbose output')
+        verbose = True
+
     # Handle the case where image_file_names is not yet actually a list
     if isinstance(image_file_names,str):
 
@@ -1866,11 +1884,7 @@ def main(): # noqa
 
     args = parser.parse_args()
 
-    global verbose
     global use_threads_for_queue
-
-    if args.verbose:
-        verbose = True
     if args.use_threads_for_queue:
         use_threads_for_queue = True
 
@@ -2087,7 +2101,8 @@ def main(): # noqa
                                           detector_options=detector_options,
                                           loader_workers=args.loader_workers,
                                           preprocess_on_image_queue=args.preprocess_on_image_queue,
-                                          batch_size=args.batch_size
+                                          batch_size=args.batch_size,
+                                          verbose_output=args.verbose)
 
     elapsed = time.time() - start_time
     images_per_second = len(results) / elapsed
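
The run_detector_batch.py changes add a verbose_output argument to load_and_run_detector_batch() (the existing --verbose CLI flag is now routed through it) and pass per-batch filenames and verbosity through to generate_detections_one_batch(). A minimal sketch of calling the updated function from Python, not part of the diff: the image paths and output file are hypothetical, and 'MDV5A' is assumed to be one of the model shorthands that try_download_known_detector recognizes.

    # Illustrative sketch only; paths and model shorthand are assumptions
    from megadetector.detection.run_detector_batch import (
        load_and_run_detector_batch, write_results_to_file)

    image_files = ['/data/camera01/IMG_0001.JPG',   # hypothetical paths
                   '/data/camera01/IMG_0002.JPG']

    # verbose_output is the new 10.0.9 flag; it sets the module-level
    # 'verbose' global before inference starts
    results = load_and_run_detector_batch('MDV5A',
                                          image_files,
                                          batch_size=8,
                                          verbose_output=True)

    write_results_to_file(results, '/data/camera01/md_results.json')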

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/detection/run_tiled_inference.py
RENAMED
@@ -39,7 +39,7 @@ from torchvision import ops
 from megadetector.detection.run_inference_with_yolov5_val import \
     YoloInferenceOptions,run_inference_with_yolo_val
 from megadetector.detection.run_detector_batch import \
-    load_and_run_detector_batch,write_results_to_file
+    load_and_run_detector_batch,write_results_to_file,default_loaders
 from megadetector.detection.run_detector import \
     try_download_known_detector, CONF_DIGITS, COORD_DIGITS
 from megadetector.utils import path_utils
@@ -406,7 +406,9 @@ def run_tiled_inference(model_file,
                         detector_options=None,
                         use_image_queue=True,
                         preprocess_on_image_queue=True,
-
+                        loader_workers=default_loaders,
+                        inference_size=None,
+                        verbose=False):
     """
     Runs inference using [model_file] on the images in [image_folder], fist splitting each image up
     into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
@@ -451,16 +453,17 @@ def run_tiled_inference(model_file,
         image_list (list, optional): .json file containing a list of specific images to process. If
             this is supplied, and the paths are absolute, [image_folder] will be ignored. If this is supplied,
            and the paths are relative, they should be relative to [image_folder]
-        augment (bool, optional): apply test-time augmentation
-            is None
+        augment (bool, optional): apply test-time augmentation
         detector_options (dict, optional): parameters to pass to run_detector, only relevant if
             yolo_inference_options is None
         use_image_queue (bool, optional): whether to use a loader worker queue, only relevant if
             yolo_inference_options is None
         preprocess_on_image_queue (bool, optional): whether the image queue should also be responsible
             for preprocessing
+        loader_workers (int, optional): number of preprocessing loader workers to use
         inference_size (int, optional): override the default inference image size, only relevant if
             yolo_inference_options is None
+        verbose (bool, optional): enable additional debug output
 
     Returns:
         dict: MD-formatted results dictionary, identical to what's written to [output_file]
@@ -522,7 +525,8 @@ def run_tiled_inference(model_file,
 
     all_image_patch_info = None
 
-    print('Extracting patches from {} images'.format(
+    print('Extracting patches from {} images on {} workers'.format(
+        len(image_files_relative),n_patch_extraction_workers))
 
     n_workers = n_patch_extraction_workers
 
@@ -632,7 +636,9 @@ def run_tiled_inference(model_file,
                                               detector_options=detector_options,
                                               use_image_queue=use_image_queue,
                                               preprocess_on_image_queue=preprocess_on_image_queue,
-                                              image_size=inference_size
+                                              image_size=inference_size,
+                                              verbose_output=verbose,
+                                              loader_workers=loader_workers)
 
     patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
 
@@ -847,12 +853,12 @@ if False:
     yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')
 
     run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
-
-
-
-
-
-
+                        tile_size_x=tile_size_x, tile_size_y=tile_size_y,
+                        tile_overlap=tile_overlap,
+                        checkpoint_path=checkpoint_path,
+                        checkpoint_frequency=checkpoint_frequency,
+                        remove_tiles=remove_tiles,
+                        yolo_inference_options=yolo_inference_options)
 
 
 #%% Run tiled inference (generate a command)
@@ -930,6 +936,14 @@ def main():
         '--no_remove_tiles',
         action='store_true',
         help='Tiles are removed by default; this option suppresses tile deletion')
+    parser.add_argument(
+        '--augment',
+        action='store_true',
+        help='Enable test-time augmentation')
+    parser.add_argument(
+        '--verbose',
+        action='store_true',
+        help='Enable additional debug output')
     parser.add_argument(
         '--tile_size_x',
         type=int,
@@ -960,6 +974,21 @@ def main():
         type=str,
         default=None,
         help=('A list of detector options (key-value pairs)'))
+    parser.add_argument(
+        '--inference_size',
+        type=int,
+        default=None,
+        help=('Run inference at a non-default size'))
+    parser.add_argument(
+        '--n_patch_extraction_workers',
+        type=int,
+        default=1,
+        help=('Number of workers to use for patch extraction'))
+    parser.add_argument(
+        '--loader_workers',
+        type=int,
+        default=default_loaders,
+        help=('Number of workers to use for image loading and preprocessing (0 to disable)'))
 
     # detector_options = parse_kvp_list(args.detector_options)
 
@@ -987,11 +1016,23 @@ def main():
 
     remove_tiles = (not args.no_remove_tiles)
 
-
-
+    use_image_queue = (args.loader_workers > 0)
+
+    run_tiled_inference(model_file,
+                        args.image_folder,
+                        args.tiling_folder,
+                        args.output_file,
+                        tile_size_x=args.tile_size_x,
+                        tile_size_y=args.tile_size_y,
                         tile_overlap=args.tile_overlap,
                         remove_tiles=remove_tiles,
-                        image_list=args.image_list
+                        image_list=args.image_list,
+                        augment=args.augment,
+                        inference_size=args.inference_size,
+                        verbose=args.verbose,
+                        n_patch_extraction_workers=args.n_patch_extraction_workers,
+                        loader_workers=args.loader_workers,
+                        use_image_queue=use_image_queue)
 
 if __name__ == '__main__':
     main()
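
The run_tiled_inference.py changes expose loader_workers, inference_size, and verbose on run_tiled_inference(), and add matching --augment, --verbose, --inference_size, --n_patch_extraction_workers, and --loader_workers command-line arguments. A hedged sketch of a programmatic call with the new parameters, not part of the diff; the folders are hypothetical and 'MDV5A' is an assumed model shorthand.

    # Illustrative sketch only; paths and model shorthand are assumptions
    from megadetector.detection.run_tiled_inference import run_tiled_inference

    results = run_tiled_inference('MDV5A',                        # model name or path
                                  '/data/survey/images',          # image_folder
                                  '/data/survey/tiles',           # tiling_folder (scratch space)
                                  '/data/survey/md_results.json', # output_file
                                  loader_workers=4,       # new in 10.0.9
                                  inference_size=1280,    # new in 10.0.9
                                  verbose=True)           # new in 10.0.9

The CLI equivalent would pass --loader_workers 4 --inference_size 1280 --verbose; per the new main() code, the image queue is enabled whenever --loader_workers is greater than 0.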

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/compare_batch_results.py
RENAMED
@@ -136,7 +136,7 @@ class BatchComparisonOptions:
         #: Colormap to use for detections in file B (maps detection categories to colors)
         self.colormap_b = ['RoyalBlue']
 
-        #:
+        #: Whether to render images with threads (True) or processes (False)
         self.parallelize_rendering_with_threads = True
 
         #: List of filenames to include in the comparison, or None to use all files
@@ -152,7 +152,7 @@ class BatchComparisonOptions:
         self.target_width = 800
 
         #: Number of workers to use for rendering, or <=1 to disable parallelization
-        self.n_rendering_workers =
+        self.n_rendering_workers = 10
 
         #: Random seed for image sampling (not used if max_images_per_category is None)
         self.random_seed = 0
@@ -183,7 +183,7 @@ class BatchComparisonOptions:
         #: Should we show category names (instead of numbers) on detected boxes?
         self.show_category_names_on_detected_boxes = True
 
-        #: List of PairwiseBatchComparisonOptions that defines the comparisons we'll render
+        #: List of PairwiseBatchComparisonOptions that defines the comparisons we'll render
         self.pairwise_options = []
 
         #: Only process images whose file names contain this token
@@ -197,7 +197,7 @@ class BatchComparisonOptions:
         self.verbose = False
 
         #: Separate out the "clean TP" and "clean TN" categories, only relevant when GT is
-        #: available
+        #: available
         self.include_clean_categories = True
 
         #: When rendering to the output table, optionally write alternative strings
@@ -211,6 +211,10 @@ class BatchComparisonOptions:
         #: Should we include a TOC? TOC is always omitted if <=2 comparisons are performed.
         self.include_toc = True
 
+        #: Should we return the mapping from categories (e.g. "common detections") to image
+        #: pairs? Makes the return dict much larger, but allows post-hoc exploration.
+        self.return_images_by_category = False
+
 # ...class BatchComparisonOptions
 
 
@@ -224,7 +228,7 @@ class PairwiseBatchComparisonResults:
         #: String of HTML content suitable for rendering to an HTML file
         self.html_content = None
 
-        #: Possibly-modified version of the PairwiseBatchComparisonOptions supplied as input
+        #: Possibly-modified version of the PairwiseBatchComparisonOptions supplied as input
         self.pairwise_options = None
 
         #: A dictionary with keys representing category names; in the no-ground-truth case, for example,
@@ -295,7 +299,8 @@ def _render_image_pair(fn,image_pairs,category_folder,options,pairwise_options):
     """
 
     input_image_path = os.path.join(options.image_folder,fn)
-    assert os.path.isfile(input_image_path),
+    assert os.path.isfile(input_image_path), \
+        'Image {} does not exist'.format(input_image_path)
 
     im = visualization_utils.open_image(input_image_path)
     image_pair = image_pairs[fn]
@@ -628,11 +633,21 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
     os.makedirs(options.output_folder,exist_ok=True)
 
 
+    # Just in case the user provided a single category instead of a list
+    # for category_names_to_include
+    if options.category_names_to_include is not None:
+        if isinstance(options.category_names_to_include,str):
+            options.category_names_to_include = [options.category_names_to_include]
+
     ##%% Load both result sets
 
+    if options.verbose:
+        print('Loading {}'.format(pairwise_options.results_filename_a))
     with open(pairwise_options.results_filename_a,'r') as f:
         results_a = json.load(f)
 
+    if options.verbose:
+        print('Loading {}'.format(pairwise_options.results_filename_b))
     with open(pairwise_options.results_filename_b,'r') as f:
         results_b = json.load(f)
 
@@ -654,6 +669,17 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
     detection_category_name_to_id = invert_dictionary(detection_categories_a)
     options.detection_category_id_to_name = detection_category_id_to_name
 
+    category_name_to_id_a = invert_dictionary(detection_categories_a)
+    category_name_to_id_b = invert_dictionary(detection_categories_b)
+    category_ids_to_include_a = []
+    category_ids_to_include_b = []
+
+    for category_name in options.category_names_to_include:
+        if category_name in category_name_to_id_a:
+            category_ids_to_include_a.append(category_name_to_id_a[category_name])
+        if category_name in category_name_to_id_b:
+            category_ids_to_include_b.append(category_name_to_id_b[category_name])
+
     if pairwise_options.results_description_a is None:
         if 'detector' not in results_a['info']:
             print('No model metadata supplied for results-A, assuming MDv4')
@@ -679,7 +705,7 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
     filename_to_image_b = {im['file']:im for im in images_b}
 
 
-    ##%% Make sure
+    ##%% Make sure the two result sets represent the same set of images
 
     filenames_a = [im['file'] for im in images_a]
     filenames_b_set = set([im['file'] for im in images_b])
@@ -914,7 +940,8 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
         pairwise_options.detection_thresholds_b['default']
 
     # fn = filenames_to_compare[0]
-    for i_file,fn in tqdm(enumerate(filenames_to_compare),
+    for i_file,fn in tqdm(enumerate(filenames_to_compare),
+                          total=len(filenames_to_compare)):
 
         if fn not in filename_to_image_b:
 
@@ -1000,27 +1027,11 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
                 categories_above_threshold_b.add(category_id)
 
         if invalid_category_error:
-
             continue
 
         # Should we be restricting the comparison to only certain categories?
         if options.category_names_to_include is not None:
 
-            # Just in case the user provided a single category instead of a list
-            if isinstance(options.category_names_to_include,str):
-                options.category_names_to_include = [options.category_names_to_include]
-
-            category_name_to_id_a = invert_dictionary(detection_categories_a)
-            category_name_to_id_b = invert_dictionary(detection_categories_b)
-            category_ids_to_include_a = []
-            category_ids_to_include_b = []
-
-            for category_name in options.category_names_to_include:
-                if category_name in category_name_to_id_a:
-                    category_ids_to_include_a.append(category_name_to_id_a[category_name])
-                if category_name in category_name_to_id_b:
-                    category_ids_to_include_b.append(category_name_to_id_b[category_name])
-
             # Restrict the categories we treat as above-threshold to the set we're supposed
             # to be using
             categories_above_threshold_a = [category_id for category_id in categories_above_threshold_a if \
@@ -1287,7 +1298,7 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
         max_conf_b = _maxempty([det['conf'] for det in im_b['detections']])
         sort_conf = max(max_conf_a,max_conf_b)
 
-
+        # ...what kind of ground truth (if any) do we have?
 
         assert comparison_category is not None
         categories_to_image_pairs[comparison_category][fn] = im_pair
@@ -1313,7 +1324,11 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
     local_output_folder = os.path.join(options.output_folder,'cmp_' + \
         str(output_index).zfill(3))
 
-    def
+    def _render_detection_comparisons(category,image_pairs,image_filenames):
+        """
+        Render all the detection results pairs for the sampled images in a
+        particular category (e.g. all the "common detections").
+        """
 
         print('Rendering detections for category {}'.format(category))
 
@@ -1336,7 +1351,7 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
 
         return output_image_paths
 
-    # ...def
+    # ...def _render_detection_comparisons()
 
     if len(options.colormap_a) > 1:
         color_string_a = str(options.colormap_a)
@@ -1371,7 +1386,7 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
 
         input_image_absolute_paths = [os.path.join(options.image_folder,fn) for fn in image_filenames]
 
-        category_image_output_paths =
+        category_image_output_paths = _render_detection_comparisons(category,
             image_pairs,image_filenames)
 
         category_html_filename = os.path.join(local_output_folder,
@@ -1469,6 +1484,8 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
             print("Pool closed and joined for comparison rendering")
         except Exception:
             pass
+
+
     ##%% Write the top-level HTML file content
 
     html_output_string = ''
@@ -1591,8 +1608,11 @@ def compare_batch_results(options):
     for i_comparison,pairwise_options in enumerate(pairwise_options_list):
 
         print('Running comparison {} of {}'.format(i_comparison,n_comparisons))
+        pairwise_options.verbose = options.verbose
         pairwise_results = \
             _pairwise_compare_batch_results(options,i_comparison,pairwise_options)
+        if not options.return_images_by_category:
+            pairwise_results.categories_to_image_pairs = None
         html_content += pairwise_results.html_content
         all_pairwise_results.append(pairwise_results)
 
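
In compare_batch_results.py, BatchComparisonOptions gains a return_images_by_category flag, category_names_to_include now also accepts a single string, and per-comparison verbosity is propagated from the top-level options. A hedged sketch of how the new option might be used, not part of the diff; the results files and folders are hypothetical.

    # Illustrative sketch only; paths are hypothetical
    from megadetector.postprocessing.compare_batch_results import (
        BatchComparisonOptions, PairwiseBatchComparisonOptions, compare_batch_results)

    options = BatchComparisonOptions()
    options.image_folder = '/data/survey/images'
    options.output_folder = '/data/survey/comparison'
    options.category_names_to_include = 'animal'  # a bare string is now tolerated
    options.return_images_by_category = True      # new in 10.0.9: keep per-category image pairs

    pairwise = PairwiseBatchComparisonOptions()
    pairwise.results_filename_a = '/data/survey/md_results_a.json'
    pairwise.results_filename_b = '/data/survey/md_results_b.json'
    options.pairwise_options = [pairwise]

    results = compare_batch_results(options)

When return_images_by_category is left at its default (False), categories_to_image_pairs is cleared from each pairwise result to keep the returned object small.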

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/postprocess_batch_results.py
RENAMED
@@ -1145,7 +1145,7 @@ def process_batch_results(options):
 
     images_to_visualize = detections_df
 
-    if options.num_images_to_sample is not None and options.num_images_to_sample > 0:
+    if (options.num_images_to_sample is not None) and (options.num_images_to_sample > 0):
         images_to_visualize = images_to_visualize.sample(
             n=min(options.num_images_to_sample, len(images_to_visualize)),
             random_state=options.sample_seed)

{megadetector-10.0.8 → megadetector-10.0.9}/megadetector/postprocessing/subset_json_detector_output.py
RENAMED
@@ -156,6 +156,12 @@ class SubsetJsonDetectorOutputOptions:
         #: to be contiguous. Set to 1 to remove empty categories only.
         self.remove_classification_categories_below_count = None
 
+        #: Remove detections above a threshold size (as a fraction of the image size)
+        self.maximum_detection_size = None
+
+        #: Remove detections below a threshold size (as a fraction of the image size)
+        self.minimum_detection_size = None
+
 # ...class SubsetJsonDetectorOutputOptions
 
 
@@ -274,6 +280,71 @@ def remove_classification_categories_below_count(data, options):
 # ...def remove_classification_categories_below_count(...)
 
 
+def subset_json_detector_output_by_size(data, options):
+    """
+    Remove detections above or below threshold sizes (as a fraction
+    of the image size).
+
+    Args:
+        data (dict): data loaded from a MD results file
+        options (SubsetJsonDetectorOutputOptions): parameters for subsetting
+
+    Returns:
+        dict: Possibly-modified version of [data] (also modifies in place)
+    """
+
+    if (options.maximum_detection_size is None) and \
+        (options.minimum_detection_size is None):
+        return data
+
+    if options.maximum_detection_size is None:
+        options.maximum_detection_size = 1000
+
+    if options.minimum_detection_size is None:
+        options.minimum_detection_size = -1000
+
+    print('Subsetting by size ({} <--> {})'.format(
+        options.minimum_detection_size,
+        options.maximum_detection_size))
+
+    images_in = data['images']
+    images_out = []
+
+    # im = images_in[0]
+    for i_image, im in tqdm(enumerate(images_in), total=len(images_in)):
+
+        # Always keep failed images; if the caller wants to remove these, they
+        # will use remove_failed_images
+        if ('detections' not in im) or (im['detections'] is None):
+            images_out.append(im)
+            continue
+
+        detections_to_keep = []
+
+        for det in im['detections']:
+
+            # [x_min, y_min, width_of_box, height_of_box]
+            detection_size = det['bbox'][2] * det['bbox'][3]
+
+            if (detection_size >= options.minimum_detection_size) and \
+                (detection_size <= options.maximum_detection_size):
+                detections_to_keep.append(det)
+
+        im['detections'] = detections_to_keep
+
+        images_out.append(im)
+
+    # ...for each image
+
+    data['images'] = images_out
+    print('done, found {} matches (of {})'.format(
+        len(data['images']),len(images_in)))
+
+    return data
+
+# ...def subset_json_detector_output_by_size(...)
+
+
 def subset_json_detector_output_by_confidence(data, options):
     """
     Removes all detections below options.confidence_threshold.
@@ -674,6 +745,11 @@ def subset_json_detector_output(input_filename, output_filename, options, data=None):
 
     data = subset_json_detector_output_by_list(data, options)
 
+    if (options.maximum_detection_size is not None) or \
+        (options.minimum_detection_size is not None):
+
+        data = subset_json_detector_output_by_size(data, options)
+
     if not options.split_folders:
 
         _write_detection_results(data, output_filename, options)
@@ -837,6 +913,10 @@ def main(): # noqa
                         help='Replace [query] with this')
     parser.add_argument('--confidence_threshold', type=float, default=None,
                         help='Remove detections below this confidence level')
+    parser.add_argument('--maximum_detection_size', type=float, default=None,
+                        help='Remove detections above this size (as a fraction of the image size)')
+    parser.add_argument('--minimum_detection_size', type=float, default=None,
+                        help='Remove detections below this size (as a fraction of the image size)')
     parser.add_argument('--keep_files_in_list', type=str, default=None,
                         help='Keep only files in this list, which can be a .json results file or a folder.' + \
                         ' Assumes that the input .json file contains relative paths when comparing to a folder.')
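
The new subset_json_detector_output_by_size() function, together with the maximum_detection_size / minimum_detection_size options and the corresponding --maximum_detection_size / --minimum_detection_size arguments, filters detections by box area (det['bbox'][2] * det['bbox'][3], i.e. width times height in normalized coordinates, a fraction of the image). A hedged sketch of programmatic use, not part of the diff; the input and output paths are hypothetical.

    # Illustrative sketch only; paths are hypothetical
    from megadetector.postprocessing.subset_json_detector_output import (
        SubsetJsonDetectorOutputOptions, subset_json_detector_output)

    options = SubsetJsonDetectorOutputOptions()
    options.maximum_detection_size = 0.9    # drop boxes covering more than 90% of the image
    options.minimum_detection_size = 0.001  # drop boxes smaller than 0.1% of the image

    _ = subset_json_detector_output('/data/survey/md_results.json',
                                    '/data/survey/md_results_size_filtered.json',
                                    options)

Failed images (those with no 'detections' field) are passed through unchanged; per the code comment, removing them remains the job of the existing remove_failed_images handling.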

{megadetector-10.0.8 → megadetector-10.0.9/megadetector.egg-info}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: megadetector
-Version: 10.0.8
+Version: 10.0.9
 Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
 Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
 Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
|
@@ -5,7 +5,7 @@ requires = [
|
|
|
5
5
|
|
|
6
6
|
[project]
|
|
7
7
|
name = "megadetector"
|
|
8
|
-
version = "10.0.
|
|
8
|
+
version = "10.0.9"
|
|
9
9
|
description = "MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images."
|
|
10
10
|
readme = "README-package.md"
|
|
11
11
|
requires-python = ">=3.9,<3.14"
|
|
@@ -93,7 +93,6 @@ dependencies = [
|
|
|
93
93
|
include = ["megadetector*"]
|
|
94
94
|
|
|
95
95
|
[tool.ruff]
|
|
96
|
-
ignore = ["D212"]
|
|
97
96
|
line-length = 120
|
|
98
97
|
target-version = "py39"
|
|
99
98
|
include = ["megadetector/**/*.py"]
|
|
@@ -108,6 +107,7 @@ exclude = ["megadetector/api/batch_processing/api_core/**/*.py",
|
|
|
108
107
|
"megadetector/taxonomy_mapping/**/*.py"]
|
|
109
108
|
|
|
110
109
|
[tool.ruff.lint]
|
|
110
|
+
ignore = ["D212"]
|
|
111
111
|
select = [
|
|
112
112
|
"E", # Pycodestyle errors
|
|
113
113
|
"W", # Pycodestyle warnings
|
|
@@ -122,7 +122,7 @@ select = [
|
|
|
122
122
|
[tool.ruff.lint.pydocstyle]
|
|
123
123
|
convention = "google"
|
|
124
124
|
|
|
125
|
-
[tool.ruff.per-file-ignores]
|
|
125
|
+
[tool.ruff.lint.per-file-ignores]
|
|
126
126
|
"**/*.py" = [
|
|
127
127
|
# Comment/whitespace conventions that differ from ruff's defaults
|
|
128
128
|
"D415", # First line should end with a period, question mark, or exclamation point
|
|