megadetector 10.0.7.tar.gz → 10.0.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (153)
  1. {megadetector-10.0.7/megadetector.egg-info → megadetector-10.0.9}/PKG-INFO +1 -1
  2. {megadetector-10.0.7 → megadetector-10.0.9}/README.md +2 -2
  3. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/cct_json_utils.py +16 -6
  4. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/databases/subset_json_db.py +57 -2
  5. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/pytorch_detector.py +32 -15
  6. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/run_detector.py +1 -2
  7. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/run_detector_batch.py +30 -15
  8. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/run_inference_with_yolov5_val.py +3 -1
  9. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/run_tiled_inference.py +61 -17
  10. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/video_utils.py +23 -7
  11. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/classification_postprocessing.py +5 -1
  12. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/compare_batch_results.py +48 -28
  13. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/convert_output_format.py +81 -87
  14. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/postprocess_batch_results.py +1 -1
  15. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/subset_json_detector_output.py +83 -0
  16. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/directory_listing.py +19 -13
  17. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/path_utils.py +58 -8
  18. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/url_utils.py +91 -1
  19. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/wi_taxonomy_utils.py +26 -26
  20. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/visualize_video_output.py +16 -6
  21. {megadetector-10.0.7 → megadetector-10.0.9/megadetector.egg-info}/PKG-INFO +1 -1
  22. {megadetector-10.0.7 → megadetector-10.0.9}/pyproject.toml +4 -4
  23. {megadetector-10.0.7 → megadetector-10.0.9}/LICENSE +0 -0
  24. {megadetector-10.0.7 → megadetector-10.0.9}/README-package.md +0 -0
  25. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/__init__.py +0 -0
  26. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/api/__init__.py +0 -0
  27. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/digiKam/setup.py +0 -0
  28. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +0 -0
  29. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -0
  30. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -0
  31. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -0
  32. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/__init__.py +0 -0
  33. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/aggregate_classifier_probs.py +0 -0
  34. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/analyze_failed_images.py +0 -0
  35. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/cache_batchapi_outputs.py +0 -0
  36. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/create_classification_dataset.py +0 -0
  37. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/crop_detections.py +0 -0
  38. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/csv_to_json.py +0 -0
  39. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/detect_and_crop.py +0 -0
  40. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/efficientnet/__init__.py +0 -0
  41. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/efficientnet/model.py +0 -0
  42. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/efficientnet/utils.py +0 -0
  43. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/evaluate_model.py +0 -0
  44. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/identify_mislabeled_candidates.py +0 -0
  45. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/json_to_azcopy_list.py +0 -0
  46. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/json_validator.py +0 -0
  47. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/map_classification_categories.py +0 -0
  48. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/merge_classification_detection_output.py +0 -0
  49. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/prepare_classification_script.py +0 -0
  50. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/prepare_classification_script_mc.py +0 -0
  51. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/run_classifier.py +0 -0
  52. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/save_mislabeled.py +0 -0
  53. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/train_classifier.py +0 -0
  54. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/train_classifier_tf.py +0 -0
  55. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/classification/train_utils.py +0 -0
  56. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/__init__.py +0 -0
  57. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/animl_to_md.py +0 -0
  58. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/annotations/__init__.py +0 -0
  59. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/annotations/annotation_constants.py +0 -0
  60. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/camtrap_dp_to_coco.py +0 -0
  61. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/cct_to_md.py +0 -0
  62. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/cct_to_wi.py +0 -0
  63. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/coco_to_labelme.py +0 -0
  64. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/coco_to_yolo.py +0 -0
  65. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/databases/__init__.py +0 -0
  66. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/databases/add_width_and_height_to_db.py +0 -0
  67. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/databases/combine_coco_camera_traps_files.py +0 -0
  68. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/databases/integrity_check_json_db.py +0 -0
  69. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/generate_crops_from_cct.py +0 -0
  70. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/get_image_sizes.py +0 -0
  71. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/labelme_to_coco.py +0 -0
  72. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/labelme_to_yolo.py +0 -0
  73. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/__init__.py +0 -0
  74. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/create_lila_blank_set.py +0 -0
  75. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/create_lila_test_set.py +0 -0
  76. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/create_links_to_md_results_files.py +0 -0
  77. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/download_lila_subset.py +0 -0
  78. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/generate_lila_per_image_labels.py +0 -0
  79. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/get_lila_annotation_counts.py +0 -0
  80. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/get_lila_image_counts.py +0 -0
  81. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/lila_common.py +0 -0
  82. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/lila/test_lila_metadata_urls.py +0 -0
  83. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/mewc_to_md.py +0 -0
  84. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/ocr_tools.py +0 -0
  85. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/read_exif.py +0 -0
  86. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/remap_coco_categories.py +0 -0
  87. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/remove_exif.py +0 -0
  88. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/rename_images.py +0 -0
  89. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/resize_coco_dataset.py +0 -0
  90. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/speciesnet_to_md.py +0 -0
  91. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/wi_download_csv_to_coco.py +0 -0
  92. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/yolo_output_to_md_output.py +0 -0
  93. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/yolo_to_coco.py +0 -0
  94. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/data_management/zamba_to_md.py +0 -0
  95. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/__init__.py +0 -0
  96. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/change_detection.py +0 -0
  97. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/process_video.py +0 -0
  98. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/run_md_and_speciesnet.py +0 -0
  99. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/detection/tf_detector.py +0 -0
  100. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/__init__.py +0 -0
  101. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/add_max_conf.py +0 -0
  102. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/categorize_detections_by_size.py +0 -0
  103. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/combine_batch_outputs.py +0 -0
  104. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/create_crop_folder.py +0 -0
  105. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/detector_calibration.py +0 -0
  106. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/generate_csv_report.py +0 -0
  107. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/load_api_results.py +0 -0
  108. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/md_to_coco.py +0 -0
  109. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/md_to_labelme.py +0 -0
  110. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/md_to_wi.py +0 -0
  111. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/merge_detections.py +0 -0
  112. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/remap_detection_categories.py +0 -0
  113. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/render_detection_confusion_matrix.py +0 -0
  114. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -0
  115. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -0
  116. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -0
  117. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/separate_detections_into_folders.py +0 -0
  118. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/top_folders_to_bottom.py +0 -0
  119. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/postprocessing/validate_batch_results.py +0 -0
  120. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/__init__.py +0 -0
  121. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -0
  122. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/map_new_lila_datasets.py +0 -0
  123. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -0
  124. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/preview_lila_taxonomy.py +0 -0
  125. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/retrieve_sample_image.py +0 -0
  126. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/simple_image_download.py +0 -0
  127. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/species_lookup.py +0 -0
  128. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/taxonomy_csv_checker.py +0 -0
  129. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/taxonomy_graph.py +0 -0
  130. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/taxonomy_mapping/validate_lila_category_mappings.py +0 -0
  131. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/tests/__init__.py +0 -0
  132. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/tests/test_nms_synthetic.py +0 -0
  133. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/__init__.py +0 -0
  134. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/ct_utils.py +0 -0
  135. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/extract_frames_from_video.py +0 -0
  136. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/gpu_test.py +0 -0
  137. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/md_tests.py +0 -0
  138. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/process_utils.py +0 -0
  139. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/split_locations_into_train_val.py +0 -0
  140. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/string_utils.py +0 -0
  141. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/wi_platform_utils.py +0 -0
  142. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/utils/write_html_image_list.py +0 -0
  143. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/__init__.py +0 -0
  144. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/plot_utils.py +0 -0
  145. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/render_images_with_thumbnails.py +0 -0
  146. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/visualization_utils.py +0 -0
  147. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/visualize_db.py +0 -0
  148. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector/visualization/visualize_detector_output.py +0 -0
  149. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector.egg-info/SOURCES.txt +0 -0
  150. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector.egg-info/dependency_links.txt +0 -0
  151. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector.egg-info/requires.txt +0 -0
  152. {megadetector-10.0.7 → megadetector-10.0.9}/megadetector.egg-info/top_level.txt +0 -0
  153. {megadetector-10.0.7 → megadetector-10.0.9}/setup.cfg +0 -0
--- megadetector-10.0.7/megadetector.egg-info/PKG-INFO
+++ megadetector-10.0.9/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: megadetector
- Version: 10.0.7
+ Version: 10.0.9
  Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
  Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
  Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
--- megadetector-10.0.7/README.md
+++ megadetector-10.0.9/README.md
@@ -112,7 +112,6 @@ Here are a few of the organizations that have used MegaDetector... we're only li
  * [TROPECOLNET project](https://www.anabenitezlopez.com/research/global-change-biology/tropecolnet/), Museo Nacional de Ciencias Naturales
  * [Wildlife Coexistence Lab](https://wildlife.forestry.ubc.ca/), University of British Columbia
  * [Wildlife Research](https://www.dfw.state.or.us/wildlife/research/index.asp), Oregon Department of Fish and Wildlife
- * [Wildlife Division](https://www.michigan.gov/dnr/about/contact/wildlife), Michigan Department of Natural Resources
  * [Kohl Wildlife Lab](https://kohlwildlifelab.com/), University of Georgia
  * [SPEC Lab](https://thespeclab.weebly.com/) and [Cherry Lab](https://www.ckwri.tamuk.edu/about/michael-j-cherry-phd), Caesar Kleberg Wildlife Research Institute, Texas A&M Kingsville
  * Ecology and Conservation of Amazonian Vertebrates Research Group, Federal University of Amapá
@@ -148,10 +147,11 @@ Here are a few of the organizations that have used MegaDetector... we're only li
  * [The Nature Conservancy in California](https://www.nature.org/en-us/about-us/where-we-work/united-states/california/) ([Animl platform](https://github.com/tnc-ca-geo/animl-frontend)) ([story](https://www.vision-systems.com/non-factory/environment-agriculture/article/14304433/the-nature-conservancy-brings-cameras-ai-to-invasive-species-prevention))
  * [San Diego Zoo Wildlife Alliance](https://science.sandiegozoo.org/) ([Animl R package](https://github.com/conservationtechlab/animl))
  * [TerrOïko](https://www.terroiko.fr/) ([OCAPI platform](https://www.terroiko.fr/ocapi))
+ * [Wildlife Division](https://www.michigan.gov/dnr/about/contact/wildlife), Michigan Department of Natural Resources ([blog post](https://www.michigan.gov/dnr/about/newsroom/releases/2025/08/18/dnr-researchers-to-test-trail-cameras-in-elk-survey))

  Also see:

- * The [list of MD-related GUIs, platforms, and GitHub repos](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md#is-there-a-gui) within the MegaDetector User Guide... although you can never have too many lists, so here they are in a concise comma-separated list: [Wildlife Insights](https://wildlifeinsights.org/), [Animal Detect](https://www.animaldetect.com), [TrapTagger](https://wildeyeconservation.org/trap-tagger-about/), [WildTrax](https://www.wildtrax.ca/), [Agouti](https://agouti.eu/), [Trapper](https://trapper-project.readthedocs.io/en/latest/overview.html), [Camelot](https://camelotproject.org/), [WildePod](https://wildepod.org/), [wpsWatch](https://wildlabs.net/inventory/products/wpswatch), [TNC Animl](https://animl.camera/) ([code](https://github.com/tnc-ca-geo/animl-frontend)), [Wildlife Observer Network](https://roadecology.ucdavis.edu/research/projects/wildlife-observer-network), [Zooniverse ML Subject Assistant](https://subject-assistant.zooniverse.org/#/intro), [Dudek AI Image Toolkit](https://ait.dudek.com), [Zamba Cloud](https://github.com/drivendataorg/zamba), [OCAPI](https://www.terroiko.fr/ocapi/).
+ * The [list of MD-related GUIs, platforms, and GitHub repos](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md#is-there-a-gui) within the MegaDetector User Guide... although you can never have too many lists, so here they are in a concise comma-separated list: [Wildlife Insights](https://wildlifeinsights.org/), [Animal Detect](https://www.animaldetect.com), [TrapTagger](https://wildeyeconservation.org/trap-tagger-about/), [WildTrax](https://www.wildtrax.ca/), [Agouti](https://agouti.eu/), [Trapper](https://trapper-project.readthedocs.io/en/latest/overview.html), [Camelot](https://camelotproject.org/), [WildePod](https://wildepod.org/), [wpsWatch](https://wildlabs.net/inventory/products/wpswatch), [TNC Animl](https://animl.camera/) ([code](https://github.com/tnc-ca-geo/animl-frontend)), [Wildlife Observer Network](https://roadecology.ucdavis.edu/research/projects/wildlife-observer-network), [Zooniverse ML Subject Assistant](https://subject-assistant.zooniverse.org/#/intro), [Dudek AI Image Toolkit](https://ait.dudek.com), [Zamba Cloud](https://github.com/drivendataorg/zamba), [OCAPI](https://www.terroiko.fr/ocapi/), [BoquilaHUB](https://boquila.org/hub)

  * [Peter's map of AddaxAI (formerly EcoAssist) users](https://addaxdatascience.com/addaxai/) (who are also MegaDetector users!)

--- megadetector-10.0.7/megadetector/data_management/cct_json_utils.py
+++ megadetector-10.0.9/megadetector/data_management/cct_json_utils.py
@@ -305,6 +305,9 @@ class SequenceOptions:
  #: How to handle invalid datetimes: 'error' or 'none'
  self.datetime_conversion_failure_behavior = 'none'

+ #: Enable additional debug output
+ self.verbose = False
+

  #%% Functions

@@ -331,7 +334,9 @@ def write_object_with_serialized_datetimes(d,json_fn):
  json.dump(d,f,indent=1,default=json_serialize_datetime)


- def parse_datetimes_from_cct_image_list(images,conversion_failure_behavior='error'):
+ def parse_datetimes_from_cct_image_list(images,
+ conversion_failure_behavior='error',
+ verbose=False):
  """
  Given the "images" field from a COCO camera traps dictionary, converts all
  string-formatted datetime fields to Python datetimes, making reasonable assumptions
@@ -342,6 +347,7 @@ def parse_datetimes_from_cct_image_list(images,conversion_failure_behavior='erro
  conversion_failure_behavior (str, optional): determines what happens on a failed
  conversion; can be "error" (raise an error), "str" (leave as a string), or
  "none" (convert to None)
+ verbose (bool, optional): enable additional debug output

  Returns:
  images: the input list, with datetimes converted (after modifying in place)
@@ -359,14 +365,17 @@ def parse_datetimes_from_cct_image_list(images,conversion_failure_behavior='erro
  dt = dateutil.parser.parse(im['datetime'])
  im['datetime'] = dt
  except Exception as e:
- s = 'could not parse datetime {}: {}'.format(str(im['datetime']),str(e))
+ s = 'could not parse datetime {} from {}: {}'.format(
+ str(im['datetime']),im['file_name'],str(e))
  if conversion_failure_behavior == 'error':
  raise ValueError(s)
  elif conversion_failure_behavior == 'str':
- print('Warning: {}'.format(s))
+ if verbose:
+ print('Warning: {}'.format(s))
  pass
  elif conversion_failure_behavior == 'none':
- print('Warning: {}'.format(s))
+ if verbose:
+ print('Warning: {}'.format(s))
  im['datetime'] = None

  # ...for each image
@@ -450,7 +459,8 @@ def create_sequences(image_info,options=None):

  # Modifies the images in place
  _ = parse_datetimes_from_cct_image_list(image_info,
- conversion_failure_behavior=options.datetime_conversion_failure_behavior)
+ conversion_failure_behavior=options.datetime_conversion_failure_behavior,
+ verbose=options.verbose)

  n_invalid_datetimes = 0
  for im in image_info:
@@ -505,7 +515,7 @@ def create_sequences(image_info,options=None):
  delta = (im['datetime'] - previous_datetime).total_seconds()

  # Start a new sequence if necessary, including the case where this datetime is invalid
- if delta is None or delta > options.episode_interval_seconds or invalid_datetime:
+ if (delta is None) or (delta > options.episode_interval_seconds) or (invalid_datetime):
  next_frame_number = 0
  current_sequence_id = 'location_{}_sequence_index_{}'.format(
  location,str(next_sequence_number).zfill(5))
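The cct_json_utils.py changes add a verbose flag to SequenceOptions and quiet the per-image datetime warnings by default. A minimal usage sketch follows; the input file name is a hypothetical placeholder, and passing the CCT "images" list to create_sequences follows the signatures shown in the hunks above.

```python
# Minimal sketch of the new SequenceOptions.verbose flag; the file name below is
# a hypothetical placeholder.
import json

from megadetector.data_management.cct_json_utils import SequenceOptions, create_sequences

with open('camera_trap_metadata.json', 'r') as f:
    cct = json.load(f)  # a COCO Camera Traps dict with an 'images' list

options = SequenceOptions()
options.datetime_conversion_failure_behavior = 'none'  # unparseable datetimes become None
options.verbose = False                                # new in 10.0.9: suppress datetime warnings

# Modifies the image dicts in place, grouping them into sequences
create_sequences(cct['images'], options=options)
```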
--- megadetector-10.0.7/megadetector/data_management/databases/subset_json_db.py
+++ megadetector-10.0.9/megadetector/data_management/databases/subset_json_db.py
@@ -18,13 +18,20 @@ import json
  import argparse

  from tqdm import tqdm
- from megadetector.utils import ct_utils
  from copy import copy

+ from megadetector.utils import ct_utils
+ from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
+

  #%% Functions

- def subset_json_db(input_json, query, output_json=None, ignore_case=False, verbose=False):
+ def subset_json_db(input_json,
+ query,
+ output_json=None,
+ ignore_case=False,
+ remap_categories=True,
+ verbose=False):
  """
  Given a json file (or dictionary already loaded from a json file), produce a new
  database containing only the images whose filenames contain the string 'query',
@@ -36,6 +43,8 @@ def subset_json_db(input_json, query, output_json=None, ignore_case=False, verbo
  contain this string. If this is a list, test for exact matches.
  output_json (str, optional): file to write the resulting .json file to
  ignore_case (bool, optional): whether to perform a case-insensitive search for [query]
+ remap_categories (bool, optional): trim the category list to only the categores used
+ in the subset
  verbose (bool, optional): enable additional debug output

  Returns:
@@ -92,6 +101,52 @@ def subset_json_db(input_json, query, output_json=None, ignore_case=False, verbo
  output_data['images'] = images
  output_data['annotations'] = annotations

+ # Remap categories if necessary
+ if remap_categories:
+
+ category_ids_used = set()
+ for ann in annotations:
+ category_ids_used.add(ann['category_id'])
+
+ if verbose:
+ print('Keeping {} of {} categories'.format(
+ len(category_ids_used),len(input_data['categories'])))
+
+ input_category_id_to_output_category_id = {}
+
+ next_category_id = 0
+
+ # Build mappings from old to new category IDs
+ for input_category_id in category_ids_used:
+ assert isinstance(input_category_id,int), \
+ 'Illegal category ID {}'.format(input_category_id)
+ output_category_id = next_category_id
+ next_category_id = next_category_id + 1
+ input_category_id_to_output_category_id[input_category_id] = output_category_id
+
+ # Modify the annotations
+ for ann in annotations:
+ assert ann['category_id'] in input_category_id_to_output_category_id
+ ann['category_id'] = input_category_id_to_output_category_id[ann['category_id']]
+
+ output_categories = []
+
+ # Re-write the category table
+ for cat in input_data['categories']:
+
+ if cat['id'] in input_category_id_to_output_category_id:
+
+ # There may be non-required fields, so don't just create an empty dict
+ # and copy the name/id field, keep the original dict other than "id"
+ output_category = copy(cat)
+ output_category['id'] = input_category_id_to_output_category_id[cat['id']]
+ output_categories.append(output_category)
+
+ output_categories = sort_list_of_dicts_by_key(output_categories,'id')
+ output_data['categories'] = output_categories
+
+ # ...if we need to remap categories
+
  # Write the output file if requested
  if output_json is not None:
  if verbose:
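The new remap_categories option (on by default) trims the category table to the categories actually used in the subset and renumbers them from zero. A short usage sketch, with hypothetical file names and query string:

```python
# Sketch of subset_json_db with the new remap_categories option; file names and
# the query string are hypothetical placeholders.
from megadetector.data_management.databases.subset_json_db import subset_json_db

subset = subset_json_db(input_json='all_annotations.json',      # CCT-formatted .json
                        query='site-a/',                        # keep filenames containing this string
                        output_json='site_a_annotations.json',  # also written to disk
                        remap_categories=True,                  # drop unused categories, renumber from 0
                        verbose=True)
```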
--- megadetector-10.0.7/megadetector/detection/pytorch_detector.py
+++ megadetector-10.0.9/megadetector/detection/pytorch_detector.py
@@ -328,7 +328,8 @@ def _initialize_yolo_imports(model_type='yolov5',
  if try_yolov5_import and not utils_imported:

  try:
- from yolov5.utils.general import non_max_suppression, xyxy2xywh # noqa
+ # from yolov5.utils.general import non_max_suppression # type: ignore
+ from yolov5.utils.general import xyxy2xywh # noqa
  from yolov5.utils.augmentations import letterbox # noqa
  try:
  from yolov5.utils.general import scale_boxes as scale_coords
@@ -348,7 +349,8 @@ def _initialize_yolo_imports(model_type='yolov5',

  try:

- from yolov9.utils.general import non_max_suppression, xyxy2xywh # noqa
+ # from yolov9.utils.general import non_max_suppression # noqa
+ from yolov9.utils.general import xyxy2xywh # noqa
  from yolov9.utils.augmentations import letterbox # noqa
  from yolov9.utils.general import scale_boxes as scale_coords # noqa
  utils_imported = True
@@ -378,7 +380,12 @@ def _initialize_yolo_imports(model_type='yolov5',

  try:

- from ultralytics.utils.ops import non_max_suppression # type: ignore # noqa
+ # The non_max_suppression() function moved from the ops module to the nms module
+ # in mid-2025
+ try:
+ from ultralytics.utils.ops import non_max_suppression # type: ignore # noqa
+ except Exception:
+ from ultralytics.utils.nms import non_max_suppression # type: ignore # noqa
  from ultralytics.utils.ops import xyxy2xywh # type: ignore # noqa

  # In the ultralytics package, scale_boxes and scale_coords both exist;
@@ -444,9 +451,9 @@ def _initialize_yolo_imports(model_type='yolov5',
  if verbose:
  print('Imported utils from ultralytics package')

- except Exception:
+ except Exception as e:

- # print('Ultralytics module import failed')
+ print('Ultralytics module import failed: {}'.format(str(e)))
  pass

  # If we haven't succeeded yet, assume the YOLOv5 repo is on our PYTHONPATH.
@@ -455,7 +462,8 @@ def _initialize_yolo_imports(model_type='yolov5',
  try:

  # import pre- and post-processing functions from the YOLOv5 repo
- from utils.general import non_max_suppression, xyxy2xywh # type: ignore
+ # from utils.general import non_max_suppression # type: ignore
+ from utils.general import xyxy2xywh # type: ignore
  from utils.augmentations import letterbox # type: ignore

  # scale_coords() is scale_boxes() in some YOLOv5 versions
@@ -851,6 +859,9 @@ class PTDetector:
  except AttributeError:
  pass

+ # AddaxAI depends on this printout, don't remove it
+ print('PTDetector using device {}'.format(str(self.device).lower()))
+
  try:
  self.model = PTDetector._load_model(model_path,
  device=self.device,
@@ -1283,17 +1294,23 @@ class PTDetector:
  else:
  nms_iou_thres = 0.6

- pred = nms(prediction=pred,
- conf_thres=detection_threshold,
- iou_thres=nms_iou_thres)
+ use_library_nms = False
+
+ # Model output format changed in recent ultralytics packages, and the nms implementation
+ # in this module hasn't been updated to handle that format yet.
+ if (yolo_model_type_imported is not None) and (yolo_model_type_imported == 'ultralytics'):
+ use_library_nms = True

- # For posterity, the ultralytics implementation
- if False:
+ if use_library_nms:
  pred = non_max_suppression(prediction=pred,
- conf_thres=detection_threshold,
- iou_thres=nms_iou_thres,
- agnostic=False,
- multi_label=False)
+ conf_thres=detection_threshold,
+ iou_thres=nms_iou_thres,
+ agnostic=False,
+ multi_label=False)
+ else:
+ pred = nms(prediction=pred,
+ conf_thres=detection_threshold,
+ iou_thres=nms_iou_thres)

  assert isinstance(pred, list)
  assert len(pred) == len(batch_metadata), \
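Most of the pytorch_detector.py changes deal with where non_max_suppression lives across YOLO/ultralytics versions. The fallback introduced above, shown standalone as a sketch (not additional MegaDetector API):

```python
# Standalone sketch of the import fallback introduced above: newer ultralytics
# releases (mid-2025) moved non_max_suppression from ultralytics.utils.ops to
# ultralytics.utils.nms, so the old location is tried first.
try:
    from ultralytics.utils.ops import non_max_suppression   # older ultralytics
except Exception:
    from ultralytics.utils.nms import non_max_suppression   # newer ultralytics

from ultralytics.utils.ops import xyxy2xywh                 # unchanged location
```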
--- megadetector-10.0.7/megadetector/detection/run_detector.py
+++ megadetector-10.0.9/megadetector/detection/run_detector.py
@@ -596,8 +596,7 @@ def load_detector(model_file,
  model_file = try_download_known_detector(model_file,
  force_download=force_model_download)

- if verbose:
- print('GPU available: {}'.format(is_gpu_available(model_file)))
+ print('GPU available: {}'.format(is_gpu_available(model_file)))

  start_time = time.time()

--- megadetector-10.0.7/megadetector/detection/run_detector_batch.py
+++ megadetector-10.0.9/megadetector/detection/run_detector_batch.py
@@ -94,20 +94,29 @@ max_queue_size = 10
  # How often should we print progress when using the image queue?
  n_queue_print = 1000

- # TODO: it's a little sloppy that these are module-level globals, but in practice it
+ # Only used if --include_exif_data or --include_image_timestamp are supplied
+ exif_options = read_exif.ReadExifOptions()
+ exif_options.processing_library = 'pil'
+ exif_options.byte_handling = 'convert_to_string'
+
+ # Only relevant when we're running our test harness; because bugs in batch
+ # inference are dependent on batch grouping, we randomize batch grouping
+ # during testing to maximize the probability that latent bugs come up
+ # eventually.
+ randomize_batch_order_during_testing = True
+
+ # TODO: it's a little sloppy that the following are module-level globals, but in practice it
  # doesn't really matter, so I'm not in a big rush to move these to options until I do
  # a larger cleanup of all the long argument lists in this module.
- #
+
  # Should the consumer loop run on its own process, or here in the main process?
  run_separate_consumer_process = False
- use_threads_for_queue = False
- verbose = False

- exif_options = read_exif.ReadExifOptions()
- exif_options.processing_library = 'pil'
- exif_options.byte_handling = 'convert_to_string'
+ # Should we use threads (rather than processes) for the data loading workers?
+ use_threads_for_queue = False

- randomize_batch_order_during_testing = True
+ # Enable additional debug output
+ verbose = False


  #%% Support functions for multiprocessing
@@ -736,7 +745,9 @@ def _process_batch(image_items_batch,
  try:

  batch_detections = \
- detector.generate_detections_one_batch(valid_images, valid_image_filenames, verbose=verbose)
+ detector.generate_detections_one_batch(valid_images,
+ valid_image_filenames,
+ verbose=verbose)

  assert len(batch_detections) == len(valid_images)

@@ -1050,7 +1061,8 @@ def load_and_run_detector_batch(model_file,
  detector_options=None,
  loader_workers=default_loaders,
  preprocess_on_image_queue=default_preprocess_on_image_queue,
- batch_size=1):
+ batch_size=1,
+ verbose_output=False):
  """
  Load a model file and run it on a list of images.

@@ -1087,6 +1099,7 @@ def load_and_run_detector_batch(model_file,
  preprocess_on_image_queue (bool, optional): if the image queue is enabled, should it handle
  image loading and preprocessing (True), or just image loading (False)?
  batch_size (int, optional): batch size for GPU processing, automatically set to 1 for CPU processing
+ verbose_output (bool, optional): enable additional debug output

  Returns:
  results: list of dicts; each dict represents detections on one image
@@ -1109,6 +1122,11 @@
  if class_mapping_filename is not None:
  _load_custom_class_mapping(class_mapping_filename)

+ global verbose
+ if verbose_output:
+ print('Enabling verbose output')
+ verbose = True
+
  # Handle the case where image_file_names is not yet actually a list
  if isinstance(image_file_names,str):

@@ -1866,11 +1884,7 @@ def main(): # noqa

  args = parser.parse_args()

- global verbose
  global use_threads_for_queue
-
- if args.verbose:
- verbose = True
  if args.use_threads_for_queue:
  use_threads_for_queue = True

@@ -2087,7 +2101,8 @@ def main(): # noqa
  detector_options=detector_options,
  loader_workers=args.loader_workers,
  preprocess_on_image_queue=args.preprocess_on_image_queue,
- batch_size=args.batch_size)
+ batch_size=args.batch_size,
+ verbose_output=args.verbose)

  elapsed = time.time() - start_time
  images_per_second = len(results) / elapsed
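run_detector_batch.py turns the old module-level verbose global into a verbose_output parameter and documents the remaining module-level settings. A hedged usage sketch follows; the 'MDV5A' model name follows the documented convention of passing a known model identifier, the folder and output paths are hypothetical placeholders, and write_results_to_file is assumed to take (results, output_file) as its first two arguments, as in earlier releases.

```python
# Hedged sketch of load_and_run_detector_batch with the new verbose_output
# parameter; paths are hypothetical placeholders.
from megadetector.detection.run_detector_batch import \
    load_and_run_detector_batch, write_results_to_file

results = load_and_run_detector_batch(model_file='MDV5A',                    # known model name
                                       image_file_names='/data/camera-trap-images',
                                       batch_size=8,                         # >1 only matters on GPU
                                       verbose_output=True)                  # replaces the old global

write_results_to_file(results, '/data/md_results.json')
```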
--- megadetector-10.0.7/megadetector/detection/run_inference_with_yolov5_val.py
+++ megadetector-10.0.9/megadetector/detection/run_inference_with_yolov5_val.py
@@ -159,7 +159,9 @@ class YoloInferenceOptions:
  #: These are deliberately offset from the standard MD categories; YOLOv5
  #: needs categories IDs to start at 0.
  #:
- #: This can also be a string that points to a YOLO dataset.yaml file.
+ #: This can also be a string that points to any class mapping file supported
+ #: by read_classes_from_yolo_dataset_file(): a YOLO dataset.yaml file, a text
+ #: file with a list of classes, or a .json file with an ID --> name dict
  self.yolo_category_id_to_name = {0:'animal',1:'person',2:'vehicle'}

  #: What should we do if the output file already exists?
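The docstring change above clarifies that yolo_category_id_to_name accepts either an explicit dict or a path to a class-mapping file. A small sketch; the dataset.yaml path is a hypothetical placeholder:

```python
# Sketch of the two documented ways to set yolo_category_id_to_name; the
# dataset.yaml path is a hypothetical placeholder.
from megadetector.detection.run_inference_with_yolov5_val import YoloInferenceOptions

options = YoloInferenceOptions()

# Explicit mapping (category IDs must start at 0 for YOLOv5)
options.yolo_category_id_to_name = {0: 'animal', 1: 'person', 2: 'vehicle'}

# ...or a class-mapping file (a YOLO dataset.yaml, a text class list, or an
# ID --> name .json), per the updated docstring:
# options.yolo_category_id_to_name = '/data/yolo/dataset.yaml'
```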
--- megadetector-10.0.7/megadetector/detection/run_tiled_inference.py
+++ megadetector-10.0.9/megadetector/detection/run_tiled_inference.py
@@ -39,7 +39,7 @@ from torchvision import ops
  from megadetector.detection.run_inference_with_yolov5_val import \
  YoloInferenceOptions,run_inference_with_yolo_val
  from megadetector.detection.run_detector_batch import \
- load_and_run_detector_batch,write_results_to_file
+ load_and_run_detector_batch,write_results_to_file,default_loaders
  from megadetector.detection.run_detector import \
  try_download_known_detector, CONF_DIGITS, COORD_DIGITS
  from megadetector.utils import path_utils
@@ -406,7 +406,9 @@ def run_tiled_inference(model_file,
  detector_options=None,
  use_image_queue=True,
  preprocess_on_image_queue=True,
- inference_size=None):
+ loader_workers=default_loaders,
+ inference_size=None,
+ verbose=False):
  """
  Runs inference using [model_file] on the images in [image_folder], fist splitting each image up
  into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
@@ -451,16 +453,17 @@ def run_tiled_inference(model_file,
  image_list (list, optional): .json file containing a list of specific images to process. If
  this is supplied, and the paths are absolute, [image_folder] will be ignored. If this is supplied,
  and the paths are relative, they should be relative to [image_folder]
- augment (bool, optional): apply test-time augmentation, only relevant if yolo_inference_options
- is None
+ augment (bool, optional): apply test-time augmentation
  detector_options (dict, optional): parameters to pass to run_detector, only relevant if
  yolo_inference_options is None
  use_image_queue (bool, optional): whether to use a loader worker queue, only relevant if
  yolo_inference_options is None
  preprocess_on_image_queue (bool, optional): whether the image queue should also be responsible
  for preprocessing
+ loader_workers (int, optional): number of preprocessing loader workers to use
  inference_size (int, optional): override the default inference image size, only relevant if
  yolo_inference_options is None
+ verbose (bool, optional): enable additional debug output

  Returns:
  dict: MD-formatted results dictionary, identical to what's written to [output_file]
@@ -522,7 +525,8 @@

  all_image_patch_info = None

- print('Extracting patches from {} images'.format(len(image_files_relative)))
+ print('Extracting patches from {} images on {} workers'.format(
+ len(image_files_relative),n_patch_extraction_workers))

  n_workers = n_patch_extraction_workers

@@ -632,7 +636,9 @@
  detector_options=detector_options,
  use_image_queue=use_image_queue,
  preprocess_on_image_queue=preprocess_on_image_queue,
- image_size=inference_size)
+ image_size=inference_size,
+ verbose_output=verbose,
+ loader_workers=loader_workers)

  patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')

@@ -847,12 +853,12 @@ if False:
  yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')

  run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
- tile_size_x=tile_size_x, tile_size_y=tile_size_y,
- tile_overlap=tile_overlap,
- checkpoint_path=checkpoint_path,
- checkpoint_frequency=checkpoint_frequency,
- remove_tiles=remove_tiles,
- yolo_inference_options=yolo_inference_options)
+ tile_size_x=tile_size_x, tile_size_y=tile_size_y,
+ tile_overlap=tile_overlap,
+ checkpoint_path=checkpoint_path,
+ checkpoint_frequency=checkpoint_frequency,
+ remove_tiles=remove_tiles,
+ yolo_inference_options=yolo_inference_options)


  #%% Run tiled inference (generate a command)
@@ -907,7 +913,10 @@ if False:

  #%% Command-line driver

- def main(): # noqa
+ def main():
+ """
+ Command-line driver for run_tiled_inference
+ """

  parser = argparse.ArgumentParser(
  description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
@@ -927,6 +936,14 @@ def main(): # noqa
  '--no_remove_tiles',
  action='store_true',
  help='Tiles are removed by default; this option suppresses tile deletion')
+ parser.add_argument(
+ '--augment',
+ action='store_true',
+ help='Enable test-time augmentation')
+ parser.add_argument(
+ '--verbose',
+ action='store_true',
+ help='Enable additional debug output')
  parser.add_argument(
  '--tile_size_x',
  type=int,
@@ -956,7 +973,22 @@ def main(): # noqa
  '--detector_options',
  type=str,
  default=None,
- help=('A list of detector options (key-value pairs) to '))
+ help=('A list of detector options (key-value pairs)'))
+ parser.add_argument(
+ '--inference_size',
+ type=int,
+ default=None,
+ help=('Run inference at a non-default size'))
+ parser.add_argument(
+ '--n_patch_extraction_workers',
+ type=int,
+ default=1,
+ help=('Number of workers to use for patch extraction'))
+ parser.add_argument(
+ '--loader_workers',
+ type=int,
+ default=default_loaders,
+ help=('Number of workers to use for image loading and preprocessing (0 to disable)'))

  # detector_options = parse_kvp_list(args.detector_options)

@@ -984,11 +1016,23 @@

  remove_tiles = (not args.no_remove_tiles)

- run_tiled_inference(model_file, args.image_folder, args.tiling_folder, args.output_file,
- tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
+ use_image_queue = (args.loader_workers > 0)
+
+ run_tiled_inference(model_file,
+ args.image_folder,
+ args.tiling_folder,
+ args.output_file,
+ tile_size_x=args.tile_size_x,
+ tile_size_y=args.tile_size_y,
  tile_overlap=args.tile_overlap,
  remove_tiles=remove_tiles,
- image_list=args.image_list)
+ image_list=args.image_list,
+ augment=args.augment,
+ inference_size=args.inference_size,
+ verbose=args.verbose,
+ n_patch_extraction_workers=args.n_patch_extraction_workers,
+ loader_workers=args.loader_workers,
+ use_image_queue=use_image_queue)

  if __name__ == '__main__':
  main()
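Putting the new run_tiled_inference parameters together, here is a hedged sketch of a programmatic call that mirrors the new command-line flags; the model name, folder paths, and tile sizes are hypothetical placeholders.

```python
# Hedged sketch of run_tiled_inference with the parameters surfaced in 10.0.9;
# all paths and sizes are hypothetical placeholders.
from megadetector.detection.run_tiled_inference import run_tiled_inference

run_tiled_inference('MDV5A',                      # model file or known model name
                    '/data/images',               # folder of large images
                    '/data/tiling-scratch',       # scratch folder for the tiles
                    '/data/tiled_results.json',   # MD-formatted output file
                    tile_size_x=1280,
                    tile_size_y=1280,
                    loader_workers=4,             # new: preprocessing loader workers
                    inference_size=1280,          # now also exposed as --inference_size
                    verbose=True)                 # new: extra debug output
```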
--- megadetector-10.0.7/megadetector/detection/video_utils.py
+++ megadetector-10.0.9/megadetector/detection/video_utils.py
@@ -22,6 +22,7 @@ from functools import partial
  from inspect import signature

  from megadetector.utils import path_utils
+ from megadetector.utils.path_utils import clean_path
  from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
  from megadetector.visualization import visualization_utils as vis_utils

@@ -592,7 +593,7 @@ def video_to_frames(input_video_file,
  quality=None,
  max_width=None,
  frames_to_extract=None,
- allow_empty_videos=False):
+ allow_empty_videos=True):
  """
  Renders frames from [input_video_file] to .jpg files in [output_folder].

@@ -618,8 +619,8 @@ def video_to_frames(input_video_file,
  a single frame number. In the special case where frames_to_extract
  is [], this function still reads video frame rates and verifies that videos
  are readable, but no frames are extracted.
- allow_empty_videos (bool, optional): Just print a warning if a video appears to have no
- frames (by default, this is an error).
+ allow_empty_videos (bool, optional): Just print a warning if a video appears to have
+ no frames (by default, this is an error).

  Returns:
  tuple: length-2 tuple containing (list of frame filenames,frame rate)
@@ -883,7 +884,14 @@ def _video_to_frames_for_folder(relative_fn,input_folder,output_folder_base,

  # Create the target output folder
  output_folder_video = os.path.join(output_folder_base,relative_fn)
- os.makedirs(output_folder_video,exist_ok=True)
+ try:
+ os.makedirs(output_folder_video,exist_ok=True)
+ except Exception:
+ output_folder_clean = clean_path(output_folder_video)
+ print('Warning: failed to create folder {}, trying {}'.format(
+ output_folder_video,output_folder_clean))
+ output_folder_video = output_folder_clean
+ os.makedirs(output_folder_video,exist_ok=True)

  # Render frames
  # input_video_file = input_fn_absolute; output_folder = output_folder_video
@@ -1090,6 +1098,9 @@ class FrameToVideoOptions:
  #: Are frame rates required?
  self.frame_rates_are_required = False

+ #: Enable additional debug output
+ self.verbose = False
+

  def frame_results_to_video_results(input_file,
  output_file,
@@ -1176,9 +1187,14 @@ def frame_results_to_video_results(input_file,

  if (video_filename_to_frame_rate is not None):

- if options.frame_rates_are_required:
- assert video_name in video_filename_to_frame_rate, \
- 'Could not determine frame rate for {}'.format(video_name)
+ if video_name not in video_filename_to_frame_rate:
+
+ s = 'Could not determine frame rate for {}'.format(video_name)
+ if options.frame_rates_are_required:
+ raise ValueError(s)
+ elif options.verbose:
+ print('Warning: {}'.format(s))
+
  if video_name in video_filename_to_frame_rate:
  im_out['frame_rate'] = video_filename_to_frame_rate[video_name]
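The video postprocessing changes make missing frame rates non-fatal unless frame_rates_are_required is set, and add a verbose switch for the resulting warnings. A hedged sketch; the result file names are hypothetical placeholders, and the options keyword on frame_results_to_video_results is assumed to follow the module's usual options-object pattern.

```python
# Hedged sketch of the new FrameToVideoOptions.verbose flag; file names are
# hypothetical placeholders.
from megadetector.detection.video_utils import \
    FrameToVideoOptions, frame_results_to_video_results

options = FrameToVideoOptions()
options.frame_rates_are_required = False   # missing frame rates raise an error only if True
options.verbose = True                     # otherwise they are reported as warnings when verbose

frame_results_to_video_results('frame_level_results.json',
                               'video_level_results.json',
                               options=options)
```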
--- megadetector-10.0.7/megadetector/postprocessing/classification_postprocessing.py
+++ megadetector-10.0.9/megadetector/postprocessing/classification_postprocessing.py
@@ -130,7 +130,7 @@ class ClassificationSmoothingOptions:

  ## Populated internally

- #: #: Only include these categories in the smoothing process (None to use all categories)
+ #: Only include these categories in the smoothing process (None to use all categories)
  self._detection_category_ids_to_smooth = None


@@ -1015,6 +1015,10 @@ def smooth_classification_results_sequence_level(input_file,

  detections_this_sequence = []
  for image_filename in image_filenames_this_sequence:
+ if image_filename not in image_fn_to_classification_results:
+ print('Warning: {} in sequence list but not in results'.format(
+ image_filename))
+ continue
  im = image_fn_to_classification_results[image_filename]
  if 'detections' not in im or im['detections'] is None:
  continue