megadetector 10.0.3.tar.gz → 10.0.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (152)
  1. {megadetector-10.0.3/megadetector.egg-info → megadetector-10.0.4}/PKG-INFO +1 -1
  2. {megadetector-10.0.3 → megadetector-10.0.4}/README.md +1 -1
  3. megadetector-10.0.4/megadetector/data_management/animl_to_md.py +158 -0
  4. megadetector-10.0.4/megadetector/data_management/zamba_to_md.py +188 -0
  5. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/process_video.py +52 -40
  6. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/pytorch_detector.py +24 -34
  7. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/run_detector_batch.py +138 -93
  8. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/run_md_and_speciesnet.py +22 -4
  9. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/video_utils.py +5 -4
  10. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/combine_batch_outputs.py +2 -0
  11. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/separate_detections_into_folders.py +1 -1
  12. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/subset_json_detector_output.py +1 -3
  13. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/ct_utils.py +53 -0
  14. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/md_tests.py +8 -7
  15. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/path_utils.py +4 -15
  16. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/visualization_utils.py +1 -1
  17. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/visualize_detector_output.py +6 -4
  18. {megadetector-10.0.3 → megadetector-10.0.4/megadetector.egg-info}/PKG-INFO +1 -1
  19. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector.egg-info/SOURCES.txt +2 -0
  20. {megadetector-10.0.3 → megadetector-10.0.4}/pyproject.toml +1 -1
  21. {megadetector-10.0.3 → megadetector-10.0.4}/LICENSE +0 -0
  22. {megadetector-10.0.3 → megadetector-10.0.4}/README-package.md +0 -0
  23. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/__init__.py +0 -0
  24. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/api/__init__.py +0 -0
  25. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/api/batch_processing/integration/digiKam/setup.py +0 -0
  26. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +0 -0
  27. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -0
  28. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -0
  29. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -0
  30. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/__init__.py +0 -0
  31. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/aggregate_classifier_probs.py +0 -0
  32. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/analyze_failed_images.py +0 -0
  33. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/cache_batchapi_outputs.py +0 -0
  34. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/create_classification_dataset.py +0 -0
  35. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/crop_detections.py +0 -0
  36. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/csv_to_json.py +0 -0
  37. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/detect_and_crop.py +0 -0
  38. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/efficientnet/__init__.py +0 -0
  39. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/efficientnet/model.py +0 -0
  40. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/efficientnet/utils.py +0 -0
  41. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/evaluate_model.py +0 -0
  42. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/identify_mislabeled_candidates.py +0 -0
  43. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/json_to_azcopy_list.py +0 -0
  44. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/json_validator.py +0 -0
  45. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/map_classification_categories.py +0 -0
  46. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/merge_classification_detection_output.py +0 -0
  47. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/prepare_classification_script.py +0 -0
  48. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/prepare_classification_script_mc.py +0 -0
  49. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/run_classifier.py +0 -0
  50. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/save_mislabeled.py +0 -0
  51. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/train_classifier.py +0 -0
  52. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/train_classifier_tf.py +0 -0
  53. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/classification/train_utils.py +0 -0
  54. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/__init__.py +0 -0
  55. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/annotations/__init__.py +0 -0
  56. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/annotations/annotation_constants.py +0 -0
  57. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/camtrap_dp_to_coco.py +0 -0
  58. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/cct_json_utils.py +0 -0
  59. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/cct_to_md.py +0 -0
  60. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/cct_to_wi.py +0 -0
  61. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/coco_to_labelme.py +0 -0
  62. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/coco_to_yolo.py +0 -0
  63. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/databases/__init__.py +0 -0
  64. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/databases/add_width_and_height_to_db.py +0 -0
  65. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/databases/combine_coco_camera_traps_files.py +0 -0
  66. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/databases/integrity_check_json_db.py +0 -0
  67. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/databases/subset_json_db.py +0 -0
  68. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/generate_crops_from_cct.py +0 -0
  69. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/get_image_sizes.py +0 -0
  70. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/labelme_to_coco.py +0 -0
  71. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/labelme_to_yolo.py +0 -0
  72. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/__init__.py +0 -0
  73. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/create_lila_blank_set.py +0 -0
  74. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/create_lila_test_set.py +0 -0
  75. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/create_links_to_md_results_files.py +0 -0
  76. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/download_lila_subset.py +0 -0
  77. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/generate_lila_per_image_labels.py +0 -0
  78. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/get_lila_annotation_counts.py +0 -0
  79. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/get_lila_image_counts.py +0 -0
  80. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/lila_common.py +0 -0
  81. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/lila/test_lila_metadata_urls.py +0 -0
  82. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/mewc_to_md.py +0 -0
  83. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/ocr_tools.py +0 -0
  84. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/read_exif.py +0 -0
  85. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/remap_coco_categories.py +0 -0
  86. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/remove_exif.py +0 -0
  87. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/rename_images.py +0 -0
  88. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/resize_coco_dataset.py +0 -0
  89. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/speciesnet_to_md.py +0 -0
  90. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/wi_download_csv_to_coco.py +0 -0
  91. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/yolo_output_to_md_output.py +0 -0
  92. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/data_management/yolo_to_coco.py +0 -0
  93. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/__init__.py +0 -0
  94. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/change_detection.py +0 -0
  95. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/run_detector.py +0 -0
  96. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/run_inference_with_yolov5_val.py +0 -0
  97. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/run_tiled_inference.py +0 -0
  98. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/detection/tf_detector.py +0 -0
  99. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/__init__.py +0 -0
  100. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/add_max_conf.py +0 -0
  101. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/categorize_detections_by_size.py +0 -0
  102. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/classification_postprocessing.py +0 -0
  103. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/compare_batch_results.py +0 -0
  104. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/convert_output_format.py +0 -0
  105. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/create_crop_folder.py +0 -0
  106. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/detector_calibration.py +0 -0
  107. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/generate_csv_report.py +0 -0
  108. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/load_api_results.py +0 -0
  109. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/md_to_coco.py +0 -0
  110. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/md_to_labelme.py +0 -0
  111. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/md_to_wi.py +0 -0
  112. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/merge_detections.py +0 -0
  113. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/postprocess_batch_results.py +0 -0
  114. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/remap_detection_categories.py +0 -0
  115. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/render_detection_confusion_matrix.py +0 -0
  116. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -0
  117. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -0
  118. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -0
  119. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/top_folders_to_bottom.py +0 -0
  120. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/postprocessing/validate_batch_results.py +0 -0
  121. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/__init__.py +0 -0
  122. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -0
  123. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/map_new_lila_datasets.py +0 -0
  124. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -0
  125. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/preview_lila_taxonomy.py +0 -0
  126. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/retrieve_sample_image.py +0 -0
  127. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/simple_image_download.py +0 -0
  128. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/species_lookup.py +0 -0
  129. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/taxonomy_csv_checker.py +0 -0
  130. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/taxonomy_graph.py +0 -0
  131. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/taxonomy_mapping/validate_lila_category_mappings.py +0 -0
  132. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/tests/__init__.py +0 -0
  133. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/tests/test_nms_synthetic.py +0 -0
  134. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/__init__.py +0 -0
  135. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/directory_listing.py +0 -0
  136. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/extract_frames_from_video.py +0 -0
  137. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/gpu_test.py +0 -0
  138. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/process_utils.py +0 -0
  139. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/split_locations_into_train_val.py +0 -0
  140. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/string_utils.py +0 -0
  141. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/url_utils.py +0 -0
  142. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/wi_utils.py +0 -0
  143. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/utils/write_html_image_list.py +0 -0
  144. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/__init__.py +0 -0
  145. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/plot_utils.py +0 -0
  146. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/render_images_with_thumbnails.py +0 -0
  147. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/visualize_db.py +0 -0
  148. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector/visualization/visualize_video_output.py +0 -0
  149. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector.egg-info/dependency_links.txt +0 -0
  150. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector.egg-info/requires.txt +0 -0
  151. {megadetector-10.0.3 → megadetector-10.0.4}/megadetector.egg-info/top_level.txt +0 -0
  152. {megadetector-10.0.3 → megadetector-10.0.4}/setup.cfg +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: megadetector
- Version: 10.0.3
+ Version: 10.0.4
  Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
  Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
  Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>

README.md

@@ -274,7 +274,7 @@ Code written on or before April 28, 2023 is [copyright Microsoft](https://github
 
  ## Contributing
 
- This project welcomes contributions, as pull requests, issues, or suggestions by [email](mailto:cameratraps@lila.science). We have a [list](https://github.com/agentmorris/MegaDetector/issues/84) of issues that we're hoping to address, many of which would be good starting points for new contributors. We also depend on other open-source tools that help users run MegaDetector (particularly [AddaxAI](https://github.com/PetervanLunteren/AddaxAI) (formerly EcoAssist), and open-source tools that help users work with MegaDetector results (particularly [Timelapse](https://github.com/saulgreenberg/Timelapse)). If you are looking to get involved in GUI development, reach out to the developers of those tools as well!
+ This project welcomes contributions, as pull requests, issues, or suggestions by [email](mailto:cameratraps@lila.science). We have a [list](https://dmorris.net/task-viewer/?file=https://raw.githubusercontent.com/agentmorris/MegaDetector/refs/heads/main/TODO.md) of issues that we're hoping to address, many of which would be good starting points for new contributors. We also depend on other open-source tools that help users run MegaDetector (particularly [AddaxAI](https://github.com/PetervanLunteren/AddaxAI) (formerly EcoAssist), and open-source tools that help users work with MegaDetector results (particularly [Timelapse](https://github.com/saulgreenberg/Timelapse)). If you are looking to get involved in GUI development, reach out to the developers of those tools as well!
 
  If you are interesting in getting involved in the conservation technology space, and MegaDetector just happens to be the first page you landed on, and none of our open issues are getting you fired up, don't fret! Head over to the [WILDLABS discussion forums](https://wildlabs.net/discussions) and let the community know you're a developer looking to get involved. Someone needs your help!
 

megadetector/data_management/animl_to_md.py (new file)

@@ -0,0 +1,158 @@
+ """
+ 
+ animl_to_md.py
+ 
+ Convert a .csv file produced by the Animl package:
+ 
+ https://github.com/conservationtechlab/animl-py
+ 
+ ...to a MD results file suitable for import into Timelapse.
+ 
+ Columns are expected to be:
+ 
+ file
+ category (MD category identifies: 1==animal, 2==person, 3==vehicle)
+ detection_conf
+ bbox1,bbox2,bbox3,bbox4
+ class
+ classification_conf
+ 
+ """
+ 
+ #%% Imports and constants
+ 
+ import sys
+ import argparse
+ 
+ import pandas as pd
+ 
+ from megadetector.utils.ct_utils import write_json
+ from megadetector.detection.run_detector import DEFAULT_DETECTOR_LABEL_MAP
+ detection_category_id_to_name = DEFAULT_DETECTOR_LABEL_MAP
+ 
+ 
+ #%% Main function
+ 
+ def animl_results_to_md_results(input_file,output_file=None):
+     """
+     Converts the .csv file [input_file] to the MD-formatted .json file [output_file].
+ 
+     If [output_file] is None, '.json' will be appended to the input file.
+     """
+ 
+     if output_file is None:
+         output_file = input_file + '.json'
+ 
+     df = pd.read_csv(input_file)
+ 
+     expected_columns = ('file','category','detection_conf',
+                         'bbox1','bbox2','bbox3','bbox4','class','classification_conf')
+ 
+     for s in expected_columns:
+         assert s in df.columns,\
+             'Expected column {} not found'.format(s)
+ 
+     classification_category_name_to_id = {}
+     filename_to_results = {}
+ 
+     # i_row = 0; row = df.iloc[i_row]
+     for i_row,row in df.iterrows():
+ 
+         # Is this the first detection we've seen for this file?
+         if row['file'] in filename_to_results:
+             im = filename_to_results[row['file']]
+         else:
+             im = {}
+             im['detections'] = []
+             im['file'] = row['file']
+             filename_to_results[im['file']] = im
+ 
+         assert isinstance(row['category'],int),'Invalid category identifier in row {}'.format(im['file'])
+         detection_category_id = str(row['category'])
+         assert detection_category_id in detection_category_id_to_name,\
+             'Unrecognized detection category ID {}'.format(detection_category_id)
+ 
+         detection = {}
+         detection['category'] = detection_category_id
+         detection['conf'] = row['detection_conf']
+         bbox = [row['bbox1'],row['bbox2'],row['bbox3'],row['bbox4']]
+         detection['bbox'] = bbox
+         classification_category_name = row['class']
+ 
+         # Have we seen this classification category before?
+         if classification_category_name in classification_category_name_to_id:
+             classification_category_id = \
+                 classification_category_name_to_id[classification_category_name]
+         else:
+             classification_category_id = str(len(classification_category_name_to_id))
+             classification_category_name_to_id[classification_category_name] = \
+                 classification_category_id
+ 
+         classifications = [[classification_category_id,row['classification_conf']]]
+         detection['classifications'] = classifications
+ 
+         im['detections'].append(detection)
+ 
+     # ...for each row
+ 
+     info = {}
+     info['format_version'] = '1.3'
+     info['detector'] = 'Animl'
+     info['classifier'] = 'Animl'
+ 
+     results = {}
+     results['info'] = info
+     results['detection_categories'] = detection_category_id_to_name
+     results['classification_categories'] = \
+         {v: k for k, v in classification_category_name_to_id.items()}
+     results['images'] = list(filename_to_results.values())
+ 
+     write_json(output_file,results)
+ 
+ # ...animl_results_to_md_results(...)
+ 
+ 
+ #%% Interactive driver
+ 
+ if False:
+ 
+     pass
+ 
+     #%%
+ 
+     input_file = r"G:\temp\animl-runs\animl-runs\Coati_v2\manifest.csv"
+     output_file = None
+     animl_results_to_md_results(input_file,output_file)
+ 
+ 
+ #%% Command-line driver
+ 
+ def main():
+     """
+     Command-line driver for animl_to_md
+     """
+ 
+     parser = argparse.ArgumentParser(
+         description='Convert an Animl-formatted .csv results file to MD-formatted .json results file')
+ 
+     parser.add_argument(
+         'input_file',
+         type=str,
+         help='input .csv file')
+ 
+     parser.add_argument(
+         '--output_file',
+         type=str,
+         default=None,
+         help='output .json file (defaults to input file appended with ".json")')
+ 
+     if len(sys.argv[1:]) == 0:
+         parser.print_help()
+         parser.exit()
+ 
+     args = parser.parse_args()
+ 
+     animl_results_to_md_results(args.input_file,args.output_file)
+ 
+ if __name__ == '__main__':
+     main()
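
For orientation, here is a minimal sketch of how the new Animl converter might be called from Python; the manifest path and output name are illustrative, not taken from the release.

    # Hypothetical usage of the new converter (the path is illustrative)
    from megadetector.data_management.animl_to_md import animl_results_to_md_results

    # With output_file=None, this writes manifest.csv.json (MD results format) next to the input .csv
    animl_results_to_md_results('animl-output/manifest.csv', output_file=None)

Because the module defines a command-line driver, the same conversion should also be available via "python -m megadetector.data_management.animl_to_md manifest.csv --output_file md_results.json".
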
megadetector/data_management/zamba_to_md.py (new file)

@@ -0,0 +1,188 @@
+ """
+ 
+ zamba_to_md.py
+ 
+ Convert a labels.csv file produced by Zamba Cloud to a MD results file suitable
+ for import into Timelapse.
+ 
+ Columns are expected to be:
+ 
+ video_uuid (not used)
+ original_filename (assumed to be a relative path name)
+ top_k_label,top_k_probability, for k = 1..N
+ [category name 1],[category name 2],...
+ corrected_label
+ 
+ Because the MD results file fundamentally stores detections, what we'll
+ actually do is create bogus detections that fill the entire image.
+ 
+ There is no special handling of empty/blank categories; because these results are
+ based on a classifier, rather than a detector (where "blank" would be the absence of
+ all other categories), "blank" can be queried in Timelapse just like any other class.
+ 
+ """
+ 
+ #%% Imports and constants
+ 
+ import sys
+ import argparse
+ 
+ import pandas as pd
+ 
+ from megadetector.utils.ct_utils import write_json
+ 
+ 
+ #%% Main function
+ 
+ def zamba_results_to_md_results(input_file,output_file=None):
+     """
+     Converts the .csv file [input_file] to the MD-formatted .json file [output_file].
+ 
+     If [output_file] is None, '.json' will be appended to the input file.
+ 
+     Args:
+         input_file (str): the .csv file to convert
+         output_file (str, optional): the output .json file (defaults to
+             [input_file].json)
+     """
+ 
+     if output_file is None:
+         output_file = input_file + '.json'
+ 
+     df = pd.read_csv(input_file)
+ 
+     expected_columns = ('video_uuid','corrected_label','original_filename')
+     for s in expected_columns:
+         assert s in df.columns,\
+             'Expected column {} not found, are you sure this is a Zamba results .csv file?'.format(
+                 s)
+ 
+     # How many results are included per file?
+     assert 'top_1_probability' in df.columns and 'top_1_label' in df.columns
+     top_k = 2
+     while(True):
+         p_string = 'top_' + str(top_k) + '_probability'
+         label_string = 'top_' + str(top_k) + '_label'
+ 
+         if p_string in df.columns:
+             assert label_string in df.columns,\
+                 'Oops, {} is a column but {} is not'.format(
+                     p_string,label_string)
+             top_k += 1
+             continue
+         else:
+             assert label_string not in df.columns,\
+                 'Oops, {} is a column but {} is not'.format(
+                     label_string,p_string)
+             top_k -= 1
+             break
+ 
+     print('Found {} probability column pairs'.format(top_k))
+ 
+     # Category names start after the fixed columns and the probability columns
+     category_names = []
+     column_names = list(df.columns)
+     first_category_name_index = 0
+     while('top_' in column_names[first_category_name_index] or \
+           column_names[first_category_name_index] in expected_columns):
+         first_category_name_index += 1
+ 
+     i_column = first_category_name_index
+     while( (i_column < len(column_names)) and (column_names[i_column] != 'corrected_label') ):
+         category_names.append(column_names[i_column])
+         i_column += 1
+ 
+     print('Found {} categories:\n'.format(len(category_names)))
+ 
+     for s in category_names:
+         print(s)
+ 
+     info = {}
+     info['format_version'] = '1.3'
+     info['detector'] = 'Zamba Cloud'
+     info['classifier'] = 'Zamba Cloud'
+ 
+     detection_category_id_to_name = {}
+     for category_id,category_name in enumerate(category_names):
+         detection_category_id_to_name[str(category_id)] = category_name
+     detection_category_name_to_id = {v: k for k, v in detection_category_id_to_name.items()}
+ 
+     images = []
+ 
+     # i_row = 0; row = df.iloc[i_row]
+     for i_row,row in df.iterrows():
+ 
+         im = {}
+         images.append(im)
+         im['file'] = row['original_filename']
+ 
+         detections = []
+ 
+         # k = 1
+         for k in range(1,top_k+1):
+             label = row['top_{}_label'.format(k)]
+             confidence = row['top_{}_probability'.format(k)]
+             det = {}
+             det['category'] = detection_category_name_to_id[label]
+             det['conf'] = confidence
+             det['bbox'] = [0,0,1.0,1.0]
+             detections.append(det)
+ 
+         im['detections'] = detections
+ 
+     # ...for each row
+ 
+     results = {}
+     results['info'] = info
+     results['detection_categories'] = detection_category_id_to_name
+     results['images'] = images
+ 
+     write_json(output_file,results)
+ 
+ # ...zamba_results_to_md_results(...)
+ 
+ 
+ #%% Interactive driver
+ 
+ if False:
+ 
+     pass
+ 
+     #%%
+ 
+     input_file = r"G:\temp\labels-job-b95a4b76-e332-4e17-ab40-03469392d36a-2023-11-04_16-28-50.060130.csv"
+     output_file = None
+     zamba_results_to_md_results(input_file,output_file)
+ 
+ 
+ #%% Command-line driver
+ 
+ def main():
+     """
+     Command-line driver for zamba_to_md
+     """
+ 
+     parser = argparse.ArgumentParser(
+         description='Convert a Zamba-formatted .csv results file to a MD-formatted .json results file')
+ 
+     parser.add_argument(
+         'input_file',
+         type=str,
+         help='input .csv file')
+ 
+     parser.add_argument(
+         '--output_file',
+         type=str,
+         default=None,
+         help='output .json file (defaults to input file appended with ".json")')
+ 
+     if len(sys.argv[1:]) == 0:
+         parser.print_help()
+         parser.exit()
+ 
+     args = parser.parse_args()
+ 
+     zamba_results_to_md_results(args.input_file,args.output_file)
+ 
+ if __name__ == '__main__':
+     main()
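
Similarly, a minimal sketch of converting a Zamba Cloud labels file from Python; the labels path and output name are illustrative.

    # Hypothetical usage of the new converter (paths are illustrative)
    from megadetector.data_management.zamba_to_md import zamba_results_to_md_results

    # Each video gets one full-frame pseudo-detection per top-k class, so the
    # classifier output can be filtered in Timelapse like any other MD result
    zamba_results_to_md_results('zamba-output/labels.csv', output_file='zamba_md_results.json')
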
megadetector/detection/process_video.py

@@ -22,11 +22,16 @@ from megadetector.detection import run_detector_batch
  from megadetector.utils.ct_utils import args_to_object
  from megadetector.utils.ct_utils import dict_to_kvp_list, parse_kvp_list
  from megadetector.detection.video_utils import _filename_to_frame_number
+ from megadetector.detection.video_utils import find_videos
  from megadetector.detection.video_utils import run_callback_on_frames_for_folder
  from megadetector.detection.run_detector import load_detector
  from megadetector.postprocessing.validate_batch_results import \
  ValidateBatchResultsOptions, validate_batch_results
 
+ # Notes to self re: upcoming work on checkpointing
+ from megadetector.utils.ct_utils import split_list_into_fixed_size_chunks # noqa
+ from megadetector.detection.run_detector_batch import write_checkpoint, load_checkpoint # noqa
+ 
 
  #%% Classes
 
@@ -68,16 +73,6 @@ class ProcessVideoOptions:
  #: Sample frames every N seconds. Mutually exclusive with [frame_sample]
  self.time_sample = None
 
- #: Number of workers to use for parallelization; set to <= 1 to disable parallelization
- self.n_cores = 1
- 
- #: For debugging only, stop processing after a certain number of frames.
- self.debug_max_frames = -1
- 
- #: File containing non-standard categories, typically only used if you're running a non-MD
- #: detector.
- self.class_mapping_filename = None
- 
  #: Run the model at this image size (don't mess with this unless you know what you're
  #: getting into)... if you just want to pass smaller frames to MD, use max_width
  self.image_size = None
@@ -92,6 +87,17 @@ class ProcessVideoOptions:
  #: Detector-specific options
  self.detector_options = None
 
+ #: Write a checkpoint file (to resume processing later) every N videos;
+ #: set to -1 (default) to disable checkpointing
+ self.checkpoint_frequency = -1
+ 
+ #: Path to checkpoint file; None (default) for auto-generation based on output filename
+ self.checkpoint_path = None
+ 
+ #: Resume from a checkpoint file, or "auto" to use the most recent checkpoint in the
+ #: output directory
+ self.resume_from_checkpoint = None
+ 
 
  # ...class ProcessVideoOptions
 
@@ -137,7 +143,7 @@ def process_videos(options):
  every_n_frames_param = options.frame_sample
 
  if options.verbose:
- print('Running MegaDetector for folder {}'.format(options.input_video_file))
+ print('Processing videos from input source {}'.format(options.input_video_file))
 
  detector = load_detector(options.model_file,detector_options=options.detector_options)
 
@@ -145,7 +151,9 @@
  return detector.generate_detections_one_image(image_np,
  image_id,
  detection_threshold=options.json_confidence_threshold,
- augment=options.augment)
+ augment=options.augment,
+ image_size=options.image_size,
+ verbose=options.verbose)
 
  """
  [md_results] will be dict with keys 'video_filenames' (list of str), 'frame_rates' (list of floats),
@@ -164,7 +172,8 @@
  frame_callback=frame_callback,
  every_n_frames=every_n_frames_param,
  verbose=options.verbose,
- files_to_process_relative=[video_bn])
+ files_to_process_relative=[video_bn],
+ allow_empty_videos=options.allow_empty_videos)
 
  else:
 
@@ -172,10 +181,15 @@
  '{} is neither a file nor a folder'.format(options.input_video_file)
 
  video_folder = options.input_video_file
+ 
  md_results = run_callback_on_frames_for_folder(input_video_folder=options.input_video_file,
  frame_callback=frame_callback,
  every_n_frames=every_n_frames_param,
- verbose=options.verbose)
+ verbose=options.verbose,
+ recursive=options.recursive,
+ allow_empty_videos=options.allow_empty_videos)
+ 
+ # ...whether we're processing a file or a folder
 
  print('Finished running MD on videos')
 
@@ -231,12 +245,12 @@
 
  # ...was this a failed video?
 
+ im['frames_processed'] = sorted(im['frames_processed'])
+ 
  video_list_md_format.append(im)
 
  # ...for each video
 
- im['frames_processed'] = sorted(im['frames_processed'])
- 
  run_detector_batch.write_results_to_file(
  video_list_md_format,
  options.output_json_file,
@@ -276,14 +290,8 @@ def options_to_command(options):
  cmd += ' --output_json_file' + ' "' + options.output_json_file + '"'
  if options.json_confidence_threshold is not None:
  cmd += ' --json_confidence_threshold ' + str(options.json_confidence_threshold)
- if options.n_cores is not None:
- cmd += ' --n_cores ' + str(options.n_cores)
  if options.frame_sample is not None:
  cmd += ' --frame_sample ' + str(options.frame_sample)
- if options.debug_max_frames is not None:
- cmd += ' --debug_max_frames ' + str(options.debug_max_frames)
- if options.class_mapping_filename is not None:
- cmd += ' --class_mapping_filename ' + str(options.class_mapping_filename)
  if options.verbose:
  cmd += ' --verbose'
  if options.detector_options is not None and len(options.detector_options) > 0:
@@ -384,13 +392,6 @@ def main(): # noqa
  'below this threshold (default {})'.format(
  default_options.json_confidence_threshold))
 
- parser.add_argument('--n_cores', type=int,
- default=default_options.n_cores,
- help='Number of cores to use for frame separation and detection. '\
- 'If using a GPU, this option will be respected for frame separation but '\
- 'ignored for detection. Only relevant to frame separation when processing '\
- 'a folder. Default {}.'.format(default_options.n_cores))
- 
  parser.add_argument('--frame_sample', type=int,
  default=None, help='process every Nth frame (defaults to every frame), mutually exclusive '\
  'with --time_sample.')
@@ -400,17 +401,6 @@ def main(): # noqa
  'frame sampling rate, so it may not be exactly the requested interval in seconds. '\
  'mutually exclusive with --frame_sample')
 
- parser.add_argument('--debug_max_frames', type=int,
- default=-1, help='Trim to N frames for debugging (impacts model execution, '\
- 'not frame rendering)')
- 
- parser.add_argument('--class_mapping_filename',
- type=str,
- default=None, help='Use a non-default class mapping, supplied in a .json file '\
- 'with a dictionary mapping int-strings to strings. This will also disable '\
- 'the addition of "1" to all category IDs, so your class mapping should start '\
- 'at zero.')
- 
  parser.add_argument('--verbose', action='store_true',
  help='Enable additional debug output')
 
@@ -435,6 +425,28 @@ def main(): # noqa
  default='',
  help='Detector-specific options, as a space-separated list of key-value pairs')
 
+ parser.add_argument(
+ '--checkpoint_frequency',
+ type=int,
+ default=default_options.checkpoint_frequency,
+ help='Write a checkpoint file (to resume processing later) every N videos; ' + \
+ 'set to -1 to disable checkpointing (default {})'.format(
+ default_options.checkpoint_frequency))
+ 
+ parser.add_argument(
+ '--checkpoint_path',
+ type=str,
+ default=None,
+ help='Path to checkpoint file; defaults to a file in the same directory ' + \
+ 'as the output file')
+ 
+ parser.add_argument(
+ '--resume_from_checkpoint',
+ type=str,
+ default=None,
+ help='Resume from a specific checkpoint file, or "auto" to resume from the ' + \
+ 'most recent checkpoint in the output directory')
+ 
  if len(sys.argv[1:]) == 0:
  parser.print_help()
  parser.exit()

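The new checkpointing flags are intended to make long video runs resumable. A sketch of how they might be combined on the command line follows; the positional model and input arguments, the paths, and the sampling interval are assumptions based on the existing interface, while the checkpoint flags themselves come from this diff.

    # Hypothetical invocation (model name, folder, and sampling interval are illustrative)
    python -m megadetector.detection.process_video MDV5A /data/videos \
        --output_json_file /data/videos-md-results.json \
        --time_sample 2 \
        --checkpoint_frequency 10 \
        --resume_from_checkpoint auto
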
megadetector/detection/pytorch_detector.py

@@ -27,6 +27,7 @@ from megadetector.detection.run_detector import \
  get_detector_version_from_model_file, \
  known_models
  from megadetector.utils.ct_utils import parse_bool_string
+ from megadetector.utils.ct_utils import is_running_in_gha
  from megadetector.utils import ct_utils
  import torchvision
 
@@ -840,10 +841,16 @@ class PTDetector:
  self.device = torch.device('cuda:0')
  try:
  if torch.backends.mps.is_built and torch.backends.mps.is_available():
- print('Using MPS device')
- self.device = 'mps'
+ # MPS inference fails on GitHub runners as of 2025.08. This is
+ # independent of model size. So, we disable MPS when running in GHA.
+ if is_running_in_gha():
+ print('GitHub actions detected, bypassing MPS backend')
+ else:
+ print('Using MPS device')
+ self.device = 'mps'
  except AttributeError:
  pass
+ 
  try:
  self.model = PTDetector._load_model(model_path,
  device=self.device,
@@ -876,18 +883,10 @@ class PTDetector:
  if verbose:
  print(f'Using PyTorch version {torch.__version__}')
 
- # There are two very slightly different ways to load the model, (1) using the
- # map_location=device parameter to torch.load and (2) calling .to(device) after
- # loading the model. The former is what we did for a zillion years, but is not
- # supported on Apple silicon at of 2024.09. Switching to the latter causes
- # very slight changes to the output, which always make me nervous, so I'm not
- # doing a wholesale swap just yet. Instead, when running in "classic" compatibility
- # mode, we'll only use map_location on M1 hardware, where at least at some point
- # there was not a choice.
- if 'classic' in compatibility_mode:
- use_map_location = (device != 'mps')
- else:
- use_map_location = False
+ # I get quirky errors when loading YOLOv5 models on MPS hardware using
+ # map_location, but this is the recommended method, so I'm using it everywhere
+ # other than MPS devices.
+ use_map_location = (device != 'mps')
 
  if use_map_location:
  try:
@@ -917,10 +916,9 @@ class PTDetector:
  if t is torch.nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
  m.recompute_scale_factor = None
 
- if use_map_location:
- model = checkpoint['model'].float().fuse().eval()
- else:
- model = checkpoint['model'].float().fuse().eval().to(device)
+ # Calling .to(device) should no longer be necessary now that we're using map_location=device
+ # model = checkpoint['model'].float().fuse().eval().to(device)
+ model = checkpoint['model'].float().fuse().eval()
 
  return model
 
@@ -1123,9 +1121,6 @@ class PTDetector:
  if not isinstance(img_original, list):
  raise ValueError('img_original must be a list for batch processing')
 
- if verbose:
- print('generate_detections_one_batch: processing a batch of size {}'.format(len(img_original)))
- 
  if len(img_original) == 0:
  return []
 
@@ -1180,9 +1175,8 @@ class PTDetector:
  preprocessed_images.append((i_img, image_info, current_image_id))
 
  except Exception as e:
- if verbose:
- print('Preprocessing failed for image {}: {}'.format(
- image_id[i_img] if image_id else f'index_{i_img}', str(e)))
+ print('Warning: preprocessing failed for image {}: {}'.format(
+ image_id[i_img] if image_id else f'index_{i_img}', str(e)))
 
  preprocessing_failed_indices.add(i_img)
  current_image_id = image_id[i_img] if image_id else f'index_{i_img}'
@@ -1203,18 +1197,14 @@
  shape_groups[actual_shape] = []
  shape_groups[actual_shape].append((original_idx, image_info, current_image_id))
 
- if verbose and len(shape_groups) > 1:
- print('generate_detections_one_batch: batch of size {} split into {} shape-group batches'.\
- format(len(preprocessed_images), len(shape_groups)))
- 
  # Process each shape group as a batch
  for target_shape, group_items in shape_groups.items():
+ 
  try:
  self._process_batch_group(group_items, results, detection_threshold, augment, verbose)
  except Exception as e:
  # If inference fails for the entire batch, mark all images in this batch as failed
- if verbose:
- print('Batch inference failed for shape {}: {}'.format(target_shape, str(e)))
+ print('Warning: batch inference failed for shape {}: {}'.format(target_shape, str(e)))
 
  for original_idx, image_info, current_image_id in group_items:
  results[original_idx] = {
@@ -1223,8 +1213,12 @@
  'failure': FAILURE_INFER
  }
 
+ # ...for each shape group
  return results
 
+ # ...def generate_detections_one_batch(...)
+ 
+ 
  def _process_batch_group(self, group_items, results, detection_threshold, augment, verbose):
  """
  Process a group of images with the same target shape as a single batch.
@@ -1273,10 +1267,6 @@
  # Stack images into a batch tensor
  batch_tensor = torch.stack(batch_images)
 
- if verbose:
- if batch_tensor.shape[0] > 1:
- print('_process_batch_group: processing a batch of size {}'.format(batch_tensor.shape[0]))
- 
  batch_tensor = batch_tensor.float()
  batch_tensor /= 255.0
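
The is_running_in_gha() helper imported above is part of the 53 lines added to ct_utils.py, which this excerpt does not show. As a rough sketch of what such a check typically amounts to (an assumption, not the actual ct_utils implementation), GitHub Actions can be detected from the runner environment:

    import os

    def is_running_in_gha():
        # GitHub Actions sets GITHUB_ACTIONS=true in the runner environment (assumed implementation)
        return os.environ.get('GITHUB_ACTIONS', '').lower() == 'true'
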