megadetector 5.0.10-py3-none-any.whl → 5.0.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (226)
  1. {api → megadetector/api}/batch_processing/api_core/batch_service/score.py +2 -2
  2. {api → megadetector/api}/synchronous/api_core/animal_detection_api/api_backend.py +1 -1
  3. {api → megadetector/api}/synchronous/api_core/animal_detection_api/api_frontend.py +1 -1
  4. {classification → megadetector/classification}/analyze_failed_images.py +3 -3
  5. {classification → megadetector/classification}/cache_batchapi_outputs.py +1 -1
  6. {classification → megadetector/classification}/create_classification_dataset.py +1 -1
  7. {classification → megadetector/classification}/crop_detections.py +1 -1
  8. {classification → megadetector/classification}/detect_and_crop.py +5 -5
  9. {classification → megadetector/classification}/evaluate_model.py +1 -1
  10. {classification → megadetector/classification}/json_to_azcopy_list.py +2 -2
  11. {classification → megadetector/classification}/json_validator.py +13 -9
  12. {classification → megadetector/classification}/map_classification_categories.py +1 -1
  13. {classification → megadetector/classification}/merge_classification_detection_output.py +1 -1
  14. {classification → megadetector/classification}/run_classifier.py +2 -1
  15. {classification → megadetector/classification}/train_classifier.py +8 -6
  16. {classification → megadetector/classification}/train_classifier_tf.py +10 -9
  17. {classification → megadetector/classification}/train_utils.py +3 -2
  18. {data_management → megadetector/data_management}/camtrap_dp_to_coco.py +4 -3
  19. {data_management → megadetector/data_management}/cct_json_utils.py +2 -2
  20. {data_management → megadetector/data_management}/cct_to_md.py +1 -1
  21. {data_management → megadetector/data_management}/coco_to_labelme.py +1 -1
  22. {data_management → megadetector/data_management}/coco_to_yolo.py +1 -1
  23. {data_management → megadetector/data_management}/databases/integrity_check_json_db.py +2 -2
  24. {data_management → megadetector/data_management}/get_image_sizes.py +4 -3
  25. {data_management → megadetector/data_management}/importers/auckland_doc_test_to_json.py +6 -5
  26. {data_management → megadetector/data_management}/importers/auckland_doc_to_json.py +4 -3
  27. {data_management → megadetector/data_management}/importers/awc_to_json.py +6 -4
  28. {data_management → megadetector/data_management}/importers/bellevue_to_json.py +3 -3
  29. {data_management → megadetector/data_management}/importers/cacophony-thermal-importer.py +4 -4
  30. {data_management → megadetector/data_management}/importers/carrizo_shrubfree_2018.py +5 -4
  31. {data_management → megadetector/data_management}/importers/carrizo_trail_cam_2017.py +8 -6
  32. {data_management → megadetector/data_management}/importers/cct_field_adjustments.py +2 -1
  33. {data_management → megadetector/data_management}/importers/channel_islands_to_cct.py +2 -2
  34. {data_management → megadetector/data_management}/importers/ena24_to_json.py +6 -5
  35. {data_management → megadetector/data_management}/importers/filenames_to_json.py +2 -1
  36. {data_management → megadetector/data_management}/importers/helena_to_cct.py +7 -6
  37. {data_management → megadetector/data_management}/importers/idaho-camera-traps.py +6 -6
  38. {data_management → megadetector/data_management}/importers/idfg_iwildcam_lila_prep.py +4 -4
  39. {data_management → megadetector/data_management}/importers/jb_csv_to_json.py +1 -1
  40. {data_management → megadetector/data_management}/importers/missouri_to_json.py +4 -3
  41. {data_management → megadetector/data_management}/importers/noaa_seals_2019.py +2 -2
  42. {data_management → megadetector/data_management}/importers/pc_to_json.py +5 -5
  43. {data_management → megadetector/data_management}/importers/prepare-noaa-fish-data-for-lila.py +3 -3
  44. {data_management → megadetector/data_management}/importers/prepare_zsl_imerit.py +3 -3
  45. {data_management → megadetector/data_management}/importers/rspb_to_json.py +2 -2
  46. {data_management → megadetector/data_management}/importers/save_the_elephants_survey_A.py +4 -4
  47. {data_management → megadetector/data_management}/importers/save_the_elephants_survey_B.py +6 -9
  48. {data_management → megadetector/data_management}/importers/snapshot_safari_importer.py +4 -4
  49. {data_management → megadetector/data_management}/importers/snapshot_safari_importer_reprise.py +2 -2
  50. {data_management → megadetector/data_management}/importers/snapshot_serengeti_lila.py +4 -4
  51. {data_management → megadetector/data_management}/importers/timelapse_csv_set_to_json.py +3 -3
  52. {data_management → megadetector/data_management}/importers/ubc_to_json.py +3 -3
  53. {data_management → megadetector/data_management}/importers/umn_to_json.py +2 -2
  54. {data_management → megadetector/data_management}/importers/wellington_to_json.py +3 -3
  55. {data_management → megadetector/data_management}/importers/wi_to_json.py +3 -2
  56. {data_management → megadetector/data_management}/labelme_to_coco.py +6 -7
  57. {data_management → megadetector/data_management}/labelme_to_yolo.py +2 -2
  58. {data_management → megadetector/data_management}/lila/add_locations_to_island_camera_traps.py +4 -4
  59. {data_management → megadetector/data_management}/lila/create_lila_blank_set.py +10 -9
  60. {data_management → megadetector/data_management}/lila/create_lila_test_set.py +3 -2
  61. {data_management → megadetector/data_management}/lila/create_links_to_md_results_files.py +1 -1
  62. {data_management → megadetector/data_management}/lila/download_lila_subset.py +5 -4
  63. {data_management → megadetector/data_management}/lila/generate_lila_per_image_labels.py +6 -5
  64. {data_management → megadetector/data_management}/lila/get_lila_annotation_counts.py +2 -2
  65. {data_management → megadetector/data_management}/lila/get_lila_image_counts.py +2 -1
  66. {data_management → megadetector/data_management}/lila/lila_common.py +5 -5
  67. {data_management → megadetector/data_management}/lila/test_lila_metadata_urls.py +2 -2
  68. {data_management → megadetector/data_management}/ocr_tools.py +6 -6
  69. {data_management → megadetector/data_management}/read_exif.py +2 -2
  70. {data_management → megadetector/data_management}/remap_coco_categories.py +1 -1
  71. {data_management → megadetector/data_management}/remove_exif.py +1 -1
  72. {data_management → megadetector/data_management}/resize_coco_dataset.py +4 -4
  73. {data_management → megadetector/data_management}/wi_download_csv_to_coco.py +3 -3
  74. {data_management → megadetector/data_management}/yolo_output_to_md_output.py +5 -5
  75. {data_management → megadetector/data_management}/yolo_to_coco.py +9 -9
  76. {detection → megadetector/detection}/process_video.py +9 -10
  77. {detection → megadetector/detection}/pytorch_detector.py +12 -8
  78. {detection → megadetector/detection}/run_detector.py +6 -6
  79. {detection → megadetector/detection}/run_detector_batch.py +12 -12
  80. {detection → megadetector/detection}/run_inference_with_yolov5_val.py +12 -12
  81. {detection → megadetector/detection}/run_tiled_inference.py +8 -9
  82. {detection → megadetector/detection}/tf_detector.py +3 -2
  83. {detection → megadetector/detection}/video_utils.py +2 -2
  84. {api/batch_processing → megadetector}/postprocessing/add_max_conf.py +1 -1
  85. {api/batch_processing → megadetector}/postprocessing/categorize_detections_by_size.py +1 -1
  86. {api/batch_processing → megadetector}/postprocessing/combine_api_outputs.py +1 -1
  87. {api/batch_processing → megadetector}/postprocessing/compare_batch_results.py +5 -5
  88. {api/batch_processing → megadetector}/postprocessing/convert_output_format.py +4 -5
  89. {api/batch_processing → megadetector}/postprocessing/load_api_results.py +1 -1
  90. {api/batch_processing → megadetector}/postprocessing/md_to_coco.py +3 -3
  91. {api/batch_processing → megadetector}/postprocessing/md_to_labelme.py +3 -3
  92. {api/batch_processing → megadetector}/postprocessing/merge_detections.py +1 -1
  93. {api/batch_processing → megadetector}/postprocessing/postprocess_batch_results.py +19 -21
  94. {api/batch_processing → megadetector}/postprocessing/remap_detection_categories.py +1 -1
  95. {api/batch_processing → megadetector}/postprocessing/render_detection_confusion_matrix.py +5 -6
  96. {api/batch_processing → megadetector}/postprocessing/repeat_detection_elimination/find_repeat_detections.py +3 -3
  97. {api/batch_processing → megadetector}/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +3 -2
  98. {api/batch_processing → megadetector}/postprocessing/repeat_detection_elimination/repeat_detections_core.py +11 -11
  99. {api/batch_processing → megadetector}/postprocessing/separate_detections_into_folders.py +3 -4
  100. {api/batch_processing → megadetector}/postprocessing/subset_json_detector_output.py +2 -2
  101. {api/batch_processing → megadetector}/postprocessing/top_folders_to_bottom.py +1 -1
  102. {taxonomy_mapping → megadetector/taxonomy_mapping}/map_lila_taxonomy_to_wi_taxonomy.py +2 -2
  103. {taxonomy_mapping → megadetector/taxonomy_mapping}/map_new_lila_datasets.py +2 -6
  104. {taxonomy_mapping → megadetector/taxonomy_mapping}/preview_lila_taxonomy.py +6 -7
  105. {taxonomy_mapping → megadetector/taxonomy_mapping}/retrieve_sample_image.py +1 -1
  106. {taxonomy_mapping → megadetector/taxonomy_mapping}/simple_image_download.py +2 -1
  107. {taxonomy_mapping → megadetector/taxonomy_mapping}/species_lookup.py +1 -1
  108. {taxonomy_mapping → megadetector/taxonomy_mapping}/taxonomy_csv_checker.py +1 -1
  109. {taxonomy_mapping → megadetector/taxonomy_mapping}/validate_lila_category_mappings.py +1 -1
  110. {md_utils → megadetector/utils}/azure_utils.py +7 -3
  111. {md_utils → megadetector/utils}/directory_listing.py +1 -1
  112. {md_utils → megadetector/utils}/md_tests.py +29 -29
  113. {md_utils → megadetector/utils}/split_locations_into_train_val.py +1 -1
  114. {md_utils → megadetector/utils}/write_html_image_list.py +1 -1
  115. {md_visualization → megadetector/visualization}/render_images_with_thumbnails.py +3 -3
  116. {md_visualization → megadetector/visualization}/visualization_utils.py +6 -7
  117. {md_visualization → megadetector/visualization}/visualize_db.py +3 -4
  118. {md_visualization → megadetector/visualization}/visualize_detector_output.py +9 -10
  119. {megadetector-5.0.10.dist-info → megadetector-5.0.12.dist-info}/LICENSE +0 -0
  120. {megadetector-5.0.10.dist-info → megadetector-5.0.12.dist-info}/METADATA +12 -11
  121. megadetector-5.0.12.dist-info/RECORD +199 -0
  122. megadetector-5.0.12.dist-info/top_level.txt +1 -0
  123. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  124. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  125. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  126. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  127. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  128. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  129. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  130. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  131. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  132. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  133. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  134. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  135. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  136. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  137. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  138. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  139. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  140. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  141. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  142. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  143. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  144. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  145. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  146. detection/detector_training/model_main_tf2.py +0 -114
  147. docs/source/conf.py +0 -43
  148. megadetector-5.0.10.dist-info/RECORD +0 -224
  149. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  150. {api → megadetector/api}/__init__.py +0 -0
  151. {api → megadetector/api}/batch_processing/__init__.py +0 -0
  152. {api → megadetector/api}/batch_processing/api_core/__init__.py +0 -0
  153. {api → megadetector/api}/batch_processing/api_core/batch_service/__init__.py +0 -0
  154. {api → megadetector/api}/batch_processing/api_core/server.py +0 -0
  155. {api → megadetector/api}/batch_processing/api_core/server_api_config.py +0 -0
  156. {api → megadetector/api}/batch_processing/api_core/server_app_config.py +0 -0
  157. {api → megadetector/api}/batch_processing/api_core/server_batch_job_manager.py +0 -0
  158. {api → megadetector/api}/batch_processing/api_core/server_job_status_table.py +0 -0
  159. {api → megadetector/api}/batch_processing/api_core/server_orchestration.py +0 -0
  160. {api → megadetector/api}/batch_processing/api_core/server_utils.py +0 -0
  161. {api → megadetector/api}/batch_processing/api_core_support/__init__.py +0 -0
  162. {api → megadetector/api}/batch_processing/api_core_support/aggregate_results_manually.py +0 -0
  163. {api → megadetector/api}/batch_processing/api_support/__init__.py +0 -0
  164. {api → megadetector/api}/batch_processing/api_support/summarize_daily_activity.py +0 -0
  165. {api → megadetector/api}/batch_processing/data_preparation/__init__.py +0 -0
  166. {api → megadetector/api}/batch_processing/integration/digiKam/setup.py +0 -0
  167. {api → megadetector/api}/batch_processing/integration/digiKam/xmp_integration.py +0 -0
  168. {api → megadetector/api}/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -0
  169. {api → megadetector/api}/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -0
  170. {api → megadetector/api}/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -0
  171. {api/batch_processing/postprocessing → megadetector/api/synchronous}/__init__.py +0 -0
  172. {api/synchronous → megadetector/api/synchronous/api_core/animal_detection_api}/__init__.py +0 -0
  173. {api → megadetector/api}/synchronous/api_core/animal_detection_api/config.py +0 -0
  174. {api/synchronous/api_core/animal_detection_api → megadetector/api/synchronous/api_core/tests}/__init__.py +0 -0
  175. {api → megadetector/api}/synchronous/api_core/tests/load_test.py +0 -0
  176. {api/synchronous/api_core/tests → megadetector/classification}/__init__.py +0 -0
  177. {classification → megadetector/classification}/aggregate_classifier_probs.py +0 -0
  178. {classification → megadetector/classification}/csv_to_json.py +0 -0
  179. {classification → megadetector/classification}/efficientnet/__init__.py +0 -0
  180. {classification → megadetector/classification}/efficientnet/model.py +0 -0
  181. {classification → megadetector/classification}/efficientnet/utils.py +0 -0
  182. {classification → megadetector/classification}/identify_mislabeled_candidates.py +0 -0
  183. {classification → megadetector/classification}/prepare_classification_script.py +0 -0
  184. {classification → megadetector/classification}/prepare_classification_script_mc.py +0 -0
  185. {classification → megadetector/classification}/save_mislabeled.py +0 -0
  186. {classification → megadetector/data_management}/__init__.py +0 -0
  187. {data_management → megadetector/data_management/annotations}/__init__.py +0 -0
  188. {data_management → megadetector/data_management}/annotations/annotation_constants.py +0 -0
  189. {data_management → megadetector/data_management}/cct_to_wi.py +0 -0
  190. {data_management/annotations → megadetector/data_management/databases}/__init__.py +0 -0
  191. {data_management → megadetector/data_management}/databases/add_width_and_height_to_db.py +0 -0
  192. {data_management → megadetector/data_management}/databases/combine_coco_camera_traps_files.py +0 -0
  193. {data_management → megadetector/data_management}/databases/subset_json_db.py +0 -0
  194. {data_management → megadetector/data_management}/generate_crops_from_cct.py +0 -0
  195. {data_management → megadetector/data_management}/importers/add_nacti_sizes.py +0 -0
  196. {data_management → megadetector/data_management}/importers/add_timestamps_to_icct.py +0 -0
  197. {data_management → megadetector/data_management}/importers/animl_results_to_md_results.py +0 -0
  198. {data_management → megadetector/data_management}/importers/eMammal/copy_and_unzip_emammal.py +0 -0
  199. {data_management → megadetector/data_management}/importers/eMammal/eMammal_helpers.py +0 -0
  200. {data_management → megadetector/data_management}/importers/eMammal/make_eMammal_json.py +0 -0
  201. {data_management → megadetector/data_management}/importers/mcgill_to_json.py +0 -0
  202. {data_management → megadetector/data_management}/importers/nacti_fieldname_adjustments.py +0 -0
  203. {data_management → megadetector/data_management}/importers/plot_wni_giraffes.py +0 -0
  204. {data_management → megadetector/data_management}/importers/snapshotserengeti/make_full_SS_json.py +0 -0
  205. {data_management → megadetector/data_management}/importers/snapshotserengeti/make_per_season_SS_json.py +0 -0
  206. {data_management → megadetector/data_management}/importers/sulross_get_exif.py +0 -0
  207. {data_management → megadetector/data_management}/importers/zamba_results_to_md_results.py +0 -0
  208. {data_management/databases → megadetector/data_management/lila}/__init__.py +0 -0
  209. {data_management → megadetector/data_management}/lila/add_locations_to_nacti.py +0 -0
  210. {data_management/lila → megadetector/detection}/__init__.py +0 -0
  211. {detection → megadetector/detection/detector_training}/__init__.py +0 -0
  212. {api/synchronous/api_core/animal_detection_api → megadetector}/detection/detector_training/model_main_tf2.py +0 -0
  213. {detection/detector_training → megadetector/postprocessing}/__init__.py +0 -0
  214. {md_utils → megadetector/taxonomy_mapping}/__init__.py +0 -0
  215. {taxonomy_mapping → megadetector/taxonomy_mapping}/prepare_lila_taxonomy_release.py +0 -0
  216. {taxonomy_mapping → megadetector/taxonomy_mapping}/taxonomy_graph.py +0 -0
  217. {md_visualization → megadetector/utils}/__init__.py +0 -0
  218. {md_utils → megadetector/utils}/ct_utils.py +0 -0
  219. {md_utils → megadetector/utils}/path_utils.py +0 -0
  220. {md_utils → megadetector/utils}/process_utils.py +0 -0
  221. {md_utils → megadetector/utils}/sas_blob_utils.py +0 -0
  222. {md_utils → megadetector/utils}/string_utils.py +0 -0
  223. {md_utils → megadetector/utils}/url_utils.py +0 -0
  224. {taxonomy_mapping → megadetector/visualization}/__init__.py +0 -0
  225. {md_visualization → megadetector/visualization}/plot_utils.py +0 -0
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.12.dist-info}/WHEEL +0 -0
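
The bulk of this release is a repackaging rather than a behavior change: modules that previously sat at the top level of the wheel (api, classification, data_management, detection, md_utils, md_visualization, taxonomy_mapping) now live under a single megadetector package, with md_utils renamed to megadetector.utils, md_visualization renamed to megadetector.visualization, and api/batch_processing/postprocessing promoted to megadetector.postprocessing. A minimal sketch of how downstream import paths change, using module names taken from the file list above (illustrative, not an exhaustive migration guide):

    # Imports that worked against megadetector 5.0.10 (top-level packages)
    from detection import run_detector_batch
    from md_utils import path_utils
    from md_visualization import visualization_utils as vis_utils

    # Equivalent imports against megadetector 5.0.12 (everything under the megadetector namespace)
    from megadetector.detection import run_detector_batch
    from megadetector.utils import path_utils
    from megadetector.visualization import visualization_utils as vis_utils
    from megadetector.postprocessing import postprocess_batch_results
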
api/synchronous/api_core/animal_detection_api/detection/tf_detector.py (deleted)
@@ -1,165 +0,0 @@
- ########
- #
- # tf_detector.py
- #
- # Module containing the class TFDetector for loading a TensorFlow detection model and
- # running inference.
- #
- ########
-
- import numpy as np
-
- from detection.run_detector import CONF_DIGITS, COORD_DIGITS, FAILURE_INFER
- from md_utils.ct_utils import truncate_float
-
- import tensorflow.compat.v1 as tf
-
- print('TensorFlow version:', tf.__version__)
- print('Is GPU available? tf.test.is_gpu_available:', tf.test.is_gpu_available())
-
-
- class TFDetector:
-     """
-     A detector model loaded at the time of initialization. It is intended to be used with
-     the MegaDetector (TF). The inference batch size is set to 1; code needs to be modified
-     to support larger batch sizes, including resizing appropriately.
-     """
-
-     # MegaDetector was trained with batch size of 1, and the resizing function is a part
-     # of the inference graph
-     BATCH_SIZE = 1
-
-
-     def __init__(self, model_path):
-         """
-         Loads model from model_path and starts a tf.Session with this graph. Obtains
-         input and output tensor handles.
-         """
-
-         detection_graph = TFDetector.__load_model(model_path)
-         self.tf_session = tf.Session(graph=detection_graph)
-
-         self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
-         self.box_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
-         self.score_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
-         self.class_tensor = detection_graph.get_tensor_by_name('detection_classes:0')
-
-     @staticmethod
-     def round_and_make_float(d, precision=4):
-         return truncate_float(float(d), precision=precision)
-
-     @staticmethod
-     def __convert_coords(tf_coords):
-         """
-         Converts coordinates from the model's output format [y1, x1, y2, x2] to the
-         format used by our API and MegaDB: [x1, y1, width, height]. All coordinates
-         (including model outputs) are normalized in the range [0, 1].
-
-         Args:
-             tf_coords: np.array of predicted bounding box coordinates from the TF detector,
-                 has format [y1, x1, y2, x2]
-
-         Returns: list of Python float, predicted bounding box coordinates [x1, y1, width, height]
-         """
-
-         # change from [y1, x1, y2, x2] to [x1, y1, width, height]
-         width = tf_coords[3] - tf_coords[1]
-         height = tf_coords[2] - tf_coords[0]
-
-         new = [tf_coords[1], tf_coords[0], width, height]  # must be a list instead of np.array
-
-         # convert numpy floats to Python floats
-         for i, d in enumerate(new):
-             new[i] = TFDetector.round_and_make_float(d, precision=COORD_DIGITS)
-         return new
-
-     @staticmethod
-     def __load_model(model_path):
-         """
-         Loads a detection model (i.e., create a graph) from a .pb file.
-
-         Args:
-             model_path: .pb file of the model.
-
-         Returns: the loaded graph.
-         """
-
-         print('TFDetector: Loading graph...')
-         detection_graph = tf.Graph()
-         with detection_graph.as_default():
-             od_graph_def = tf.GraphDef()
-             with tf.gfile.GFile(model_path, 'rb') as fid:
-                 serialized_graph = fid.read()
-                 od_graph_def.ParseFromString(serialized_graph)
-                 tf.import_graph_def(od_graph_def, name='')
-         print('TFDetector: Detection graph loaded.')
-
-         return detection_graph
-
-     def _generate_detections_one_image(self, image):
-         np_im = np.asarray(image, np.uint8)
-         im_w_batch_dim = np.expand_dims(np_im, axis=0)
-
-         # need to change the above line to the following if supporting a batch size > 1 and resizing to the same size
-         # np_images = [np.asarray(image, np.uint8) for image in images]
-         # images_stacked = np.stack(np_images, axis=0) if len(images) > 1 else np.expand_dims(np_images[0], axis=0)
-
-         # performs inference
-         (box_tensor_out, score_tensor_out, class_tensor_out) = self.tf_session.run(
-             [self.box_tensor, self.score_tensor, self.class_tensor],
-             feed_dict={self.image_tensor: im_w_batch_dim})
-
-         return box_tensor_out, score_tensor_out, class_tensor_out
-
-     def generate_detections_one_image(self, image, image_id, detection_threshold, image_size=None,
-                                       skip_image_resizing=False):
-         """
-         Apply the detector to an image.
-
-         Args:
-             image: the PIL Image object
-             image_id: a path to identify the image; will be in the "file" field of the output object
-             detection_threshold: confidence above which to include the detection proposal
-
-         Returns:
-             A dict with the following fields, see the 'images' key in https://github.com/agentmorris/MegaDetector/tree/master/api/batch_processing#batch-processing-api-output-format
-             - 'file' (always present)
-             - 'max_detection_conf'
-             - 'detections', which is a list of detection objects containing keys 'category', 'conf' and 'bbox'
-             - 'failure'
-         """
-
-         assert image_size is None, 'Image sizing not supported for TF detectors'
-         assert not skip_image_resizing, 'Image sizing not supported for TF detectors'
-         result = {
-             'file': image_id
-         }
-         try:
-             b_box, b_score, b_class = self._generate_detections_one_image(image)
-
-             # our batch size is 1; need to loop the batch dim if supporting batch size > 1
-             boxes, scores, classes = b_box[0], b_score[0], b_class[0]
-
-             detections_cur_image = []  # will be empty for an image with no confident detections
-             max_detection_conf = 0.0
-             for b, s, c in zip(boxes, scores, classes):
-                 if s > detection_threshold:
-                     detection_entry = {
-                         'category': str(int(c)),  # use string type for the numerical class label, not int
-                         'conf': truncate_float(float(s),  # cast to float for json serialization
-                                                precision=CONF_DIGITS),
-                         'bbox': TFDetector.__convert_coords(b)
-                     }
-                     detections_cur_image.append(detection_entry)
-                     if s > max_detection_conf:
-                         max_detection_conf = s
-
-             result['max_detection_conf'] = truncate_float(float(max_detection_conf),
-                                                           precision=CONF_DIGITS)
-             result['detections'] = detections_cur_image
-
-         except Exception as e:
-             result['failure'] = FAILURE_INFER
-             print('TFDetector: image {} failed during inference: {}'.format(image_id, str(e)))
-
-         return result
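
The box-format conversion in the deleted module above (from the TF graph's [y1, x1, y2, x2] to MegaDetector's [x_min, y_min, width, height], all normalized to [0, 1]) is easy to restate on its own. A minimal sketch, not part of the package (the packaged copy of this logic now lives under megadetector/detection/tf_detector.py per the file list):

    # Illustrative restatement of TFDetector.__convert_coords
    def convert_tf_coords(tf_coords):
        # TF object-detection output: [y1, x1, y2, x2], normalized to [0, 1]
        y1, x1, y2, x2 = [float(c) for c in tf_coords]
        # MegaDetector output format: [x_min, y_min, width, height], also normalized
        return [x1, y1, x2 - x1, y2 - y1]

    # Example: a box covering the right half of the image
    print(convert_tf_coords([0.0, 0.5, 1.0, 1.0]))  # [0.5, 0.0, 0.5, 1.0]
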
api/synchronous/api_core/animal_detection_api/detection/video_utils.py (deleted)
@@ -1,495 +0,0 @@
- ########
- #
- # video_utils.py
- #
- # Utilities for splitting, rendering, and assembling videos.
- #
- ########
-
- #%% Constants, imports, environment
-
- import os
- import cv2
- import glob
- import json
-
- from collections import defaultdict
- from multiprocessing.pool import ThreadPool
- from multiprocessing.pool import Pool
- from tqdm import tqdm
- from typing import Container,Iterable,List
- from functools import partial
-
- from md_utils import path_utils
-
- from md_visualization import visualization_utils as vis_utils
-
-
- #%% Path utilities
-
- VIDEO_EXTENSIONS = ('.mp4','.avi','.mpeg','.mpg')
-
- def is_video_file(s: str, video_extensions: Container[str] = VIDEO_EXTENSIONS
-                   ) -> bool:
-     """
-     Checks a file's extension against a hard-coded set of video file
-     extensions.
-     """
-
-     ext = os.path.splitext(s)[1]
-     return ext.lower() in video_extensions
-
-
- def find_video_strings(strings: Iterable[str]) -> List[str]:
-     """
-     Given a list of strings that are potentially video file names, looks for
-     strings that actually look like video file names (based on extension).
-     """
-
-     return [s for s in strings if is_video_file(s.lower())]
-
-
- def find_videos(dirname: str, recursive: bool = False) -> List[str]:
-     """
-     Finds all files in a directory that look like video file names. Returns
-     absolute paths.
-     """
-
-     if recursive:
-         strings = glob.glob(os.path.join(dirname, '**', '*.*'), recursive=True)
-     else:
-         strings = glob.glob(os.path.join(dirname, '*.*'))
-     return find_video_strings(strings)
-
-
- #%% Function for rendering frames to video and vice-versa
-
- # http://tsaith.github.io/combine-images-into-a-video-with-python-3-and-opencv-3.html
-
- def frames_to_video(images, Fs, output_file_name, codec_spec='h264'):
-     """
-     Given a list of image files and a sample rate, concatenate those images into
-     a video and write to [output_file_name].
-
-     Note to self: h264 is a sensible default and generally works on Windows, but when this
-     fails (which is around 50% of the time on Linux), I fall back to mp4v.
-     """
-
-     if codec_spec is None:
-         codec_spec = 'h264'
-
-     if len(images) == 0:
-         return
-
-     # Determine the width and height from the first image
-     frame = cv2.imread(images[0])
-     cv2.imshow('video',frame)
-     height, width, channels = frame.shape
-
-     # Define the codec and create VideoWriter object
-     fourcc = cv2.VideoWriter_fourcc(*codec_spec)
-     out = cv2.VideoWriter(output_file_name, fourcc, Fs, (width, height))
-
-     for image in images:
-         frame = cv2.imread(image)
-         out.write(frame)
-
-     out.release()
-     cv2.destroyAllWindows()
-
-
- def get_video_fs(input_video_file):
-     """
-     Get the frame rate of [input_video_file]
-     """
-
-     assert os.path.isfile(input_video_file), 'File {} not found'.format(input_video_file)
-     vidcap = cv2.VideoCapture(input_video_file)
-     Fs = vidcap.get(cv2.CAP_PROP_FPS)
-     vidcap.release()
-     return Fs
-
-
- def frame_number_to_filename(frame_number):
-     return 'frame{:06d}.jpg'.format(frame_number)
-
-
- def video_to_frames(input_video_file, output_folder, overwrite=True,
-                     every_n_frames=None, verbose=False):
-     """
-     Render every frame of [input_video_file] to a .jpg in [output_folder]
-
-     With help from:
-
-     https://stackoverflow.com/questions/33311153/python-extracting-and-saving-video-frames
-     """
-
-     assert os.path.isfile(input_video_file), 'File {} not found'.format(input_video_file)
-
-     vidcap = cv2.VideoCapture(input_video_file)
-     n_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
-     Fs = vidcap.get(cv2.CAP_PROP_FPS)
-
-     # If we're not over-writing, check whether all frame images already exist
-     if overwrite == False:
-
-         missing_frame_number = None
-         frame_filenames = []
-
-         for frame_number in range(0,n_frames):
-
-             if every_n_frames is not None:
-                 if frame_number % every_n_frames != 0:
-                     continue
-
-             frame_filename = frame_number_to_filename(frame_number)
-             frame_filename = os.path.join(output_folder,frame_filename)
-             frame_filenames.append(frame_filename)
-             if os.path.isfile(frame_filename):
-                 continue
-             else:
-                 missing_frame_number = frame_number
-                 break
-
-         # OpenCV seems to over-report the number of frames by 1 in some cases, or fails
-         # to read the last frame; either way, I'm allowing one missing frame.
-         allow_last_frame_missing = True
-
-         if missing_frame_number is None or \
-             (allow_last_frame_missing and (missing_frame_number == n_frames-1)):
-             if verbose:
-                 print('Skipping video {}, all output frames exist'.format(input_video_file))
-             return frame_filenames,Fs
-         else:
-             pass
-             # print("Rendering video {}, couldn't find frame {}".format(
-             #    input_video_file,missing_frame_number))
-
-     # ...if we need to check whether to skip this video entirely
-
-     if verbose:
-         print('Reading {} frames at {} Hz from {}'.format(n_frames,Fs,input_video_file))
-
-     frame_filenames = []
-
-     # for frame_number in tqdm(range(0,n_frames)):
-     for frame_number in range(0,n_frames):
-
-         success,image = vidcap.read()
-         if not success:
-             assert image is None
-             if verbose:
-                 print('Read terminating at frame {} of {}'.format(frame_number,n_frames))
-             break
-
-         if every_n_frames is not None:
-             if frame_number % every_n_frames != 0:
-                 continue
-
-         frame_filename = frame_number_to_filename(frame_number)
-         frame_filename = os.path.join(output_folder,frame_filename)
-         frame_filenames.append(frame_filename)
-
-         if overwrite == False and os.path.isfile(frame_filename):
-             # print('Skipping frame {}'.format(frame_filename))
-             pass
-         else:
-             try:
-                 if frame_filename.isascii():
-                     cv2.imwrite(os.path.normpath(frame_filename),image)
-                 else:
-                     is_success, im_buf_arr = cv2.imencode('.jpg', image)
-                     im_buf_arr.tofile(frame_filename)
-                 assert os.path.isfile(frame_filename), \
-                     'Output frame {} unavailable'.format(frame_filename)
-             except KeyboardInterrupt:
-                 vidcap.release()
-                 raise
-             except Exception as e:
-                 print('Error on frame {} of {}: {}'.format(frame_number,n_frames,str(e)))
-
-     if verbose:
-         print('\nExtracted {} of {} frames'.format(len(frame_filenames),n_frames))
-
-     vidcap.release()
-     return frame_filenames,Fs
-
-
- def _video_to_frames_for_folder(relative_fn,input_folder,output_folder_base,every_n_frames,overwrite,verbose):
-     """
-     Internal function to call video_to_frames in the context of video_folder_to_frames;
-     makes sure the right output folder exists, then calls video_to_frames.
-     """
-
-     input_fn_absolute = os.path.join(input_folder,relative_fn)
-     assert os.path.isfile(input_fn_absolute)
-
-     # Create the target output folder
-     output_folder_video = os.path.join(output_folder_base,relative_fn)
-     os.makedirs(output_folder_video,exist_ok=True)
-
-     # Render frames
-     # input_video_file = input_fn_absolute; output_folder = output_folder_video
-     frame_filenames,fs = video_to_frames(input_fn_absolute,output_folder_video,
-                                          overwrite=overwrite,every_n_frames=every_n_frames,
-                                          verbose=verbose)
-
-     return frame_filenames,fs
-
-
- def video_folder_to_frames(input_folder:str, output_folder_base:str,
-                            recursive:bool=True, overwrite:bool=True,
-                            n_threads:int=1, every_n_frames:int=None,
-                            verbose=False, parallelization_uses_threads=True):
-     """
-     For every video file in input_folder, create a folder within output_folder_base, and
-     render every frame of the video to .jpg in that folder.
-     """
-
-     # Recursively enumerate video files
-     input_files_full_paths = find_videos(input_folder,recursive=recursive)
-     print('Found {} videos in folder {}'.format(len(input_files_full_paths),input_folder))
-     if len(input_files_full_paths) == 0:
-         return [],[],[]
-
-     input_files_relative_paths = [os.path.relpath(s,input_folder) for s in input_files_full_paths]
-     input_files_relative_paths = [s.replace('\\','/') for s in input_files_relative_paths]
-
-     os.makedirs(output_folder_base,exist_ok=True)
-
-     frame_filenames_by_video = []
-     fs_by_video = []
-
-     if n_threads == 1:
-         # For each video
-         #
-         # input_fn_relative = input_files_relative_paths[0]
-         for input_fn_relative in tqdm(input_files_relative_paths):
-
-             frame_filenames,fs = \
-                 _video_to_frames_for_folder(input_fn_relative,input_folder,output_folder_base,
-                                             every_n_frames,overwrite,verbose)
-             frame_filenames_by_video.append(frame_filenames)
-             fs_by_video.append(fs)
-     else:
-         if parallelization_uses_threads:
-             print('Starting a worker pool with {} threads'.format(n_threads))
-             pool = ThreadPool(n_threads)
-         else:
-             print('Starting a worker pool with {} processes'.format(n_threads))
-             pool = Pool(n_threads)
-         process_video_with_options = partial(_video_to_frames_for_folder,
-                                              input_folder=input_folder,
-                                              output_folder_base=output_folder_base,
-                                              every_n_frames=every_n_frames,
-                                              overwrite=overwrite,
-                                              verbose=verbose)
-         results = list(tqdm(pool.imap(
-             partial(process_video_with_options),input_files_relative_paths),
-             total=len(input_files_relative_paths)))
-         frame_filenames_by_video = [x[0] for x in results]
-         fs_by_video = [x[1] for x in results]
-
-     return frame_filenames_by_video,fs_by_video,input_files_full_paths
-
-
- class FrameToVideoOptions:
-
-     # zero-indexed
-     nth_highest_confidence = 1
-
-
- def frame_results_to_video_results(input_file,output_file,options:FrameToVideoOptions = None):
-     """
-     Given an API output file produced at the *frame* level, corresponding to a directory
-     created with video_folder_to_frames, map those frame-level results back to the
-     video level for use in Timelapse.
-
-     Preserves everything in the input .json file other than the images.
-     """
-
-     if options is None:
-         options = FrameToVideoOptions()
-
-     # Load results
-     with open(input_file,'r') as f:
-         input_data = json.load(f)
-
-     images = input_data['images']
-     detection_categories = input_data['detection_categories']
-
-     ## Break into videos
-
-     video_to_frames = defaultdict(list)
-
-     # im = images[0]
-     for im in tqdm(images):
-
-         fn = im['file']
-         video_name = os.path.dirname(fn)
-         assert is_video_file(video_name)
-         video_to_frames[video_name].append(im)
-
-     print('Found {} unique videos in {} frame-level results'.format(
-         len(video_to_frames),len(images)))
-
-     output_images = []
-
-     ## For each video...
-
-     # video_name = list(video_to_frames.keys())[0]
-     for video_name in tqdm(video_to_frames):
-
-         frames = video_to_frames[video_name]
-
-         all_detections_this_video = []
-
-         # frame = frames[0]
-         for frame in frames:
-             if frame['detections'] is not None:
-                 all_detections_this_video.extend(frame['detections'])
-
-         # At most one detection for each category for the whole video
-         canonical_detections = []
-
-         # category_id = list(detection_categories.keys())[0]
-         for category_id in detection_categories:
-
-             category_detections = [det for det in all_detections_this_video if \
-                                    det['category'] == category_id]
-
-             # Find the nth-highest-confidence video to choose a confidence value
-             if len(category_detections) >= options.nth_highest_confidence:
-
-                 category_detections_by_confidence = sorted(category_detections,
-                     key = lambda i: i['conf'],reverse=True)
-                 canonical_detection = category_detections_by_confidence[options.nth_highest_confidence-1]
-                 canonical_detections.append(canonical_detection)
-
-         # Prepare the output representation for this video
-         im_out = {}
-         im_out['file'] = video_name
-         im_out['detections'] = canonical_detections
-
-         # 'max_detection_conf' is no longer included in output files by default
-         if False:
-             im_out['max_detection_conf'] = 0
-             if len(canonical_detections) > 0:
-                 confidences = [d['conf'] for d in canonical_detections]
-                 im_out['max_detection_conf'] = max(confidences)
-
-         output_images.append(im_out)
-
-     # ...for each video
-
-     output_data = input_data
-     output_data['images'] = output_images
-     s = json.dumps(output_data,indent=1)
-
-     # Write the output file
-     with open(output_file,'w') as f:
-         f.write(s)
-
-
- #%% Test driver
-
- if False:
-
-     #%% Constants
-
-     Fs = 30.01
-     confidence_threshold = 0.75
-     input_folder = 'z:\\'
-     frame_folder_base = r'e:\video_test\frames'
-     detected_frame_folder_base = r'e:\video_test\detected_frames'
-     rendered_videos_folder_base = r'e:\video_test\rendered_videos'
-
-     results_file = r'results.json'
-     os.makedirs(detected_frame_folder_base,exist_ok=True)
-     os.makedirs(rendered_videos_folder_base,exist_ok=True)
-
-
-     #%% Split videos into frames
-
-     frame_filenames_by_video,fs_by_video,video_filenames = \
-         video_folder_to_frames(input_folder,frame_folder_base,recursive=True)
-
-
-     #%% List image files, break into folders
-
-     frame_files = path_utils.find_images(frame_folder_base,True)
-     frame_files = [s.replace('\\','/') for s in frame_files]
-     print('Enumerated {} total frames'.format(len(frame_files)))
-
-     Fs = 30.01
-     # Find unique folders
-     folders = set()
-     # fn = frame_files[0]
-     for fn in frame_files:
-         folders.add(os.path.dirname(fn))
-     folders = [s.replace('\\','/') for s in folders]
-     print('Found {} folders for {} files'.format(len(folders),len(frame_files)))
-
-
-     #%% Load detector output
-
-     with open(results_file,'r') as f:
-         detection_results = json.load(f)
-     detections = detection_results['images']
-     detector_label_map = detection_results['detection_categories']
-     for d in detections:
-         d['file'] = d['file'].replace('\\','/').replace('video_frames/','')
-
-
-     #%% Render detector frames
-
-     # folder = list(folders)[0]
-     for folder in folders:
-
-         frame_files_this_folder = [fn for fn in frame_files if folder in fn]
-         folder_relative = folder.replace((frame_folder_base + '/').replace('\\','/'),'')
-         detection_results_this_folder = [d for d in detections if folder_relative in d['file']]
-         print('Found {} detections in folder {}'.format(len(detection_results_this_folder),folder))
-         assert len(frame_files_this_folder) == len(detection_results_this_folder)
-
-         rendered_frame_output_folder = os.path.join(detected_frame_folder_base,folder_relative)
-         os.makedirs(rendered_frame_output_folder,exist_ok=True)
-
-         # d = detection_results_this_folder[0]
-         for d in tqdm(detection_results_this_folder):
-
-             input_file = os.path.join(frame_folder_base,d['file'])
-             output_file = os.path.join(detected_frame_folder_base,d['file'])
-             os.makedirs(os.path.dirname(output_file),exist_ok=True)
-             vis_utils.draw_bounding_boxes_on_file(input_file,output_file,d['detections'],
-                                                   confidence_threshold)
-
-         # ...for each file in this folder
-
-     # ...for each folder
-
-
-     #%% Render output videos
-
-     # folder = list(folders)[0]
-     for folder in tqdm(folders):
-
-         folder_relative = folder.replace((frame_folder_base + '/').replace('\\','/'),'')
-         rendered_detector_output_folder = os.path.join(detected_frame_folder_base,folder_relative)
-         assert os.path.isdir(rendered_detector_output_folder)
-
-         frame_files_relative = os.listdir(rendered_detector_output_folder)
-         frame_files_absolute = [os.path.join(rendered_detector_output_folder,s) \
-                                 for s in frame_files_relative]
-
-         output_video_filename = os.path.join(rendered_videos_folder_base,folder_relative)
-         os.makedirs(os.path.dirname(output_video_filename),exist_ok=True)
-
-         original_video_filename = output_video_filename.replace(
-             rendered_videos_folder_base,input_folder)
-         assert os.path.isfile(original_video_filename)
-         Fs = get_video_fs(original_video_filename)
-
-         frames_to_video(frame_files_absolute, Fs, output_video_filename)
-
-     # ...for each video
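
To make the frame-to-video aggregation in frame_results_to_video_results above concrete: for each detection category, detections from every frame of a video are pooled, sorted by confidence, and the detection with the nth-highest confidence (n = options.nth_highest_confidence) becomes that category's single video-level detection. A minimal sketch of that selection rule; the helper name and example data are illustrative only, not part of the package:

    # Illustrative sketch of the per-category selection rule used above
    def pick_canonical_detections(pooled_detections, detection_categories, n=1):
        # pooled_detections: detection dicts ({'category', 'conf', 'bbox'}) from all frames of one video
        canonical = []
        for category_id in detection_categories:
            dets = [d for d in pooled_detections if d['category'] == category_id]
            if len(dets) >= n:
                dets_by_conf = sorted(dets, key=lambda d: d['conf'], reverse=True)
                canonical.append(dets_by_conf[n - 1])
        return canonical

    # Example: two detections of category '1' across frames; the higher-confidence one
    # is kept as the video-level detection for that category.
    pooled = [{'category': '1', 'conf': 0.60, 'bbox': [0.1, 0.1, 0.2, 0.2]},
              {'category': '1', 'conf': 0.85, 'bbox': [0.4, 0.4, 0.2, 0.2]}]
    print(pick_canonical_detections(pooled, {'1': 'animal'}))
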