megadetector 5.0.10__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (226)
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
api/synchronous/api_core/animal_detection_api/detection/run_detector.py
@@ -1,627 +0,0 @@
1
- ########
2
- #
3
- # run_detector.py
4
- #
5
- # Module to run an animal detection model on images.
6
- #
7
- # The main function in this script also renders the predicted
8
- # bounding boxes on images and saves the resulting images (with bounding boxes).
9
- #
10
- # This script is not a good way to process lots of images (tens of thousands,
11
- # say). It does not facilitate checkpointing the results, so if it crashes you
12
- # would have to start from scratch. If you want to run a detector (e.g., ours)
13
- # on lots of images, you should check out:
14
- #
15
- # 1) run_detector_batch.py (for local execution)
16
- #
17
- # 2) https://github.com/agentmorris/MegaDetector/tree/master/api/batch_processing
18
- # (for running large jobs on Azure ML)
19
- #
20
- # To run this script, we recommend you set up a conda virtual environment
21
- # following instructions in the Installation section on the main README, using
22
- # `environment-detector.yml` as the environment file where asked.
23
- #
24
- # This is a good way to test our detector on a handful of images and get
25
- # super-satisfying, graphical results. It's also a good way to see how fast a
26
- # detector model will run on a particular machine.
27
- #
28
- # If you would like to *not* use the GPU on the machine, set the environment
29
- # variable CUDA_VISIBLE_DEVICES to "-1".
30
- #
31
- # If no output directory is specified, writes detections for c:\foo\bar.jpg to
32
- # c:\foo\bar_detections.jpg.
33
- #
34
- # This script only considers detections with confidence > 0.005.
35
- # The `threshold` you provide is only for rendering the results. If you need to
36
- # see lower-confidence detections, you can change
37
- # DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD.
38
- #
39
- # Reference:
40
- # https://github.com/tensorflow/models/blob/master/research/object_detection/inference/detection_inference.py
41
- #
42
- ########
43
-
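For reference, the module header above describes a single-image / demo workflow; based on the argument parser defined later in this file, a typical invocation looked roughly like the following (model name and paths are illustrative):

    python run_detector.py MDV5A --image_dir /path/to/images --recursive --output_dir /path/to/output
    CUDA_VISIBLE_DEVICES=-1 python run_detector.py MDV5A --image_file /path/to/image.jpg --threshold 0.2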
44
- #%% Constants, imports, environment
45
-
46
- import argparse
47
- import os
48
- import statistics
49
- import sys
50
- import time
51
- import warnings
52
-
53
- import humanfriendly
54
- from tqdm import tqdm
55
-
56
- import md_utils.path_utils as path_utils
57
- import md_visualization.visualization_utils as vis_utils
58
-
59
- # ignoring all "PIL cannot read EXIF metainfo for the images" warnings
60
- warnings.filterwarnings('ignore', '(Possibly )?corrupt EXIF data', UserWarning)
61
-
62
- # Metadata Warning, tag 256 had too many entries: 42, expected 1
63
- warnings.filterwarnings('ignore', 'Metadata warning', UserWarning)
64
-
65
- # Numpy FutureWarnings from tensorflow import
66
- warnings.filterwarnings('ignore', category=FutureWarning)
67
-
68
- # Useful hack to force CPU inference
69
- # os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
70
-
71
-
72
- # An enumeration of failure reasons
73
- FAILURE_INFER = 'Failure inference'
74
- FAILURE_IMAGE_OPEN = 'Failure image access'
75
-
76
- # Number of decimal places to round to for confidence and bbox coordinates
77
- CONF_DIGITS = 3
78
- COORD_DIGITS = 4
79
-
80
- # Label mapping for MegaDetector
81
- DEFAULT_DETECTOR_LABEL_MAP = {
82
- '1': 'animal',
83
- '2': 'person',
84
- '3': 'vehicle' # available in megadetector v4+
85
- }
86
-
87
- # Should we allow classes that don't look anything like the MegaDetector classes?
88
- #
89
- # By default, we error if we see unfamiliar classes.
90
- #
91
- # TODO: the use of a global variable to manage this was fine when this was really
92
- # experimental, but this is really sloppy now that we actually use this code for
93
- # models other than MegaDetector.
94
- USE_MODEL_NATIVE_CLASSES = False
95
-
96
- # Each version of the detector is associated with some "typical" values
97
- # that are included in output files, so that downstream applications can
98
- # use them as defaults.
99
- DETECTOR_METADATA = {
100
- 'v2.0.0':
101
- {'megadetector_version':'v2.0.0',
102
- 'typical_detection_threshold':0.8,
103
- 'conservative_detection_threshold':0.3},
104
- 'v3.0.0':
105
- {'megadetector_version':'v3.0.0',
106
- 'typical_detection_threshold':0.8,
107
- 'conservative_detection_threshold':0.3},
108
- 'v4.1.0':
109
- {'megadetector_version':'v4.1.0',
110
- 'typical_detection_threshold':0.8,
111
- 'conservative_detection_threshold':0.3},
112
- 'v5a.0.0':
113
- {'megadetector_version':'v5a.0.0',
114
- 'typical_detection_threshold':0.2,
115
- 'conservative_detection_threshold':0.05},
116
- 'v5b.0.0':
117
- {'megadetector_version':'v5b.0.0',
118
- 'typical_detection_threshold':0.2,
119
- 'conservative_detection_threshold':0.05}
120
- }
121
-
122
- DEFAULT_RENDERING_CONFIDENCE_THRESHOLD = DETECTOR_METADATA['v5b.0.0']['typical_detection_threshold']
123
- DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD = 0.005
124
-
125
- DEFAULT_BOX_THICKNESS = 4
126
- DEFAULT_BOX_EXPANSION = 0
127
- DEFAULT_LABEL_FONT_SIZE = 16
128
- DETECTION_FILENAME_INSERT = '_detections'
129
-
130
- # The model filenames "MDV5A", "MDV5B", and "MDV4" are special; they will trigger an
131
- # automatic model download to the system temp folder, or they will use the paths specified in the
132
- # $MDV4, $MDV5A, or $MDV5B environment variables if they exist.
133
- downloadable_models = {
134
- 'MDV4':'https://github.com/agentmorris/MegaDetector/releases/download/v4.1/md_v4.1.0.pb',
135
- 'MDV5A':'https://github.com/agentmorris/MegaDetector/releases/download/v5.0/md_v5a.0.0.pt',
136
- 'MDV5B':'https://github.com/agentmorris/MegaDetector/releases/download/v5.0/md_v5b.0.0.pt'
137
- }
138
-
139
-
140
- #%% Utility functions
141
-
142
- def convert_to_tf_coords(array):
143
- """
144
- From [x1, y1, width, height] to [y1, x1, y2, x2], where x1 is x_min, x2 is x_max
145
-
146
- This is only used to keep the interface of the synchronous API.
147
- """
148
-
149
- x1 = array[0]
150
- y1 = array[1]
151
- width = array[2]
152
- height = array[3]
153
- x2 = x1 + width
154
- y2 = y1 + height
155
- return [y1, x1, y2, x2]
156
-
157
-
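A quick worked example of the conversion above, with made-up normalized coordinates:

    convert_to_tf_coords([0.25, 0.5, 0.25, 0.25])   # [x1, y1, w, h] -> [y1, x1, y2, x2] = [0.5, 0.25, 0.75, 0.5]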
158
- def get_detector_metadata_from_version_string(detector_version):
159
- """
160
- Given a MegaDetector version string (e.g. "v4.1.0"), return the metadata for
161
- the model. Used for writing standard defaults to batch output files.
162
- """
163
-
164
- if detector_version not in DETECTOR_METADATA:
165
- print('Warning: no metadata for unknown detector version {}'.format(detector_version))
166
- default_detector_metadata = {
167
- 'megadetector_version':'unknown'
168
- }
169
- return default_detector_metadata
170
- else:
171
- return DETECTOR_METADATA[detector_version]
172
-
173
-
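For example, using the table defined earlier in this file:

    get_detector_metadata_from_version_string('v5a.0.0')
    # -> {'megadetector_version': 'v5a.0.0',
    #     'typical_detection_threshold': 0.2,
    #     'conservative_detection_threshold': 0.05}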
174
- def get_detector_version_from_filename(detector_filename):
175
- """
176
- Get the version number component of the detector from the model filename.
177
-
178
- *detector_filename* will almost always end with one of the following:
179
-
180
- megadetector_v2.pb
181
- megadetector_v3.pb
182
- megadetector_v4.1 (not produced by run_detector_batch.py, only found in Azure Batch API output files)
183
- md_v4.1.0.pb
184
- md_v5a.0.0.pt
185
- md_v5b.0.0.pt
186
-
187
- ...for which we identify the version number as "v2.0.0", "v3.0.0", "v4.1.0",
188
- "v4.1.0", "v5a.0.0", and "v5b.0.0", respectively.
189
- """
190
-
191
- fn = os.path.basename(detector_filename)
192
- known_model_versions = {'v2':'v2.0.0',
193
- 'v3':'v3.0.0',
194
- 'v4.1':'v4.1.0',
195
- 'v5a.0.0':'v5a.0.0',
196
- 'v5b.0.0':'v5b.0.0'}
197
- matches = []
198
- for s in known_model_versions.keys():
199
- if s in fn:
200
- matches.append(s)
201
- if len(matches) == 0:
202
- print('Warning: could not determine MegaDetector version for model file {}'.format(detector_filename))
203
- return 'unknown'
204
- elif len(matches) > 1:
205
- print('Warning: multiple MegaDetector versions for model file {}'.format(detector_filename))
206
- return 'multiple'
207
- else:
208
- return known_model_versions[matches[0]]
209
-
210
-
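Illustrative calls to the function above (filenames are examples only):

    get_detector_version_from_filename('md_v5a.0.0.pt')       # -> 'v5a.0.0'
    get_detector_version_from_filename('my_custom_model.pt')  # -> 'unknown' (prints a warning)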
211
- def get_typical_confidence_threshold_from_results(results):
212
- """
213
- Given the .json data loaded from a MD results file, determine a typical confidence
214
- threshold based on the detector version.
215
- """
216
-
217
- if 'detector_metadata' in results['info'] and \
218
- 'typical_detection_threshold' in results['info']['detector_metadata']:
219
- default_threshold = results['info']['detector_metadata']['typical_detection_threshold']
220
- elif ('detector' not in results['info']) or (results['info']['detector'] is None):
221
- print('Warning: detector version not available in results file, using MDv5 defaults')
222
- detector_metadata = get_detector_metadata_from_version_string('v5a.0.0')
223
- default_threshold = detector_metadata['typical_detection_threshold']
224
- else:
225
- print('Warning: detector metadata not available in results file, inferring from MD version')
226
- detector_filename = results['info']['detector']
227
- detector_version = get_detector_version_from_filename(detector_filename)
228
- detector_metadata = get_detector_metadata_from_version_string(detector_version)
229
- default_threshold = detector_metadata['typical_detection_threshold']
230
-
231
- return default_threshold
232
-
233
-
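A minimal sketch of using the function above to pick a rendering threshold from a MegaDetector results file (the filename is hypothetical):

    import json
    with open('md_results.json') as f:
        results = json.load(f)
    threshold = get_typical_confidence_threshold_from_results(results)
    print('Rendering detections above {}'.format(threshold))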
234
- def is_gpu_available(model_file):
235
- """
236
- Decide whether a GPU is available, importing PyTorch or TF depending on the extension
237
- of model_file. Does not actually load model_file, just uses that to determine how to check
238
- for GPU availability.
239
- """
240
-
241
- if model_file.endswith('.pb'):
242
- import tensorflow.compat.v1 as tf
243
- gpu_available = tf.test.is_gpu_available()
244
- print('TensorFlow version:', tf.__version__)
245
- print('tf.test.is_gpu_available:', gpu_available)
246
- return gpu_available
247
- elif model_file.endswith('.pt'):
248
- import torch
249
- gpu_available = torch.cuda.is_available()
250
- print('PyTorch reports {} available CUDA devices'.format(torch.cuda.device_count()))
251
- if not gpu_available:
252
- try:
253
- # mps backend only available in torch >= 1.12.0
254
- if torch.backends.mps.is_built() and torch.backends.mps.is_available():
255
- gpu_available = True
256
- print('PyTorch reports Metal Performance Shaders are available')
257
- except AttributeError:
258
- pass
259
- return gpu_available
260
- else:
261
- raise ValueError('Unrecognized model file extension for model {}'.format(model_file))
262
-
263
-
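For example, the model file extension only determines whether the check goes through PyTorch or TensorFlow:

    is_gpu_available('md_v5a.0.0.pt')   # .pt: checks torch.cuda (and Apple MPS) availability
    is_gpu_available('md_v4.1.0.pb')    # .pb: checks tf.test.is_gpu_available()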
264
- def load_detector(model_file, force_cpu=False):
265
- """
266
- Load a TF or PT detector, depending on the extension of model_file.
267
- """
268
-
269
- # Possibly automatically download the model
270
- model_file = try_download_known_detector(model_file)
271
-
272
- start_time = time.time()
273
- if model_file.endswith('.pb'):
274
- from detection.tf_detector import TFDetector
275
- if force_cpu:
276
- raise ValueError('force_cpu is not currently supported for TF detectors, ' + \
277
- 'use CUDA_VISIBLE_DEVICES=-1 instead')
278
- detector = TFDetector(model_file)
279
- elif model_file.endswith('.pt'):
280
- from detection.pytorch_detector import PTDetector
281
- detector = PTDetector(model_file, force_cpu, USE_MODEL_NATIVE_CLASSES)
282
- else:
283
- raise ValueError('Unrecognized model format: {}'.format(model_file))
284
- elapsed = time.time() - start_time
285
- print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))
286
- return detector
287
-
288
-
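A minimal sketch of running a loaded detector on one image, using the same calls the main loop below makes (the image path is hypothetical):

    detector = load_detector('MDV5A')
    image = vis_utils.load_image('/path/to/image.jpg')
    result = detector.generate_detections_one_image(image, '/path/to/image.jpg',
                                                    detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD)
    print(result['detections'])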
289
- #%% Main function
290
-
291
- def load_and_run_detector(model_file, image_file_names, output_dir,
292
- render_confidence_threshold=DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
293
- crop_images=False, box_thickness=DEFAULT_BOX_THICKNESS,
294
- box_expansion=DEFAULT_BOX_EXPANSION, image_size=None,
295
- label_font_size=DEFAULT_LABEL_FONT_SIZE
296
- ):
297
- """
298
- Load and run detector on target images, and visualize the results.
299
- """
300
-
301
- if len(image_file_names) == 0:
302
- print('Warning: no files available')
303
- return
304
-
305
- # Possibly automatically download the model
306
- model_file = try_download_known_detector(model_file)
307
-
308
- print('GPU available: {}'.format(is_gpu_available(model_file)))
309
-
310
- detector = load_detector(model_file)
311
-
312
- detection_results = []
313
- time_load = []
314
- time_infer = []
315
-
316
- # Dictionary mapping output file names to a collision-avoidance count.
317
- #
318
- # Since we'll be writing a bunch of files to the same folder, we rename
319
- # as necessary to avoid collisions.
320
- output_filename_collision_counts = {}
321
-
322
- def input_file_to_detection_file(fn, crop_index=-1):
323
- """
324
- Creates unique file names for output files.
325
-
326
- This function does 3 things:
327
- 1) If the --crop flag is used, then each input image may produce several output
328
- crops. For example, if foo.jpg has 3 detections, then this function should
329
- get called 3 times, with crop_index taking on 0, 1, then 2. Each time, this
330
- function appends crop_index to the filename, resulting in
331
- foo_crop00_detections.jpg
332
- foo_crop01_detections.jpg
333
- foo_crop02_detections.jpg
334
-
335
- 2) If the --recursive flag is used, then the same file (base)name may appear
336
- multiple times. However, we output into a single flat folder. To avoid
337
- filename collisions, we prepend an integer prefix to duplicate filenames:
338
- foo_crop00_detections.jpg
339
- 0000_foo_crop00_detections.jpg
340
- 0001_foo_crop00_detections.jpg
341
-
342
- 3) Prepends the output directory:
343
- out_dir/foo_crop00_detections.jpg
344
-
345
- Args:
346
- fn: str, filename
347
- crop_index: int, crop number
348
-
349
- Returns: output file path
350
- """
351
-
352
- fn = os.path.basename(fn).lower()
353
- name, ext = os.path.splitext(fn)
354
- if crop_index >= 0:
355
- name += '_crop{:0>2d}'.format(crop_index)
356
- fn = '{}{}{}'.format(name, DETECTION_FILENAME_INSERT, '.jpg')
357
- if fn in output_filename_collision_counts:
358
- n_collisions = output_filename_collision_counts[fn]
359
- fn = '{:0>4d}'.format(n_collisions) + '_' + fn
360
- output_filename_collision_counts[fn] += 1
361
- else:
362
- output_filename_collision_counts[fn] = 0
363
- fn = os.path.join(output_dir, fn)
364
- return fn
365
-
366
- # ...def input_file_to_detection_file()
367
-
368
- for im_file in tqdm(image_file_names):
369
-
370
- try:
371
- start_time = time.time()
372
-
373
- image = vis_utils.load_image(im_file)
374
-
375
- elapsed = time.time() - start_time
376
- time_load.append(elapsed)
377
-
378
- except Exception as e:
379
- print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
380
- result = {
381
- 'file': im_file,
382
- 'failure': FAILURE_IMAGE_OPEN
383
- }
384
- detection_results.append(result)
385
- continue
386
-
387
- try:
388
- start_time = time.time()
389
-
390
- result = detector.generate_detections_one_image(image, im_file,
391
- detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
392
- image_size=image_size)
393
- detection_results.append(result)
394
-
395
- elapsed = time.time() - start_time
396
- time_infer.append(elapsed)
397
-
398
- except Exception as e:
399
- print('An error occurred while running the detector on image {}. Exception: {}'.format(im_file, e))
400
- continue
401
-
402
- try:
403
- if crop_images:
404
-
405
- images_cropped = vis_utils.crop_image(result['detections'], image,
406
- confidence_threshold=render_confidence_threshold,
407
- expansion=box_expansion)
408
-
409
- for i_crop, cropped_image in enumerate(images_cropped):
410
- output_full_path = input_file_to_detection_file(im_file, i_crop)
411
- cropped_image.save(output_full_path)
412
-
413
- else:
414
-
415
- # Image is modified in place
416
- vis_utils.render_detection_bounding_boxes(result['detections'], image,
417
- label_map=DEFAULT_DETECTOR_LABEL_MAP,
418
- confidence_threshold=render_confidence_threshold,
419
- thickness=box_thickness, expansion=box_expansion,
420
- label_font_size=label_font_size)
421
- output_full_path = input_file_to_detection_file(im_file)
422
- image.save(output_full_path)
423
-
424
- except Exception as e:
425
- print('Visualizing results on the image {} failed. Exception: {}'.format(im_file, e))
426
- continue
427
-
428
- # ...for each image
429
-
430
- ave_time_load = statistics.mean(time_load)
431
- ave_time_infer = statistics.mean(time_infer)
432
- if len(time_load) > 1 and len(time_infer) > 1:
433
- std_dev_time_load = humanfriendly.format_timespan(statistics.stdev(time_load))
434
- std_dev_time_infer = humanfriendly.format_timespan(statistics.stdev(time_infer))
435
- else:
436
- std_dev_time_load = 'not available'
437
- std_dev_time_infer = 'not available'
438
- print('On average, for each image,')
439
- print('- loading took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_load),
440
- std_dev_time_load))
441
- print('- inference took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_infer),
442
- std_dev_time_infer))
443
-
444
- # ...def load_and_run_detector()
445
-
446
-
447
- def download_model(model_name,force_download=False):
448
- """
449
- Download one of the known models to local temp space if it hasn't already been downloaded
450
- """
451
-
452
- import tempfile
453
- from md_utils.url_utils import download_url
454
- model_tempdir = os.path.join(tempfile.gettempdir(), 'megadetector_models')
455
- os.makedirs(model_tempdir,exist_ok=True)
456
-
457
- # This is a lazy fix to an issue... if multiple users run this script, the
458
- # "megadetector_models" folder is owned by the first person who creates it, and others
459
- # can't write to it. I could create uniquely-named folders, but I philosophically prefer
460
- # to put all the individual UUID-named folders within a larger folder, so as to be a
461
- # good tempdir citizen. So, the lazy fix is to make this world-writable.
462
- try:
463
- os.chmod(model_tempdir,0o777)
464
- except Exception:
465
- pass
466
- if model_name not in downloadable_models:
467
- print('Unrecognized downloadable model {}'.format(model_name))
468
- return None
469
- url = downloadable_models[model_name]
470
- destination_filename = os.path.join(model_tempdir,url.split('/')[-1])
471
- local_file = download_url(url, destination_filename=destination_filename, progress_updater=None,
472
- force_download=force_download, verbose=True)
473
- return local_file
474
-
475
-
476
- def try_download_known_detector(detector_file):
477
- """
478
- Check whether detector_file is really the name of a known model, in which case we will
479
- either read the actual filename from the corresponding environment variable or download
480
- (if necessary) to local temp space. Otherwise just returns the input string.
481
- """
482
-
483
- if detector_file in downloadable_models:
484
- if detector_file in os.environ:
485
- fn = os.environ[detector_file]
486
- print('Reading MD location from environment variable {}: {}'.format(
487
- detector_file,fn))
488
- detector_file = fn
489
- else:
490
- print('Downloading model {}'.format(detector_file))
491
- detector_file = download_model(detector_file)
492
- return detector_file
493
-
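To illustrate the environment-variable override described in the docstring above (the path is hypothetical):

    import os
    os.environ['MDV5A'] = '/models/md_v5a.0.0.pt'
    model_file = try_download_known_detector('MDV5A')   # returns '/models/md_v5a.0.0.pt' without downloading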
494
-
495
-
496
- #%% Command-line driver
497
-
498
- def main():
499
-
500
- parser = argparse.ArgumentParser(
501
- description='Module to run an animal detection model on images')
502
-
503
- parser.add_argument(
504
- 'detector_file',
505
- help='Path to detector model file (.pb or .pt). Can also be MDV4, MDV5A, or MDV5B to request automatic download.')
506
-
507
- # Must specify either an image file or a directory
508
- group = parser.add_mutually_exclusive_group(required=True)
509
- group.add_argument(
510
- '--image_file',
511
- help='Single file to process, mutually exclusive with --image_dir')
512
- group.add_argument(
513
- '--image_dir',
514
- help='Directory to search for images, with optional recursion by adding --recursive')
515
-
516
- parser.add_argument(
517
- '--recursive',
518
- action='store_true',
519
- help='Recurse into directories, only meaningful if using --image_dir')
520
-
521
- parser.add_argument(
522
- '--output_dir',
523
- help='Directory for output images (defaults to same as input)')
524
-
525
- parser.add_argument(
526
- '--image_size',
527
- type=int,
528
- default=None,
529
- help=('Force image resizing to a (square) integer size (not recommended to change this)'))
530
-
531
- parser.add_argument(
532
- '--threshold',
533
- type=float,
534
- default=DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
535
- help=('Confidence threshold between 0 and 1.0; only render' +
536
- ' boxes above this confidence (defaults to {})'.format(
537
- DEFAULT_RENDERING_CONFIDENCE_THRESHOLD)))
538
-
539
- parser.add_argument(
540
- '--crop',
541
- default=False,
542
- action="store_true",
543
- help=('If set, produces separate output images for each crop, '
544
- 'rather than adding bounding boxes to the original image'))
545
-
546
- parser.add_argument(
547
- '--box_thickness',
548
- type=int,
549
- default=DEFAULT_BOX_THICKNESS,
550
- help=('Line width (in pixels) for box rendering (defaults to {})'.format(
551
- DEFAULT_BOX_THICKNESS)))
552
-
553
- parser.add_argument(
554
- '--box_expansion',
555
- type=int,
556
- default=DEFAULT_BOX_EXPANSION,
557
- help=('Number of pixels to expand boxes by (defaults to {})'.format(
558
- DEFAULT_BOX_EXPANSION)))
559
-
560
- parser.add_argument(
561
- '--label_font_size',
562
- type=int,
563
- default=DEFAULT_LABEL_FONT_SIZE,
564
- help=('Label font size (defaults to {})'.format(
565
- DEFAULT_LABEL_FONT_SIZE)))
566
-
567
- if len(sys.argv[1:]) == 0:
568
- parser.print_help()
569
- parser.exit()
570
-
571
- args = parser.parse_args()
572
-
573
- # If the specified detector file is really the name of a known model, find
574
- # (and possibly download) that model
575
- args.detector_file = try_download_known_detector(args.detector_file)
576
-
577
- assert os.path.exists(args.detector_file), 'detector file {} does not exist'.format(
578
- args.detector_file)
579
- assert 0.0 < args.threshold <= 1.0, 'Confidence threshold needs to be between 0 and 1'
580
-
581
- if args.image_file:
582
- image_file_names = [args.image_file]
583
- else:
584
- image_file_names = path_utils.find_images(args.image_dir, args.recursive)
585
-
586
- print('Running detector on {} images...'.format(len(image_file_names)))
587
-
588
- if args.output_dir:
589
- os.makedirs(args.output_dir, exist_ok=True)
590
- else:
591
- if args.image_dir:
592
- args.output_dir = args.image_dir
593
- else:
594
- # For a single image, args.image_dir is None, so use the folder containing the input image
595
- args.output_dir = os.path.dirname(args.image_file)
596
-
597
- load_and_run_detector(model_file=args.detector_file,
598
- image_file_names=image_file_names,
599
- output_dir=args.output_dir,
600
- render_confidence_threshold=args.threshold,
601
- box_thickness=args.box_thickness,
602
- box_expansion=args.box_expansion,
603
- crop_images=args.crop,
604
- image_size=args.image_size,
605
- label_font_size=args.label_font_size)
606
-
607
-
608
- if __name__ == '__main__':
609
- main()
610
-
611
-
612
- #%% Interactive driver
613
-
614
- if False:
615
-
616
- #%%
617
- model_file = r'c:\temp\models\md_v4.1.0.pb'
618
- image_file_names = path_utils.find_images(r'c:\temp\demo_images\ssverymini')
619
- output_dir = r'c:\temp\demo_images\ssverymini'
620
- render_confidence_threshold = 0.8
621
- crop_images = True
622
-
623
- load_and_run_detector(model_file=model_file,
624
- image_file_names=image_file_names,
625
- output_dir=output_dir,
626
- render_confidence_threshold=render_confidence_threshold,
627
- crop_images=crop_images)