megadetector 5.0.11__py3-none-any.whl → 5.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. Click here for more details.

Files changed (203) hide show
  1. megadetector/api/__init__.py +0 -0
  2. megadetector/api/batch_processing/__init__.py +0 -0
  3. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  4. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  5. megadetector/api/batch_processing/api_core/batch_service/score.py +439 -0
  6. megadetector/api/batch_processing/api_core/server.py +294 -0
  7. megadetector/api/batch_processing/api_core/server_api_config.py +97 -0
  8. megadetector/api/batch_processing/api_core/server_app_config.py +55 -0
  9. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +220 -0
  10. megadetector/api/batch_processing/api_core/server_job_status_table.py +149 -0
  11. megadetector/api/batch_processing/api_core/server_orchestration.py +360 -0
  12. megadetector/api/batch_processing/api_core/server_utils.py +88 -0
  13. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  14. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +46 -0
  15. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  16. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +152 -0
  17. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  18. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  19. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  20. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  21. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
  22. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  23. megadetector/api/synchronous/__init__.py +0 -0
  24. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  25. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +152 -0
  26. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +263 -0
  27. megadetector/api/synchronous/api_core/animal_detection_api/config.py +35 -0
  28. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  29. megadetector/api/synchronous/api_core/tests/load_test.py +110 -0
  30. megadetector/classification/__init__.py +0 -0
  31. megadetector/classification/aggregate_classifier_probs.py +108 -0
  32. megadetector/classification/analyze_failed_images.py +227 -0
  33. megadetector/classification/cache_batchapi_outputs.py +198 -0
  34. megadetector/classification/create_classification_dataset.py +627 -0
  35. megadetector/classification/crop_detections.py +516 -0
  36. megadetector/classification/csv_to_json.py +226 -0
  37. megadetector/classification/detect_and_crop.py +855 -0
  38. megadetector/classification/efficientnet/__init__.py +9 -0
  39. megadetector/classification/efficientnet/model.py +415 -0
  40. megadetector/classification/efficientnet/utils.py +607 -0
  41. megadetector/classification/evaluate_model.py +520 -0
  42. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  43. megadetector/classification/json_to_azcopy_list.py +63 -0
  44. megadetector/classification/json_validator.py +699 -0
  45. megadetector/classification/map_classification_categories.py +276 -0
  46. megadetector/classification/merge_classification_detection_output.py +506 -0
  47. megadetector/classification/prepare_classification_script.py +194 -0
  48. megadetector/classification/prepare_classification_script_mc.py +228 -0
  49. megadetector/classification/run_classifier.py +287 -0
  50. megadetector/classification/save_mislabeled.py +110 -0
  51. megadetector/classification/train_classifier.py +827 -0
  52. megadetector/classification/train_classifier_tf.py +725 -0
  53. megadetector/classification/train_utils.py +323 -0
  54. megadetector/data_management/__init__.py +0 -0
  55. megadetector/data_management/annotations/__init__.py +0 -0
  56. megadetector/data_management/annotations/annotation_constants.py +34 -0
  57. megadetector/data_management/camtrap_dp_to_coco.py +237 -0
  58. megadetector/data_management/cct_json_utils.py +404 -0
  59. megadetector/data_management/cct_to_md.py +176 -0
  60. megadetector/data_management/cct_to_wi.py +289 -0
  61. megadetector/data_management/coco_to_labelme.py +283 -0
  62. megadetector/data_management/coco_to_yolo.py +662 -0
  63. megadetector/data_management/databases/__init__.py +0 -0
  64. megadetector/data_management/databases/add_width_and_height_to_db.py +33 -0
  65. megadetector/data_management/databases/combine_coco_camera_traps_files.py +206 -0
  66. megadetector/data_management/databases/integrity_check_json_db.py +493 -0
  67. megadetector/data_management/databases/subset_json_db.py +115 -0
  68. megadetector/data_management/generate_crops_from_cct.py +149 -0
  69. megadetector/data_management/get_image_sizes.py +189 -0
  70. megadetector/data_management/importers/add_nacti_sizes.py +52 -0
  71. megadetector/data_management/importers/add_timestamps_to_icct.py +79 -0
  72. megadetector/data_management/importers/animl_results_to_md_results.py +158 -0
  73. megadetector/data_management/importers/auckland_doc_test_to_json.py +373 -0
  74. megadetector/data_management/importers/auckland_doc_to_json.py +201 -0
  75. megadetector/data_management/importers/awc_to_json.py +191 -0
  76. megadetector/data_management/importers/bellevue_to_json.py +273 -0
  77. megadetector/data_management/importers/cacophony-thermal-importer.py +793 -0
  78. megadetector/data_management/importers/carrizo_shrubfree_2018.py +269 -0
  79. megadetector/data_management/importers/carrizo_trail_cam_2017.py +289 -0
  80. megadetector/data_management/importers/cct_field_adjustments.py +58 -0
  81. megadetector/data_management/importers/channel_islands_to_cct.py +913 -0
  82. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +180 -0
  83. megadetector/data_management/importers/eMammal/eMammal_helpers.py +249 -0
  84. megadetector/data_management/importers/eMammal/make_eMammal_json.py +223 -0
  85. megadetector/data_management/importers/ena24_to_json.py +276 -0
  86. megadetector/data_management/importers/filenames_to_json.py +386 -0
  87. megadetector/data_management/importers/helena_to_cct.py +283 -0
  88. megadetector/data_management/importers/idaho-camera-traps.py +1407 -0
  89. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +294 -0
  90. megadetector/data_management/importers/jb_csv_to_json.py +150 -0
  91. megadetector/data_management/importers/mcgill_to_json.py +250 -0
  92. megadetector/data_management/importers/missouri_to_json.py +490 -0
  93. megadetector/data_management/importers/nacti_fieldname_adjustments.py +79 -0
  94. megadetector/data_management/importers/noaa_seals_2019.py +181 -0
  95. megadetector/data_management/importers/pc_to_json.py +365 -0
  96. megadetector/data_management/importers/plot_wni_giraffes.py +123 -0
  97. megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +359 -0
  98. megadetector/data_management/importers/prepare_zsl_imerit.py +131 -0
  99. megadetector/data_management/importers/rspb_to_json.py +356 -0
  100. megadetector/data_management/importers/save_the_elephants_survey_A.py +320 -0
  101. megadetector/data_management/importers/save_the_elephants_survey_B.py +329 -0
  102. megadetector/data_management/importers/snapshot_safari_importer.py +758 -0
  103. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +665 -0
  104. megadetector/data_management/importers/snapshot_serengeti_lila.py +1067 -0
  105. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +150 -0
  106. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +153 -0
  107. megadetector/data_management/importers/sulross_get_exif.py +65 -0
  108. megadetector/data_management/importers/timelapse_csv_set_to_json.py +490 -0
  109. megadetector/data_management/importers/ubc_to_json.py +399 -0
  110. megadetector/data_management/importers/umn_to_json.py +507 -0
  111. megadetector/data_management/importers/wellington_to_json.py +263 -0
  112. megadetector/data_management/importers/wi_to_json.py +442 -0
  113. megadetector/data_management/importers/zamba_results_to_md_results.py +181 -0
  114. megadetector/data_management/labelme_to_coco.py +547 -0
  115. megadetector/data_management/labelme_to_yolo.py +272 -0
  116. megadetector/data_management/lila/__init__.py +0 -0
  117. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +97 -0
  118. megadetector/data_management/lila/add_locations_to_nacti.py +147 -0
  119. megadetector/data_management/lila/create_lila_blank_set.py +558 -0
  120. megadetector/data_management/lila/create_lila_test_set.py +152 -0
  121. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  122. megadetector/data_management/lila/download_lila_subset.py +178 -0
  123. megadetector/data_management/lila/generate_lila_per_image_labels.py +516 -0
  124. megadetector/data_management/lila/get_lila_annotation_counts.py +170 -0
  125. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  126. megadetector/data_management/lila/lila_common.py +300 -0
  127. megadetector/data_management/lila/test_lila_metadata_urls.py +132 -0
  128. megadetector/data_management/ocr_tools.py +870 -0
  129. megadetector/data_management/read_exif.py +809 -0
  130. megadetector/data_management/remap_coco_categories.py +84 -0
  131. megadetector/data_management/remove_exif.py +66 -0
  132. megadetector/data_management/rename_images.py +187 -0
  133. megadetector/data_management/resize_coco_dataset.py +189 -0
  134. megadetector/data_management/wi_download_csv_to_coco.py +247 -0
  135. megadetector/data_management/yolo_output_to_md_output.py +446 -0
  136. megadetector/data_management/yolo_to_coco.py +676 -0
  137. megadetector/detection/__init__.py +0 -0
  138. megadetector/detection/detector_training/__init__.py +0 -0
  139. megadetector/detection/detector_training/model_main_tf2.py +114 -0
  140. megadetector/detection/process_video.py +846 -0
  141. megadetector/detection/pytorch_detector.py +355 -0
  142. megadetector/detection/run_detector.py +779 -0
  143. megadetector/detection/run_detector_batch.py +1219 -0
  144. megadetector/detection/run_inference_with_yolov5_val.py +1087 -0
  145. megadetector/detection/run_tiled_inference.py +934 -0
  146. megadetector/detection/tf_detector.py +192 -0
  147. megadetector/detection/video_utils.py +698 -0
  148. megadetector/postprocessing/__init__.py +0 -0
  149. megadetector/postprocessing/add_max_conf.py +64 -0
  150. megadetector/postprocessing/categorize_detections_by_size.py +165 -0
  151. megadetector/postprocessing/classification_postprocessing.py +716 -0
  152. megadetector/postprocessing/combine_api_outputs.py +249 -0
  153. megadetector/postprocessing/compare_batch_results.py +966 -0
  154. megadetector/postprocessing/convert_output_format.py +396 -0
  155. megadetector/postprocessing/load_api_results.py +195 -0
  156. megadetector/postprocessing/md_to_coco.py +310 -0
  157. megadetector/postprocessing/md_to_labelme.py +330 -0
  158. megadetector/postprocessing/merge_detections.py +412 -0
  159. megadetector/postprocessing/postprocess_batch_results.py +1908 -0
  160. megadetector/postprocessing/remap_detection_categories.py +170 -0
  161. megadetector/postprocessing/render_detection_confusion_matrix.py +660 -0
  162. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +211 -0
  163. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +83 -0
  164. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1635 -0
  165. megadetector/postprocessing/separate_detections_into_folders.py +730 -0
  166. megadetector/postprocessing/subset_json_detector_output.py +700 -0
  167. megadetector/postprocessing/top_folders_to_bottom.py +223 -0
  168. megadetector/taxonomy_mapping/__init__.py +0 -0
  169. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  170. megadetector/taxonomy_mapping/map_new_lila_datasets.py +150 -0
  171. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +142 -0
  172. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +588 -0
  173. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  174. megadetector/taxonomy_mapping/simple_image_download.py +219 -0
  175. megadetector/taxonomy_mapping/species_lookup.py +834 -0
  176. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  177. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  178. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  179. megadetector/utils/__init__.py +0 -0
  180. megadetector/utils/azure_utils.py +178 -0
  181. megadetector/utils/ct_utils.py +613 -0
  182. megadetector/utils/directory_listing.py +246 -0
  183. megadetector/utils/md_tests.py +1164 -0
  184. megadetector/utils/path_utils.py +1045 -0
  185. megadetector/utils/process_utils.py +160 -0
  186. megadetector/utils/sas_blob_utils.py +509 -0
  187. megadetector/utils/split_locations_into_train_val.py +228 -0
  188. megadetector/utils/string_utils.py +92 -0
  189. megadetector/utils/url_utils.py +323 -0
  190. megadetector/utils/write_html_image_list.py +225 -0
  191. megadetector/visualization/__init__.py +0 -0
  192. megadetector/visualization/plot_utils.py +293 -0
  193. megadetector/visualization/render_images_with_thumbnails.py +275 -0
  194. megadetector/visualization/visualization_utils.py +1536 -0
  195. megadetector/visualization/visualize_db.py +552 -0
  196. megadetector/visualization/visualize_detector_output.py +405 -0
  197. {megadetector-5.0.11.dist-info → megadetector-5.0.13.dist-info}/LICENSE +0 -0
  198. {megadetector-5.0.11.dist-info → megadetector-5.0.13.dist-info}/METADATA +2 -2
  199. megadetector-5.0.13.dist-info/RECORD +201 -0
  200. megadetector-5.0.13.dist-info/top_level.txt +1 -0
  201. megadetector-5.0.11.dist-info/RECORD +0 -5
  202. megadetector-5.0.11.dist-info/top_level.txt +0 -1
  203. {megadetector-5.0.11.dist-info → megadetector-5.0.13.dist-info}/WHEEL +0 -0
@@ -0,0 +1,934 @@
1
+ """
2
+
3
+ run_tiled_inference.py
4
+
5
+ **This script is experimental, YMMV.**
6
+
7
+ Runs inference on a folder, first splitting each image up into tiles of size
8
+ MxN (typically the native inference size of your detector), writing those
9
+ tiles out to a temporary folder, then de-duplicating the resulting detections before
10
+ merging them back into a set of detections that make sense on the original images.
11
+
12
+ This approach will likely fail to detect very large animals, so if you expect both large
13
+ and small animals (in terms of pixel size), this script is best used in
14
+ conjunction with a traditional inference pass that looks at whole images.
15
+
16
+ Currently requires temporary storage at least as large as the input data, generally
17
+ a lot more than that (depending on the overlap between adjacent tiles). This is
18
+ inefficient, but easy to debug.
19
+
20
+ Programmatic invocation supports using YOLOv5's inference scripts (and test-time
21
+ augmentation); the command-line interface only supports standard inference right now.
22
+
23
+ """
24
+
25
+ #%% Imports and constants
26
+
27
+ import os
28
+ import json
29
+
30
+ from tqdm import tqdm
31
+
32
+ import torch
33
+ from torchvision import ops
34
+
35
+ from megadetector.detection.run_inference_with_yolov5_val import YoloInferenceOptions,run_inference_with_yolo_val
36
+ from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
37
+ from megadetector.detection.run_detector import try_download_known_detector
38
+ from megadetector.utils import path_utils
39
+ from megadetector.visualization import visualization_utils as vis_utils
40
+
41
# Default fraction of overlap between adjacent patches (0.5 == 50% overlap)
default_patch_overlap = 0.5

# JPEG quality used when writing patch images to the tiling folder
patch_jpeg_quality = 95

# This isn't NMS in the usual sense of redundant model predictions; this is being
# used to de-duplicate predictions from overlapping patches.
nms_iou_threshold = 0.45

# Default tile size (w,h); typically the native inference size of the detector
default_tile_size = [1280,1280]

# Default worker count for patch extraction (<= 1 disables parallelization)
default_n_patch_extraction_workers = 1
# NOTE(review): presumably selects threads (True) vs. processes (False) for
# patch-extraction parallelization — usage is not visible in this chunk, confirm
parallelization_uses_threads = False
52
+
53
+
54
+ #%% Support functions
55
+
56
def get_patch_boundaries(image_size,patch_size,patch_stride=None):
    """
    Computes a list of patch starting coordinates (x,y) given an image size (w,h)
    and a stride (x,y).

    The patch size is always honored; the stride shrinks near the right/bottom
    edge so that every pixel is covered.  I.e., we move by regular strides until
    the current patch walks off the right/bottom, at which point it backs up so
    it ends exactly at the edge.  So for an image 15 pixels wide, a 10-pixel
    patch, and a 10-pixel stride, the start positions are 0 (pixels 0-9) and
    5 (pixels 5-14).

    Args:
        image_size (tuple): size of the image you want to divide into patches, as a length-2 tuple (w,h)
        patch_size (tuple): patch size into which you want to divide an image, as a length-2 tuple (w,h)
        patch_stride (tuple or float, optional): stride between patches, as a length-2 tuple (x,y), or a
            float; if this is a float, it's interpreted as the stride relative to the patch size
            (0.1 == 10% stride). Defaults to half the patch size.

    Returns:
        list: list of length-2 lists, each representing the x/y start position of a patch,
        ordered row by row (top-left patch first)
    """

    # Resolve the stride: default to (1 - default_patch_overlap) of the patch
    # size, or scale the patch size when a float fraction is supplied
    if patch_stride is None:
        patch_stride = (round(patch_size[0]*(1.0-default_patch_overlap)),
                        round(patch_size[1]*(1.0-default_patch_overlap)))
    elif isinstance(patch_stride,float):
        patch_stride = (round(patch_size[0]*patch_stride),
                        round(patch_size[1]*patch_stride))

    image_width = image_size[0]
    image_height = image_size[1]

    assert patch_size[0] <= image_size[0], 'Patch width {} is larger than image width {}'.format(
        patch_size[0],image_size[0])
    assert patch_size[1] <= image_size[1], 'Patch height {} is larger than image height {}'.format(
        patch_size[1],image_size[1])

    def axis_start_positions(axis_length,patch_length,stride_length):
        """
        Computes 1-D start offsets along a single axis; the final patch is
        shifted back so it ends exactly at the axis boundary.
        """

        starts = []
        position = 0

        while True:

            starts.append(position)

            # This patch ends exactly at the edge, so we're done
            if position + patch_length == axis_length:
                break

            # Move one stride forward
            position += stride_length

            # If this patch would flow over the edge, add one more patch that
            # ends exactly at the edge, then we're done
            if position + patch_length > axis_length:
                starts.append(axis_length - patch_length)
                break

        return starts

    # The row/column positions are independent, so compute each axis once and
    # take the cross product, iterating rows in the outer loop
    x_starts = axis_start_positions(image_width,patch_size[0],patch_stride[0])
    y_starts = axis_start_positions(image_height,patch_size[1],patch_stride[1])

    patch_start_positions = [[x,y] for y in y_starts for x in x_starts]

    for p in patch_start_positions:
        assert p[0] >= 0 and p[1] >= 0 and p[0] <= image_width and p[1] <= image_height, \
            'Patch generation error (illegal patch {})'.format(p)

    # The last patch should always end at the bottom-right of the image
    assert patch_start_positions[-1][0]+patch_size[0] == image_width, \
        'Patch generation error (last patch does not end on the right)'
    assert patch_start_positions[-1][1]+patch_size[1] == image_height, \
        'Patch generation error (last patch does not end at the bottom)'

    # All patches should be unique
    patch_start_positions_tuples = [tuple(x) for x in patch_start_positions]
    assert len(patch_start_positions_tuples) == len(set(patch_start_positions_tuples)), \
        'Patch generation error (duplicate start position)'

    return patch_start_positions

# ...get_patch_boundaries()
171
+
172
+
173
def patch_info_to_patch_name(image_name,patch_x_min,patch_y_min):
    """
    Generates a unique string identifier for a patch from its source image name
    and upper-left coordinate, e.g. turns ("a.jpg",10,20) into "a.jpg_0010_0020".

    Args:
        image_name (str): image identifier
        patch_x_min (int): x coordinate
        patch_y_min (int): y coordinate

    Returns:
        str: name for this patch, e.g. "a.jpg_0010_0020"
    """

    # Coordinates are zero-padded to four digits
    return '_'.join([image_name,
                     str(patch_x_min).zfill(4),
                     str(patch_y_min).zfill(4)])
189
+
190
+
191
def extract_patch_from_image(im,
                             patch_xy,
                             patch_size,
                             patch_image_fn=None,
                             patch_folder=None,
                             image_name=None,
                             overwrite=True):
    """
    Extracts a patch from the provided image, and writes that patch out to a new file.

    Args:
        im (str or Image): image from which we should extract a patch, can be a filename or
            a PIL Image object.
        patch_xy (tuple): length-2 tuple of ints (x,y) representing the upper-left corner
            of the patch to extract
        patch_size (tuple): length-2 tuple of ints (w,h) representing the size of the
            patch to extract
        patch_image_fn (str, optional): image filename to write the patch to; if this is None
            the filename will be generated from [image_name] and the patch coordinates
        patch_folder (str, optional): folder in which the image lives; only used to generate
            a patch filename, so only required if [patch_image_fn] is None
        image_name (str, optional): the identifier of the source image; only used to generate
            a patch filename, so only required if [patch_image_fn] is None
        overwrite (bool, optional): whether to overwrite an existing patch image

    Returns:
        dict: a dictionary with fields xmin,xmax,ymin,ymax,patch_fn
    """

    # Load the image if we were handed a filename rather than an image object
    pil_im = vis_utils.open_image(im) if isinstance(im,str) else im

    # Inclusive patch bounds within the source image
    x_min = patch_xy[0]
    y_min = patch_xy[1]
    x_max = x_min + patch_size[0] - 1
    y_max = y_min + patch_size[1] - 1

    # PIL's crop() treats the right/bottom coordinates as exclusive, despite
    # how the argument names read, so we add 1 to the max values:
    #
    # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#coordinate-system
    patch_im = pil_im.crop((x_min,y_min,x_max+1,y_max+1))
    assert patch_im.size[0] == patch_size[0]
    assert patch_im.size[1] == patch_size[1]

    # Generate an output filename from the image name and coordinates if one
    # wasn't supplied
    if patch_image_fn is None:
        assert patch_folder is not None,\
            "If you don't supply a patch filename to extract_patch_from_image, you need to supply a folder name"
        patch_name = patch_info_to_patch_name(image_name,x_min,y_min)
        patch_image_fn = os.path.join(patch_folder,patch_name + '.jpg')

    # Skip the write only when the file already exists and overwrite is disabled
    if overwrite or (not os.path.isfile(patch_image_fn)):
        patch_im.save(patch_image_fn,quality=patch_jpeg_quality)

    return {
        'xmin':x_min,
        'xmax':x_max,
        'ymin':y_min,
        'ymax':y_max,
        'patch_fn':patch_image_fn
    }

# ...def extract_patch_from_image(...)
262
+
263
+
264
def in_place_nms(md_results, iou_thres=0.45, verbose=True):
    """
    Runs torch.ops.nms in-place on MD-formatted detection results.

    This isn't NMS in the usual sense of de-duplicating redundant model
    predictions; it's used here to de-duplicate predictions from overlapping
    patches.

    Args:
        md_results (dict): detection results for a list of images, in MD results format (i.e.,
            containing a list of image dicts with the key 'images', each of which has a list
            of detections with the key 'detections')
        iou_thres (float, optional): IoU threshold above which we will treat two detections as
            redundant
        verbose (bool, optional): enable additional debug console output
    """

    pre_nms_count = 0
    post_nms_count = 0

    for _,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):

        detections = im['detections']

        if (detections is None) or (len(detections) == 0):
            continue

        pre_nms_count += len(detections)

        # Convert MD-style [x,y,w,h] boxes to the [x1,y1,x2,y2] form that
        # torch expects (x1/x2 notation per the Torch documentation)
        box_list = [[d['bbox'][0],
                     d['bbox'][1],
                     d['bbox'][0] + d['bbox'][2],
                     d['bbox'][1] + d['bbox'][3]] for d in detections]
        score_list = [d['conf'] for d in detections]

        keep_indices = ops.nms(torch.tensor(box_list),
                               torch.tensor(score_list),
                               iou_thres).tolist()

        surviving_detections = [detections[i] for i in keep_indices]
        assert len(surviving_detections) <= len(detections)

        im['detections'] = surviving_detections
        post_nms_count += len(surviving_detections)

    # ...for each image

    if verbose:
        print('NMS removed {} of {} detections'.format(
            pre_nms_count-post_nms_count,
            pre_nms_count))

# ...in_place_nms()
327
+
328
+
329
def _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,overwrite):
    """
    Private function to extract tiles for a single image.

    Returns a dict with fields:

    - 'patches' (list): one dict per extracted tile (see extract_patch_from_image),
      each with an additional 'source_fn' field holding the relative source path
    - 'image_fn' (str): the relative source image path
    - 'error' (str or None): failure details if an exception occurred, else None

    If there is an error, 'error' will be non-None and 'patches' will contain
    whatever tiles were successfully extracted before the failure (possibly an
    empty list).
    """

    fn_abs = os.path.join(image_folder,fn_relative)
    error = None
    patches = []

    # Sanitized, lowercased identifier used to name this image's patch files
    image_name = path_utils.clean_filename(fn_relative,char_limit=None,force_lower=True)

    try:

        # Open the image
        im = vis_utils.open_image(fn_abs)
        image_size = [im.width,im.height]

        # Generate patch boundaries (a list of [x,y] starting points)
        patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)

        # Extract patches
        for patch_xy in patch_boundaries:

            patch_info = extract_patch_from_image(im,patch_xy,patch_size,
                                                  patch_folder=tiling_folder,
                                                  image_name=image_name,
                                                  overwrite=overwrite)
            patch_info['source_fn'] = fn_relative
            patches.append(patch_info)

    except Exception as e:

        # Best-effort: record the failure but still return the partial result
        s = 'Patch generation error for {}: \n{}'.format(fn_relative,str(e))
        print(s)
        error = s

    image_patch_info = {}
    image_patch_info['patches'] = patches
    image_patch_info['image_fn'] = fn_relative
    image_patch_info['error'] = error

    return image_patch_info
379
+
380
+
381
+ #%% Main function
382
+
383
+ def run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
384
+ tile_size_x=1280, tile_size_y=1280, tile_overlap=0.5,
385
+ checkpoint_path=None, checkpoint_frequency=-1, remove_tiles=False,
386
+ yolo_inference_options=None,
387
+ n_patch_extraction_workers=default_n_patch_extraction_workers,
388
+ overwrite_tiles=True,
389
+ image_list=None):
390
+ """
391
+ Runs inference using [model_file] on the images in [image_folder], first splitting each image up
392
+ into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
393
+ then de-duplicating the results before merging them back into a set of detections that make
394
+ sense on the original images and writing those results to [output_file].
395
+
396
+ [tiling_folder] can be any folder, but this function reserves the right to do whatever it wants
397
+ within that folder, including deleting everything, so it's best if it's a new folder.
398
+ Conceptually this folder is temporary, it's just helpful in this case to not actually
399
+ use the system temp folder, because the tile cache may be very large, so the caller may
400
+ want it to be on a specific drive.
401
+
402
+ tile_overlap is the fraction of overlap between tiles.
403
+
404
+ Optionally removes the temporary tiles.
405
+
406
+ if yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
407
+ this case the model will be run with run_inference_with_yolov5_val. This is typically used to
408
+ run the model with test-time augmentation.
409
+
410
+ Args:
411
+ model_file (str): model filename (ending in .pt), or a well-known model name (e.g. "MDV5A")
412
+ image_folder (str): the folder of images to process (always recursive)
413
+ tiling_folder (str): folder for temporary tile storage; see caveats above
414
+ output_file (str): .json file to which we should write MD-formatted results
415
+ tile_size_x (int, optional): tile width
416
+ tile_size_y (int, optional): tile height
417
+ tile_overlap (float, optional): overlap between adjacent tiles, as a fraction of the
418
+ tile size
419
+ checkpoint_path (str, optional): checkpoint path; passed directly to run_detector_batch; see
420
+ run_detector_batch for details
421
+ checkpoint_frequency (int, optional): checkpoint frequency; passed directly to run_detector_batch; see
422
+ run_detector_batch for details
423
+ remove_tiles (bool, optional): whether to delete the tiles when we're done
424
+ yolo_inference_options (YoloInferenceOptions, optional): if not None, will run inference with
425
+ run_inference_with_yolov5_val.py, rather than with run_detector_batch.py, using these options
426
+ n_patch_extraction_workers (int, optional): number of workers to use for patch extraction;
427
+ set to <= 1 to disable parallelization
428
+ image_list (list, optional): .json file containing a list of specific images to process. If
429
+ this is supplied, and the paths are absolute, [image_folder] will be ignored. If this is supplied,
430
+ and the paths are relative, they should be relative to [image_folder].
431
+
432
+ Returns:
433
+ dict: MD-formatted results dictionary, identical to what's written to [output_file]
434
+ """
435
+
436
+ ##%% Validate arguments
437
+
438
+ assert tile_overlap < 1 and tile_overlap >= 0, \
439
+ 'Illegal tile overlap value {}'.format(tile_overlap)
440
+
441
+ if tile_size_x == -1:
442
+ tile_size_x = default_tile_size[0]
443
+ if tile_size_y == -1:
444
+ tile_size_y = default_tile_size[1]
445
+
446
+ patch_size = [tile_size_x,tile_size_y]
447
+ patch_stride = (round(patch_size[0]*(1.0-tile_overlap)),
448
+ round(patch_size[1]*(1.0-tile_overlap)))
449
+
450
+ os.makedirs(tiling_folder,exist_ok=True)
451
+
452
+ ##%% List files
453
+
454
+ if image_list is None:
455
+
456
+ print('Enumerating images in {}'.format(image_folder))
457
+ image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
458
+ assert len(image_files_relative) > 0, 'No images found in folder {}'.format(image_folder)
459
+
460
+ else:
461
+
462
+ print('Loading image list from {}'.format(image_list))
463
+ with open(image_list,'r') as f:
464
+ image_files_relative = json.load(f)
465
+ n_absolute_paths = 0
466
+ for i_fn,fn in enumerate(image_files_relative):
467
+ if os.path.isabs(fn):
468
+ n_absolute_paths += 1
469
+ try:
470
+ fn_relative = os.path.relpath(fn,image_folder)
471
+ except ValueError:
472
+ 'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
473
+ fn,image_folder)
474
+ raise
475
+ assert not fn_relative.startswith('..'), \
476
+ 'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
477
+ fn,image_folder)
478
+ image_files_relative[i_fn] = fn_relative
479
+ if (n_absolute_paths != 0) and (n_absolute_paths != len(image_files_relative)):
480
+ raise ValueError('Illegal file list: converted {} of {} paths to relative'.format(
481
+ n_absolute_paths,len(image_files_relative)))
482
+
483
+ ##%% Generate tiles
484
+
485
+ all_image_patch_info = None
486
+
487
+ print('Extracting patches from {} images'.format(len(image_files_relative)))
488
+
489
+ n_workers = n_patch_extraction_workers
490
+
491
+ if n_workers <= 1:
492
+
493
+ all_image_patch_info = []
494
+
495
+ # fn_relative = image_files_relative[0]
496
+ for fn_relative in tqdm(image_files_relative):
497
+ image_patch_info = \
498
+ _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,
499
+ overwrite=overwrite_tiles)
500
+ all_image_patch_info.append(image_patch_info)
501
+
502
+ else:
503
+
504
+ from multiprocessing.pool import ThreadPool
505
+ from multiprocessing.pool import Pool
506
+ from functools import partial
507
+
508
+ if n_workers > len(image_files_relative):
509
+
510
+ print('Pool of {} requested, but only {} images available, reducing pool to {}'.\
511
+ format(n_workers,len(image_files_relative),len(image_files_relative)))
512
+ n_workers = len(image_files_relative)
513
+
514
+ if parallelization_uses_threads:
515
+ pool = ThreadPool(n_workers); poolstring = 'threads'
516
+ else:
517
+ pool = Pool(n_workers); poolstring = 'processes'
518
+
519
+ print('Starting patch extraction pool with {} {}'.format(n_workers,poolstring))
520
+
521
+ all_image_patch_info = list(tqdm(pool.imap(
522
+ partial(_extract_tiles_for_image,
523
+ image_folder=image_folder,
524
+ tiling_folder=tiling_folder,
525
+ patch_size=patch_size,
526
+ patch_stride=patch_stride,
527
+ overwrite=overwrite_tiles),
528
+ image_files_relative),total=len(image_files_relative)))
529
+
530
+ # ...for each image
531
+
532
+ # Write tile information to file; this is just a debugging convenience
533
+ folder_name = path_utils.clean_filename(image_folder,force_lower=True)
534
+ if folder_name.startswith('_'):
535
+ folder_name = folder_name[1:]
536
+
537
+ tile_cache_file = os.path.join(tiling_folder,folder_name + '_patch_info.json')
538
+ with open(tile_cache_file,'w') as f:
539
+ json.dump(all_image_patch_info,f,indent=1)
540
+
541
+ # Keep track of patches that failed
542
+ images_with_patch_errors = {}
543
+ for patch_info in all_image_patch_info:
544
+ if patch_info['error'] is not None:
545
+ images_with_patch_errors[patch_info['image_fn']] = patch_info
546
+
547
+
548
+ ##%% Run inference on tiles
549
+
550
+ # When running with run_inference_with_yolov5_val, we'll pass the folder
551
+ if yolo_inference_options is not None:
552
+
553
+ patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
554
+
555
+ if yolo_inference_options.model_filename is None:
556
+ yolo_inference_options.model_filename = model_file
557
+ else:
558
+ assert yolo_inference_options.model_filename == model_file, \
559
+ 'Model file between yolo inference file ({}) and model file parameter ({})'.format(
560
+ yolo_inference_options.model_filename,model_file)
561
+
562
+ yolo_inference_options.input_folder = tiling_folder
563
+ yolo_inference_options.output_file = patch_level_output_file
564
+
565
+ run_inference_with_yolo_val(yolo_inference_options)
566
+ with open(patch_level_output_file,'r') as f:
567
+ patch_level_results = json.load(f)
568
+
569
+ # For standard inference, we'll pass a list of files
570
+ else:
571
+
572
+ patch_file_names = []
573
+ for im in all_image_patch_info:
574
+ # If there was a patch generation error, don't run inference
575
+ if patch_info['error'] is not None:
576
+ assert im['image_fn'] in images_with_patch_errors
577
+ continue
578
+ for patch in im['patches']:
579
+ patch_file_names.append(patch['patch_fn'])
580
+
581
+ inference_results = load_and_run_detector_batch(model_file,
582
+ patch_file_names,
583
+ checkpoint_path=checkpoint_path,
584
+ checkpoint_frequency=checkpoint_frequency,
585
+ quiet=True)
586
+
587
+ patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
588
+
589
+ patch_level_results = write_results_to_file(inference_results,
590
+ patch_level_output_file,
591
+ relative_path_base=tiling_folder,
592
+ detector_file=model_file)
593
+
594
+
595
+ ##%% Map patch-level detections back to the original images
596
+
597
+ # Map relative paths for patches to detections
598
+ patch_fn_relative_to_results = {}
599
+ for im in tqdm(patch_level_results['images']):
600
+ patch_fn_relative_to_results[im['file']] = im
601
+
602
+ image_level_results = {}
603
+ image_level_results['info'] = patch_level_results['info']
604
+ image_level_results['detection_categories'] = patch_level_results['detection_categories']
605
+ image_level_results['images'] = []
606
+
607
+ image_fn_relative_to_patch_info = { x['image_fn']:x for x in all_image_patch_info }
608
+
609
+ # i_image = 0; image_fn_relative = image_files_relative[i_image]
610
+ for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),
611
+ total=len(image_files_relative)):
612
+
613
+ image_fn_abs = os.path.join(image_folder,image_fn_relative)
614
+ assert os.path.isfile(image_fn_abs)
615
+
616
+ output_im = {}
617
+ output_im['file'] = image_fn_relative
618
+
619
+ # If we had a patch generation error
620
+ if image_fn_relative in images_with_patch_errors:
621
+
622
+ patch_info = image_fn_relative_to_patch_info[image_fn_relative]
623
+ assert patch_info['error'] is not None
624
+
625
+ output_im['detections'] = None
626
+ output_im['failure'] = 'Patch generation error'
627
+ output_im['failure_details'] = patch_info['error']
628
+ image_level_results['images'].append(output_im)
629
+ continue
630
+
631
+ try:
632
+ pil_im = vis_utils.open_image(image_fn_abs)
633
+ image_w = pil_im.size[0]
634
+ image_h = pil_im.size[1]
635
+
636
+ # This would be a very unusual situation; we're reading back an image here that we already
637
+ # (successfully) read once during patch generation.
638
+ except Exception as e:
639
+ print('Warning: image read error after successful patch generation for {}:\n{}'.format(
640
+ image_fn_relative,str(e)))
641
+ output_im['detections'] = None
642
+ output_im['failure'] = 'Patch processing error'
643
+ output_im['failure_details'] = str(e)
644
+ image_level_results['images'].append(output_im)
645
+ continue
646
+
647
+ output_im['detections'] = []
648
+
649
+ image_patch_info = image_fn_relative_to_patch_info[image_fn_relative]
650
+ assert image_patch_info['patches'][0]['source_fn'] == image_fn_relative
651
+
652
+ # Patches for this image
653
+ patch_fn_abs_to_patch_info_this_image = {}
654
+
655
+ for patch_info in image_patch_info['patches']:
656
+ patch_fn_abs_to_patch_info_this_image[patch_info['patch_fn']] = patch_info
657
+
658
+ # For each patch
659
+ #
660
+ # i_patch = 0; patch_fn_abs = list(patch_fn_abs_to_patch_info_this_image.keys())[i_patch]
661
+ for i_patch,patch_fn_abs in enumerate(patch_fn_abs_to_patch_info_this_image.keys()):
662
+
663
+ patch_fn_relative = os.path.relpath(patch_fn_abs,tiling_folder)
664
+ patch_results = patch_fn_relative_to_results[patch_fn_relative]
665
+ patch_info = patch_fn_abs_to_patch_info_this_image[patch_fn_abs]
666
+
667
+ # patch_results['file'] is a relative path, and a subset of patch_info['patch_fn']
668
+ assert patch_results['file'] in patch_info['patch_fn']
669
+
670
+ patch_w = (patch_info['xmax'] - patch_info['xmin']) + 1
671
+ patch_h = (patch_info['ymax'] - patch_info['ymin']) + 1
672
+ assert patch_w == patch_size[0]
673
+ assert patch_h == patch_size[1]
674
+
675
+ # If there was an inference failure on one patch, report the image
676
+ # as an inference failure
677
+ if 'detections' not in patch_results:
678
+ assert 'failure' in patch_results
679
+ output_im['detections'] = None
680
+ output_im['failure'] = patch_results['failure']
681
+ break
682
+
683
+ # det = patch_results['detections'][0]
684
+ for det in patch_results['detections']:
685
+
686
+ bbox_patch_relative = det['bbox']
687
+ xmin_patch_relative = bbox_patch_relative[0]
688
+ ymin_patch_relative = bbox_patch_relative[1]
689
+ w_patch_relative = bbox_patch_relative[2]
690
+ h_patch_relative = bbox_patch_relative[3]
691
+
692
+ # Convert from patch-relative normalized values to image-relative absolute values
693
+ w_pixels = w_patch_relative * patch_w
694
+ h_pixels = h_patch_relative * patch_h
695
+ xmin_patch_pixels = xmin_patch_relative * patch_w
696
+ ymin_patch_pixels = ymin_patch_relative * patch_h
697
+ xmin_image_pixels = patch_info['xmin'] + xmin_patch_pixels
698
+ ymin_image_pixels = patch_info['ymin'] + ymin_patch_pixels
699
+
700
+ # ...and now to image-relative normalized values
701
+ w_image_normalized = w_pixels / image_w
702
+ h_image_normalized = h_pixels / image_h
703
+ xmin_image_normalized = xmin_image_pixels / image_w
704
+ ymin_image_normalized = ymin_image_pixels / image_h
705
+
706
+ bbox_image_normalized = [xmin_image_normalized,
707
+ ymin_image_normalized,
708
+ w_image_normalized,
709
+ h_image_normalized]
710
+
711
+ output_det = {}
712
+ output_det['bbox'] = bbox_image_normalized
713
+ output_det['conf'] = det['conf']
714
+ output_det['category'] = det['category']
715
+
716
+ output_im['detections'].append(output_det)
717
+
718
+ # ...for each detection
719
+
720
+ # ...for each patch
721
+
722
+ image_level_results['images'].append(output_im)
723
+
724
+ # ...for each image
725
+
726
+ image_level_results_file_pre_nms = \
727
+ os.path.join(tiling_folder,folder_name + '_image_level_results_pre_nms.json')
728
+ with open(image_level_results_file_pre_nms,'w') as f:
729
+ json.dump(image_level_results,f,indent=1)
730
+
731
+
732
+ ##%% Run NMS
733
+
734
+ in_place_nms(image_level_results,iou_thres=nms_iou_threshold)
735
+
736
+
737
+ ##%% Write output file
738
+
739
+ print('Saving image-level results (after NMS) to {}'.format(output_file))
740
+
741
+ with open(output_file,'w') as f:
742
+ json.dump(image_level_results,f,indent=1)
743
+
744
+
745
+ ##%% Possibly remove tiles
746
+
747
+ if remove_tiles:
748
+
749
+ patch_file_names = []
750
+ for im in all_image_patch_info:
751
+ for patch in im['patches']:
752
+ patch_file_names.append(patch['patch_fn'])
753
+
754
+ for patch_fn_abs in patch_file_names:
755
+ os.remove(patch_fn_abs)
756
+
757
+
758
+ ##%% Return
759
+
760
+ return image_level_results
761
+
762
+
763
#%% Interactive driver

if False:

    pass

    #%% Run tiled inference (in Python)

    # Sample inputs for an interactive run
    model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
    image_folder = os.path.expanduser('~/data/KRU-test')
    tiling_folder = os.path.expanduser('~/tmp/tiling-test')
    output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')

    # Tiling parameters
    tile_size_x = 3000
    tile_size_y = 3000
    tile_overlap = 0.5

    # Inference parameters
    checkpoint_path = None
    checkpoint_frequency = -1
    remove_tiles = False

    # Toggle between run_detector_batch-style and yolov5-val-style inference
    use_yolo_inference = False

    if use_yolo_inference:
        yolo_inference_options = YoloInferenceOptions()
        yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')
    else:
        yolo_inference_options = None

    run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
                        tile_size_x=tile_size_x, tile_size_y=tile_size_y,
                        tile_overlap=tile_overlap,
                        checkpoint_path=checkpoint_path,
                        checkpoint_frequency=checkpoint_frequency,
                        remove_tiles=remove_tiles,
                        yolo_inference_options=yolo_inference_options)


    #%% Run tiled inference (generate a command)

    import os

    # Same scenario as above, but emitting a shell command instead of calling in-process
    model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
    image_folder = os.path.expanduser('~/data/KRU-test')
    tiling_folder = os.path.expanduser('~/tmp/tiling-test')
    output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
    tile_size = [5152,3968]
    tile_overlap = 0.8

    cmd = f'python run_tiled_inference.py {model_file} {image_folder} {tiling_folder} {output_file} ' + \
          f'--tile_overlap {tile_overlap} --no_remove_tiles --tile_size_x {tile_size[0]} --tile_size_y {tile_size[1]}'

    print(cmd)
    import clipboard; clipboard.copy(cmd)


    #%% Preview tiled inference

    from megadetector.postprocessing.postprocess_batch_results import \
        PostProcessingOptions, process_batch_results

    # Build an HTML preview of the stitched, image-level results
    options = PostProcessingOptions()
    options.image_base_dir = image_folder
    options.include_almost_detections = True
    options.num_images_to_sample = None
    options.confidence_threshold = 0.2
    options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
    options.ground_truth_json_file = None
    options.separate_detections_by_category = True
    # options.sample_seed = 0

    options.parallelize_rendering = True
    options.parallelize_rendering_n_cores = 10
    options.parallelize_rendering_with_threads = False

    preview_base = os.path.join(tiling_folder,'preview')
    os.makedirs(preview_base, exist_ok=True)

    print('Processing post-RDE to {}'.format(preview_base))

    options.md_results_file = output_file
    options.output_dir = preview_base
    ppresults = process_batch_results(options)
    html_output_file = ppresults.output_html_file

    path_utils.open_file(html_output_file)
853
#%% Command-line driver

import sys,argparse

def main():
    """
    Command-line entry point: parses arguments, resolves the detector model
    (downloading well-known model names if necessary), applies the
    --overwrite_handling policy when the output file already exists, and
    invokes run_tiled_inference().
    """

    parser = argparse.ArgumentParser(
        description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
    parser.add_argument(
        'model_file',
        help='Path to detector model file (.pb or .pt)')
    parser.add_argument(
        'image_folder',
        help='Folder containing images for inference (always recursive, unless image_list is supplied)')
    parser.add_argument(
        'tiling_folder',
        help='Temporary folder where tiles and intermediate results will be stored')
    parser.add_argument(
        'output_file',
        help='Path to output JSON results file, should end with a .json extension')
    parser.add_argument(
        '--no_remove_tiles',
        action='store_true',
        help='Tiles are removed by default; this option suppresses tile deletion')
    parser.add_argument(
        '--tile_size_x',
        type=int,
        default=default_tile_size[0],
        help=('Tile width (defaults to {})'.format(default_tile_size[0])))
    parser.add_argument(
        '--tile_size_y',
        type=int,
        # Bug fix: this previously defaulted to default_tile_size[0] (the tile
        # *width*), disagreeing with the help text below, which correctly
        # reports default_tile_size[1] as the default height.
        default=default_tile_size[1],
        help=('Tile height (defaults to {})'.format(default_tile_size[1])))
    parser.add_argument(
        '--tile_overlap',
        type=float,
        default=default_patch_overlap,
        help=('Overlap between tiles [0,1] (defaults to {})'.format(default_patch_overlap)))
    parser.add_argument(
        '--overwrite_handling',
        type=str,
        default='skip',
        help=('Behavior when the target file exists (skip/overwrite/error) (default skip)'))
    parser.add_argument(
        '--image_list',
        type=str,
        default=None,
        help=('A .json list of relative filenames (or absolute paths contained within image_folder) to include'))

    # Print usage and exit when invoked with no arguments
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # Resolve well-known model names (e.g. "MDV5A") to a local file, downloading if necessary
    model_file = try_download_known_detector(args.model_file)
    assert os.path.exists(model_file), \
        'detector file {} does not exist'.format(args.model_file)

    # Honor the requested policy when the output file already exists
    if os.path.exists(args.output_file):
        if args.overwrite_handling == 'skip':
            print('Warning: output file {} exists, skipping'.format(args.output_file))
            return
        elif args.overwrite_handling == 'overwrite':
            print('Warning: output file {} exists, overwriting'.format(args.output_file))
        elif args.overwrite_handling == 'error':
            raise ValueError('Output file {} exists'.format(args.output_file))
        else:
            raise ValueError('Unknown output handling method {}'.format(args.overwrite_handling))

    # --no_remove_tiles inverts the default tile-deletion behavior
    remove_tiles = (not args.no_remove_tiles)

    run_tiled_inference(model_file, args.image_folder, args.tiling_folder, args.output_file,
                        tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
                        tile_overlap=args.tile_overlap,
                        remove_tiles=remove_tiles,
                        image_list=args.image_list)

if __name__ == '__main__':
    main()