megadetector 5.0.9__py3-none-any.whl → 5.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. Click here for more details.

Files changed (226) hide show
  1. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.9.dist-info/RECORD +0 -224
  214. megadetector-5.0.9.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.9.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
@@ -1,935 +0,0 @@
1
- """
2
-
3
- run_tiled_inference.py
4
-
5
- **This script is experimental, YMMV.**
6
-
7
- Runs inference on a folder, first splitting each image up into tiles of size
8
- MxN (typically the native inference size of your detector), writing those
9
- tiles out to a temporary folder, then de-duplicating the resulting detections before
10
- merging them back into a set of detections that make sense on the original images.
11
-
12
- This approach will likely fail to detect very large animals, so if you expect both large
13
- and small animals (in terms of pixel size), this script is best used in
14
- conjunction with a traditional inference pass that looks at whole images.
15
-
16
- Currently requires temporary storage at least as large as the input data, generally
17
- a lot more than that (depending on the overlap between adjacent tiles). This is
18
- inefficient, but easy to debug.
19
-
20
- Programmatic invocation supports using YOLOv5's inference scripts (and test-time
21
- augmentation); the command-line interface only supports standard inference right now.
22
-
23
- """
24
-
25
- #%% Imports and constants
26
-
27
- import os
28
- import json
29
-
30
- from tqdm import tqdm
31
-
32
- from detection.run_inference_with_yolov5_val import YoloInferenceOptions,run_inference_with_yolo_val
33
- from detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
34
- from detection.run_detector import try_download_known_detector
35
-
36
- import torch
37
- from torchvision import ops
38
-
39
- from md_utils import path_utils
40
- from md_visualization import visualization_utils as vis_utils
41
-
42
- default_patch_overlap = 0.5
43
- patch_jpeg_quality = 95
44
-
45
- # This isn't NMS in the usual sense of redundant model predictions; this is being
46
- # used to de-duplicate predictions from overlapping patches.
47
- nms_iou_threshold = 0.45
48
-
49
- default_tile_size = [1280,1280]
50
-
51
- default_n_patch_extraction_workers = 1
52
- parallelization_uses_threads = False
53
-
54
-
55
- #%% Support functions
56
-
57
- def get_patch_boundaries(image_size,patch_size,patch_stride=None):
58
- """
59
- Computes a list of patch starting coordinates (x,y) given an image size (w,h)
60
- and a stride (x,y)
61
-
62
- Patch size is guaranteed, but the stride may deviate to make sure all pixels are covered.
63
- I.e., we move by regular strides until the current patch walks off the right/bottom,
64
- at which point it backs up to one patch from the end. So if your image is 15
65
- pixels wide and you have a stride of 10 pixels, you will get starting positions
66
- of 0 (from 0 to 9) and 5 (from 5 to 14).
67
-
68
- Args:
69
- image_size (tuple): size of the image you want to divide into patches, as a length-2 tuple (w,h)
70
- patch_size (tuple): patch size into which you want to divide an image, as a length-2 tuple (w,h)
71
- patch_stride (tuple or float, optional): stride between patches, as a length-2 tuple (x,y), or a
72
- float; if this is a float, it's interpreted as the stride relative to the patch size
73
- (0.1 == 10% stride). Defaults to half the patch size.
74
-
75
- Returns:
76
- list: list of length-2 tuples, each representing the x/y start position of a patch
77
- """
78
-
79
- if patch_stride is None:
80
- patch_stride = (round(patch_size[0]*(1.0-default_patch_overlap)),
81
- round(patch_size[1]*(1.0-default_patch_overlap)))
82
- elif isinstance(patch_stride,float):
83
- patch_stride = (round(patch_size[0]*(patch_stride)),
84
- round(patch_size[1]*(patch_stride)))
85
-
86
- image_width = image_size[0]
87
- image_height = image_size[1]
88
-
89
- assert patch_size[0] <= image_size[0], 'Patch width {} is larger than image width {}'.format(
90
- patch_size[0],image_size[0])
91
- assert patch_size[1] <= image_size[1], 'Patch height {} is larger than image height {}'.format(
92
- patch_size[1],image_size[1])
93
-
94
- def add_patch_row(patch_start_positions,y_start):
95
- """
96
- Add one row to our list of patch start positions, i.e.
97
- loop over all columns.
98
- """
99
-
100
- x_start = 0; x_end = x_start + patch_size[0] - 1
101
-
102
- while(True):
103
-
104
- patch_start_positions.append([x_start,y_start])
105
-
106
- # If this patch put us right at the end of the last column, we're done
107
- if x_end == image_width - 1:
108
- break
109
-
110
- # Move one patch to the right
111
- x_start += patch_stride[0]
112
- x_end = x_start + patch_size[0] - 1
113
-
114
- # If this patch flows over the edge, add one more patch to cover
115
- # the pixels on the end, then we're done.
116
- if x_end > (image_width - 1):
117
- overshoot = (x_end - image_width) + 1
118
- x_start -= overshoot
119
- x_end = x_start + patch_size[0] - 1
120
- patch_start_positions.append([x_start,y_start])
121
- break
122
-
123
- # ...for each column
124
-
125
- return patch_start_positions
126
-
127
- patch_start_positions = []
128
-
129
- y_start = 0; y_end = y_start + patch_size[1] - 1
130
-
131
- while(True):
132
-
133
- patch_start_positions = add_patch_row(patch_start_positions,y_start)
134
-
135
-         # If this patch put us right at the bottom of the last row, we're done
136
- if y_end == image_height - 1:
137
- break
138
-
139
- # Move one patch down
140
- y_start += patch_stride[1]
141
- y_end = y_start + patch_size[1] - 1
142
-
143
- # If this patch flows over the bottom, add one more patch to cover
144
- # the pixels at the bottom, then we're done
145
- if y_end > (image_height - 1):
146
- overshoot = (y_end - image_height) + 1
147
- y_start -= overshoot
148
- y_end = y_start + patch_size[1] - 1
149
- patch_start_positions = add_patch_row(patch_start_positions,y_start)
150
- break
151
-
152
- # ...for each row
153
-
154
- for p in patch_start_positions:
155
- assert p[0] >= 0 and p[1] >= 0 and p[0] <= image_width and p[1] <= image_height, \
156
- 'Patch generation error (illegal patch {})'.format(p)
157
-
158
- # The last patch should always end at the bottom-right of the image
159
- assert patch_start_positions[-1][0]+patch_size[0] == image_width, \
160
- 'Patch generation error (last patch does not end on the right)'
161
- assert patch_start_positions[-1][1]+patch_size[1] == image_height, \
162
- 'Patch generation error (last patch does not end at the bottom)'
163
-
164
- # All patches should be unique
165
- patch_start_positions_tuples = [tuple(x) for x in patch_start_positions]
166
- assert len(patch_start_positions_tuples) == len(set(patch_start_positions_tuples)), \
167
- 'Patch generation error (duplicate start position)'
168
-
169
- return patch_start_positions
170
-
171
- # ...get_patch_boundaries()
172
-
173
-
174
- def patch_info_to_patch_name(image_name,patch_x_min,patch_y_min):
175
- """
176
- Gives a unique string name to an x/y coordinate, e.g. turns ("a.jpg",10,20) into
177
- "a.jpg_0010_0020".
178
-
179
- Args:
180
- image_name (str): image identifier
181
- patch_x_min (int): x coordinate
182
- patch_y_min (int): y coordinate
183
-
184
- Returns:
185
- str: name for this patch, e.g. "a.jpg_0010_0020"
186
- """
187
- patch_name = image_name + '_' + \
188
- str(patch_x_min).zfill(4) + '_' + str(patch_y_min).zfill(4)
189
- return patch_name
190
-
191
-
192
- def extract_patch_from_image(im,
193
- patch_xy,
194
- patch_size,
195
- patch_image_fn=None,
196
- patch_folder=None,
197
- image_name=None,
198
- overwrite=True):
199
- """
200
- Extracts a patch from the provided image, and writes that patch out to a new file.
201
-
202
- Args:
203
- im (str or Image): image from which we should extract a patch, can be a filename or
204
- a PIL Image object.
205
- patch_xy (tuple): length-2 tuple of ints (x,y) representing the upper-left corner
206
- of the patch to extract
207
- patch_size (tuple): length-2 tuple of ints (w,h) representing the size of the
208
- patch to extract
209
- patch_image_fn (str, optional): image filename to write the patch to; if this is None
210
- the filename will be generated from [image_name] and the patch coordinates
211
- patch_folder (str, optional): folder in which the image lives; only used to generate
212
- a patch filename, so only required if [patch_image_fn] is None
213
- image_name (str, optional): the identifier of the source image; only used to generate
214
- a patch filename, so only required if [patch_image_fn] is None
215
- overwrite (bool, optional): whether to overwrite an existing patch image
216
-
217
- Returns:
218
- dict: a dictionary with fields xmin,xmax,ymin,ymax,patch_fn
219
- """
220
-
221
- if isinstance(im,str):
222
- pil_im = vis_utils.open_image(im)
223
- else:
224
- pil_im = im
225
-
226
- patch_x_min = patch_xy[0]
227
- patch_y_min = patch_xy[1]
228
- patch_x_max = patch_x_min + patch_size[0] - 1
229
- patch_y_max = patch_y_min + patch_size[1] - 1
230
-
231
- # PIL represents coordinates in a way that is very hard for me to get my head
232
- # around, such that even though the "right" and "bottom" arguments to the crop()
233
- # function are inclusive... well, they're not really.
234
- #
235
- # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#coordinate-system
236
- #
237
- # So we add 1 to the max values.
238
- patch_im = pil_im.crop((patch_x_min,patch_y_min,patch_x_max+1,patch_y_max+1))
239
- assert patch_im.size[0] == patch_size[0]
240
- assert patch_im.size[1] == patch_size[1]
241
-
242
- if patch_image_fn is None:
243
- assert patch_folder is not None,\
244
- "If you don't supply a patch filename to extract_patch_from_image, you need to supply a folder name"
245
- patch_name = patch_info_to_patch_name(image_name,patch_x_min,patch_y_min)
246
- patch_image_fn = os.path.join(patch_folder,patch_name + '.jpg')
247
-
248
- if os.path.isfile(patch_image_fn) and (not overwrite):
249
- pass
250
- else:
251
- patch_im.save(patch_image_fn,quality=patch_jpeg_quality)
252
-
253
- patch_info = {}
254
- patch_info['xmin'] = patch_x_min
255
- patch_info['xmax'] = patch_x_max
256
- patch_info['ymin'] = patch_y_min
257
- patch_info['ymax'] = patch_y_max
258
- patch_info['patch_fn'] = patch_image_fn
259
-
260
- return patch_info
261
-
262
- # ...def extract_patch_from_image(...)
263
-
264
-
265
- def in_place_nms(md_results, iou_thres=0.45, verbose=True):
266
- """
267
- Run torch.ops.nms in-place on MD-formatted detection results.
268
-
269
- Args:
270
- md_results (dict): detection results for a list of images, in MD results format (i.e.,
271
- containing a list of image dicts with the key 'images', each of which has a list
272
- of detections with the key 'detections')
273
- iou_thres (float, optional): IoU threshold above which we will treat two detections as
274
- redundant
275
- verbose (bool, optional): enable additional debug console output
276
- """
277
-
278
- n_detections_before = 0
279
- n_detections_after = 0
280
-
281
- # i_image = 18; im = md_results['images'][i_image]
282
- for i_image,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):
283
-
284
- if (im['detections'] is None) or (len(im['detections']) == 0):
285
- continue
286
-
287
- boxes = []
288
- scores = []
289
-
290
- n_detections_before += len(im['detections'])
291
-
292
- # det = im['detections'][0]
293
- for det in im['detections']:
294
-
295
- # Using x1/x2 notation rather than x0/x1 notation to be consistent
296
- # with the Torch documentation.
297
- x1 = det['bbox'][0]
298
- y1 = det['bbox'][1]
299
- x2 = det['bbox'][0] + det['bbox'][2]
300
- y2 = det['bbox'][1] + det['bbox'][3]
301
- box = [x1,y1,x2,y2]
302
- boxes.append(box)
303
- scores.append(det['conf'])
304
-
305
- # ...for each detection
306
-
307
- t_boxes = torch.tensor(boxes)
308
- t_scores = torch.tensor(scores)
309
-
310
- box_indices = ops.nms(t_boxes,t_scores,iou_thres).tolist()
311
-
312
- post_nms_detections = [im['detections'][x] for x in box_indices]
313
-
314
- assert len(post_nms_detections) <= len(im['detections'])
315
-
316
- im['detections'] = post_nms_detections
317
-
318
- n_detections_after += len(im['detections'])
319
-
320
- # ...for each image
321
-
322
- if verbose:
323
- print('NMS removed {} of {} detections'.format(
324
- n_detections_before-n_detections_after,
325
- n_detections_before))
326
-
327
- # ...in_place_nms()
328
-
329
-
330
- def _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,overwrite):
331
- """
332
- Private function to extract tiles for a single image.
333
-
334
- Returns a dict with fields 'patches' (see extract_patch_from_image) and 'image_fn'.
335
-
336
- If there is an error, 'patches' will be None and the 'error' field will contain
337
- failure details. In that case, some tiles may still be generated.
338
- """
339
-
340
- fn_abs = os.path.join(image_folder,fn_relative)
341
- error = None
342
- patches = []
343
-
344
- image_name = path_utils.clean_filename(fn_relative,char_limit=None,force_lower=True)
345
-
346
- try:
347
-
348
- # Open the image
349
- im = vis_utils.open_image(fn_abs)
350
- image_size = [im.width,im.height]
351
-
352
- # Generate patch boundaries (a list of [x,y] starting points)
353
- patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)
354
-
355
- # Extract patches
356
- #
357
- # patch_xy = patch_boundaries[0]
358
- for patch_xy in patch_boundaries:
359
-
360
- patch_info = extract_patch_from_image(im,patch_xy,patch_size,
361
- patch_folder=tiling_folder,
362
- image_name=image_name,
363
- overwrite=overwrite)
364
- patch_info['source_fn'] = fn_relative
365
- patches.append(patch_info)
366
-
367
- except Exception as e:
368
-
369
- s = 'Patch generation error for {}: \n{}'.format(fn_relative,str(e))
370
- print(s)
371
- # patches = None
372
- error = s
373
-
374
- image_patch_info = {}
375
- image_patch_info['patches'] = patches
376
- image_patch_info['image_fn'] = fn_relative
377
- image_patch_info['error'] = error
378
-
379
- return image_patch_info
380
-
381
-
382
- #%% Main function
383
-
384
- def run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
385
- tile_size_x=1280, tile_size_y=1280, tile_overlap=0.5,
386
- checkpoint_path=None, checkpoint_frequency=-1, remove_tiles=False,
387
- yolo_inference_options=None,
388
- n_patch_extraction_workers=default_n_patch_extraction_workers,
389
- overwrite_tiles=True,
390
- image_list=None):
391
- """
392
-     Runs inference using [model_file] on the images in [image_folder], first splitting each image up
393
- into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
394
- then de-duplicating the results before merging them back into a set of detections that make
395
- sense on the original images and writing those results to [output_file].
396
-
397
- [tiling_folder] can be any folder, but this function reserves the right to do whatever it wants
398
- within that folder, including deleting everything, so it's best if it's a new folder.
399
- Conceptually this folder is temporary, it's just helpful in this case to not actually
400
- use the system temp folder, because the tile cache may be very large, so the caller may
401
- want it to be on a specific drive.
402
-
403
- tile_overlap is the fraction of overlap between tiles.
404
-
405
- Optionally removes the temporary tiles.
406
-
407
- if yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
408
- this case the model will be run with run_inference_with_yolov5_val. This is typically used to
409
- run the model with test-time augmentation.
410
-
411
- Args:
412
- model_file (str): model filename (ending in .pt), or a well-known model name (e.g. "MDV5A")
413
-         image_folder (str): the folder of images to process (always recursive)
414
- tiling_folder (str): folder for temporary tile storage; see caveats above
415
- output_file (str): .json file to which we should write MD-formatted results
416
- tile_size_x (int, optional): tile width
417
- tile_size_y (int, optional): tile height
418
-         tile_overlap (float, optional): overlap between adjacent tiles, as a fraction of the
419
- tile size
420
- checkpoint_path (str, optional): checkpoint path; passed directly to run_detector_batch; see
421
- run_detector_batch for details
422
- checkpoint_frequency (int, optional): checkpoint frequency; passed directly to run_detector_batch; see
423
- run_detector_batch for details
424
- remove_tiles (bool, optional): whether to delete the tiles when we're done
425
- yolo_inference_options (YoloInferenceOptions, optional): if not None, will run inference with
426
- run_inference_with_yolov5_val.py, rather than with run_detector_batch.py, using these options
427
- n_patch_extraction_workers (int, optional): number of workers to use for patch extraction;
428
- set to <= 1 to disable parallelization
429
- image_list (list, optional): .json file containing a list of specific images to process. If
430
- this is supplied, and the paths are absolute, [image_folder] will be ignored. If this is supplied,
431
- and the paths are relative, they should be relative to [image_folder].
432
-
433
- Returns:
434
- dict: MD-formatted results dictionary, identical to what's written to [output_file]
435
- """
436
-
437
- ##%% Validate arguments
438
-
439
- assert tile_overlap < 1 and tile_overlap >= 0, \
440
- 'Illegal tile overlap value {}'.format(tile_overlap)
441
-
442
- if tile_size_x == -1:
443
- tile_size_x = default_tile_size[0]
444
- if tile_size_y == -1:
445
- tile_size_y = default_tile_size[1]
446
-
447
- patch_size = [tile_size_x,tile_size_y]
448
- patch_stride = (round(patch_size[0]*(1.0-tile_overlap)),
449
- round(patch_size[1]*(1.0-tile_overlap)))
450
-
451
- os.makedirs(tiling_folder,exist_ok=True)
452
-
453
- ##%% List files
454
-
455
- if image_list is None:
456
-
457
- print('Enumerating images in {}'.format(image_folder))
458
- image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
459
- assert len(image_files_relative) > 0, 'No images found in folder {}'.format(image_folder)
460
-
461
- else:
462
-
463
- print('Loading image list from {}'.format(image_list))
464
- with open(image_list,'r') as f:
465
- image_files_relative = json.load(f)
466
- n_absolute_paths = 0
467
- for i_fn,fn in enumerate(image_files_relative):
468
- if os.path.isabs(fn):
469
- n_absolute_paths += 1
470
- try:
471
- fn_relative = os.path.relpath(fn,image_folder)
472
- except ValueError:
473
- 'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
474
- fn,image_folder)
475
- raise
476
- assert not fn_relative.startswith('..'), \
477
- 'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
478
- fn,image_folder)
479
- image_files_relative[i_fn] = fn_relative
480
- if (n_absolute_paths != 0) and (n_absolute_paths != len(image_files_relative)):
481
- raise ValueError('Illegal file list: converted {} of {} paths to relative'.format(
482
- n_absolute_paths,len(image_files_relative)))
483
-
484
- ##%% Generate tiles
485
-
486
- all_image_patch_info = None
487
-
488
- print('Extracting patches from {} images'.format(len(image_files_relative)))
489
-
490
- n_workers = n_patch_extraction_workers
491
-
492
- if n_workers <= 1:
493
-
494
- all_image_patch_info = []
495
-
496
- # fn_relative = image_files_relative[0]
497
- for fn_relative in tqdm(image_files_relative):
498
- image_patch_info = \
499
- _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,
500
- overwrite=overwrite_tiles)
501
- all_image_patch_info.append(image_patch_info)
502
-
503
- else:
504
-
505
- from multiprocessing.pool import ThreadPool
506
- from multiprocessing.pool import Pool
507
- from functools import partial
508
-
509
- if n_workers > len(image_files_relative):
510
-
511
- print('Pool of {} requested, but only {} images available, reducing pool to {}'.\
512
- format(n_workers,len(image_files_relative),len(image_files_relative)))
513
- n_workers = len(image_files_relative)
514
-
515
- if parallelization_uses_threads:
516
- pool = ThreadPool(n_workers); poolstring = 'threads'
517
- else:
518
- pool = Pool(n_workers); poolstring = 'processes'
519
-
520
- print('Starting patch extraction pool with {} {}'.format(n_workers,poolstring))
521
-
522
- all_image_patch_info = list(tqdm(pool.imap(
523
- partial(_extract_tiles_for_image,
524
- image_folder=image_folder,
525
- tiling_folder=tiling_folder,
526
- patch_size=patch_size,
527
- patch_stride=patch_stride,
528
- overwrite=overwrite_tiles),
529
- image_files_relative),total=len(image_files_relative)))
530
-
531
- # ...for each image
532
-
533
- # Write tile information to file; this is just a debugging convenience
534
- folder_name = path_utils.clean_filename(image_folder,force_lower=True)
535
- if folder_name.startswith('_'):
536
- folder_name = folder_name[1:]
537
-
538
- tile_cache_file = os.path.join(tiling_folder,folder_name + '_patch_info.json')
539
- with open(tile_cache_file,'w') as f:
540
- json.dump(all_image_patch_info,f,indent=1)
541
-
542
- # Keep track of patches that failed
543
- images_with_patch_errors = {}
544
- for patch_info in all_image_patch_info:
545
- if patch_info['error'] is not None:
546
- images_with_patch_errors[patch_info['image_fn']] = patch_info
547
-
548
-
549
- ##%% Run inference on tiles
550
-
551
- # When running with run_inference_with_yolov5_val, we'll pass the folder
552
- if yolo_inference_options is not None:
553
-
554
- patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
555
-
556
- if yolo_inference_options.model_filename is None:
557
- yolo_inference_options.model_filename = model_file
558
- else:
559
- assert yolo_inference_options.model_filename == model_file, \
560
- 'Model file between yolo inference file ({}) and model file parameter ({})'.format(
561
- yolo_inference_options.model_filename,model_file)
562
-
563
- yolo_inference_options.input_folder = tiling_folder
564
- yolo_inference_options.output_file = patch_level_output_file
565
-
566
- run_inference_with_yolo_val(yolo_inference_options)
567
- with open(patch_level_output_file,'r') as f:
568
- patch_level_results = json.load(f)
569
-
570
- # For standard inference, we'll pass a list of files
571
- else:
572
-
573
- patch_file_names = []
574
- for im in all_image_patch_info:
575
- # If there was a patch generation error, don't run inference
576
- if patch_info['error'] is not None:
577
- assert im['image_fn'] in images_with_patch_errors
578
- continue
579
- for patch in im['patches']:
580
- patch_file_names.append(patch['patch_fn'])
581
-
582
- inference_results = load_and_run_detector_batch(model_file,
583
- patch_file_names,
584
- checkpoint_path=checkpoint_path,
585
- checkpoint_frequency=checkpoint_frequency,
586
- quiet=True)
587
-
588
- patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
589
-
590
- patch_level_results = write_results_to_file(inference_results,
591
- patch_level_output_file,
592
- relative_path_base=tiling_folder,
593
- detector_file=model_file)
594
-
595
-
596
- ##%% Map patch-level detections back to the original images
597
-
598
- # Map relative paths for patches to detections
599
- patch_fn_relative_to_results = {}
600
- for im in tqdm(patch_level_results['images']):
601
- patch_fn_relative_to_results[im['file']] = im
602
-
603
- image_level_results = {}
604
- image_level_results['info'] = patch_level_results['info']
605
- image_level_results['detection_categories'] = patch_level_results['detection_categories']
606
- image_level_results['images'] = []
607
-
608
- image_fn_relative_to_patch_info = { x['image_fn']:x for x in all_image_patch_info }
609
-
610
- # i_image = 0; image_fn_relative = image_files_relative[i_image]
611
- for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),
612
- total=len(image_files_relative)):
613
-
614
- image_fn_abs = os.path.join(image_folder,image_fn_relative)
615
- assert os.path.isfile(image_fn_abs)
616
-
617
- output_im = {}
618
- output_im['file'] = image_fn_relative
619
-
620
- # If we had a patch generation error
621
- if image_fn_relative in images_with_patch_errors:
622
-
623
- patch_info = image_fn_relative_to_patch_info[image_fn_relative]
624
- assert patch_info['error'] is not None
625
-
626
- output_im['detections'] = None
627
- output_im['failure'] = 'Patch generation error'
628
- output_im['failure_details'] = patch_info['error']
629
- image_level_results['images'].append(output_im)
630
- continue
631
-
632
- try:
633
- pil_im = vis_utils.open_image(image_fn_abs)
634
- image_w = pil_im.size[0]
635
- image_h = pil_im.size[1]
636
-
637
- # This would be a very unusual situation; we're reading back an image here that we already
638
- # (successfully) read once during patch generation.
639
- except Exception as e:
640
- print('Warning: image read error after successful patch generation for {}:\n{}'.format(
641
- image_fn_relative,str(e)))
642
- output_im['detections'] = None
643
- output_im['failure'] = 'Patch processing error'
644
- output_im['failure_details'] = str(e)
645
- image_level_results['images'].append(output_im)
646
- continue
647
-
648
- output_im['detections'] = []
649
-
650
- image_patch_info = image_fn_relative_to_patch_info[image_fn_relative]
651
- assert image_patch_info['patches'][0]['source_fn'] == image_fn_relative
652
-
653
- # Patches for this image
654
- patch_fn_abs_to_patch_info_this_image = {}
655
-
656
- for patch_info in image_patch_info['patches']:
657
- patch_fn_abs_to_patch_info_this_image[patch_info['patch_fn']] = patch_info
658
-
659
- # For each patch
660
- #
661
- # i_patch = 0; patch_fn_abs = list(patch_fn_abs_to_patch_info_this_image.keys())[i_patch]
662
- for i_patch,patch_fn_abs in enumerate(patch_fn_abs_to_patch_info_this_image.keys()):
663
-
664
- patch_fn_relative = os.path.relpath(patch_fn_abs,tiling_folder)
665
- patch_results = patch_fn_relative_to_results[patch_fn_relative]
666
- patch_info = patch_fn_abs_to_patch_info_this_image[patch_fn_abs]
667
-
668
- # patch_results['file'] is a relative path, and a subset of patch_info['patch_fn']
669
- assert patch_results['file'] in patch_info['patch_fn']
670
-
671
- patch_w = (patch_info['xmax'] - patch_info['xmin']) + 1
672
- patch_h = (patch_info['ymax'] - patch_info['ymin']) + 1
673
- assert patch_w == patch_size[0]
674
- assert patch_h == patch_size[1]
675
-
676
- # If there was an inference failure on one patch, report the image
677
- # as an inference failure
678
- if 'detections' not in patch_results:
679
- assert 'failure' in patch_results
680
- output_im['detections'] = None
681
- output_im['failure'] = patch_results['failure']
682
- break
683
-
684
- # det = patch_results['detections'][0]
685
- for det in patch_results['detections']:
686
-
687
- bbox_patch_relative = det['bbox']
688
- xmin_patch_relative = bbox_patch_relative[0]
689
- ymin_patch_relative = bbox_patch_relative[1]
690
- w_patch_relative = bbox_patch_relative[2]
691
- h_patch_relative = bbox_patch_relative[3]
692
-
693
- # Convert from patch-relative normalized values to image-relative absolute values
694
- w_pixels = w_patch_relative * patch_w
695
- h_pixels = h_patch_relative * patch_h
696
- xmin_patch_pixels = xmin_patch_relative * patch_w
697
- ymin_patch_pixels = ymin_patch_relative * patch_h
698
- xmin_image_pixels = patch_info['xmin'] + xmin_patch_pixels
699
- ymin_image_pixels = patch_info['ymin'] + ymin_patch_pixels
700
-
701
- # ...and now to image-relative normalized values
702
- w_image_normalized = w_pixels / image_w
703
- h_image_normalized = h_pixels / image_h
704
- xmin_image_normalized = xmin_image_pixels / image_w
705
- ymin_image_normalized = ymin_image_pixels / image_h
706
-
707
- bbox_image_normalized = [xmin_image_normalized,
708
- ymin_image_normalized,
709
- w_image_normalized,
710
- h_image_normalized]
711
-
712
- output_det = {}
713
- output_det['bbox'] = bbox_image_normalized
714
- output_det['conf'] = det['conf']
715
- output_det['category'] = det['category']
716
-
717
- output_im['detections'].append(output_det)
718
-
719
- # ...for each detection
720
-
721
- # ...for each patch
722
-
723
- image_level_results['images'].append(output_im)
724
-
725
- # ...for each image
726
-
727
- image_level_results_file_pre_nms = \
728
- os.path.join(tiling_folder,folder_name + '_image_level_results_pre_nms.json')
729
- with open(image_level_results_file_pre_nms,'w') as f:
730
- json.dump(image_level_results,f,indent=1)
731
-
732
-
733
- ##%% Run NMS
734
-
735
- in_place_nms(image_level_results,iou_thres=nms_iou_threshold)
736
-
737
-
738
- ##%% Write output file
739
-
740
- print('Saving image-level results (after NMS) to {}'.format(output_file))
741
-
742
- with open(output_file,'w') as f:
743
- json.dump(image_level_results,f,indent=1)
744
-
745
-
746
- ##%% Possibly remove tiles
747
-
748
- if remove_tiles:
749
-
750
- patch_file_names = []
751
- for im in all_image_patch_info:
752
- for patch in im['patches']:
753
- patch_file_names.append(patch['patch_fn'])
754
-
755
- for patch_fn_abs in patch_file_names:
756
- os.remove(patch_fn_abs)
757
-
758
-
759
- ##%% Return
760
-
761
- return image_level_results
762
-
763
-
764
- #%% Interactive driver
765
-
766
- if False:
767
-
768
- pass
769
-
770
- #%% Run tiled inference (in Python)
771
-
772
- model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
773
- image_folder = os.path.expanduser('~/data/KRU-test')
774
- tiling_folder = os.path.expanduser('~/tmp/tiling-test')
775
- output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
776
-
777
- tile_size_x = 3000
778
- tile_size_y = 3000
779
- tile_overlap = 0.5
780
- checkpoint_path = None
781
- checkpoint_frequency = -1
782
- remove_tiles = False
783
-
784
- use_yolo_inference = False
785
-
786
- if not use_yolo_inference:
787
-
788
- yolo_inference_options = None
789
-
790
- else:
791
-
792
- yolo_inference_options = YoloInferenceOptions()
793
- yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')
794
-
795
- run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
796
- tile_size_x=tile_size_x, tile_size_y=tile_size_y,
797
- tile_overlap=tile_overlap,
798
- checkpoint_path=checkpoint_path,
799
- checkpoint_frequency=checkpoint_frequency,
800
- remove_tiles=remove_tiles,
801
- yolo_inference_options=yolo_inference_options)
802
-
803
-
804
- #%% Run tiled inference (generate a command)
805
-
806
- import os
807
-
808
- model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
809
- image_folder = os.path.expanduser('~/data/KRU-test')
810
- tiling_folder = os.path.expanduser('~/tmp/tiling-test')
811
- output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
812
- tile_size = [5152,3968]
813
- tile_overlap = 0.8
814
-
815
- cmd = f'python run_tiled_inference.py {model_file} {image_folder} {tiling_folder} {output_file} ' + \
816
- f'--tile_overlap {tile_overlap} --no_remove_tiles --tile_size_x {tile_size[0]} --tile_size_y {tile_size[1]}'
817
-
818
- print(cmd)
819
- import clipboard; clipboard.copy(cmd)
820
-
821
-
822
- #%% Preview tiled inference
823
-
824
- from api.batch_processing.postprocessing.postprocess_batch_results import (
825
- PostProcessingOptions, process_batch_results)
826
-
827
- options = PostProcessingOptions()
828
- options.image_base_dir = image_folder
829
- options.include_almost_detections = True
830
- options.num_images_to_sample = None
831
- options.confidence_threshold = 0.2
832
- options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
833
- options.ground_truth_json_file = None
834
- options.separate_detections_by_category = True
835
- # options.sample_seed = 0
836
-
837
- options.parallelize_rendering = True
838
- options.parallelize_rendering_n_cores = 10
839
- options.parallelize_rendering_with_threads = False
840
-
841
- preview_base = os.path.join(tiling_folder,'preview')
842
- os.makedirs(preview_base, exist_ok=True)
843
-
844
- print('Processing post-RDE to {}'.format(preview_base))
845
-
846
- options.api_output_file = output_file
847
- options.output_dir = preview_base
848
- ppresults = process_batch_results(options)
849
- html_output_file = ppresults.output_html_file
850
-
851
- path_utils.open_file(html_output_file)
852
-
853
-
854
- #%% Command-line driver
855
-
856
- import sys,argparse
857
-
858
- def main():
859
-
860
- parser = argparse.ArgumentParser(
861
- description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
862
- parser.add_argument(
863
- 'model_file',
864
- help='Path to detector model file (.pb or .pt)')
865
- parser.add_argument(
866
- 'image_folder',
867
- help='Folder containing images for inference (always recursive, unless image_list is supplied)')
868
- parser.add_argument(
869
- 'tiling_folder',
870
- help='Temporary folder where tiles and intermediate results will be stored')
871
- parser.add_argument(
872
- 'output_file',
873
- help='Path to output JSON results file, should end with a .json extension')
874
- parser.add_argument(
875
- '--no_remove_tiles',
876
- action='store_true',
877
- help='Tiles are removed by default; this option suppresses tile deletion')
878
- parser.add_argument(
879
- '--tile_size_x',
880
- type=int,
881
- default=default_tile_size[0],
882
- help=('Tile width (defaults to {})'.format(default_tile_size[0])))
883
- parser.add_argument(
884
- '--tile_size_y',
885
- type=int,
886
- default=default_tile_size[0],
887
- help=('Tile height (defaults to {})'.format(default_tile_size[1])))
888
- parser.add_argument(
889
- '--tile_overlap',
890
- type=float,
891
- default=default_patch_overlap,
892
- help=('Overlap between tiles [0,1] (defaults to {})'.format(default_patch_overlap)))
893
- parser.add_argument(
894
- '--overwrite_handling',
895
- type=str,
896
- default='skip',
897
- help=('Behavior when the target file exists (skip/overwrite/error) (default skip)'))
898
- parser.add_argument(
899
- '--image_list',
900
- type=str,
901
- default=None,
902
- help=('A .json list of relative filenames (or absolute paths contained within image_folder) to include'))
903
-
904
- if len(sys.argv[1:]) == 0:
905
- parser.print_help()
906
- parser.exit()
907
-
908
- args = parser.parse_args()
909
-
910
- model_file = try_download_known_detector(args.model_file)
911
- assert os.path.exists(model_file), \
912
- 'detector file {} does not exist'.format(args.model_file)
913
-
914
- if os.path.exists(args.output_file):
915
- if args.overwrite_handling == 'skip':
916
- print('Warning: output file {} exists, skipping'.format(args.output_file))
917
- return
918
- elif args.overwrite_handling == 'overwrite':
919
- print('Warning: output file {} exists, overwriting'.format(args.output_file))
920
- elif args.overwrite_handling == 'error':
921
- raise ValueError('Output file {} exists'.format(args.output_file))
922
- else:
923
- raise ValueError('Unknown output handling method {}'.format(args.overwrite_handling))
924
-
925
-
926
- remove_tiles = (not args.no_remove_tiles)
927
-
928
- run_tiled_inference(model_file, args.image_folder, args.tiling_folder, args.output_file,
929
- tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
930
- tile_overlap=args.tile_overlap,
931
- remove_tiles=remove_tiles,
932
- image_list=args.image_list)
933
-
934
- if __name__ == '__main__':
935
- main()