megadetector 10.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. megadetector/__init__.py +0 -0
  2. megadetector/api/__init__.py +0 -0
  3. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
  7. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  8. megadetector/classification/__init__.py +0 -0
  9. megadetector/classification/aggregate_classifier_probs.py +108 -0
  10. megadetector/classification/analyze_failed_images.py +227 -0
  11. megadetector/classification/cache_batchapi_outputs.py +198 -0
  12. megadetector/classification/create_classification_dataset.py +626 -0
  13. megadetector/classification/crop_detections.py +516 -0
  14. megadetector/classification/csv_to_json.py +226 -0
  15. megadetector/classification/detect_and_crop.py +853 -0
  16. megadetector/classification/efficientnet/__init__.py +9 -0
  17. megadetector/classification/efficientnet/model.py +415 -0
  18. megadetector/classification/efficientnet/utils.py +608 -0
  19. megadetector/classification/evaluate_model.py +520 -0
  20. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  21. megadetector/classification/json_to_azcopy_list.py +63 -0
  22. megadetector/classification/json_validator.py +696 -0
  23. megadetector/classification/map_classification_categories.py +276 -0
  24. megadetector/classification/merge_classification_detection_output.py +509 -0
  25. megadetector/classification/prepare_classification_script.py +194 -0
  26. megadetector/classification/prepare_classification_script_mc.py +228 -0
  27. megadetector/classification/run_classifier.py +287 -0
  28. megadetector/classification/save_mislabeled.py +110 -0
  29. megadetector/classification/train_classifier.py +827 -0
  30. megadetector/classification/train_classifier_tf.py +725 -0
  31. megadetector/classification/train_utils.py +323 -0
  32. megadetector/data_management/__init__.py +0 -0
  33. megadetector/data_management/animl_to_md.py +161 -0
  34. megadetector/data_management/annotations/__init__.py +0 -0
  35. megadetector/data_management/annotations/annotation_constants.py +33 -0
  36. megadetector/data_management/camtrap_dp_to_coco.py +270 -0
  37. megadetector/data_management/cct_json_utils.py +566 -0
  38. megadetector/data_management/cct_to_md.py +184 -0
  39. megadetector/data_management/cct_to_wi.py +293 -0
  40. megadetector/data_management/coco_to_labelme.py +284 -0
  41. megadetector/data_management/coco_to_yolo.py +701 -0
  42. megadetector/data_management/databases/__init__.py +0 -0
  43. megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
  44. megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
  45. megadetector/data_management/databases/integrity_check_json_db.py +563 -0
  46. megadetector/data_management/databases/subset_json_db.py +195 -0
  47. megadetector/data_management/generate_crops_from_cct.py +200 -0
  48. megadetector/data_management/get_image_sizes.py +164 -0
  49. megadetector/data_management/labelme_to_coco.py +559 -0
  50. megadetector/data_management/labelme_to_yolo.py +349 -0
  51. megadetector/data_management/lila/__init__.py +0 -0
  52. megadetector/data_management/lila/create_lila_blank_set.py +556 -0
  53. megadetector/data_management/lila/create_lila_test_set.py +192 -0
  54. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  55. megadetector/data_management/lila/download_lila_subset.py +182 -0
  56. megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
  57. megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
  58. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  59. megadetector/data_management/lila/lila_common.py +319 -0
  60. megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
  61. megadetector/data_management/mewc_to_md.py +344 -0
  62. megadetector/data_management/ocr_tools.py +873 -0
  63. megadetector/data_management/read_exif.py +964 -0
  64. megadetector/data_management/remap_coco_categories.py +195 -0
  65. megadetector/data_management/remove_exif.py +156 -0
  66. megadetector/data_management/rename_images.py +194 -0
  67. megadetector/data_management/resize_coco_dataset.py +665 -0
  68. megadetector/data_management/speciesnet_to_md.py +41 -0
  69. megadetector/data_management/wi_download_csv_to_coco.py +247 -0
  70. megadetector/data_management/yolo_output_to_md_output.py +594 -0
  71. megadetector/data_management/yolo_to_coco.py +984 -0
  72. megadetector/data_management/zamba_to_md.py +188 -0
  73. megadetector/detection/__init__.py +0 -0
  74. megadetector/detection/change_detection.py +840 -0
  75. megadetector/detection/process_video.py +479 -0
  76. megadetector/detection/pytorch_detector.py +1451 -0
  77. megadetector/detection/run_detector.py +1267 -0
  78. megadetector/detection/run_detector_batch.py +2172 -0
  79. megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
  80. megadetector/detection/run_md_and_speciesnet.py +1604 -0
  81. megadetector/detection/run_tiled_inference.py +1044 -0
  82. megadetector/detection/tf_detector.py +209 -0
  83. megadetector/detection/video_utils.py +1379 -0
  84. megadetector/postprocessing/__init__.py +0 -0
  85. megadetector/postprocessing/add_max_conf.py +72 -0
  86. megadetector/postprocessing/categorize_detections_by_size.py +166 -0
  87. megadetector/postprocessing/classification_postprocessing.py +1943 -0
  88. megadetector/postprocessing/combine_batch_outputs.py +249 -0
  89. megadetector/postprocessing/compare_batch_results.py +2110 -0
  90. megadetector/postprocessing/convert_output_format.py +403 -0
  91. megadetector/postprocessing/create_crop_folder.py +629 -0
  92. megadetector/postprocessing/detector_calibration.py +570 -0
  93. megadetector/postprocessing/generate_csv_report.py +522 -0
  94. megadetector/postprocessing/load_api_results.py +223 -0
  95. megadetector/postprocessing/md_to_coco.py +428 -0
  96. megadetector/postprocessing/md_to_labelme.py +351 -0
  97. megadetector/postprocessing/md_to_wi.py +41 -0
  98. megadetector/postprocessing/merge_detections.py +392 -0
  99. megadetector/postprocessing/postprocess_batch_results.py +2140 -0
  100. megadetector/postprocessing/remap_detection_categories.py +226 -0
  101. megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
  102. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
  103. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
  104. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
  105. megadetector/postprocessing/separate_detections_into_folders.py +795 -0
  106. megadetector/postprocessing/subset_json_detector_output.py +964 -0
  107. megadetector/postprocessing/top_folders_to_bottom.py +238 -0
  108. megadetector/postprocessing/validate_batch_results.py +332 -0
  109. megadetector/taxonomy_mapping/__init__.py +0 -0
  110. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  111. megadetector/taxonomy_mapping/map_new_lila_datasets.py +211 -0
  112. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
  113. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
  114. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  115. megadetector/taxonomy_mapping/simple_image_download.py +231 -0
  116. megadetector/taxonomy_mapping/species_lookup.py +1008 -0
  117. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  118. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  119. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  120. megadetector/tests/__init__.py +0 -0
  121. megadetector/tests/test_nms_synthetic.py +335 -0
  122. megadetector/utils/__init__.py +0 -0
  123. megadetector/utils/ct_utils.py +1857 -0
  124. megadetector/utils/directory_listing.py +199 -0
  125. megadetector/utils/extract_frames_from_video.py +307 -0
  126. megadetector/utils/gpu_test.py +125 -0
  127. megadetector/utils/md_tests.py +2072 -0
  128. megadetector/utils/path_utils.py +2872 -0
  129. megadetector/utils/process_utils.py +172 -0
  130. megadetector/utils/split_locations_into_train_val.py +237 -0
  131. megadetector/utils/string_utils.py +234 -0
  132. megadetector/utils/url_utils.py +825 -0
  133. megadetector/utils/wi_platform_utils.py +968 -0
  134. megadetector/utils/wi_taxonomy_utils.py +1766 -0
  135. megadetector/utils/write_html_image_list.py +239 -0
  136. megadetector/visualization/__init__.py +0 -0
  137. megadetector/visualization/plot_utils.py +309 -0
  138. megadetector/visualization/render_images_with_thumbnails.py +243 -0
  139. megadetector/visualization/visualization_utils.py +1973 -0
  140. megadetector/visualization/visualize_db.py +630 -0
  141. megadetector/visualization/visualize_detector_output.py +498 -0
  142. megadetector/visualization/visualize_video_output.py +705 -0
  143. megadetector-10.0.15.dist-info/METADATA +115 -0
  144. megadetector-10.0.15.dist-info/RECORD +147 -0
  145. megadetector-10.0.15.dist-info/WHEEL +5 -0
  146. megadetector-10.0.15.dist-info/licenses/LICENSE +19 -0
  147. megadetector-10.0.15.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1044 @@
1
+ """
2
+
3
+ run_tiled_inference.py
4
+
5
+ **This script is experimental, YMMV.**
6
+
7
+ Runs inference on a folder, first splitting each image up into tiles of size
8
+ MxN (typically the native inference size of your detector), writing those
9
+ tiles out to a temporary folder, then de-duplicating the resulting detections before
10
+ merging them back into a set of detections that make sense on the original images.
11
+
12
+ This approach will likely fail to detect very large animals, so if you expect both large
13
+ and small animals (in terms of pixel size), this script is best used in
14
+ conjunction with a traditional inference pass that looks at whole images.
15
+
16
+ Currently requires temporary storage at least as large as the input data, generally
17
+ a lot more than that (depending on the overlap between adjacent tiles). This is
18
+ inefficient, but easy to debug.
19
+
20
+ Programmatic invocation supports using YOLOv5's inference scripts (and test-time
21
+ augmentation); the command-line interface only supports standard inference right now.
22
+
23
+ """
24
+
25
+ #%% Imports and constants
26
+
27
+ import os
28
+ import json
29
+ import tempfile
30
+ import uuid
31
+ import sys
32
+ import argparse
33
+
34
+ from multiprocessing.pool import ThreadPool
35
+ from multiprocessing.pool import Pool
36
+ from functools import partial
37
+
38
+ from tqdm import tqdm
39
+
40
+ import torch
41
+ from torchvision import ops
42
+
43
+ from megadetector.detection.run_inference_with_yolov5_val import \
44
+ YoloInferenceOptions,run_inference_with_yolo_val
45
+ from megadetector.detection.run_detector_batch import \
46
+ load_and_run_detector_batch,write_results_to_file,default_loaders
47
+ from megadetector.detection.run_detector import \
48
+ try_download_known_detector, CONF_DIGITS, COORD_DIGITS
49
+ from megadetector.utils import path_utils
50
+ from megadetector.utils.ct_utils import round_float_array, round_float
51
+ from megadetector.visualization import visualization_utils as vis_utils
52
+
53
+ default_patch_overlap = 0.5
54
+ patch_jpeg_quality = 95
55
+
56
+ # This isn't NMS in the usual sense of removing redundant predictions within a
57
+ # single image; it's being used to de-duplicate predictions from overlapping patches.
58
+ nms_iou_threshold = 0.45
59
+
60
+ default_tile_size = [1280,1280]
61
+
62
+ default_n_patch_extraction_workers = 1
63
+ default_pool_type = 'thread'
64
+
65
+
66
+ #%% Support functions
67
+
68
+ def get_patch_boundaries(image_size,patch_size,patch_stride=None):
69
+ """
70
+ Computes a list of patch starting coordinates (x,y) given an image size (w,h),
71
+ a patch size (w,h), and a stride (x,y).
72
+
73
+ Patch size is guaranteed, but the stride may deviate to make sure all pixels are covered.
74
+ I.e., we move by regular strides until the current patch walks off the right/bottom,
75
+ at which point it backs up to one patch from the end. So if your image is 15
76
+ pixels wide and you have a stride of 10 pixels, you will get starting positions
77
+ of 0 (from 0 to 9) and 5 (from 5 to 14).
78
+
79
+ Args:
80
+ image_size (tuple): size of the image you want to divide into patches, as a length-2 tuple (w,h)
81
+ patch_size (tuple): patch size into which you want to divide an image, as a length-2 tuple (w,h)
82
+ patch_stride (tuple or float, optional): stride between patches, as a length-2 tuple (x,y), or a
83
+ float; if this is a float, it's interpreted as the stride relative to the patch size
84
+ (0.1 == 10% stride). Defaults to half the patch size.
85
+
86
+ Returns:
87
+ list: list of length-2 tuples, each representing the x/y start position of a patch
88
+ """
89
+
90
+ if patch_stride is None:
91
+ patch_stride = (round(patch_size[0]*(1.0-default_patch_overlap)),
92
+ round(patch_size[1]*(1.0-default_patch_overlap)))
93
+ elif isinstance(patch_stride,float):
94
+ patch_stride = (round(patch_size[0]*(patch_stride)),
95
+ round(patch_size[1]*(patch_stride)))
96
+
97
+ image_width = image_size[0]
98
+ image_height = image_size[1]
99
+
100
+ assert patch_size[0] <= image_size[0], 'Patch width {} is larger than image width {}'.format(
101
+ patch_size[0],image_size[0])
102
+ assert patch_size[1] <= image_size[1], 'Patch height {} is larger than image height {}'.format(
103
+ patch_size[1],image_size[1])
104
+
105
+ def add_patch_row(patch_start_positions,y_start):
106
+ """
107
+ Add one row to our list of patch start positions, i.e.
108
+ loop over all columns.
109
+ """
110
+
111
+ x_start = 0; x_end = x_start + patch_size[0] - 1
112
+
113
+ while(True):
114
+
115
+ patch_start_positions.append([x_start,y_start])
116
+
117
+ # If this patch put us right at the end of the last column, we're done
118
+ if x_end == image_width - 1:
119
+ break
120
+
121
+ # Move one patch to the right
122
+ x_start += patch_stride[0]
123
+ x_end = x_start + patch_size[0] - 1
124
+
125
+ # If this patch flows over the edge, add one more patch to cover
126
+ # the pixels on the end, then we're done.
127
+ if x_end > (image_width - 1):
128
+ overshoot = (x_end - image_width) + 1
129
+ x_start -= overshoot
130
+ x_end = x_start + patch_size[0] - 1
131
+ patch_start_positions.append([x_start,y_start])
132
+ break
133
+
134
+ # ...for each column
135
+
136
+ return patch_start_positions
137
+
138
+ patch_start_positions = []
139
+
140
+ y_start = 0; y_end = y_start + patch_size[1] - 1
141
+
142
+ while(True):
143
+
144
+ patch_start_positions = add_patch_row(patch_start_positions,y_start)
145
+
146
+ # If this patch put us right at the bottom of the last row, we're done
147
+ if y_end == image_height - 1:
148
+ break
149
+
150
+ # Move one patch down
151
+ y_start += patch_stride[1]
152
+ y_end = y_start + patch_size[1] - 1
153
+
154
+ # If this patch flows over the bottom, add one more patch to cover
155
+ # the pixels at the bottom, then we're done
156
+ if y_end > (image_height - 1):
157
+ overshoot = (y_end - image_height) + 1
158
+ y_start -= overshoot
159
+ y_end = y_start + patch_size[1] - 1
160
+ patch_start_positions = add_patch_row(patch_start_positions,y_start)
161
+ break
162
+
163
+ # ...for each row
164
+
165
+ for p in patch_start_positions:
166
+ assert p[0] >= 0 and p[1] >= 0 and p[0] <= image_width and p[1] <= image_height, \
167
+ 'Patch generation error (illegal patch {})'.format(p)
168
+
169
+ # The last patch should always end at the bottom-right of the image
170
+ assert patch_start_positions[-1][0]+patch_size[0] == image_width, \
171
+ 'Patch generation error (last patch does not end on the right)'
172
+ assert patch_start_positions[-1][1]+patch_size[1] == image_height, \
173
+ 'Patch generation error (last patch does not end at the bottom)'
174
+
175
+ # All patches should be unique
176
+ patch_start_positions_tuples = [tuple(x) for x in patch_start_positions]
177
+ assert len(patch_start_positions_tuples) == len(set(patch_start_positions_tuples)), \
178
+ 'Patch generation error (duplicate start position)'
179
+
180
+ return patch_start_positions
181
+
182
+ # ...get_patch_boundaries()
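A quick worked example of the back-off behavior described in the docstring above (illustrative only, assuming the function as defined in this file):

    # For a 4000x3000 image, 1280x1280 patches, and 50% overlap (stride 640):
    starts = get_patch_boundaries((4000, 3000), (1280, 1280), patch_stride=0.5)
    # Distinct x starts: 0, 640, 1280, 1920, 2560, 2720   (2720 + 1280 == 4000)
    # Distinct y starts: 0, 640, 1280, 1720                (1720 + 1280 == 3000)
    # The function returns every (x, y) combination, so len(starts) == 24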
183
+
184
+
185
+ def patch_info_to_patch_name(image_name,patch_x_min,patch_y_min):
186
+ """
187
+ Gives a unique string name to an x/y coordinate, e.g. turns ("a.jpg",10,20) into
188
+ "a.jpg_0010_0020".
189
+
190
+ Args:
191
+ image_name (str): image identifier
192
+ patch_x_min (int): x coordinate
193
+ patch_y_min (int): y coordinate
194
+
195
+ Returns:
196
+ str: name for this patch, e.g. "a.jpg_0010_0020"
197
+ """
198
+ patch_name = image_name + '_' + \
199
+ str(patch_x_min).zfill(4) + '_' + str(patch_y_min).zfill(4)
200
+ return patch_name
201
+
202
+
203
+ def extract_patch_from_image(im,
204
+ patch_xy,
205
+ patch_size,
206
+ patch_image_fn=None,
207
+ patch_folder=None,
208
+ image_name=None,
209
+ overwrite=True):
210
+ """
211
+ Extracts a patch from the provided image, and writes that patch out to a new file.
212
+
213
+ Args:
214
+ im (str or Image): image from which we should extract a patch, can be a filename or
215
+ a PIL Image object.
216
+ patch_xy (tuple): length-2 tuple of ints (x,y) representing the upper-left corner
217
+ of the patch to extract
218
+ patch_size (tuple): length-2 tuple of ints (w,h) representing the size of the
219
+ patch to extract
220
+ patch_image_fn (str, optional): image filename to write the patch to; if this is None
221
+ the filename will be generated from [image_name] and the patch coordinates
222
+ patch_folder (str, optional): folder in which the image lives; only used to generate
223
+ a patch filename, so only required if [patch_image_fn] is None
224
+ image_name (str, optional): the identifier of the source image; only used to generate
225
+ a patch filename, so only required if [patch_image_fn] is None
226
+ overwrite (bool, optional): whether to overwrite an existing patch image
227
+
228
+ Returns:
229
+ dict: a dictionary with fields xmin,xmax,ymin,ymax,patch_fn
230
+ """
231
+
232
+ if isinstance(im,str):
233
+ pil_im = vis_utils.open_image(im)
234
+ else:
235
+ pil_im = im
236
+
237
+ patch_x_min = patch_xy[0]
238
+ patch_y_min = patch_xy[1]
239
+ patch_x_max = patch_x_min + patch_size[0] - 1
240
+ patch_y_max = patch_y_min + patch_size[1] - 1
241
+
242
+ # PIL's crop() takes a box whose "right" and "bottom" coordinates are exclusive
243
+ # (i.e., the box is half-open), even though the max values we computed above are
244
+ # inclusive pixel indices:
245
+ #
246
+ # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#coordinate-system
247
+ #
248
+ # So we add 1 to the max values.
249
+ patch_im = pil_im.crop((patch_x_min,patch_y_min,patch_x_max+1,patch_y_max+1))
250
+ assert patch_im.size[0] == patch_size[0]
251
+ assert patch_im.size[1] == patch_size[1]
252
+
253
+ if patch_image_fn is None:
254
+ assert patch_folder is not None,\
255
+ "If you don't supply a patch filename to extract_patch_from_image, you need to supply a folder name"
256
+ patch_name = patch_info_to_patch_name(image_name,patch_x_min,patch_y_min)
257
+ patch_image_fn = os.path.join(patch_folder,patch_name + '.jpg')
258
+
259
+ if os.path.isfile(patch_image_fn) and (not overwrite):
260
+ pass
261
+ else:
262
+ patch_im.save(patch_image_fn,quality=patch_jpeg_quality)
263
+
264
+ patch_info = {}
265
+ patch_info['xmin'] = patch_x_min
266
+ patch_info['xmax'] = patch_x_max
267
+ patch_info['ymin'] = patch_y_min
268
+ patch_info['ymax'] = patch_y_max
269
+ patch_info['patch_fn'] = patch_image_fn
270
+
271
+ return patch_info
272
+
273
+ # ...def extract_patch_from_image(...)
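A minimal sketch of the half-open crop box described in the comment above (illustrative only; not part of the package):

    from PIL import Image
    im = Image.new('RGB', (100, 100))
    # A 32x32 patch with upper-left corner (10, 20): the inclusive max indices are
    # (41, 51), so the box passed to PIL's crop() is (10, 20, 42, 52)
    patch = im.crop((10, 20, 10 + 32, 20 + 32))
    assert patch.size == (32, 32)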
274
+
275
+
276
+ def in_place_nms(md_results, iou_thres=0.45, verbose=True):
277
+ """
278
+ Run torch.ops.nms in-place on MD-formatted detection results.
279
+
280
+ Args:
281
+ md_results (dict): detection results for a list of images, in MD results format (i.e.,
282
+ containing a list of image dicts with the key 'images', each of which has a list
283
+ of detections with the key 'detections')
284
+ iou_thres (float, optional): IoU threshold above which we will treat two detections as
285
+ redundant
286
+ verbose (bool, optional): enable additional debug console output
287
+ """
288
+
289
+ n_detections_before = 0
290
+ n_detections_after = 0
291
+
292
+ # i_image = 18; im = md_results['images'][i_image]
293
+ for i_image,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):
294
+
295
+ if (im['detections'] is None) or (len(im['detections']) == 0):
296
+ continue
297
+
298
+ boxes = []
299
+ scores = []
300
+
301
+ n_detections_before += len(im['detections'])
302
+
303
+ # det = im['detections'][0]
304
+ for det in im['detections']:
305
+
306
+ # Using x1/x2 notation rather than x0/x1 notation to be consistent
307
+ # with the Torch documentation.
308
+ x1 = det['bbox'][0]
309
+ y1 = det['bbox'][1]
310
+ x2 = det['bbox'][0] + det['bbox'][2]
311
+ y2 = det['bbox'][1] + det['bbox'][3]
312
+ box = [x1,y1,x2,y2]
313
+ boxes.append(box)
314
+ scores.append(det['conf'])
315
+
316
+ # ...for each detection
317
+
318
+ t_boxes = torch.tensor(boxes)
319
+ t_scores = torch.tensor(scores)
320
+
321
+ box_indices = ops.nms(t_boxes,t_scores,iou_thres).tolist()
322
+
323
+ post_nms_detections = [im['detections'][x] for x in box_indices]
324
+
325
+ assert len(post_nms_detections) <= len(im['detections'])
326
+
327
+ im['detections'] = post_nms_detections
328
+
329
+ n_detections_after += len(im['detections'])
330
+
331
+ # ...for each image
332
+
333
+ if verbose:
334
+ print('NMS removed {} of {} detections'.format(
335
+ n_detections_before-n_detections_after,
336
+ n_detections_before))
337
+
338
+ # ...in_place_nms()
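A small self-contained sketch of the de-duplication step performed by in_place_nms (illustrative only; it uses torchvision.ops.nms the same way the function above does):

    import torch
    from torchvision import ops
    # Two nearly identical boxes from overlapping tiles, plus one distinct box
    boxes = torch.tensor([[0.10, 0.10, 0.30, 0.30],
                          [0.11, 0.10, 0.31, 0.30],
                          [0.60, 0.60, 0.80, 0.80]])
    scores = torch.tensor([0.90, 0.85, 0.70])
    keep = ops.nms(boxes, scores, 0.45).tolist()
    # keep == [0, 2]: the lower-confidence duplicate (index 1) is suppressed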
339
+
340
+
341
+ def _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,overwrite):
342
+ """
343
+ Private function to extract tiles for a single image.
344
+
345
+ Returns a dict with fields 'patches' (see extract_patch_from_image) and 'image_fn'.
346
+
347
+ If there is an error, the 'error' field will contain failure details; 'patches'
348
+ will contain whatever tiles were generated before the error occurred.
349
+ """
350
+
351
+ fn_abs = os.path.join(image_folder,fn_relative)
352
+ error = None
353
+ patches = []
354
+
355
+ image_name = path_utils.clean_filename(fn_relative,char_limit=None,force_lower=True)
356
+
357
+ try:
358
+
359
+ # Open the image
360
+ im = vis_utils.open_image(fn_abs)
361
+ image_size = [im.width,im.height]
362
+
363
+ # Generate patch boundaries (a list of [x,y] starting points)
364
+ patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)
365
+
366
+ # Extract patches
367
+ #
368
+ # patch_xy = patch_boundaries[0]
369
+ for patch_xy in patch_boundaries:
370
+
371
+ patch_info = extract_patch_from_image(im,patch_xy,patch_size,
372
+ patch_folder=tiling_folder,
373
+ image_name=image_name,
374
+ overwrite=overwrite)
375
+ patch_info['source_fn'] = fn_relative
376
+ patches.append(patch_info)
377
+
378
+ except Exception as e:
379
+
380
+ s = 'Patch generation error for {}: \n{}'.format(fn_relative,str(e))
381
+ print(s)
382
+ # patches = None
383
+ error = s
384
+
385
+ image_patch_info = {}
386
+ image_patch_info['patches'] = patches
387
+ image_patch_info['image_fn'] = fn_relative
388
+ image_patch_info['error'] = error
389
+
390
+ return image_patch_info
391
+
392
+
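For reference, a sketch of the dict returned by _extract_tiles_for_image for one image; the field names come from the code above, and the values (paths in particular) are made up:

    {
        'image_fn': 'camera01/IMG_0001.JPG',
        'error': None,
        'patches': [
            {'xmin': 0, 'xmax': 1279, 'ymin': 0, 'ymax': 1279,
             'patch_fn': '<tiling_folder>/<cleaned_image_name>_0000_0000.jpg',
             'source_fn': 'camera01/IMG_0001.JPG'},
            # ...one entry per tile
        ]
    }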
393
+ #%% Main function
394
+
395
+ def run_tiled_inference(model_file,
396
+ image_folder,
397
+ tiling_folder,
398
+ output_file,
399
+ tile_size_x=1280,
400
+ tile_size_y=1280,
401
+ tile_overlap=0.5,
402
+ checkpoint_path=None,
403
+ checkpoint_frequency=-1,
404
+ remove_tiles=False,
405
+ yolo_inference_options=None,
406
+ n_patch_extraction_workers=default_n_patch_extraction_workers,
407
+ overwrite_tiles=True,
408
+ image_list=None,
409
+ augment=False,
410
+ detector_options=None,
411
+ use_image_queue=True,
412
+ preprocess_on_image_queue=True,
413
+ loader_workers=default_loaders,
414
+ inference_size=None,
415
+ verbose=False,
416
+ pool_type=None):
417
+ """
418
+ Runs inference using [model_file] on the images in [image_folder], first splitting each image up
419
+ into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
420
+ then de-duplicating the results before merging them back into a set of detections that make
421
+ sense on the original images and writing those results to [output_file].
422
+
423
+ [tiling_folder] can be any folder, but this function reserves the right to do whatever it wants
424
+ within that folder, including deleting everything, so it's best if it's a new folder.
425
+ Conceptually this folder is temporary; it's just helpful in this case not to
426
+ use the system temp folder, because the tile cache may be very large, so the caller may
427
+ want it to be on a specific drive. If this is None, a new folder will be created in
428
+ system temp space.
429
+
430
+ tile_overlap is the fraction of overlap between tiles.
431
+
432
+ Optionally removes the temporary tiles.
433
+
434
+ if yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
435
+ this case the model will be run with run_inference_with_yolov5_val. This is typically used to
436
+ run the model with test-time augmentation.
437
+
438
+ Args:
439
+ model_file (str): model filename (ending in .pt), or a well-known model name (e.g. "MDV5A")
440
+ image_folder (str): the folder of images to process (always recursive)
441
+ tiling_folder (str): folder for temporary tile storage; see caveats above. Can be None
442
+ to use system temp space.
443
+ output_file (str): .json file to which we should write MD-formatted results
444
+ tile_size_x (int, optional): tile width
445
+ tile_size_y (int, optional): tile height
446
+ tile_overlap (float, optional): overlap between adjacent tiles, as a fraction of the
447
+ tile size
448
+ checkpoint_path (str, optional): checkpoint path; passed directly to run_detector_batch; see
449
+ run_detector_batch for details
450
+ checkpoint_frequency (int, optional): checkpoint frequency; passed directly to run_detector_batch; see
451
+ run_detector_batch for details
452
+ remove_tiles (bool, optional): whether to delete the tiles when we're done
453
+ yolo_inference_options (YoloInferenceOptions, optional): if not None, will run inference with
454
+ run_inference_with_yolov5_val.py, rather than with run_detector_batch.py, using these options
455
+ n_patch_extraction_workers (int, optional): number of workers to use for patch extraction;
456
+ set to <= 1 to disable parallelization
457
+ overwrite_tiles (bool, optional): whether to overwrite image files for individual tiles if they exist
458
+ image_list (str, optional): .json file containing a list of specific images to process. If
459
+ this is supplied, and the paths are absolute, [image_folder] will be ignored. If this is supplied,
460
+ and the paths are relative, they should be relative to [image_folder]
461
+ augment (bool, optional): apply test-time augmentation
462
+ detector_options (dict, optional): parameters to pass to run_detector, only relevant if
463
+ yolo_inference_options is None
464
+ use_image_queue (bool, optional): whether to use a loader worker queue, only relevant if
465
+ yolo_inference_options is None
466
+ preprocess_on_image_queue (bool, optional): whether the image queue should also be responsible
467
+ for preprocessing
468
+ loader_workers (int, optional): number of preprocessing loader workers to use
469
+ inference_size (int, optional): override the default inference image size, only relevant if
470
+ yolo_inference_options is None
471
+ verbose (bool, optional): enable additional debug output
472
+ pool_type (str, optional): 'thread' or 'process', or None to use the default (threads)
473
+
474
+ Returns:
475
+ dict: MD-formatted results dictionary, identical to what's written to [output_file]
476
+ """
477
+
478
+ ##%% Validate arguments
479
+
480
+ assert tile_overlap < 1 and tile_overlap >= 0, \
481
+ 'Illegal tile overlap value {}'.format(tile_overlap)
482
+
483
+ if tile_size_x == -1:
484
+ tile_size_x = default_tile_size[0]
485
+ if tile_size_y == -1:
486
+ tile_size_y = default_tile_size[1]
487
+
488
+ patch_size = [tile_size_x,tile_size_y]
489
+ patch_stride = (round(patch_size[0]*(1.0-tile_overlap)),
490
+ round(patch_size[1]*(1.0-tile_overlap)))
491
+
492
+ if pool_type is None:
493
+ pool_type = default_pool_type
494
+ assert pool_type in ('thread','process'), 'Illegal pool type {}'.format(pool_type)
495
+
496
+ if tiling_folder is None:
497
+ tiling_folder = \
498
+ os.path.join(tempfile.gettempdir(), 'md-tiling', str(uuid.uuid1()))
499
+ print('Creating temporary tiling folder: {}'.format(tiling_folder))
500
+
501
+ os.makedirs(tiling_folder,exist_ok=True)
502
+
503
+ ##%% List files
504
+
505
+ if image_list is None:
506
+
507
+ print('Enumerating images in {}'.format(image_folder))
508
+ image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
509
+ assert len(image_files_relative) > 0, 'No images found in folder {}'.format(image_folder)
510
+
511
+ else:
512
+
513
+ print('Loading image list from {}'.format(image_list))
514
+ with open(image_list,'r') as f:
515
+ image_files_relative = json.load(f)
516
+ n_absolute_paths = 0
517
+ for i_fn,fn in enumerate(image_files_relative):
518
+ if os.path.isabs(fn):
519
+ n_absolute_paths += 1
520
+ try:
521
+ fn_relative = os.path.relpath(fn,image_folder)
522
+ except ValueError:
523
+ raise ValueError(
524
+ 'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
525
+ fn,image_folder))
526
+ assert not fn_relative.startswith('..'), \
527
+ 'Illegal absolute path supplied to run_tiled_inference, {} is outside of {}'.format(
528
+ fn,image_folder)
529
+ image_files_relative[i_fn] = fn_relative
530
+ if (n_absolute_paths != 0) and (n_absolute_paths != len(image_files_relative)):
531
+ raise ValueError('Illegal file list: converted {} of {} paths to relative'.format(
532
+ n_absolute_paths,len(image_files_relative)))
533
+
534
+ ##%% Generate tiles
535
+
536
+ all_image_patch_info = None
537
+
538
+ print('Extracting patches from {} images on {} workers'.format(
539
+ len(image_files_relative),n_patch_extraction_workers))
540
+
541
+ n_workers = n_patch_extraction_workers
542
+
543
+ if n_workers <= 1:
544
+
545
+ all_image_patch_info = []
546
+
547
+ # fn_relative = image_files_relative[0]
548
+ for fn_relative in tqdm(image_files_relative):
549
+ image_patch_info = \
550
+ _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,
551
+ overwrite=overwrite_tiles)
552
+ all_image_patch_info.append(image_patch_info)
553
+
554
+ else:
555
+
556
+ pool = None
557
+ try:
558
+ if n_workers > len(image_files_relative):
559
+
560
+ print('Pool of {} requested, but only {} images available, reducing pool to {}'.\
561
+ format(n_workers,len(image_files_relative),len(image_files_relative)))
562
+ n_workers = len(image_files_relative)
563
+
564
+ if pool_type == 'thread':
565
+ pool = ThreadPool(n_workers); poolstring = 'threads'
566
+ else:
567
+ pool = Pool(n_workers); poolstring = 'processes'
568
+
569
+ print('Starting patch extraction pool with {} {}'.format(n_workers,poolstring))
570
+
571
+ all_image_patch_info = list(tqdm(pool.imap(
572
+ partial(_extract_tiles_for_image,
573
+ image_folder=image_folder,
574
+ tiling_folder=tiling_folder,
575
+ patch_size=patch_size,
576
+ patch_stride=patch_stride,
577
+ overwrite=overwrite_tiles),
578
+ image_files_relative),total=len(image_files_relative)))
579
+ finally:
580
+ if pool is not None:
581
+ pool.close()
582
+ pool.join()
583
+ print('Pool closed and joined for patch extraction')
584
+
585
+ # ...for each image
586
+
587
+ # Write tile information to file; this is just a debugging convenience
588
+ folder_name = path_utils.clean_filename(image_folder,force_lower=True)
589
+ if folder_name.startswith('_'):
590
+ folder_name = folder_name[1:]
591
+
592
+ tile_cache_file = os.path.join(tiling_folder,folder_name + '_patch_info.json')
593
+ with open(tile_cache_file,'w') as f:
594
+ json.dump(all_image_patch_info,f,indent=1)
595
+
596
+ # Keep track of patches that failed
597
+ images_with_patch_errors = {}
598
+ for patch_info in all_image_patch_info:
599
+ if patch_info['error'] is not None:
600
+ images_with_patch_errors[patch_info['image_fn']] = patch_info
601
+
602
+
603
+ ##%% Run inference on the folder of tiles
604
+
605
+ # When running with run_inference_with_yolov5_val, we'll pass the folder
606
+ if yolo_inference_options is not None:
607
+
608
+ patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
609
+
610
+ if yolo_inference_options.model_filename is None:
611
+ yolo_inference_options.model_filename = model_file
612
+ else:
613
+ assert yolo_inference_options.model_filename == model_file, \
614
+ 'Model file mismatch between yolo inference options ({}) and model file parameter ({})'.format(
615
+ yolo_inference_options.model_filename,model_file)
616
+
617
+ yolo_inference_options.input_folder = tiling_folder
618
+ yolo_inference_options.output_file = patch_level_output_file
619
+
620
+ run_inference_with_yolo_val(yolo_inference_options)
621
+ with open(patch_level_output_file,'r') as f:
622
+ patch_level_results = json.load(f)
623
+
624
+ # For standard inference, we'll pass a list of files
625
+ else:
626
+
627
+ patch_file_names = []
628
+ for patch_info in all_image_patch_info:
629
+ # If there was a patch generation error, don't run inference
630
+ if patch_info['error'] is not None:
631
+ assert patch_info['image_fn'] in images_with_patch_errors
632
+ continue
633
+ for patch in patch_info['patches']:
634
+ patch_file_names.append(patch['patch_fn'])
635
+
636
+ inference_results = load_and_run_detector_batch(model_file,
637
+ patch_file_names,
638
+ checkpoint_path=checkpoint_path,
639
+ checkpoint_frequency=checkpoint_frequency,
640
+ quiet=True,
641
+ augment=augment,
642
+ detector_options=detector_options,
643
+ use_image_queue=use_image_queue,
644
+ preprocess_on_image_queue=preprocess_on_image_queue,
645
+ image_size=inference_size,
646
+ verbose_output=verbose,
647
+ loader_workers=loader_workers)
648
+
649
+ patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
650
+
651
+ patch_level_results = write_results_to_file(inference_results,
652
+ patch_level_output_file,
653
+ relative_path_base=tiling_folder,
654
+ detector_file=model_file)
655
+
656
+ # ...if we are/aren't using run_inference_with_yolov5_val
657
+
658
+ ##%% Map patch-level detections back to the original images
659
+
660
+ # Map relative paths for patches to detections
661
+ patch_fn_relative_to_results = {}
662
+ for im in tqdm(patch_level_results['images']):
663
+ patch_fn_relative_to_results[im['file']] = im
664
+
665
+ image_level_results = {}
666
+ image_level_results['info'] = patch_level_results['info']
667
+ image_level_results['detection_categories'] = patch_level_results['detection_categories']
668
+ image_level_results['images'] = []
669
+
670
+ image_fn_relative_to_patch_info = { x['image_fn']:x for x in all_image_patch_info }
671
+
672
+ # i_image = 0; image_fn_relative = image_files_relative[i_image]
673
+ for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),
674
+ total=len(image_files_relative)):
675
+
676
+ image_fn_abs = os.path.join(image_folder,image_fn_relative)
677
+ assert os.path.isfile(image_fn_abs)
678
+
679
+ output_im = {}
680
+ output_im['file'] = image_fn_relative
681
+
682
+ # If we had a patch generation error
683
+ if image_fn_relative in images_with_patch_errors:
684
+
685
+ patch_info = image_fn_relative_to_patch_info[image_fn_relative]
686
+ assert patch_info['error'] is not None
687
+
688
+ output_im['detections'] = None
689
+ output_im['failure'] = 'Patch generation error'
690
+ output_im['failure_details'] = patch_info['error']
691
+ image_level_results['images'].append(output_im)
692
+ continue
693
+
694
+ try:
695
+ pil_im = vis_utils.open_image(image_fn_abs)
696
+ image_w = pil_im.size[0]
697
+ image_h = pil_im.size[1]
698
+
699
+ # This would be a very unusual situation; we're reading back an image here that we already
700
+ # (successfully) read once during patch generation.
701
+ except Exception as e:
702
+ print('Warning: image read error after successful patch generation for {}:\n{}'.format(
703
+ image_fn_relative,str(e)))
704
+ output_im['detections'] = None
705
+ output_im['failure'] = 'Patch processing error'
706
+ output_im['failure_details'] = str(e)
707
+ image_level_results['images'].append(output_im)
708
+ continue
709
+
710
+ output_im['detections'] = []
711
+
712
+ image_patch_info = image_fn_relative_to_patch_info[image_fn_relative]
713
+ assert image_patch_info['patches'][0]['source_fn'] == image_fn_relative
714
+
715
+ # Patches for this image
716
+ patch_fn_abs_to_patch_info_this_image = {}
717
+
718
+ for patch_info in image_patch_info['patches']:
719
+ patch_fn_abs_to_patch_info_this_image[patch_info['patch_fn']] = patch_info
720
+
721
+ # For each patch
722
+ #
723
+ # i_patch = 0; patch_fn_abs = list(patch_fn_abs_to_patch_info_this_image.keys())[i_patch]
724
+ for i_patch,patch_fn_abs in enumerate(patch_fn_abs_to_patch_info_this_image.keys()):
725
+
726
+ patch_fn_relative = os.path.relpath(patch_fn_abs,tiling_folder)
727
+ patch_results = patch_fn_relative_to_results[patch_fn_relative]
728
+ patch_info = patch_fn_abs_to_patch_info_this_image[patch_fn_abs]
729
+
730
+ # patch_results['file'] is a relative path, and a substring of patch_info['patch_fn']
731
+ assert patch_results['file'] in patch_info['patch_fn']
732
+
733
+ patch_w = (patch_info['xmax'] - patch_info['xmin']) + 1
734
+ patch_h = (patch_info['ymax'] - patch_info['ymin']) + 1
735
+ assert patch_w == patch_size[0]
736
+ assert patch_h == patch_size[1]
737
+
738
+ # If there was an inference failure on one patch, report the image
739
+ # as an inference failure
740
+ if 'detections' not in patch_results:
741
+ assert 'failure' in patch_results
742
+ output_im['detections'] = None
743
+ output_im['failure'] = patch_results['failure']
744
+ break
745
+
746
+ # det = patch_results['detections'][0]
747
+ for det in patch_results['detections']:
748
+
749
+ bbox_patch_relative = det['bbox']
750
+ xmin_patch_relative = bbox_patch_relative[0]
751
+ ymin_patch_relative = bbox_patch_relative[1]
752
+ w_patch_relative = bbox_patch_relative[2]
753
+ h_patch_relative = bbox_patch_relative[3]
754
+
755
+ # Convert from patch-relative normalized values to image-relative absolute values
756
+ w_pixels = w_patch_relative * patch_w
757
+ h_pixels = h_patch_relative * patch_h
758
+ xmin_patch_pixels = xmin_patch_relative * patch_w
759
+ ymin_patch_pixels = ymin_patch_relative * patch_h
760
+ xmin_image_pixels = patch_info['xmin'] + xmin_patch_pixels
761
+ ymin_image_pixels = patch_info['ymin'] + ymin_patch_pixels
762
+
763
+ # ...and now to image-relative normalized values
764
+ w_image_normalized = w_pixels / image_w
765
+ h_image_normalized = h_pixels / image_h
766
+ xmin_image_normalized = xmin_image_pixels / image_w
767
+ ymin_image_normalized = ymin_image_pixels / image_h
768
+
769
+ bbox_image_normalized = [xmin_image_normalized,
770
+ ymin_image_normalized,
771
+ w_image_normalized,
772
+ h_image_normalized]
773
+
774
+ bbox_image_normalized = round_float_array(bbox_image_normalized,
775
+ precision=COORD_DIGITS)
776
+ det['conf'] = round_float(det['conf'], precision=CONF_DIGITS)
777
+
778
+ output_det = {}
779
+ output_det['bbox'] = bbox_image_normalized
780
+ output_det['conf'] = det['conf']
781
+ output_det['category'] = det['category']
782
+
783
+ output_im['detections'].append(output_det)
784
+
785
+ # ...for each detection
786
+
787
+ # ...for each patch
788
+
789
+ image_level_results['images'].append(output_im)
790
+
791
+ # ...for each image
792
+
793
+ image_level_results_file_pre_nms = \
794
+ os.path.join(tiling_folder,folder_name + '_image_level_results_pre_nms.json')
795
+ with open(image_level_results_file_pre_nms,'w') as f:
796
+ json.dump(image_level_results,f,indent=1)
797
+
798
+
799
+ ##%% Run NMS
800
+
801
+ in_place_nms(image_level_results,iou_thres=nms_iou_threshold)
802
+
803
+
804
+ ##%% Write output file
805
+
806
+ print('Saving image-level results (after NMS) to {}'.format(output_file))
807
+
808
+ with open(output_file,'w') as f:
809
+ json.dump(image_level_results,f,indent=1)
810
+
811
+
812
+ ##%% Possibly remove tiles
813
+
814
+ if remove_tiles:
815
+
816
+ patch_file_names = []
817
+ for im in all_image_patch_info:
818
+ for patch in im['patches']:
819
+ patch_file_names.append(patch['patch_fn'])
820
+
821
+ for patch_fn_abs in patch_file_names:
822
+ os.remove(patch_fn_abs)
823
+
824
+
825
+ ##%% Return
826
+
827
+ return image_level_results
828
+
829
+
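A small worked example of the patch-to-image coordinate mapping performed inside run_tiled_inference above (illustrative only, with made-up numbers): a detection with patch-normalized bbox [0.5, 0.25, 0.2, 0.4] on a 1280x1280 tile whose upper-left corner is at (640, 0) in a 4000x3000 image becomes:

    patch_w = patch_h = 1280
    image_w, image_h = 4000, 3000
    xmin_pixels = 640 + 0.5 * patch_w     # 1280.0
    ymin_pixels = 0 + 0.25 * patch_h      # 320.0
    w_pixels = 0.2 * patch_w              # 256.0
    h_pixels = 0.4 * patch_h              # 512.0
    bbox_image = [xmin_pixels / image_w,  # 0.32
                  ymin_pixels / image_h,  # ~0.1067
                  w_pixels / image_w,     # 0.064
                  h_pixels / image_h]     # ~0.1707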
830
+ #%% Interactive driver
831
+
832
+ if False:
833
+
834
+ pass
835
+
836
+ #%% Run tiled inference (in Python)
837
+
838
+ model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
839
+ image_folder = os.path.expanduser('~/data/KRU-test')
840
+ tiling_folder = os.path.expanduser('~/tmp/tiling-test')
841
+ output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
842
+
843
+ tile_size_x = 3000
844
+ tile_size_y = 3000
845
+ tile_overlap = 0.5
846
+ checkpoint_path = None
847
+ checkpoint_frequency = -1
848
+ remove_tiles = False
849
+
850
+ use_yolo_inference = False
851
+
852
+ if not use_yolo_inference:
853
+
854
+ yolo_inference_options = None
855
+
856
+ else:
857
+
858
+ yolo_inference_options = YoloInferenceOptions()
859
+ yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')
860
+
861
+ run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
862
+ tile_size_x=tile_size_x, tile_size_y=tile_size_y,
863
+ tile_overlap=tile_overlap,
864
+ checkpoint_path=checkpoint_path,
865
+ checkpoint_frequency=checkpoint_frequency,
866
+ remove_tiles=remove_tiles,
867
+ yolo_inference_options=yolo_inference_options)
868
+
869
+
870
+ #%% Run tiled inference (generate a command)
871
+
872
+ import os
873
+
874
+ model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
875
+ image_folder = os.path.expanduser('~/data/KRU-test')
876
+ tiling_folder = os.path.expanduser('~/tmp/tiling-test')
877
+ output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
878
+ tile_size = [5152,3968]
879
+ tile_overlap = 0.8
880
+
881
+ cmd = f'python run_tiled_inference.py {model_file} {image_folder} {tiling_folder} {output_file} ' + \
882
+ f'--tile_overlap {tile_overlap} --no_remove_tiles --tile_size_x {tile_size[0]} --tile_size_y {tile_size[1]}'
883
+
884
+ print(cmd)
885
+ import clipboard; clipboard.copy(cmd)
886
+
887
+
888
+ #%% Preview tiled inference
889
+
890
+ from megadetector.postprocessing.postprocess_batch_results import \
891
+ PostProcessingOptions, process_batch_results
892
+
893
+ options = PostProcessingOptions()
894
+ options.image_base_dir = image_folder
895
+ options.include_almost_detections = True
896
+ options.num_images_to_sample = None
897
+ options.confidence_threshold = 0.2
898
+ options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
899
+ options.ground_truth_json_file = None
900
+ options.separate_detections_by_category = True
901
+ # options.sample_seed = 0
902
+
903
+ options.parallelize_rendering = True
904
+ options.parallelize_rendering_n_cores = 10
905
+ options.parallelize_rendering_with_threads = False
906
+
907
+ preview_base = os.path.join(tiling_folder,'preview')
908
+ os.makedirs(preview_base, exist_ok=True)
909
+
910
+ print('Rendering preview to {}'.format(preview_base))
911
+
912
+ options.md_results_file = output_file
913
+ options.output_dir = preview_base
914
+ ppresults = process_batch_results(options)
915
+ html_output_file = ppresults.output_html_file
916
+
917
+ path_utils.open_file(html_output_file)
918
+
919
+
920
+ #%% Command-line driver
921
+
922
+ def main():
923
+ """
924
+ Command-line driver for run_tiled_inference
925
+ """
926
+
927
+ parser = argparse.ArgumentParser(
928
+ description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
929
+ parser.add_argument(
930
+ 'model_file',
931
+ help='Path to detector model file (.pb or .pt)')
932
+ parser.add_argument(
933
+ 'image_folder',
934
+ help='Folder containing images for inference (always recursive, unless image_list is supplied)')
935
+ parser.add_argument(
936
+ 'tiling_folder',
937
+ help='Temporary folder where tiles and intermediate results will be stored')
938
+ parser.add_argument(
939
+ 'output_file',
940
+ help='Path to output JSON results file, should end with a .json extension')
941
+ parser.add_argument(
942
+ '--no_remove_tiles',
943
+ action='store_true',
944
+ help='Tiles are removed by default; this option suppresses tile deletion')
945
+ parser.add_argument(
946
+ '--augment',
947
+ action='store_true',
948
+ help='Enable test-time augmentation')
949
+ parser.add_argument(
950
+ '--verbose',
951
+ action='store_true',
952
+ help='Enable additional debug output')
953
+ parser.add_argument(
954
+ '--tile_size_x',
955
+ type=int,
956
+ default=default_tile_size[0],
957
+ help=('Tile width (defaults to {})'.format(default_tile_size[0])))
958
+ parser.add_argument(
959
+ '--tile_size_y',
960
+ type=int,
961
+ default=default_tile_size[1],
962
+ help=('Tile height (defaults to {})'.format(default_tile_size[1])))
963
+ parser.add_argument(
964
+ '--tile_overlap',
965
+ type=float,
966
+ default=default_patch_overlap,
967
+ help=('Overlap between tiles [0,1] (defaults to {})'.format(default_patch_overlap)))
968
+ parser.add_argument(
969
+ '--overwrite_handling',
970
+ type=str,
971
+ default='skip',
972
+ help=('Behavior when the target file exists (skip/overwrite/error) (default skip)'))
973
+ parser.add_argument(
974
+ '--image_list',
975
+ type=str,
976
+ default=None,
977
+ help=('A .json list of relative filenames (or absolute paths contained within image_folder) to include'))
978
+ parser.add_argument(
979
+ '--detector_options',
980
+ type=str,
981
+ default=None,
982
+ help=('A list of detector options (key-value pairs)'))
983
+ parser.add_argument(
984
+ '--inference_size',
985
+ type=int,
986
+ default=None,
987
+ help=('Run inference at a non-default size'))
988
+ parser.add_argument(
989
+ '--n_patch_extraction_workers',
990
+ type=int,
991
+ default=1,
992
+ help=('Number of workers to use for patch extraction'))
993
+ parser.add_argument(
994
+ '--loader_workers',
995
+ type=int,
996
+ default=default_loaders,
997
+ help=('Number of workers to use for image loading and preprocessing (0 to disable)'))
998
+
999
+ # detector_options = parse_kvp_list(args.detector_options)
1000
+
1001
+ if len(sys.argv[1:]) == 0:
1002
+ parser.print_help()
1003
+ parser.exit()
1004
+
1005
+ args = parser.parse_args()
1006
+
1007
+ model_file = try_download_known_detector(args.model_file)
1008
+ assert os.path.exists(model_file), \
1009
+ 'detector file {} does not exist'.format(args.model_file)
1010
+
1011
+ if os.path.exists(args.output_file):
1012
+ if args.overwrite_handling == 'skip':
1013
+ print('Warning: output file {} exists, skipping'.format(args.output_file))
1014
+ return
1015
+ elif args.overwrite_handling == 'overwrite':
1016
+ print('Warning: output file {} exists, overwriting'.format(args.output_file))
1017
+ elif args.overwrite_handling == 'error':
1018
+ raise ValueError('Output file {} exists'.format(args.output_file))
1019
+ else:
1020
+ raise ValueError('Unknown output handling method {}'.format(args.overwrite_handling))
1021
+
1022
+
1023
+ remove_tiles = (not args.no_remove_tiles)
1024
+
1025
+ use_image_queue = (args.loader_workers > 0)
1026
+
1027
+ run_tiled_inference(model_file,
1028
+ args.image_folder,
1029
+ args.tiling_folder,
1030
+ args.output_file,
1031
+ tile_size_x=args.tile_size_x,
1032
+ tile_size_y=args.tile_size_y,
1033
+ tile_overlap=args.tile_overlap,
1034
+ remove_tiles=remove_tiles,
1035
+ image_list=args.image_list,
1036
+ augment=args.augment,
1037
+ inference_size=args.inference_size,
1038
+ verbose=args.verbose,
1039
+ n_patch_extraction_workers=args.n_patch_extraction_workers,
1040
+ loader_workers=args.loader_workers,
1041
+ use_image_queue=use_image_queue)
1042
+
1043
+ if __name__ == '__main__':
1044
+ main()