megadetector 5.0.28__py3-none-any.whl → 10.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (197)
  1. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  2. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  3. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  4. megadetector/classification/aggregate_classifier_probs.py +3 -3
  5. megadetector/classification/analyze_failed_images.py +5 -5
  6. megadetector/classification/cache_batchapi_outputs.py +5 -5
  7. megadetector/classification/create_classification_dataset.py +11 -12
  8. megadetector/classification/crop_detections.py +10 -10
  9. megadetector/classification/csv_to_json.py +8 -8
  10. megadetector/classification/detect_and_crop.py +13 -15
  11. megadetector/classification/efficientnet/model.py +8 -8
  12. megadetector/classification/efficientnet/utils.py +6 -5
  13. megadetector/classification/evaluate_model.py +7 -7
  14. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  15. megadetector/classification/json_to_azcopy_list.py +1 -1
  16. megadetector/classification/json_validator.py +29 -32
  17. megadetector/classification/map_classification_categories.py +9 -9
  18. megadetector/classification/merge_classification_detection_output.py +12 -9
  19. megadetector/classification/prepare_classification_script.py +19 -19
  20. megadetector/classification/prepare_classification_script_mc.py +26 -26
  21. megadetector/classification/run_classifier.py +4 -4
  22. megadetector/classification/save_mislabeled.py +6 -6
  23. megadetector/classification/train_classifier.py +1 -1
  24. megadetector/classification/train_classifier_tf.py +9 -9
  25. megadetector/classification/train_utils.py +10 -10
  26. megadetector/data_management/annotations/annotation_constants.py +1 -2
  27. megadetector/data_management/camtrap_dp_to_coco.py +79 -46
  28. megadetector/data_management/cct_json_utils.py +103 -103
  29. megadetector/data_management/cct_to_md.py +49 -49
  30. megadetector/data_management/cct_to_wi.py +33 -33
  31. megadetector/data_management/coco_to_labelme.py +75 -75
  32. megadetector/data_management/coco_to_yolo.py +210 -193
  33. megadetector/data_management/databases/add_width_and_height_to_db.py +86 -12
  34. megadetector/data_management/databases/combine_coco_camera_traps_files.py +40 -40
  35. megadetector/data_management/databases/integrity_check_json_db.py +228 -200
  36. megadetector/data_management/databases/subset_json_db.py +33 -33
  37. megadetector/data_management/generate_crops_from_cct.py +88 -39
  38. megadetector/data_management/get_image_sizes.py +54 -49
  39. megadetector/data_management/labelme_to_coco.py +133 -125
  40. megadetector/data_management/labelme_to_yolo.py +159 -73
  41. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  42. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  43. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  44. megadetector/data_management/lila/download_lila_subset.py +21 -24
  45. megadetector/data_management/lila/generate_lila_per_image_labels.py +365 -107
  46. megadetector/data_management/lila/get_lila_annotation_counts.py +35 -33
  47. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  48. megadetector/data_management/lila/lila_common.py +73 -70
  49. megadetector/data_management/lila/test_lila_metadata_urls.py +28 -19
  50. megadetector/data_management/mewc_to_md.py +344 -340
  51. megadetector/data_management/ocr_tools.py +262 -255
  52. megadetector/data_management/read_exif.py +249 -227
  53. megadetector/data_management/remap_coco_categories.py +90 -28
  54. megadetector/data_management/remove_exif.py +81 -21
  55. megadetector/data_management/rename_images.py +187 -187
  56. megadetector/data_management/resize_coco_dataset.py +588 -120
  57. megadetector/data_management/speciesnet_to_md.py +41 -41
  58. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  59. megadetector/data_management/yolo_output_to_md_output.py +248 -122
  60. megadetector/data_management/yolo_to_coco.py +333 -191
  61. megadetector/detection/change_detection.py +832 -0
  62. megadetector/detection/process_video.py +340 -337
  63. megadetector/detection/pytorch_detector.py +358 -278
  64. megadetector/detection/run_detector.py +399 -186
  65. megadetector/detection/run_detector_batch.py +404 -377
  66. megadetector/detection/run_inference_with_yolov5_val.py +340 -327
  67. megadetector/detection/run_tiled_inference.py +257 -249
  68. megadetector/detection/tf_detector.py +24 -24
  69. megadetector/detection/video_utils.py +332 -295
  70. megadetector/postprocessing/add_max_conf.py +19 -11
  71. megadetector/postprocessing/categorize_detections_by_size.py +45 -45
  72. megadetector/postprocessing/classification_postprocessing.py +468 -433
  73. megadetector/postprocessing/combine_batch_outputs.py +23 -23
  74. megadetector/postprocessing/compare_batch_results.py +590 -525
  75. megadetector/postprocessing/convert_output_format.py +106 -102
  76. megadetector/postprocessing/create_crop_folder.py +347 -147
  77. megadetector/postprocessing/detector_calibration.py +173 -168
  78. megadetector/postprocessing/generate_csv_report.py +508 -499
  79. megadetector/postprocessing/load_api_results.py +48 -27
  80. megadetector/postprocessing/md_to_coco.py +133 -102
  81. megadetector/postprocessing/md_to_labelme.py +107 -90
  82. megadetector/postprocessing/md_to_wi.py +40 -40
  83. megadetector/postprocessing/merge_detections.py +92 -114
  84. megadetector/postprocessing/postprocess_batch_results.py +319 -301
  85. megadetector/postprocessing/remap_detection_categories.py +91 -38
  86. megadetector/postprocessing/render_detection_confusion_matrix.py +214 -205
  87. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  88. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  89. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +704 -679
  90. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  91. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  92. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  93. megadetector/postprocessing/validate_batch_results.py +70 -70
  94. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  95. megadetector/taxonomy_mapping/map_new_lila_datasets.py +18 -19
  96. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +54 -33
  97. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +67 -67
  98. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  99. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  100. megadetector/taxonomy_mapping/species_lookup.py +156 -74
  101. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  102. megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
  103. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  104. megadetector/utils/ct_utils.py +1049 -211
  105. megadetector/utils/directory_listing.py +21 -77
  106. megadetector/utils/gpu_test.py +22 -22
  107. megadetector/utils/md_tests.py +632 -529
  108. megadetector/utils/path_utils.py +1520 -431
  109. megadetector/utils/process_utils.py +41 -41
  110. megadetector/utils/split_locations_into_train_val.py +62 -62
  111. megadetector/utils/string_utils.py +148 -27
  112. megadetector/utils/url_utils.py +489 -176
  113. megadetector/utils/wi_utils.py +2658 -2526
  114. megadetector/utils/write_html_image_list.py +137 -137
  115. megadetector/visualization/plot_utils.py +34 -30
  116. megadetector/visualization/render_images_with_thumbnails.py +39 -74
  117. megadetector/visualization/visualization_utils.py +487 -435
  118. megadetector/visualization/visualize_db.py +232 -198
  119. megadetector/visualization/visualize_detector_output.py +82 -76
  120. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/METADATA +5 -2
  121. megadetector-10.0.0.dist-info/RECORD +139 -0
  122. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/WHEEL +1 -1
  123. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  124. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  125. megadetector/api/batch_processing/api_core/batch_service/score.py +0 -439
  126. megadetector/api/batch_processing/api_core/server.py +0 -294
  127. megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
  128. megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
  129. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  130. megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
  131. megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
  132. megadetector/api/batch_processing/api_core/server_utils.py +0 -88
  133. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  134. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  135. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  136. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  137. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  138. megadetector/api/synchronous/__init__.py +0 -0
  139. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  140. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
  141. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
  142. megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
  143. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  144. megadetector/api/synchronous/api_core/tests/load_test.py +0 -110
  145. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  146. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  147. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  148. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  149. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  150. megadetector/data_management/importers/awc_to_json.py +0 -191
  151. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  152. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  153. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  154. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  155. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  156. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  157. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  158. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  159. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  160. megadetector/data_management/importers/ena24_to_json.py +0 -276
  161. megadetector/data_management/importers/filenames_to_json.py +0 -386
  162. megadetector/data_management/importers/helena_to_cct.py +0 -283
  163. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  164. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  165. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  166. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  167. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  168. megadetector/data_management/importers/missouri_to_json.py +0 -490
  169. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  170. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  171. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  172. megadetector/data_management/importers/pc_to_json.py +0 -365
  173. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  174. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  175. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  176. megadetector/data_management/importers/rspb_to_json.py +0 -356
  177. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  178. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  179. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  180. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  181. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  182. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  183. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  184. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  185. megadetector/data_management/importers/ubc_to_json.py +0 -399
  186. megadetector/data_management/importers/umn_to_json.py +0 -507
  187. megadetector/data_management/importers/wellington_to_json.py +0 -263
  188. megadetector/data_management/importers/wi_to_json.py +0 -442
  189. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  190. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  191. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  192. megadetector/utils/azure_utils.py +0 -178
  193. megadetector/utils/sas_blob_utils.py +0 -509
  194. megadetector-5.0.28.dist-info/RECORD +0 -209
  195. /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
  196. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/licenses/LICENSE +0 -0
  197. {megadetector-5.0.28.dist-info → megadetector-10.0.0.dist-info}/top_level.txt +0 -0
@@ -6,15 +6,15 @@ run_tiled_inference.py
 
  Runs inference on a folder, first splitting each image up into tiles of size
  MxN (typically the native inference size of your detector), writing those
- tiles out to a temporary folder, then de-duplicating the resulting detections before
+ tiles out to a temporary folder, then de-duplicating the resulting detections before
  merging them back into a set of detections that make sense on the original images.
 
- This approach will likely fail to detect very large animals, so if you expect both large
- and small animals (in terms of pixel size), this script is best used in
+ This approach will likely fail to detect very large animals, so if you expect both large
+ and small animals (in terms of pixel size), this script is best used in
  conjunction with a traditional inference pass that looks at whole images.
 
  Currently requires temporary storage at least as large as the input data, generally
- a lot more than that (depending on the overlap between adjacent tiles). This is
+ a lot more than that (depending on the overlap between adjacent tiles). This is
  inefficient, but easy to debug.
 
  Programmatic invocation supports using YOLOv5's inference scripts (and test-time
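The module docstring above describes the full pipeline: tile each image, run the detector on the tiles, then de-duplicate and merge the per-tile detections. As a minimal sketch of the corresponding programmatic call (the folder paths are illustrative assumptions; "MDV5A" is a well-known model name accepted per the docstring further down, and the import path matches the file list above):

# Minimal sketch of the tile/detect/merge pipeline described above.
# Paths are illustrative assumptions.
from megadetector.detection.run_tiled_inference import run_tiled_inference

results = run_tiled_inference(model_file='MDV5A',
                              image_folder='/data/camera-trap-images',
                              tiling_folder='/scratch/md-tiles',  # treated as scratch space
                              output_file='/data/results-tiled.json')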
@@ -28,6 +28,8 @@ import os
  import json
  import tempfile
  import uuid
+ import sys
+ import argparse
 
  from tqdm import tqdm
 
@@ -63,59 +65,59 @@ def get_patch_boundaries(image_size,patch_size,patch_stride=None):
  """
  Computes a list of patch starting coordinates (x,y) given an image size (w,h)
  and a stride (x,y)
-
+
  Patch size is guaranteed, but the stride may deviate to make sure all pixels are covered.
  I.e., we move by regular strides until the current patch walks off the right/bottom,
  at which point it backs up to one patch from the end. So if your image is 15
- pixels wide and you have a stride of 10 pixels, you will get starting positions
+ pixels wide and you have a stride of 10 pixels, you will get starting positions
  of 0 (from 0 to 9) and 5 (from 5 to 14).
-
+
  Args:
  image_size (tuple): size of the image you want to divide into patches, as a length-2 tuple (w,h)
  patch_size (tuple): patch size into which you want to divide an image, as a length-2 tuple (w,h)
- patch_stride (tuple or float, optional): stride between patches, as a length-2 tuple (x,y), or a
- float; if this is a float, it's interpreted as the stride relative to the patch size
+ patch_stride (tuple or float, optional): stride between patches, as a length-2 tuple (x,y), or a
+ float; if this is a float, it's interpreted as the stride relative to the patch size
  (0.1 == 10% stride). Defaults to half the patch size.
 
  Returns:
- list: list of length-2 tuples, each representing the x/y start position of a patch
+ list: list of length-2 tuples, each representing the x/y start position of a patch
  """
-
+
  if patch_stride is None:
  patch_stride = (round(patch_size[0]*(1.0-default_patch_overlap)),
  round(patch_size[1]*(1.0-default_patch_overlap)))
  elif isinstance(patch_stride,float):
  patch_stride = (round(patch_size[0]*(patch_stride)),
  round(patch_size[1]*(patch_stride)))
-
+
  image_width = image_size[0]
  image_height = image_size[1]
-
+
  assert patch_size[0] <= image_size[0], 'Patch width {} is larger than image width {}'.format(
  patch_size[0],image_size[0])
  assert patch_size[1] <= image_size[1], 'Patch height {} is larger than image height {}'.format(
  patch_size[1],image_size[1])
-
+
  def add_patch_row(patch_start_positions,y_start):
  """
  Add one row to our list of patch start positions, i.e.
  loop over all columns.
  """
-
+
  x_start = 0; x_end = x_start + patch_size[0] - 1
-
+
  while(True):
-
+
  patch_start_positions.append([x_start,y_start])
-
+
  # If this patch put us right at the end of the last column, we're done
  if x_end == image_width - 1:
  break
-
+
  # Move one patch to the right
  x_start += patch_stride[0]
  x_end = x_start + patch_size[0] - 1
-
+
  # If this patch flows over the edge, add one more patch to cover
  # the pixels on the end, then we're done.
  if x_end > (image_width - 1):
@@ -124,27 +126,27 @@ def get_patch_boundaries(image_size,patch_size,patch_stride=None):
  x_end = x_start + patch_size[0] - 1
  patch_start_positions.append([x_start,y_start])
  break
-
+
  # ...for each column
-
+
  return patch_start_positions
-
+
  patch_start_positions = []
-
+
  y_start = 0; y_end = y_start + patch_size[1] - 1
-
+
  while(True):
-
+
  patch_start_positions = add_patch_row(patch_start_positions,y_start)
-
+
  # If this patch put us right at the bottom of the last row, we're done
  if y_end == image_height - 1:
  break
-
+
  # Move one patch down
  y_start += patch_stride[1]
  y_end = y_start + patch_size[1] - 1
-
+
  # If this patch flows over the bottom, add one more patch to cover
  # the pixels at the bottom, then we're done
  if y_end > (image_height - 1):
@@ -153,24 +155,24 @@ def get_patch_boundaries(image_size,patch_size,patch_stride=None):
  y_end = y_start + patch_size[1] - 1
  patch_start_positions = add_patch_row(patch_start_positions,y_start)
  break
-
+
  # ...for each row
-
+
  for p in patch_start_positions:
  assert p[0] >= 0 and p[1] >= 0 and p[0] <= image_width and p[1] <= image_height, \
  'Patch generation error (illegal patch {})'.format(p)
-
+
  # The last patch should always end at the bottom-right of the image
  assert patch_start_positions[-1][0]+patch_size[0] == image_width, \
  'Patch generation error (last patch does not end on the right)'
  assert patch_start_positions[-1][1]+patch_size[1] == image_height, \
  'Patch generation error (last patch does not end at the bottom)'
-
+
  # All patches should be unique
  patch_start_positions_tuples = [tuple(x) for x in patch_start_positions]
  assert len(patch_start_positions_tuples) == len(set(patch_start_positions_tuples)), \
  'Patch generation error (duplicate start position)'
-
+
  return patch_start_positions
 
  # ...get_patch_boundaries()
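Restating the docstring's stride example concretely: for a 15-pixel-wide image with 10-pixel patches and a 10-pixel stride, the second column backs up so the last patch ends exactly at the right edge. A sketch using the float form of patch_stride (which, per the docstring, is relative to patch size):

# The docstring's 15-pixel example: 10x10 patches on a 15x10 image,
# with a stride equal to the patch size (patch_stride=1.0).
starts = get_patch_boundaries(image_size=(15, 10),
                              patch_size=(10, 10),
                              patch_stride=1.0)
# starts == [[0, 0], [5, 0]]: the second patch backs up from x=10 to x=5,
# so it covers pixels 5-14 and ends at the right edge.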
@@ -180,12 +182,12 @@ def patch_info_to_patch_name(image_name,patch_x_min,patch_y_min):
  """
  Gives a unique string name to an x/y coordinate, e.g. turns ("a.jpg",10,20) into
  "a.jpg_0010_0020".
-
+
  Args:
  image_name (str): image identifier
  patch_x_min (int): x coordinate
  patch_y_min (int): y coordinate
-
+
  Returns:
  str: name for this patch, e.g. "a.jpg_0010_0020"
  """
@@ -203,13 +205,13 @@ def extract_patch_from_image(im,
  overwrite=True):
  """
  Extracts a patch from the provided image, and writes that patch out to a new file.
-
+
  Args:
  im (str or Image): image from which we should extract a patch, can be a filename or
  a PIL Image object.
- patch_xy (tuple): length-2 tuple of ints (x,y) representing the upper-left corner
+ patch_xy (tuple): length-2 tuple of ints (x,y) representing the upper-left corner
  of the patch to extract
- patch_size (tuple): length-2 tuple of ints (w,h) representing the size of the
+ patch_size (tuple): length-2 tuple of ints (w,h) representing the size of the
  patch to extract
  patch_image_fn (str, optional): image filename to write the patch to; if this is None
  the filename will be generated from [image_name] and the patch coordinates
@@ -218,16 +220,16 @@ def extract_patch_from_image(im,
  image_name (str, optional): the identifier of the source image; only used to generate
  a patch filename, so only required if [patch_image_fn] is None
  overwrite (bool, optional): whether to overwrite an existing patch image
-
+
  Returns:
  dict: a dictionary with fields xmin,xmax,ymin,ymax,patch_fn
  """
-
+
  if isinstance(im,str):
  pil_im = vis_utils.open_image(im)
  else:
  pil_im = im
-
+
  patch_x_min = patch_xy[0]
  patch_y_min = patch_xy[1]
  patch_x_max = patch_x_min + patch_size[0] - 1
@@ -249,19 +251,19 @@ def extract_patch_from_image(im,
  "If you don't supply a patch filename to extract_patch_from_image, you need to supply a folder name"
  patch_name = patch_info_to_patch_name(image_name,patch_x_min,patch_y_min)
  patch_image_fn = os.path.join(patch_folder,patch_name + '.jpg')
-
+
  if os.path.isfile(patch_image_fn) and (not overwrite):
  pass
- else:
+ else:
  patch_im.save(patch_image_fn,quality=patch_jpeg_quality)
-
+
  patch_info = {}
  patch_info['xmin'] = patch_x_min
  patch_info['xmax'] = patch_x_max
  patch_info['ymin'] = patch_y_min
  patch_info['ymax'] = patch_y_max
  patch_info['patch_fn'] = patch_image_fn
-
+
  return patch_info
 
  # ...def extract_patch_from_image(...)
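A usage sketch for extract_patch_from_image as shown above, with illustrative paths; the fields of the returned dict are the ones assigned in this hunk:

# Sketch: write one 1280x1280 tile anchored at the image's upper-left corner.
info = extract_patch_from_image('/data/images/a.jpg',
                                patch_xy=(0, 0),
                                patch_size=(1280, 1280),
                                patch_folder='/scratch/md-tiles',
                                image_name='a.jpg')
# info['xmin'] == 0, info['xmax'] == 1279 (inclusive pixel coordinates)
# info['patch_fn'] is the .jpg written into patch_folder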
@@ -270,33 +272,33 @@ def extract_patch_from_image(im,
  def in_place_nms(md_results, iou_thres=0.45, verbose=True):
  """
  Run torch.ops.nms in-place on MD-formatted detection results.
-
+
  Args:
- md_results (dict): detection results for a list of images, in MD results format (i.e.,
+ md_results (dict): detection results for a list of images, in MD results format (i.e.,
  containing a list of image dicts with the key 'images', each of which has a list
  of detections with the key 'detections')
  iou_thres (float, optional): IoU threshold above which we will treat two detections as
  redundant
  verbose (bool, optional): enable additional debug console output
  """
-
+
  n_detections_before = 0
  n_detections_after = 0
-
+
  # i_image = 18; im = md_results['images'][i_image]
  for i_image,im in tqdm(enumerate(md_results['images']),total=len(md_results['images'])):
-
+
  if (im['detections'] is None) or (len(im['detections']) == 0):
  continue
-
+
  boxes = []
  scores = []
-
+
  n_detections_before += len(im['detections'])
-
+
  # det = im['detections'][0]
  for det in im['detections']:
-
+
  # Using x1/x2 notation rather than x0/x1 notation to be consistent
  # with the Torch documentation.
  x1 = det['bbox'][0]
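The loop above (continuing in the next hunk) reshapes each MD-format [x, y, w, h] box into the [x1, y1, x2, y2] form that NMS expects. A standalone sketch of that call, assuming the module's ops alias refers to torchvision.ops, as the nms signature used here suggests:

# Standalone sketch of the NMS step, with made-up boxes and scores.
import torch
from torchvision import ops

boxes = torch.tensor([[0.10, 0.10, 0.40, 0.40],   # two heavily-overlapping boxes
                      [0.12, 0.11, 0.41, 0.42],
                      [0.60, 0.60, 0.90, 0.90]])  # and one disjoint box
scores = torch.tensor([0.90, 0.80, 0.70])
keep = ops.nms(boxes, scores, 0.45)
# keep -> tensor([0, 2]): the lower-scoring duplicate (index 1) is suppressed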
@@ -308,86 +310,86 @@ def in_place_nms(md_results, iou_thres=0.45, verbose=True):
  scores.append(det['conf'])
 
  # ...for each detection
-
+
  t_boxes = torch.tensor(boxes)
  t_scores = torch.tensor(scores)
-
+
  box_indices = ops.nms(t_boxes,t_scores,iou_thres).tolist()
-
+
  post_nms_detections = [im['detections'][x] for x in box_indices]
-
+
  assert len(post_nms_detections) <= len(im['detections'])
-
+
  im['detections'] = post_nms_detections
-
+
  n_detections_after += len(im['detections'])
-
+
  # ...for each image
-
+
  if verbose:
  print('NMS removed {} of {} detections'.format(
  n_detections_before-n_detections_after,
  n_detections_before))
-
+
  # ...in_place_nms()
 
 
  def _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,overwrite):
  """
  Private function to extract tiles for a single image.
-
+
  Returns a dict with fields 'patches' (see extract_patch_from_image) and 'image_fn'.
-
+
  If there is an error, 'patches' will be None and the 'error' field will contain
  failure details. In that case, some tiles may still be generated.
  """
-
+
  fn_abs = os.path.join(image_folder,fn_relative)
  error = None
- patches = []
-
+ patches = []
+
  image_name = path_utils.clean_filename(fn_relative,char_limit=None,force_lower=True)
-
+
  try:
-
+
  # Open the image
  im = vis_utils.open_image(fn_abs)
  image_size = [im.width,im.height]
-
+
  # Generate patch boundaries (a list of [x,y] starting points)
- patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)
-
+ patch_boundaries = get_patch_boundaries(image_size,patch_size,patch_stride)
+
  # Extract patches
  #
- # patch_xy = patch_boundaries[0]
+ # patch_xy = patch_boundaries[0]
  for patch_xy in patch_boundaries:
-
+
  patch_info = extract_patch_from_image(im,patch_xy,patch_size,
  patch_folder=tiling_folder,
  image_name=image_name,
  overwrite=overwrite)
  patch_info['source_fn'] = fn_relative
  patches.append(patch_info)
-
+
  except Exception as e:
-
+
  s = 'Patch generation error for {}: \n{}'.format(fn_relative,str(e))
  print(s)
  # patches = None
  error = s
-
+
  image_patch_info = {}
  image_patch_info['patches'] = patches
  image_patch_info['image_fn'] = fn_relative
  image_patch_info['error'] = error
-
+
  return image_patch_info
-
-
+
+
  #%% Main function
-
- def run_tiled_inference(model_file,
- image_folder,
+
+ def run_tiled_inference(model_file,
+ image_folder,
  tiling_folder,
  output_file,
  tile_size_x=1280,
@@ -395,7 +397,7 @@ def run_tiled_inference(model_file,
  tile_overlap=0.5,
  checkpoint_path=None,
  checkpoint_frequency=-1,
- remove_tiles=False,
+ remove_tiles=False,
  yolo_inference_options=None,
  n_patch_extraction_workers=default_n_patch_extraction_workers,
  overwrite_tiles=True,
@@ -406,26 +408,26 @@ def run_tiled_inference(model_file,
  preprocess_on_image_queue=True,
  inference_size=None):
  """
- Runs inference using [model_file] on the images in [image_folder], first splitting each image up
+ Runs inference using [model_file] on the images in [image_folder], first splitting each image up
  into tiles of size [tile_size_x] x [tile_size_y], writing those tiles to [tiling_folder],
- then de-duplicating the results before merging them back into a set of detections that make
- sense on the original images and writing those results to [output_file].
-
+ then de-duplicating the results before merging them back into a set of detections that make
+ sense on the original images and writing those results to [output_file].
+
  [tiling_folder] can be any folder, but this function reserves the right to do whatever it wants
- within that folder, including deleting everything, so it's best if it's a new folder.
+ within that folder, including deleting everything, so it's best if it's a new folder.
  Conceptually this folder is temporary, it's just helpful in this case to not actually
- use the system temp folder, because the tile cache may be very large, so the caller may
- want it to be on a specific drive. If this is None, a new folder will be created in
+ use the system temp folder, because the tile cache may be very large, so the caller may
+ want it to be on a specific drive. If this is None, a new folder will be created in
  system temp space.
-
+
  tile_overlap is the fraction of overlap between tiles.
-
+
  Optionally removes the temporary tiles.
-
- if yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
- this case the model will be run with run_inference_with_yolov5_val. This is typically used to
+
+ if yolo_inference_options is supplied, it should be an instance of YoloInferenceOptions; in
+ this case the model will be run with run_inference_with_yolov5_val. This is typically used to
  run the model with test-time augmentation.
-
+
  Args:
  model_file (str): model filename (ending in .pt), or a well-known model name (e.g. "MDV5A")
  image_folder (str): the folder of images to process (always recursive)
@@ -445,7 +447,8 @@ def run_tiled_inference(model_file,
  run_inference_with_yolov5_val.py, rather than with run_detector_batch.py, using these options
  n_patch_extraction_workers (int, optional): number of workers to use for patch extraction;
  set to <= 1 to disable parallelization
- image_list (list, optional): .json file containing a list of specific images to process. If
+ overwrite_tiles (bool, optional): whether to overwrite image files for individual tiles if they exist
+ image_list (list, optional): .json file containing a list of specific images to process. If
  this is supplied, and the paths are absolute, [image_folder] will be ignored. If this is supplied,
  and the paths are relative, they should be relative to [image_folder]
  augment (bool, optional): apply test-time augmentation, only relevant if yolo_inference_options
@@ -453,47 +456,47 @@ def run_tiled_inference(model_file,
  detector_options (dict, optional): parameters to pass to run_detector, only relevant if
  yolo_inference_options is None
  use_image_queue (bool, optional): whether to use a loader worker queue, only relevant if
- yolo_inference_options is None
+ yolo_inference_options is None
  preprocess_on_image_queue (bool, optional): whether the image queue should also be responsible
  for preprocessing
  inference_size (int, optional): override the default inference image size, only relevant if
  yolo_inference_options is None
-
+
  Returns:
  dict: MD-formatted results dictionary, identical to what's written to [output_file]
  """
 
  ##%% Validate arguments
-
+
  assert tile_overlap < 1 and tile_overlap >= 0, \
  'Illegal tile overlap value {}'.format(tile_overlap)
-
+
  if tile_size_x == -1:
  tile_size_x = default_tile_size[0]
  if tile_size_y == -1:
  tile_size_y = default_tile_size[1]
-
+
  patch_size = [tile_size_x,tile_size_y]
  patch_stride = (round(patch_size[0]*(1.0-tile_overlap)),
  round(patch_size[1]*(1.0-tile_overlap)))
-
+
  if tiling_folder is None:
  tiling_folder = \
  os.path.join(tempfile.gettempdir(), 'md-tiling', str(uuid.uuid1()))
  print('Creating temporary tiling folder: {}'.format(tiling_folder))
-
+
  os.makedirs(tiling_folder,exist_ok=True)
-
+
  ##%% List files
-
+
  if image_list is None:
-
+
  print('Enumerating images in {}'.format(image_folder))
- image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
+ image_files_relative = path_utils.find_images(image_folder, recursive=True, return_relative_paths=True)
  assert len(image_files_relative) > 0, 'No images found in folder {}'.format(image_folder)
-
+
  else:
-
+
  print('Loading image list from {}'.format(image_list))
  with open(image_list,'r') as f:
  image_files_relative = json.load(f)
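The patch_stride computation above converts tile_overlap into a pixel stride; with the signature defaults (1280x1280 tiles, tile_overlap=0.5) it works out to:

# Stride arithmetic from the code above, using the signature defaults.
patch_size = [1280, 1280]
tile_overlap = 0.5
patch_stride = (round(patch_size[0] * (1.0 - tile_overlap)),
                round(patch_size[1] * (1.0 - tile_overlap)))
# patch_stride == (640, 640): adjacent tiles overlap by half in each dimension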
@@ -514,107 +517,114 @@ def run_tiled_inference(model_file,
  if (n_absolute_paths != 0) and (n_absolute_paths != len(image_files_relative)):
  raise ValueError('Illegal file list: converted {} of {} paths to relative'.format(
  n_absolute_paths,len(image_files_relative)))
-
+
  ##%% Generate tiles
-
+
  all_image_patch_info = None
-
+
  print('Extracting patches from {} images'.format(len(image_files_relative)))
-
+
  n_workers = n_patch_extraction_workers
-
+
  if n_workers <= 1:
-
+
  all_image_patch_info = []
-
- # fn_relative = image_files_relative[0]
- for fn_relative in tqdm(image_files_relative):
+
+ # fn_relative = image_files_relative[0]
+ for fn_relative in tqdm(image_files_relative):
  image_patch_info = \
  _extract_tiles_for_image(fn_relative,image_folder,tiling_folder,patch_size,patch_stride,
  overwrite=overwrite_tiles)
  all_image_patch_info.append(image_patch_info)
-
+
  else:
-
+
  from multiprocessing.pool import ThreadPool
  from multiprocessing.pool import Pool
  from functools import partial
 
- if n_workers > len(image_files_relative):
-
- print('Pool of {} requested, but only {} images available, reducing pool to {}'.\
- format(n_workers,len(image_files_relative),len(image_files_relative)))
- n_workers = len(image_files_relative)
-
- if parallelization_uses_threads:
- pool = ThreadPool(n_workers); poolstring = 'threads'
- else:
- pool = Pool(n_workers); poolstring = 'processes'
-
- print('Starting patch extraction pool with {} {}'.format(n_workers,poolstring))
-
- all_image_patch_info = list(tqdm(pool.imap(
- partial(_extract_tiles_for_image,
- image_folder=image_folder,
- tiling_folder=tiling_folder,
- patch_size=patch_size,
- patch_stride=patch_stride,
- overwrite=overwrite_tiles),
- image_files_relative),total=len(image_files_relative)))
-
+ pool = None
+ try:
+ if n_workers > len(image_files_relative):
+
+ print('Pool of {} requested, but only {} images available, reducing pool to {}'.\
+ format(n_workers,len(image_files_relative),len(image_files_relative)))
+ n_workers = len(image_files_relative)
+
+ if parallelization_uses_threads:
+ pool = ThreadPool(n_workers); poolstring = 'threads'
+ else:
+ pool = Pool(n_workers); poolstring = 'processes'
+
+ print('Starting patch extraction pool with {} {}'.format(n_workers,poolstring))
+
+ all_image_patch_info = list(tqdm(pool.imap(
+ partial(_extract_tiles_for_image,
+ image_folder=image_folder,
+ tiling_folder=tiling_folder,
+ patch_size=patch_size,
+ patch_stride=patch_stride,
+ overwrite=overwrite_tiles),
+ image_files_relative),total=len(image_files_relative)))
+ finally:
+ if pool is not None:
+ pool.close()
+ pool.join()
+ print("Pool closed and joined for patch extraction")
+
  # ...for each image
-
+
  # Write tile information to file; this is just a debugging convenience
  folder_name = path_utils.clean_filename(image_folder,force_lower=True)
  if folder_name.startswith('_'):
  folder_name = folder_name[1:]
-
+
  tile_cache_file = os.path.join(tiling_folder,folder_name + '_patch_info.json')
  with open(tile_cache_file,'w') as f:
  json.dump(all_image_patch_info,f,indent=1)
-
+
  # Keep track of patches that failed
  images_with_patch_errors = {}
  for patch_info in all_image_patch_info:
  if patch_info['error'] is not None:
  images_with_patch_errors[patch_info['image_fn']] = patch_info
-
-
+
+
  ##%% Run inference on the folder of tiles
-
+
  # When running with run_inference_with_yolov5_val, we'll pass the folder
  if yolo_inference_options is not None:
-
+
  patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
-
+
  if yolo_inference_options.model_filename is None:
  yolo_inference_options.model_filename = model_file
  else:
  assert yolo_inference_options.model_filename == model_file, \
  'Model file mismatch between yolo inference options ({}) and model file parameter ({})'.format(
  yolo_inference_options.model_filename,model_file)
-
+
  yolo_inference_options.input_folder = tiling_folder
  yolo_inference_options.output_file = patch_level_output_file
-
+
  run_inference_with_yolo_val(yolo_inference_options)
  with open(patch_level_output_file,'r') as f:
  patch_level_results = json.load(f)
-
+
  # For standard inference, we'll pass a list of files
  else:
-
+
  patch_file_names = []
  for im in all_image_patch_info:
- # If there was a patch generation error, don't run inference
+ # If there was a patch generation error, don't run inference
  if im['error'] is not None:
  assert im['image_fn'] in images_with_patch_errors
  continue
  for patch in im['patches']:
  patch_file_names.append(patch['patch_fn'])
-
- inference_results = load_and_run_detector_batch(model_file,
- patch_file_names,
+
+ inference_results = load_and_run_detector_batch(model_file,
+ patch_file_names,
  checkpoint_path=checkpoint_path,
  checkpoint_frequency=checkpoint_frequency,
  quiet=True,
623
633
  use_image_queue=use_image_queue,
624
634
  preprocess_on_image_queue=preprocess_on_image_queue,
625
635
  image_size=inference_size)
626
-
636
+
627
637
  patch_level_output_file = os.path.join(tiling_folder,folder_name + '_patch_level_results.json')
628
-
629
- patch_level_results = write_results_to_file(inference_results,
630
- patch_level_output_file,
631
- relative_path_base=tiling_folder,
638
+
639
+ patch_level_results = write_results_to_file(inference_results,
640
+ patch_level_output_file,
641
+ relative_path_base=tiling_folder,
632
642
  detector_file=model_file)
633
-
643
+
634
644
  # ...if we are/aren't using run_inference_with_yolov5_val
635
-
636
- ##%% Map patch-level detections back to the original images
637
-
645
+
646
+ ##%% Map patch-level detections back to the original images
647
+
638
648
  # Map relative paths for patches to detections
639
649
  patch_fn_relative_to_results = {}
640
650
  for im in tqdm(patch_level_results['images']):
@@ -644,36 +654,36 @@ def run_tiled_inference(model_file,
  image_level_results['info'] = patch_level_results['info']
  image_level_results['detection_categories'] = patch_level_results['detection_categories']
  image_level_results['images'] = []
-
+
  image_fn_relative_to_patch_info = { x['image_fn']:x for x in all_image_patch_info }
-
+
  # i_image = 0; image_fn_relative = image_files_relative[i_image]
  for i_image,image_fn_relative in tqdm(enumerate(image_files_relative),
  total=len(image_files_relative)):
-
+
  image_fn_abs = os.path.join(image_folder,image_fn_relative)
  assert os.path.isfile(image_fn_abs)
-
+
  output_im = {}
  output_im['file'] = image_fn_relative
-
+
  # If we had a patch generation error
  if image_fn_relative in images_with_patch_errors:
-
+
  patch_info = image_fn_relative_to_patch_info[image_fn_relative]
  assert patch_info['error'] is not None
-
+
  output_im['detections'] = None
  output_im['failure'] = 'Patch generation error'
  output_im['failure_details'] = patch_info['error']
  image_level_results['images'].append(output_im)
  continue
-
+
  try:
- pil_im = vis_utils.open_image(image_fn_abs)
+ pil_im = vis_utils.open_image(image_fn_abs)
  image_w = pil_im.size[0]
  image_h = pil_im.size[1]
-
+
  # This would be a very unusual situation; we're reading back an image here that we already
  # (successfully) read once during patch generation.
  except Exception as e:
683
693
  output_im['failure'] = 'Patch processing error'
684
694
  output_im['failure_details'] = str(e)
685
695
  image_level_results['images'].append(output_im)
686
- continue
687
-
696
+ continue
697
+
688
698
  output_im['detections'] = []
689
-
699
+
690
700
  image_patch_info = image_fn_relative_to_patch_info[image_fn_relative]
691
701
  assert image_patch_info['patches'][0]['source_fn'] == image_fn_relative
692
-
702
+
693
703
  # Patches for this image
694
704
  patch_fn_abs_to_patch_info_this_image = {}
695
-
705
+
696
706
  for patch_info in image_patch_info['patches']:
697
707
  patch_fn_abs_to_patch_info_this_image[patch_info['patch_fn']] = patch_info
698
-
708
+
699
709
  # For each patch
700
710
  #
701
711
  # i_patch = 0; patch_fn_abs = list(patch_fn_abs_to_patch_info_this_image.keys())[i_patch]
702
712
  for i_patch,patch_fn_abs in enumerate(patch_fn_abs_to_patch_info_this_image.keys()):
703
-
713
+
704
714
  patch_fn_relative = os.path.relpath(patch_fn_abs,tiling_folder)
705
715
  patch_results = patch_fn_relative_to_results[patch_fn_relative]
706
716
  patch_info = patch_fn_abs_to_patch_info_this_image[patch_fn_abs]
707
-
717
+
708
718
  # patch_results['file'] is a relative path, and a subset of patch_info['patch_fn']
709
719
  assert patch_results['file'] in patch_info['patch_fn']
710
-
720
+
711
721
  patch_w = (patch_info['xmax'] - patch_info['xmin']) + 1
712
722
  patch_h = (patch_info['ymax'] - patch_info['ymin']) + 1
713
723
  assert patch_w == patch_size[0]
714
724
  assert patch_h == patch_size[1]
715
-
725
+
716
726
  # If there was an inference failure on one patch, report the image
717
727
  # as an inference failure
718
728
  if 'detections' not in patch_results:
@@ -720,16 +730,16 @@ def run_tiled_inference(model_file,
720
730
  output_im['detections'] = None
721
731
  output_im['failure'] = patch_results['failure']
722
732
  break
723
-
733
+
724
734
  # det = patch_results['detections'][0]
725
735
  for det in patch_results['detections']:
726
-
736
+
727
737
  bbox_patch_relative = det['bbox']
728
738
  xmin_patch_relative = bbox_patch_relative[0]
729
739
  ymin_patch_relative = bbox_patch_relative[1]
730
740
  w_patch_relative = bbox_patch_relative[2]
731
741
  h_patch_relative = bbox_patch_relative[3]
732
-
742
+
733
743
  # Convert from patch-relative normalized values to image-relative absolute values
734
744
  w_pixels = w_patch_relative * patch_w
735
745
  h_pixels = h_patch_relative * patch_h
@@ -737,82 +747,82 @@ def run_tiled_inference(model_file,
737
747
  ymin_patch_pixels = ymin_patch_relative * patch_h
738
748
  xmin_image_pixels = patch_info['xmin'] + xmin_patch_pixels
739
749
  ymin_image_pixels = patch_info['ymin'] + ymin_patch_pixels
740
-
750
+
741
751
  # ...and now to image-relative normalized values
742
752
  w_image_normalized = w_pixels / image_w
743
753
  h_image_normalized = h_pixels / image_h
744
754
  xmin_image_normalized = xmin_image_pixels / image_w
745
755
  ymin_image_normalized = ymin_image_pixels / image_h
746
-
756
+
747
757
  bbox_image_normalized = [xmin_image_normalized,
748
758
  ymin_image_normalized,
749
759
  w_image_normalized,
750
760
  h_image_normalized]
751
-
752
- bbox_image_normalized = round_float_array(bbox_image_normalized,
761
+
762
+ bbox_image_normalized = round_float_array(bbox_image_normalized,
753
763
  precision=COORD_DIGITS)
754
764
  det['conf'] = round_float(det['conf'], precision=CONF_DIGITS)
755
-
765
+
756
766
  output_det = {}
757
767
  output_det['bbox'] = bbox_image_normalized
758
768
  output_det['conf'] = det['conf']
759
769
  output_det['category'] = det['category']
760
-
770
+
761
771
  output_im['detections'].append(output_det)
762
-
772
+
763
773
  # ...for each detection
764
-
774
+
765
775
  # ...for each patch
766
776
 
767
777
  image_level_results['images'].append(output_im)
768
-
769
- # ...for each image
778
+
779
+ # ...for each image
770
780
 
771
781
  image_level_results_file_pre_nms = \
772
782
  os.path.join(tiling_folder,folder_name + '_image_level_results_pre_nms.json')
773
783
  with open(image_level_results_file_pre_nms,'w') as f:
774
784
  json.dump(image_level_results,f,indent=1)
775
-
785
+
776
786
 
777
787
  ##%% Run NMS
778
-
788
+
779
789
  in_place_nms(image_level_results,iou_thres=nms_iou_threshold)
780
790
 
781
-
791
+
782
792
  ##%% Write output file
783
-
793
+
784
794
  print('Saving image-level results (after NMS) to {}'.format(output_file))
785
-
795
+
786
796
  with open(output_file,'w') as f:
787
797
  json.dump(image_level_results,f,indent=1)
788
798
 
789
-
799
+
790
800
  ##%% Possibly remove tiles
791
-
801
+
792
802
  if remove_tiles:
793
-
803
+
794
804
  patch_file_names = []
795
805
  for im in all_image_patch_info:
796
806
  for patch in im['patches']:
797
807
  patch_file_names.append(patch['patch_fn'])
798
-
808
+
799
809
  for patch_fn_abs in patch_file_names:
800
810
  os.remove(patch_fn_abs)
801
-
802
-
811
+
812
+
803
813
  ##%% Return
804
-
814
+
805
815
  return image_level_results
806
816
 
807
817
 
808
818
  #%% Interactive driver
809
819
 
810
820
  if False:
811
-
821
+
812
822
  pass
813
823
 
814
824
  #%% Run tiled inference (in Python)
815
-
825
+
816
826
  model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
817
827
  image_folder = os.path.expanduser('~/data/KRU-test')
818
828
  tiling_folder = os.path.expanduser('~/tmp/tiling-test')
@@ -824,47 +834,47 @@ if False:
824
834
  checkpoint_path = None
825
835
  checkpoint_frequency = -1
826
836
  remove_tiles = False
827
-
837
+
828
838
  use_yolo_inference = False
829
-
839
+
830
840
  if not use_yolo_inference:
831
-
841
+
832
842
  yolo_inference_options = None
833
-
843
+
834
844
  else:
835
-
845
+
836
846
  yolo_inference_options = YoloInferenceOptions()
837
847
  yolo_inference_options.yolo_working_folder = os.path.expanduser('~/git/yolov5')
838
-
848
+
839
849
  run_tiled_inference(model_file, image_folder, tiling_folder, output_file,
840
- tile_size_x=tile_size_x, tile_size_y=tile_size_y,
850
+ tile_size_x=tile_size_x, tile_size_y=tile_size_y,
841
851
  tile_overlap=tile_overlap,
842
- checkpoint_path=checkpoint_path,
843
- checkpoint_frequency=checkpoint_frequency,
844
- remove_tiles=remove_tiles,
852
+ checkpoint_path=checkpoint_path,
853
+ checkpoint_frequency=checkpoint_frequency,
854
+ remove_tiles=remove_tiles,
845
855
  yolo_inference_options=yolo_inference_options)
846
-
847
-
856
+
857
+
848
858
  #%% Run tiled inference (generate a command)
849
-
859
+
850
860
  import os
851
-
861
+
852
862
  model_file = os.path.expanduser('~/models/camera_traps/megadetector/md_v5.0.0/md_v5a.0.0.pt')
853
863
  image_folder = os.path.expanduser('~/data/KRU-test')
854
864
  tiling_folder = os.path.expanduser('~/tmp/tiling-test')
855
865
  output_file = os.path.expanduser('~/tmp/KRU-test-tiled.json')
856
866
  tile_size = [5152,3968]
857
867
  tile_overlap = 0.8
858
-
868
+
859
869
  cmd = f'python run_tiled_inference.py {model_file} {image_folder} {tiling_folder} {output_file} ' + \
860
870
  f'--tile_overlap {tile_overlap} --no_remove_tiles --tile_size_x {tile_size[0]} --tile_size_y {tile_size[1]}'
861
-
871
+
862
872
  print(cmd)
863
873
  import clipboard; clipboard.copy(cmd)
864
-
865
-
874
+
875
+
866
876
  #%% Preview tiled inference
867
-
877
+
868
878
  from megadetector.postprocessing.postprocess_batch_results import \
869
879
  PostProcessingOptions, process_batch_results
870
880
 
@@ -893,14 +903,12 @@ if False:
893
903
  html_output_file = ppresults.output_html_file
894
904
 
895
905
  path_utils.open_file(html_output_file)
896
-
897
-
906
+
907
+
898
908
  #%% Command-line driver
899
909
 
900
- import sys,argparse
910
+ def main(): # noqa
901
911
 
902
- def main():
903
-
904
912
  parser = argparse.ArgumentParser(
905
913
  description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
906
914
  parser.add_argument(
@@ -918,7 +926,7 @@ def main():
918
926
  parser.add_argument(
919
927
  '--no_remove_tiles',
920
928
  action='store_true',
921
- help='Tiles are removed by default; this option suppresses tile deletion')
929
+ help='Tiles are removed by default; this option suppresses tile deletion')
922
930
  parser.add_argument(
923
931
  '--tile_size_x',
924
932
  type=int,
@@ -949,8 +957,8 @@ def main():
949
957
  type=str,
950
958
  default=None,
951
959
  help=('A list of detector options (key-value pairs) to '))
952
-
953
- # detector_options = parse_kvp_list(args.detector_options)
960
+
961
+ # detector_options = parse_kvp_list(args.detector_options)
954
962
 
955
963
  if len(sys.argv[1:]) == 0:
956
964
  parser.print_help()
@@ -961,7 +969,7 @@ def main():
961
969
  model_file = try_download_known_detector(args.model_file)
962
970
  assert os.path.exists(model_file), \
963
971
  'detector file {} does not exist'.format(args.model_file)
964
-
972
+
965
973
  if os.path.exists(args.output_file):
966
974
  if args.overwrite_handling == 'skip':
967
975
  print('Warning: output file {} exists, skipping'.format(args.output_file))
@@ -972,15 +980,15 @@ def main():
972
980
  raise ValueError('Output file {} exists'.format(args.output_file))
973
981
  else:
974
982
  raise ValueError('Unknown output handling method {}'.format(args.overwrite_handling))
975
-
983
+
976
984
 
977
985
  remove_tiles = (not args.no_remove_tiles)
978
986
 
979
987
  run_tiled_inference(model_file, args.image_folder, args.tiling_folder, args.output_file,
980
- tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
988
+ tile_size_x=args.tile_size_x, tile_size_y=args.tile_size_y,
981
989
  tile_overlap=args.tile_overlap,
982
990
  remove_tiles=remove_tiles,
983
991
  image_list=args.image_list)
984
-
992
+
985
993
  if __name__ == '__main__':
986
994
  main()