megadetector 5.0.23__py3-none-any.whl → 5.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (42)
  1. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +2 -3
  2. megadetector/classification/merge_classification_detection_output.py +2 -2
  3. megadetector/data_management/coco_to_labelme.py +2 -1
  4. megadetector/data_management/databases/integrity_check_json_db.py +15 -14
  5. megadetector/data_management/databases/subset_json_db.py +49 -21
  6. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +73 -69
  7. megadetector/data_management/lila/add_locations_to_nacti.py +114 -110
  8. megadetector/data_management/mewc_to_md.py +340 -0
  9. megadetector/data_management/speciesnet_to_md.py +41 -0
  10. megadetector/data_management/yolo_output_to_md_output.py +15 -8
  11. megadetector/detection/process_video.py +24 -7
  12. megadetector/detection/pytorch_detector.py +841 -160
  13. megadetector/detection/run_detector.py +341 -146
  14. megadetector/detection/run_detector_batch.py +307 -70
  15. megadetector/detection/run_inference_with_yolov5_val.py +61 -4
  16. megadetector/detection/tf_detector.py +6 -1
  17. megadetector/postprocessing/{combine_api_outputs.py → combine_batch_outputs.py} +10 -13
  18. megadetector/postprocessing/compare_batch_results.py +236 -7
  19. megadetector/postprocessing/create_crop_folder.py +358 -0
  20. megadetector/postprocessing/md_to_labelme.py +7 -7
  21. megadetector/postprocessing/md_to_wi.py +40 -0
  22. megadetector/postprocessing/merge_detections.py +1 -1
  23. megadetector/postprocessing/postprocess_batch_results.py +12 -5
  24. megadetector/postprocessing/separate_detections_into_folders.py +32 -4
  25. megadetector/postprocessing/validate_batch_results.py +9 -4
  26. megadetector/utils/ct_utils.py +236 -45
  27. megadetector/utils/directory_listing.py +3 -3
  28. megadetector/utils/gpu_test.py +125 -0
  29. megadetector/utils/md_tests.py +455 -116
  30. megadetector/utils/path_utils.py +43 -2
  31. megadetector/utils/wi_utils.py +2691 -0
  32. megadetector/visualization/visualization_utils.py +95 -18
  33. megadetector/visualization/visualize_db.py +25 -7
  34. megadetector/visualization/visualize_detector_output.py +60 -13
  35. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/METADATA +11 -23
  36. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/RECORD +39 -36
  37. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/WHEEL +1 -1
  38. megadetector/detection/detector_training/__init__.py +0 -0
  39. megadetector/detection/detector_training/model_main_tf2.py +0 -114
  40. megadetector/utils/torch_test.py +0 -32
  41. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/LICENSE +0 -0
  42. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/top_level.txt +0 -0

megadetector/postprocessing/create_crop_folder.py

@@ -0,0 +1,358 @@
+ """
+
+ create_crop_folder.py
+
+ Given a MegaDetector .json file and a folder of images, creates a new folder
+ of images representing all above-threshold crops from the original folder.
+
+ """
+
+ #%% Constants and imports
+
+ import os
+ import json
+ from tqdm import tqdm
+
+ from multiprocessing.pool import Pool, ThreadPool
+ from collections import defaultdict
+ from functools import partial
+
+ from megadetector.utils.path_utils import insert_before_extension
+ from megadetector.visualization.visualization_utils import crop_image
+ from megadetector.visualization.visualization_utils import exif_preserving_save
+
+
+ #%% Support classes
+
+ class CreateCropFolderOptions:
+     """
+     Options used to parameterize create_crop_folder().
+     """
+
+     def __init__(self):
+
+         #: Confidence threshold determining which detections get written
+         self.confidence_threshold = 0.1
+
+         #: Number of pixels to expand each crop
+         self.expansion = 0
+
+         #: JPEG quality to use for saving crops (None for default)
+         self.quality = 95
+
+         #: Whether to overwrite existing images
+         self.overwrite = True
+
+         #: Number of concurrent workers
+         self.n_workers = 8
+
+         #: Whether to use processes ('process') or threads ('thread') for parallelization
+         self.pool_type = 'thread'
+
+
+ #%% Support functions
+
+ def _get_crop_filename(image_fn,crop_id):
+     """
+     Generate crop filenames in a consistent way.
+     """
+     if isinstance(crop_id,int):
+         crop_id = str(crop_id).zfill(3)
+     assert isinstance(crop_id,str)
+     return insert_before_extension(image_fn,'crop_' + crop_id)
+
+
+ def _generate_crops_for_single_image(crops_this_image,
+                                      input_folder,
+                                      output_folder,
+                                      options):
+     """
+     Generate all the crops required for a single image.
+     """
+     if len(crops_this_image) == 0:
+         return
+
+     image_fn_relative = crops_this_image[0]['image_fn_relative']
+     input_fn_abs = os.path.join(input_folder,image_fn_relative)
+     assert os.path.isfile(input_fn_abs)
+
+     detections_to_crop = [c['detection'] for c in crops_this_image]
+
+     cropped_images = crop_image(detections_to_crop,
+                                 input_fn_abs,
+                                 confidence_threshold=0,
+                                 expansion=options.expansion)
+
+     assert len(cropped_images) == len(crops_this_image)
+
+     # i_crop = 0; crop_info = crops_this_image[0]
+     for i_crop,crop_info in enumerate(crops_this_image):
+
+         assert crop_info['image_fn_relative'] == image_fn_relative
+         crop_filename_relative = _get_crop_filename(image_fn_relative, crop_info['crop_id'])
+         crop_filename_abs = os.path.join(output_folder,crop_filename_relative).replace('\\','/')
+
+         if os.path.isfile(crop_filename_abs) and not options.overwrite:
+             continue
+
+         cropped_image = cropped_images[i_crop]
+         os.makedirs(os.path.dirname(crop_filename_abs),exist_ok=True)
+         exif_preserving_save(cropped_image,crop_filename_abs,quality=options.quality)
+
+     # ...for each crop
+
+
+ #%% Main function
+
+ def crop_results_to_image_results(image_results_file_with_crop_ids,
+                                   crop_results_file,
+                                   output_file):
+     """
+     This function is intended to be run after you have:
+
+     1. Run MegaDetector on a folder
+     2. Generated a crop folder using create_crop_folder
+     3. Run a species classifier on those crops
+
+     This function will take the crop-level results and transform them back
+     to the original images.
+
+     Args:
+         image_results_file_with_crop_ids (str): results file for the original images,
+             containing crop IDs, likely generated via create_crop_folder.
+         crop_results_file (str): results file for the crop folder
+         output_file (str): output .json file, containing crop-level classifications
+             mapped back to the image level
+     """
+
+     ##%% Validate inputs
+
+     assert os.path.isfile(image_results_file_with_crop_ids), \
+         'Could not find image-level input file {}'.format(image_results_file_with_crop_ids)
+     assert os.path.isfile(crop_results_file), \
+         'Could not find crop results file {}'.format(crop_results_file)
+     os.makedirs(os.path.dirname(output_file),exist_ok=True)
+
+
+     ##%% Read input files
+
+     with open(image_results_file_with_crop_ids,'r') as f:
+         image_results_with_crop_ids = json.load(f)
+     with open(crop_results_file,'r') as f:
+         crop_results = json.load(f)
+
+     assert crop_results['detection_categories'] == \
+         image_results_with_crop_ids['detection_categories'], \
+         'Crop results and image-level results use different detection categories'
+
+     crop_filename_to_results = {}
+
+     # im = crop_results['images'][0]
+     for im in crop_results['images']:
+         crop_filename_to_results[im['file']] = im
+
+     image_results_with_crop_ids['classification_categories'] = \
+         crop_results['classification_categories']
+
+
+     ##%% Read classifications from crop results
+
+     # im = image_results_with_crop_ids['images'][0]
+     for im in tqdm(image_results_with_crop_ids['images']):
+
+         if 'detections' not in im or im['detections'] is None:
+             continue
+
+         for det in im['detections']:
+
+             if 'classifications' in det:
+                 del det['classifications']
+
+             if 'crop_id' in det:
+                 crop_filename_relative = det['crop_filename_relative']
+                 assert crop_filename_relative in crop_filename_to_results, \
+                     'Crop lookup error'
+                 crop_results_this_detection = crop_filename_to_results[crop_filename_relative]
+                 assert crop_results_this_detection['file'] == crop_filename_relative
+                 assert len(crop_results_this_detection['detections']) == 1
+                 assert crop_results_this_detection['detections'][0]['conf'] == det['conf']
+                 assert crop_results_this_detection['detections'][0]['category'] == det['category']
+                 assert crop_results_this_detection['detections'][0]['bbox'] == [0,0,1,1]
+                 det['classifications'] = crop_results_this_detection['detections'][0]['classifications']
+
+         # ...for each detection
+
+     # ...for each image
+
+
+     ##%% Write output file
+
+     with open(output_file,'w') as f:
+         json.dump(image_results_with_crop_ids,f,indent=1)
+
+ # ...def crop_results_to_image_results(...)
+
+
+ def create_crop_folder(input_file,
+                        input_folder,
+                        output_folder,
+                        output_file=None,
+                        crops_output_file=None,
+                        options=None):
+     """
+     Given a MegaDetector .json file and a folder of images, creates a new folder
+     of images representing all above-threshold crops from the original folder.
+
+     Optionally writes a new .json file that attaches unique IDs to each detection.
+
+     Args:
+         input_file (str): MD-formatted .json file to process
+         input_folder (str): Input image folder
+         output_folder (str): Output (cropped) image folder
+         output_file (str, optional): new .json file that attaches unique IDs to each detection.
+         crops_output_file (str, optional): new .json file that includes whole-image detections
+             for each of the crops, using confidence values from the original results
+         options (CreateCropFolderOptions, optional): crop parameters
+     """
+
+     ## Validate options, prepare output folders
+
+     if options is None:
+         options = CreateCropFolderOptions()
+
+     assert os.path.isfile(input_file), 'Input file {} not found'.format(input_file)
+     assert os.path.isdir(input_folder), 'Input folder {} not found'.format(input_folder)
+     os.makedirs(output_folder,exist_ok=True)
+     if output_file is not None:
+         os.makedirs(os.path.dirname(output_file),exist_ok=True)
+
+
+     ##%% Read input
+
+     with open(input_file,'r') as f:
+         detection_results = json.load(f)
+
+
+     ##%% Make a list of the crops we need to create
+
+     # Maps input images to a list of dicts with keys 'image_fn_relative', 'crop_id', 'detection'
+     image_fn_relative_to_crops = defaultdict(list)
+     n_crops = 0
+
+     # im = detection_results['images'][0]
+     for i_image,im in enumerate(detection_results['images']):
+
+         if 'detections' not in im or im['detections'] is None or len(im['detections']) == 0:
+             continue
+
+         detections_this_image = im['detections']
+
+         image_fn_relative = im['file']
+
+         for i_detection,det in enumerate(detections_this_image):
+
+             if det['conf'] > options.confidence_threshold:
+
+                 det['crop_id'] = i_detection
+
+                 crop_info = {'image_fn_relative':image_fn_relative,
+                              'crop_id':i_detection,
+                              'detection':det}
+
+                 crop_filename_relative = _get_crop_filename(image_fn_relative,
+                                                             crop_info['crop_id'])
+                 det['crop_filename_relative'] = crop_filename_relative
+
+                 image_fn_relative_to_crops[image_fn_relative].append(crop_info)
+                 n_crops += 1
+
+     # ...for each input image
+
+     print('Prepared a list of {} crops from {} of {} input images'.format(
+         n_crops,len(image_fn_relative_to_crops),len(detection_results['images'])))
+
+
+     ##%% Generate crops
+
+     if options.n_workers <= 1:
+
+         # image_fn_relative = next(iter(image_fn_relative_to_crops))
+         for image_fn_relative in tqdm(image_fn_relative_to_crops.keys()):
+             crops_this_image = image_fn_relative_to_crops[image_fn_relative]
+             _generate_crops_for_single_image(crops_this_image=crops_this_image,
+                                              input_folder=input_folder,
+                                              output_folder=output_folder,
+                                              options=options)
+
+     else:
+
+         print('Creating a {} pool with {} workers'.format(options.pool_type,options.n_workers))
+
+         if options.pool_type == 'thread':
+             pool = ThreadPool(options.n_workers)
+         else:
+             assert options.pool_type == 'process'
+             pool = Pool(options.n_workers)
+
+         # Each element in this list is the list of crops for a single image
+         crop_lists = list(image_fn_relative_to_crops.values())
+
+         with tqdm(total=len(image_fn_relative_to_crops)) as pbar:
+             for i,_ in enumerate(pool.imap_unordered(
+                     partial(_generate_crops_for_single_image,
+                             input_folder=input_folder,
+                             output_folder=output_folder,
+                             options=options),
+                     crop_lists)):
+                 pbar.update()
+
+     # ...if we're using parallel processing
+
+
+     ##%% Write output file
+
+     if output_file is not None:
+         with open(output_file,'w') as f:
+             json.dump(detection_results,f,indent=1)
+
+     if crops_output_file is not None:
+
+         original_images = detection_results['images']
+
+         detection_results_cropped = detection_results
+         detection_results_cropped['images'] = []
+
+         # im = original_images[0]
+         for im in original_images:
+
+             if 'detections' not in im or im['detections'] is None or len(im['detections']) == 0:
+                 continue
+
+             detections_this_image = im['detections']
+             image_fn_relative = im['file']
+
+             for i_detection,det in enumerate(detections_this_image):
+
+                 if 'crop_id' in det:
+                     im_out = {}
+                     im_out['file'] = det['crop_filename_relative']
+                     det_out = {}
+                     det_out['category'] = det['category']
+                     det_out['conf'] = det['conf']
+                     det_out['bbox'] = [0, 0, 1, 1]
+                     im_out['detections'] = [det_out]
+                     detection_results_cropped['images'].append(im_out)
+
+                 # ...if we need to include this crop in the new .json file
+
+             # ...for each crop
+
+         # ...for each original image
+
+         with open(crops_output_file,'w') as f:
+             json.dump(detection_results_cropped,f,indent=1)
+
+ # ...def create_crop_folder()
+
+
+ #%% Command-line driver
+
+ # TODO
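
The new module is built around a round trip: create_crop_folder() writes the crops plus two .json files, an external species classifier runs on the crop folder, and crop_results_to_image_results() maps the classifier output back onto the original images. A minimal sketch of that workflow, using only the functions defined above; all paths are hypothetical, and the classifier step stands in for whatever tool you run on the crop folder:

    from megadetector.postprocessing.create_crop_folder import (
        CreateCropFolderOptions, create_crop_folder, crop_results_to_image_results)

    options = CreateCropFolderOptions()
    options.confidence_threshold = 0.2  # crop only reasonably confident detections

    # Step 1: write one crop per above-threshold detection, plus a .json file
    # that records crop IDs (for the images) and one that records whole-image
    # detections (for the crops)
    create_crop_folder(input_file='md_results.json',
                       input_folder='/data/images',
                       output_folder='/data/crops',
                       output_file='md_results_with_crop_ids.json',
                       crops_output_file='crop_detections.json',
                       options=options)

    # Step 2 (external): run a species classifier on /data/crops, producing
    # 'crop_classifications.json' in MD format

    # Step 3: attach the crop-level classifications to the original images
    crop_results_to_image_results(
        image_results_file_with_crop_ids='md_results_with_crop_ids.json',
        crop_results_file='crop_classifications.json',
        output_file='md_results_with_classifications.json')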

megadetector/postprocessing/md_to_labelme.py

@@ -25,8 +25,8 @@ from multiprocessing.pool import ThreadPool
  from functools import partial
 
  from megadetector.visualization.visualization_utils import open_image
- from megadetector.utils.ct_utils import truncate_float
- from megadetector.detection.run_detector import DEFAULT_DETECTOR_LABEL_MAP
+ from megadetector.utils.ct_utils import round_float
+ from megadetector.detection.run_detector import DEFAULT_DETECTOR_LABEL_MAP, FAILURE_IMAGE_OPEN
 
  output_precision = 3
  default_confidence_threshold = 0.15
@@ -92,10 +92,10 @@ def get_labelme_dict_for_image(im,image_base_name=None,category_id_to_name=None,
          # MD boxes are [x_min, y_min, width_of_box, height_of_box] (relative)
          #
          # labelme boxes are [[x0,y0],[x1,y1]] (absolute)
-         x0 = truncate_float(det['bbox'][0] * im['width'],output_precision)
-         y0 = truncate_float(det['bbox'][1] * im['height'],output_precision)
-         x1 = truncate_float(x0 + det['bbox'][2] * im['width'],output_precision)
-         y1 = truncate_float(y0 + det['bbox'][3] * im['height'],output_precision)
+         x0 = round_float(det['bbox'][0] * im['width'],output_precision)
+         y0 = round_float(det['bbox'][1] * im['height'],output_precision)
+         x1 = round_float(x0 + det['bbox'][2] * im['width'],output_precision)
+         y1 = round_float(y0 + det['bbox'][3] * im['height'],output_precision)
          shape['points'] = [[x0,y0],[x1,y1]]
          output_dict['shapes'].append(shape)
 
@@ -210,7 +210,7 @@ def md_to_labelme(results_file,image_base,confidence_threshold=None,
          print('Warning: cannot open image {}, treating as a failure during inference'.format(
              im_full_path))
          if 'failure' not in im:
-             im['failure'] = FAILURE_IMAGE_OPEN
 
      # ...if we need to read w/h information
 
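The functional change here is that box coordinates are now rounded rather than truncated. For reference, the conversion arithmetic itself: MD boxes are [x_min, y_min, width, height] normalized to image size, while labelme wants absolute pixel corners. A worked example with invented values, using Python's built-in round() as a stand-in for ct_utils.round_float():

    # MD box, relative to image dimensions
    bbox = [0.25, 0.10, 0.50, 0.40]
    im_width, im_height = 1920, 1080

    x0 = round(bbox[0] * im_width, 3)        # 480.0
    y0 = round(bbox[1] * im_height, 3)       # 108.0
    x1 = round(x0 + bbox[2] * im_width, 3)   # 1440.0
    y1 = round(y0 + bbox[3] * im_height, 3)  # 540.0

    points = [[x0, y0], [x1, y1]]            # labelme's absolute [[x0,y0],[x1,y1]]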

megadetector/postprocessing/md_to_wi.py

@@ -0,0 +1,40 @@
+ """
+
+ md_to_wi.py
+
+ Converts the MD .json format to the WI predictions.json format.
+
+ """
+
+ #%% Imports and constants
+
+ import sys
+ import argparse
+ from megadetector.utils.wi_utils import generate_predictions_json_from_md_results
+
+
+ #%% Command-line driver
+
+ def main():
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument('md_results_file', action='store', type=str,
+                         help='output file in MD format to convert')
+     parser.add_argument('predictions_json_file', action='store', type=str,
+                         help='.json file to write in predictions.json format')
+     parser.add_argument('--base_folder', action='store', type=str, default=None,
+                         help='folder name to prepend to each path in md_results_file, ' + \
+                              'to convert relative paths to absolute paths.')
+
+     if len(sys.argv[1:]) == 0:
+         parser.print_help()
+         parser.exit()
+
+     args = parser.parse_args()
+
+     generate_predictions_json_from_md_results(args.md_results_file,
+                                               args.predictions_json_file,
+                                               base_folder=args.base_folder)
+
+ if __name__ == '__main__':
+     main()
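
A hypothetical invocation of the new converter; file names are invented, and the module path follows the file list above:

    # From the command line:
    #   python -m megadetector.postprocessing.md_to_wi md_results.json predictions.json \
    #       --base_folder /data/images
    #
    # ...or from Python, calling the same underlying function:
    from megadetector.utils.wi_utils import generate_predictions_json_from_md_results

    generate_predictions_json_from_md_results('md_results.json',
                                              'predictions.json',
                                              base_folder='/data/images')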

megadetector/postprocessing/merge_detections.py

@@ -9,7 +9,7 @@ results file from MDv5a.
  Detection categories must be the same in both files; if you want to first remap
  one file's category mapping to be the same as another's, see remap_detection_categories.
 
- If you want to literally merge two .json files, see combine_api_outputs.py.
+ If you want to literally merge two .json files, see combine_batch_outputs.py.
 
  """
 

megadetector/postprocessing/postprocess_batch_results.py

@@ -211,6 +211,9 @@ class PostProcessingOptions:
          # format('https://megadetector.readthedocs.io')
          self.footer_text = ''
 
+         #: Character encoding to use when writing the index HTML file
+         self.output_html_encoding = None
+
      # ...__init__()
 
      # ...PostProcessingOptions
@@ -778,7 +781,8 @@ def _render_image_no_gt(file_info,detection_categories_to_results_name,
          if det['conf'] > max_conf:
              max_conf = det['conf']
 
-         if ('classifications' in det) and (len(det['classifications']) > 0):
+         if ('classifications' in det) and (len(det['classifications']) > 0) and \
+             (res != 'non_detections'):
 
              # This is a list of [class,confidence] pairs, sorted by confidence
              classifications = det['classifications']
@@ -1522,7 +1526,8 @@ def process_batch_results(options):
          # Close body and html tags
          index_page += '{}</body></html>'.format(options.footer_text)
          output_html_file = os.path.join(output_dir, 'index.html')
-         with open(output_html_file, 'w') as f:
+         with open(output_html_file, 'w',
+                   encoding=options.output_html_encoding) as f:
              f.write(index_page)
 
          print('Finished writing html to {}'.format(output_html_file))
@@ -1717,7 +1722,7 @@ def process_batch_results(options):
      <p>Model version: {}</p>
      </div>
 
-     <h3>Sample images</h3>\n
+     <h3>Detection results</h3>\n
      <div class="contentdiv">\n""".format(
          style_header, job_name_string, image_count, len(detections_df), confidence_threshold_string,
          almost_detection_string, model_version_string)
@@ -1778,7 +1783,8 @@ def process_batch_results(options):
      index_page += '</div>\n'
 
      if has_classification_info:
-         index_page += '<h3>Images of detected classes</h3>'
+
+         index_page += '<h3>Species classification results</h3>'
          index_page += '<p>The same image might appear under multiple classes ' + \
              'if multiple species were detected.</p>\n'
          index_page += '<p>Classifications with confidence less than {:.1%} confidence are considered "unreliable".</p>\n'.format(
@@ -1806,7 +1812,8 @@ def process_batch_results(options):
 
      index_page += '{}</body></html>'.format(options.footer_text)
      output_html_file = os.path.join(output_dir, 'index.html')
-     with open(output_html_file, 'w') as f:
+     with open(output_html_file, 'w',
+               encoding=options.output_html_encoding) as f:
          f.write(index_page)
 
      print('Finished writing html to {}'.format(output_html_file))
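
A minimal sketch of the new output_html_encoding option; the value is passed straight through to open(), so any codec name Python accepts (e.g. 'utf-8') works. Only output_html_encoding comes from this diff; the other option fields shown are assumed from typical usage of this module and may differ:

    from megadetector.postprocessing.postprocess_batch_results import (
        PostProcessingOptions, process_batch_results)

    options = PostProcessingOptions()
    options.api_output_file = 'md_results.json'   # assumed field name
    options.output_dir = 'postprocessing-output'  # assumed field name
    options.output_html_encoding = 'utf-8'        # e.g., for non-ASCII category names
    process_batch_results(options)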

megadetector/postprocessing/separate_detections_into_folders.py

@@ -88,6 +88,7 @@ from tqdm import tqdm
  from megadetector.utils.ct_utils import args_to_object, is_float
  from megadetector.detection.run_detector import get_typical_confidence_threshold_from_results
  from megadetector.visualization import visualization_utils as vis_utils
+ from megadetector.visualization.visualization_utils import blur_detections
 
  friendly_folder_names = {'animal':'animals','person':'people','vehicle':'vehicles'}
 
@@ -188,6 +189,11 @@ class SeparateDetectionsIntoFoldersOptions:
          #: Do not set explicitly; this gets loaded from [results_file]
          self.category_id_to_category_name = None
 
+         #: List of category names for which we should blur detections, most commonly ['person']
+         #:
+         #: Can also be a comma-separated list.
+         self.category_names_to_blur = None
+
      # ...__init__()
 
      # ...class SeparateDetectionsIntoFoldersOptions
@@ -369,10 +375,10 @@ def _process_detections(im,options):
          return
 
      # At this point, this image is getting copied; we may or may not also need to
-     # draw bounding boxes.
+     # draw bounding boxes or blur pixels.
 
-     # Do a simple copy operation if we don't need to render any boxes
-     if (not options.render_boxes) or \
+     # Do a simple copy operation if we don't need to manipulate the images (render boxes, blur pixels)
+     if (not options.render_boxes and (options.category_names_to_blur is None)) or \
          (categories_above_threshold is None) or \
          (len(categories_above_threshold) == 0):
 
@@ -386,6 +392,24 @@ def _process_detections(im,options):
      # Open the source image
      pil_image = vis_utils.load_image(source_path)
 
+     # Blur regions in the image if necessary
+     category_names_to_blur = options.category_names_to_blur
+
+     if category_names_to_blur is not None:
+
+         if isinstance(category_names_to_blur,str):
+             category_names_to_blur = category_names_to_blur.split(',')
+             category_names_to_blur = [s.strip() for s in category_names_to_blur]
+
+         detections_to_blur = []
+         for d in detections:
+             category_name = options.category_id_to_category_name[d['category']]
+             category_threshold = options.category_name_to_threshold[category_name]
+             if (d['conf'] >= category_threshold) and (category_name in category_names_to_blur):
+                 detections_to_blur.append(d)
+         if len(detections_to_blur) > 0:
+             blur_detections(pil_image,detections_to_blur)
+
      # Render bounding boxes for each category separately, because
      # we allow different thresholds for each category.
 
@@ -447,9 +471,11 @@ def separate_detections_into_folders(options):
      # Input validation
 
      # Currently we don't support moving (instead of copying) when we're also rendering
-     # bounding boxes.
+     # bounding boxes or blurring humans.
      assert not (options.render_boxes and options.move_images), \
          'Cannot specify both render_boxes and move_images'
+     assert not ((options.category_names_to_blur is not None) and options.move_images), \
+         'Cannot specify both category_names_to_blur and move_images'
 
      # Create output folder if necessary
      if (os.path.isdir(options.base_output_folder)) and \
@@ -687,6 +713,8 @@ def main():
              help='Box expansion (in pixels) for rendering, only meaningful if ' + \
                  'using render_boxes (defaults to {})'.format(
                      default_box_expansion))
+     parser.add_argument('--category_names_to_blur', type=str, default=None,
+                         help='Comma-separated list of category names to blur (or a single category name, e.g. "person")')
 
      if len(sys.argv[1:])==0:
          parser.print_help()
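
A sketch of the new blurring option from Python; results_file and base_output_folder appear elsewhere in this diff, while base_input_folder is an assumed field name, and the paths are invented:

    from megadetector.postprocessing.separate_detections_into_folders import (
        SeparateDetectionsIntoFoldersOptions, separate_detections_into_folders)

    options = SeparateDetectionsIntoFoldersOptions()
    options.results_file = 'md_results.json'
    options.base_input_folder = '/data/images'   # assumed field name
    options.base_output_folder = '/data/separated'
    options.category_names_to_blur = 'person'    # string, comma-separated string, or list
    separate_detections_into_folders(options)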

megadetector/postprocessing/validate_batch_results.py

@@ -50,6 +50,9 @@ class ValidateBatchResultsOptions:
 
          #: Enable additional debug output
          self.verbose = False
+
+         #: Should we raise errors immediately (vs. just catching and reporting)?
+         self.raise_errors = False
 
  # ...class ValidateBatchResultsOptions
 
@@ -71,8 +74,7 @@ def validate_batch_results(json_filename,options=None):
      the loaded data. The "validation_results" dict contains fields called "errors", "warnings",
      and "filename". "errors" and "warnings" are lists of strings, although "errors" will never
      be longer than N=1, since validation fails at the first error.
-
-
+
      """
 
      if options is None:
@@ -223,8 +225,11 @@ def validate_batch_results(json_filename,options=None):
                  'Warning: non-standard key {} present at file level'.format(k))
 
      except Exception as e:
-
-         validation_results['errors'].append(str(e))
+
+         if options.raise_errors:
+             raise
+         else:
+             validation_results['errors'].append(str(e))
 
      # ...try/except
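
A sketch of the two error-handling modes added here, assuming (per the docstring above) that the returned dict carries a "validation_results" entry; the file name is invented. By default problems are collected and reported; with raise_errors=True the first problem propagates, which is handier under a debugger:

    from megadetector.postprocessing.validate_batch_results import (
        ValidateBatchResultsOptions, validate_batch_results)

    options = ValidateBatchResultsOptions()

    # Default behavior: collect problems in validation_results['errors']
    results = validate_batch_results('md_results.json', options=options)
    print(results['validation_results']['errors'])

    # Fail-fast behavior: re-raise the first problem encountered
    options.raise_errors = True
    validate_batch_results('md_results.json', options=options)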