megadetector 5.0.23-py3-none-any.whl → 5.0.25-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

Files changed (42)
  1. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +2 -3
  2. megadetector/classification/merge_classification_detection_output.py +2 -2
  3. megadetector/data_management/coco_to_labelme.py +2 -1
  4. megadetector/data_management/databases/integrity_check_json_db.py +15 -14
  5. megadetector/data_management/databases/subset_json_db.py +49 -21
  6. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +73 -69
  7. megadetector/data_management/lila/add_locations_to_nacti.py +114 -110
  8. megadetector/data_management/mewc_to_md.py +340 -0
  9. megadetector/data_management/speciesnet_to_md.py +41 -0
  10. megadetector/data_management/yolo_output_to_md_output.py +15 -8
  11. megadetector/detection/process_video.py +24 -7
  12. megadetector/detection/pytorch_detector.py +841 -160
  13. megadetector/detection/run_detector.py +341 -146
  14. megadetector/detection/run_detector_batch.py +307 -70
  15. megadetector/detection/run_inference_with_yolov5_val.py +61 -4
  16. megadetector/detection/tf_detector.py +6 -1
  17. megadetector/postprocessing/{combine_api_outputs.py → combine_batch_outputs.py} +10 -13
  18. megadetector/postprocessing/compare_batch_results.py +236 -7
  19. megadetector/postprocessing/create_crop_folder.py +358 -0
  20. megadetector/postprocessing/md_to_labelme.py +7 -7
  21. megadetector/postprocessing/md_to_wi.py +40 -0
  22. megadetector/postprocessing/merge_detections.py +1 -1
  23. megadetector/postprocessing/postprocess_batch_results.py +12 -5
  24. megadetector/postprocessing/separate_detections_into_folders.py +32 -4
  25. megadetector/postprocessing/validate_batch_results.py +9 -4
  26. megadetector/utils/ct_utils.py +236 -45
  27. megadetector/utils/directory_listing.py +3 -3
  28. megadetector/utils/gpu_test.py +125 -0
  29. megadetector/utils/md_tests.py +455 -116
  30. megadetector/utils/path_utils.py +43 -2
  31. megadetector/utils/wi_utils.py +2691 -0
  32. megadetector/visualization/visualization_utils.py +95 -18
  33. megadetector/visualization/visualize_db.py +25 -7
  34. megadetector/visualization/visualize_detector_output.py +60 -13
  35. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/METADATA +11 -23
  36. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/RECORD +39 -36
  37. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/WHEEL +1 -1
  38. megadetector/detection/detector_training/__init__.py +0 -0
  39. megadetector/detection/detector_training/model_main_tf2.py +0 -114
  40. megadetector/utils/torch_test.py +0 -32
  41. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/LICENSE +0 -0
  42. {megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/top_level.txt +0 -0
megadetector/visualization/visualization_utils.py

@@ -15,7 +15,7 @@ import os
  import cv2
  
  from io import BytesIO
- from PIL import Image, ImageFile, ImageFont, ImageDraw
+ from PIL import Image, ImageFile, ImageFont, ImageDraw, ImageFilter
  from multiprocessing.pool import ThreadPool
  from multiprocessing.pool import Pool
  from tqdm import tqdm
@@ -25,6 +25,7 @@ from megadetector.utils.path_utils import find_images
  from megadetector.data_management.annotations import annotation_constants
  from megadetector.data_management.annotations.annotation_constants import \
      detector_bbox_category_id_to_name
+ from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
  
  ImageFile.LOAD_TRUNCATED_IMAGES = True
  
@@ -352,13 +353,14 @@ def resize_image(image, target_width=-1, target_height=-1, output_file=None,
  def crop_image(detections, image, confidence_threshold=0.15, expansion=0):
      """
      Crops detections above [confidence_threshold] from the PIL image [image],
-     returning a list of PIL Images.
+     returning a list of PIL Images, preserving the order of [detections].
  
      Args:
          detections (list): a list of dictionaries with keys 'conf' and 'bbox';
              boxes are length-four arrays formatted as [x,y,w,h], normalized,
              upper-left origin (this is the standard MD detection format)
-         image (Image): the PIL Image object from which we should crop detections
+         image (Image or str): the PIL Image object from which we should crop detections,
+             or an image filename
          confidence_threshold (float, optional): only crop detections above this threshold
          expansion (int, optional): a number of pixels to include on each side of a cropped
              detection
@@ -369,6 +371,9 @@ def crop_image(detections, image, confidence_threshold=0.15, expansion=0):
  
      ret_images = []
  
+     if isinstance(image,str):
+         image = load_image(image)
+ 
      for detection in detections:
  
          score = float(detection['conf'])
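With this change, crop_image accepts either a PIL Image or a filename. A minimal usage sketch (the filename and detection values below are illustrative):

```python
from megadetector.visualization.visualization_utils import crop_image

# A detection in the standard MD output format (values are illustrative)
detections = [{'category': '1', 'conf': 0.92, 'bbox': [0.1, 0.2, 0.3, 0.4]}]

# As of this version, the image argument can be a filename rather than a PIL Image
crops = crop_image(detections, 'camera/site01/IMG_0001.JPG',
                   confidence_threshold=0.15, expansion=10)
for i_crop, crop in enumerate(crops):
    crop.save('crop_{}.jpg'.format(i_crop))
```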
@@ -405,12 +410,54 @@ def crop_image(detections, image, confidence_threshold=0.15, expansion=0):
  
      return ret_images
  
+ # ...def crop_image(...)
  
+ 
+ def blur_detections(image,detections,blur_radius=40):
+     """
+     Blur the regions in [image] corresponding to the MD-formatted list [detections].
+     [image] is modified in place.
+ 
+     Args:
+         image (PIL.Image.Image): image in which we should blur specific regions
+         detections (list): list of detections in the MD output format, see
+             render_detection_bounding_boxes for more detail.
+     """
+ 
+     img_width, img_height = image.size
+ 
+     for d in detections:
+ 
+         bbox = d['bbox']
+         x_norm, y_norm, width_norm, height_norm = bbox
+ 
+         # Convert normalized coordinates to absolute pixel coordinates
+         x = int(x_norm * img_width)
+         y = int(y_norm * img_height)
+         width = int(width_norm * img_width)
+         height = int(height_norm * img_height)
+ 
+         # Clip box boundaries to the image
+         left = max(0, x)
+         top = max(0, y)
+         right = min(img_width, x + width)
+         bottom = min(img_height, y + height)
+ 
+         # Crop the region, blur it, and paste it back
+         region = image.crop((left, top, right, bottom))
+         blurred_region = region.filter(ImageFilter.GaussianBlur(radius=blur_radius))
+         image.paste(blurred_region, (left, top))
+ 
+     # ...for each detection
+ 
+ # ...def blur_detections(...)
+ 
+ 
  def render_detection_bounding_boxes(detections,
                                      image,
                                      label_map='show_categories',
                                      classification_label_map=None,
-                                     confidence_threshold=0.15,
+                                     confidence_threshold=0,
                                      thickness=DEFAULT_BOX_THICKNESS,
                                      expansion=0,
                                      classification_confidence_threshold=0.3,
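Because blur_detections modifies the image in place, typical usage looks something like this sketch (the filename and detection values are illustrative; category "2" is "person" in the standard MD category mapping):

```python
from megadetector.visualization.visualization_utils import blur_detections, load_image

# MD-formatted detections (values are illustrative)
detections = [{'category': '2', 'conf': 0.85, 'bbox': [0.4, 0.3, 0.2, 0.5]}]

image = load_image('IMG_0002.JPG')
blur_detections(image, detections, blur_radius=40)  # modifies [image] in place
image.save('IMG_0002_blurred.jpg')
```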
@@ -419,7 +466,9 @@ def render_detection_bounding_boxes(detections,
                                      textalign=TEXTALIGN_LEFT,
                                      vtextalign=VTEXTALIGN_TOP,
                                      label_font_size=DEFAULT_LABEL_FONT_SIZE,
-                                     custom_strings=None):
+                                     custom_strings=None,
+                                     box_sort_order='confidence',
+                                     verbose=False):
      """
      Renders bounding boxes (with labels and confidence values) on an image for all
      detections above a threshold.
@@ -497,6 +546,9 @@ def render_detection_bounding_boxes(detections,
          label_font_size (float, optional): font size for labels
          custom_strings: optional set of strings to append to detection labels, should have the
              same length as [detections]. Appended before any classification labels.
+         box_sort_order (str, optional): sorting scheme for detection boxes, can be None,
+             "confidence", or "reverse_confidence"
+         verbose (bool, optional): enable additional debug output
      """
  
      # Input validation
@@ -516,6 +568,15 @@ def render_detection_bounding_boxes(detections,
      # for color selection
      classes = []
  
+     if box_sort_order is not None:
+ 
+         if box_sort_order == 'confidence':
+             detections = sort_list_of_dicts_by_key(detections,k='conf',reverse=False)
+         elif box_sort_order == 'reverse_confidence':
+             detections = sort_list_of_dicts_by_key(detections,k='conf',reverse=True)
+         else:
+             raise ValueError('Unrecognized sorting scheme {}'.format(box_sort_order))
+ 
      for i_detection,detection in enumerate(detections):
  
          score = detection['conf']
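sort_list_of_dicts_by_key comes from ct_utils (also changed in this release); its assumed behavior is a plain key-based sort, roughly equivalent to this sketch (not necessarily the library's exact implementation):

```python
def sort_list_of_dicts_by_key(L, k, reverse=False):
    # Sort a list of dicts by the value stored at key [k]
    return sorted(L, key=lambda d: d[k], reverse=reverse)
```

With the new default of 'confidence', detections are sorted ascending by confidence, so the highest-confidence boxes are rendered last and end up drawn on top of lower-confidence boxes.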
@@ -536,8 +597,9 @@ def render_detection_bounding_boxes(detections,
          # category or on the most confident classification category.
          clss = detection['category']
  
-         # {} is the default, which means "show labels with no mapping", so don't use "if label_map" here
-         # if label_map:
+         # This will be a list of strings that should be rendered above/below this box
+         displayed_label = []
+ 
          if label_map is not None:
              label = label_map[clss] if clss in label_map else clss
              if score is not None:
@@ -545,17 +607,14 @@ def render_detection_bounding_boxes(detections,
              else:
                  displayed_label = ['{}'.format(label)]
          else:
-             displayed_label = ''
+             displayed_label = ['']
  
          if custom_strings is not None:
              custom_string = custom_strings[i_detection]
              if custom_string is not None and len(custom_string) > 0:
-                 if isinstance(displayed_label,str):
-                     displayed_label += ' ' + custom_string
-                 else:
-                     assert len(displayed_label) == 1
-                     displayed_label[0] += ' ' + custom_string
- 
+                 assert len(displayed_label) == 1
+                 displayed_label[0] += ' ' + custom_string
+ 
  
          if ('classifications' in detection) and len(detection['classifications']) > 0:
              classifications = detection['classifications']
@@ -597,6 +656,7 @@ def render_detection_bounding_boxes(detections,
  
          # ...if we have classification results
  
+         # display_strs is a list of labels for each box
          display_strs.append(displayed_label)
          classes.append(clss)
  
@@ -606,6 +666,9 @@ def render_detection_bounding_boxes(detections,
  
      display_boxes = np.array(display_boxes)
  
+     if verbose:
+         print('Rendering {} of {} detections'.format(len(display_boxes),len(detections)))
+ 
      draw_bounding_boxes_on_image(image, display_boxes, classes,
                                   display_strs=display_strs, thickness=thickness,
                                   expansion=expansion, colormap=colormap,
@@ -953,7 +1016,8 @@ def render_db_bounding_boxes(boxes,
                               textalign=TEXTALIGN_LEFT,
                               vtextalign=VTEXTALIGN_TOP,
                               text_rotation=None,
-                              label_font_size=DEFAULT_LABEL_FONT_SIZE):
+                              label_font_size=DEFAULT_LABEL_FONT_SIZE,
+                              tags=None):
      """
      Render bounding boxes (with class labels) on an image. This is a wrapper for
      draw_bounding_boxes_on_image, allowing the caller to operate on a resized image
@@ -981,6 +1045,8 @@ def render_db_bounding_boxes(boxes,
          vtextalign (int, optional): VTEXTALIGN_TOP or VTEXTALIGN_BOTTOM
          text_rotation (float, optional): rotation to apply to text
          label_font_size (float, optional): font size for labels
+         tags (list, optional): list of strings of length len(boxes) that should be appended
+             after each class name (e.g. to show scores)
      """
  
      display_boxes = []
@@ -993,8 +1059,11 @@ def render_db_bounding_boxes(boxes,
  
      img_width, img_height = image_size
  
-     for box, clss in zip(boxes, classes):
+     for i_box in range(0,len(boxes)):
  
+         box = boxes[i_box]
+         clss = classes[i_box]
+ 
          x_min_abs, y_min_abs, width_abs, height_abs = box[0:4]
  
          ymin = y_min_abs / img_height
@@ -1008,9 +1077,17 @@ def render_db_bounding_boxes(boxes,
          if label_map:
              clss = label_map[int(clss)]
  
+         display_str = str(clss)
+ 
+         # Do we have a tag to append to the class string?
+         if tags is not None and tags[i_box] is not None and len(tags[i_box]) > 0:
+             display_str += ' ' + tags[i_box]
+ 
          # need to be a string here because PIL needs to iterate through chars
-         display_strs.append([str(clss)])
+         display_strs.append([display_str])
  
+     # ...for each box
+ 
      display_boxes = np.array(display_boxes)
  
      draw_bounding_boxes_on_image(image,
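A sketch of calling render_db_bounding_boxes with the new tags parameter (filename, box coordinates, and tag strings are illustrative; boxes are absolute [x,y,w,h] values relative to original_size, per the code above):

```python
from megadetector.visualization import visualization_utils as vis_utils

image = vis_utils.load_image('IMG_0003.JPG')

# One absolute [x, y, w, h] box, one integer class, one optional tag per box
boxes = [[100, 50, 400, 300]]
classes = [1]
tags = ['(97%)']  # appended after the class name, e.g. to show a score

vis_utils.render_db_bounding_boxes(boxes=boxes, classes=classes, image=image,
                                   original_size=image.size, tags=tags)
image.save('IMG_0003_annotated.jpg')
```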
@@ -1706,7 +1783,7 @@ if False:
          TEXTALIGN_LEFT,TEXTALIGN_RIGHT,VTEXTALIGN_BOTTOM,VTEXTALIGN_TOP, \
          DEFAULT_LABEL_FONT_SIZE
  
-     fn = os.path.expanduser('~\AppData\Local\Temp\md-tests\md-test-images\ena24_7904.jpg')
+     fn = os.path.expanduser('~/AppData/Local/Temp/md-tests/md-test-images/ena24_7904.jpg')
      output_fn = r'g:\temp\test.jpg'
  
      image = load_image(fn)
megadetector/visualization/visualize_db.py

@@ -167,7 +167,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
          options (DbVizOptions, optional): See DbVizOptions for details
  
      Returns:
-         tuple: A length-two tuple containing (the html filename) and (the loaded database).
+         tuple: A length-two tuple containing (the html filename) and (the loaded database).
      """
  
      if options is None:
@@ -315,6 +315,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
  
      bboxes = []
      box_classes = []
+     box_score_strings = []
  
      # All the class labels we've seen for this image (with or without bboxes)
      image_categories = set()
@@ -363,12 +364,19 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
              image_categories.add(category_name)
  
              if 'bbox' in anno:
-                 bbox = anno['bbox']
+                 bbox = anno['bbox']
                  if isinstance(bbox,float):
                      assert math.isnan(bbox), "I shouldn't see a bbox that's neither a box nor NaN"
                      continue
                  bboxes.append(bbox)
                  box_classes.append(anno['category_id'])
+ 
+                 box_score_string = ''
+                 if options.confidence_field_name is not None and \
+                    options.confidence_field_name in anno:
+                     score = anno[options.confidence_field_name]
+                     box_score_string = '({}%)'.format(round(100 * score))
+                 box_score_strings.append(box_score_string)
  
          # ...for each of this image's annotations
  
@@ -382,8 +390,12 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
          for c in illegal_characters:
              file_name = file_name.replace(c,'~')
  
-         rendering_info.append({'bboxes':bboxes, 'box_classes':box_classes, 'img_path':img_path,
-                                'output_file_name':file_name})
+         rendering_info_this_image = {'bboxes':bboxes,
+                                      'box_classes':box_classes,
+                                      'tags':box_score_strings,
+                                      'img_path':img_path,
+                                      'output_file_name':file_name}
+         rendering_info.append(rendering_info_this_image)
  
          label_level_string = ''
          if len(annotation_level_for_image) > 0:
@@ -442,6 +454,9 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
          img_path = rendering_info['img_path']
          bboxes = rendering_info['bboxes']
          bbox_classes = rendering_info['box_classes']
+         bbox_tags = None
+         if 'tags' in rendering_info:
+             bbox_tags = rendering_info['tags']
          output_file_name = rendering_info['output_file_name']
          output_full_path = os.path.join(output_dir, 'rendered_images', output_file_name)
  
@@ -467,11 +482,14 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
              print('Image {} failed to open, error: {}'.format(img_path, e))
              return False
  
-         vis_utils.render_db_bounding_boxes(boxes=bboxes, classes=bbox_classes,
-                                            image=image, original_size=original_size,
+         vis_utils.render_db_bounding_boxes(boxes=bboxes,
+                                            classes=bbox_classes,
+                                            image=image,
+                                            original_size=original_size,
                                             label_map=label_map,
                                             thickness=options.box_thickness,
-                                            expansion=options.box_expansion)
+                                            expansion=options.box_expansion,
+                                            tags=bbox_tags)
  
          image.save(output_full_path)
  
megadetector/visualization/visualize_detector_output.py

@@ -22,6 +22,7 @@ from tqdm import tqdm
  
  from megadetector.data_management.annotations.annotation_constants import detector_bbox_category_id_to_name
  from megadetector.visualization import visualization_utils as vis_utils
+ from megadetector.visualization.visualization_utils import blur_detections
  from megadetector.utils.ct_utils import get_max_conf
  from megadetector.utils import write_html_image_list
  from megadetector.detection.run_detector import get_typical_confidence_threshold_from_results
@@ -38,10 +39,17 @@ DEFAULT_DETECTOR_LABEL_MAP = {
  #%% Support functions
  
  def _render_image(entry,
-                   detector_label_map,classification_label_map,
-                   confidence_threshold,classification_confidence_threshold,
-                   render_detections_only,preserve_path_structure,out_dir,images_dir,
-                   output_image_width):
+                   detector_label_map,
+                   classification_label_map,
+                   confidence_threshold,
+                   classification_confidence_threshold,
+                   render_detections_only,
+                   preserve_path_structure,
+                   out_dir,
+                   images_dir,
+                   output_image_width,
+                   box_sort_order=None,
+                   category_names_to_blur=None):
      """
      Internal function for rendering a single image.
      """
@@ -65,22 +73,43 @@ def _render_image(entry,
          rendering_result['skipped_image'] = True
          return rendering_result
  
-     image_obj = os.path.join(images_dir, image_id)
-     if not os.path.exists(image_obj):
+     image_filename_in_abs = os.path.join(images_dir, image_id)
+     if not os.path.exists(image_filename_in_abs):
          print(f'Image {image_id} not found in images_dir')
          rendering_result['missing_image'] = True
          return rendering_result
  
+     # Load the image
+     image = vis_utils.open_image(image_filename_in_abs)
+ 
+     # Find categories we're supposed to blur
+     category_ids_to_blur = []
+     if category_names_to_blur is not None:
+         if isinstance(category_names_to_blur,str):
+             category_names_to_blur = [category_names_to_blur]
+         for category_id in detector_label_map:
+             if detector_label_map[category_id] in category_names_to_blur:
+                 category_ids_to_blur.append(category_id)
+ 
+     detections_to_blur = []
+     for d in entry['detections']:
+         if d['conf'] >= confidence_threshold and d['category'] in category_ids_to_blur:
+             detections_to_blur.append(d)
+     if len(detections_to_blur) > 0:
+         blur_detections(image,detections_to_blur)
+ 
+     # Resize if necessary
+     #
      # If output_image_width is -1 or None, this will just return the original image
-     image = vis_utils.resize_image(
-         vis_utils.open_image(image_obj), output_image_width)
+     image = vis_utils.resize_image(image, output_image_width)
  
      vis_utils.render_detection_bounding_boxes(
          entry['detections'], image,
          label_map=detector_label_map,
          classification_label_map=classification_label_map,
          confidence_threshold=confidence_threshold,
-         classification_confidence_threshold=classification_confidence_threshold)
+         classification_confidence_threshold=classification_confidence_threshold,
+         box_sort_order=box_sort_order)
  
      if not preserve_path_structure:
          for char in ['/', '\\', ':']:
@@ -113,7 +142,9 @@ def visualize_detector_output(detector_output_path,
                               preserve_path_structure=False,
                               parallelize_rendering=False,
                               parallelize_rendering_n_cores=10,
-                              parallelize_rendering_with_threads=True):
+                              parallelize_rendering_with_threads=True,
+                              box_sort_order=None,
+                              category_names_to_blur=None):
  
      """
      Draws bounding boxes on images given the output of a detector.
@@ -145,6 +176,10 @@ def visualize_detector_output(detector_output_path,
          parallelize_rendering_with_threads (bool, optional): determines whether we use
              threads (True) or processes (False) for parallelization (ignored if parallelize_rendering
              is False)
+         box_sort_order (str, optional): sorting scheme for detection boxes, can be None,
+             "confidence", or "reverse_confidence"
+         category_names_to_blur (list of str, optional): category names for which we should blur
+             detections, most commonly ['person']
  
      Returns:
          list: list of paths to annotated images
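Putting the two new parameters together, a programmatic call might look like the following sketch (paths and threshold are illustrative; parameter names are as they appear in this diff):

```python
from megadetector.visualization.visualize_detector_output import visualize_detector_output

annotated_images = visualize_detector_output(
    detector_output_path='md_results.json',
    out_dir='annotated_images',
    images_dir='camera_images',
    confidence_threshold=0.2,
    box_sort_order='confidence',
    category_names_to_blur=['person'])
```

From the command line, the same blurring behavior is exposed via the new --category_names_to_blur option, which accepts a single category name or a comma-separated list.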
@@ -235,7 +270,9 @@ def visualize_detector_output(detector_output_path,
                                   preserve_path_structure=preserve_path_structure,
                                   out_dir=out_dir,
                                   images_dir=images_dir,
-                                  output_image_width=output_image_width),
+                                  output_image_width=output_image_width,
+                                  box_sort_order=box_sort_order,
+                                  category_names_to_blur=category_names_to_blur),
                  images), total=len(images)))
  
      else:
@@ -245,7 +282,8 @@ def visualize_detector_output(detector_output_path,
              rendering_result = _render_image(entry,detector_label_map,classification_label_map,
                                               confidence_threshold,classification_confidence_threshold,
                                               render_detections_only,preserve_path_structure,out_dir,
-                                              images_dir,output_image_width)
+                                              images_dir,output_image_width,box_sort_order,
+                                              category_names_to_blur=category_names_to_blur)
              rendering_results.append(rendering_result)
  
          # ...for each image
@@ -334,12 +372,20 @@ def main():
      parser.add_argument(
          '-pps', '--preserve_path_structure', action='store_true',
          help='Preserve relative image paths (otherwise flattens and assigns unique file names)')
+     parser.add_argument(
+         '--category_names_to_blur', default=None, type=str,
+         help='Comma-separated list of category names to blur (or a single category name, typically "person")')
  
      if len(sys.argv[1:]) == 0:
          parser.print_help()
          parser.exit()
  
      args = parser.parse_args()
+ 
+     category_names_to_blur = args.category_names_to_blur
+     if category_names_to_blur is not None:
+         category_names_to_blur = category_names_to_blur.split(',')
+ 
      visualize_detector_output(
          detector_output_path=args.detector_output_path,
          out_dir=args.out_dir,
@@ -350,7 +396,8 @@ def main():
          random_seed=args.random_seed,
          render_detections_only=args.detections_only,
          preserve_path_structure=args.preserve_path_structure,
-         html_output_file=args.html_output_file)
+         html_output_file=args.html_output_file,
+         category_names_to_blur=category_names_to_blur)
  
      if args.html_output_file is not None and args.open_html_output_file:
          from megadetector.utils.path_utils import open_file
{megadetector-5.0.23.dist-info → megadetector-5.0.25.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: megadetector
- Version: 5.0.23
+ Version: 5.0.25
  Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
  Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
  Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
@@ -29,26 +29,27 @@ Project-URL: Documentation, https://megadetector.readthedocs.io
  Project-URL: Bug Reports, https://github.com/agentmorris/MegaDetector/issues
  Project-URL: Source, https://github.com/agentmorris/MegaDetector
  Keywords: camera traps,conservation,wildlife,ai,megadetector
- Classifier: Development Status :: 3 - Alpha
- Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3
  Requires-Python: <=3.13,>=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
+ Requires-Dist: mkl==2024.0; sys_platform != "darwin"
+ Requires-Dist: numpy<2.0,>=1.26.4
  Requires-Dist: Pillow>=9.5
  Requires-Dist: tqdm>=4.64.0
  Requires-Dist: jsonpickle>=3.0.2
  Requires-Dist: humanfriendly>=10.0
- Requires-Dist: numpy<2.0,>=1.26.4
  Requires-Dist: matplotlib>=3.8.0
  Requires-Dist: opencv-python>=4.8.0
  Requires-Dist: requests>=2.31.0
  Requires-Dist: pyqtree>=1.0.0
- Requires-Dist: seaborn>=0.12.2
  Requires-Dist: scikit-learn>=1.3.1
  Requires-Dist: pandas>=2.1.1
- Requires-Dist: PyYAML>=6.0.1
+ Requires-Dist: python-dateutil
+ Requires-Dist: send2trash
+ Requires-Dist: dill
  Requires-Dist: ultralytics-yolov5==0.1.1
+ Requires-Dist: yolov9pip==0.0.4
  Requires-Dist: python-dateutil
  
  # MegaDetector
@@ -57,28 +58,15 @@ This package is a pip-installable version of the support/inference code for [Meg
  This package is a pip-installable version of the support/inference code for [MegaDetector](https://github.com/agentmorris/MegaDetector).
  
  If you aren't looking for the Python package specifically, and you just want to learn more about what MegaDetector is all about, head over to the [MegaDetector repo](https://github.com/agentmorris/MegaDetector/?tab=readme-ov-file#megadetector).
  
+ If you don't want to run MegaDetector, and you just want to use the utilities in this package - postprocessing, manipulating large volumes of camera trap images, etc. - you may want to check out the [megadetector-utils](https://pypi.org/project/megadetector-utils/) package, which is identical to this one, but excludes all of the PyTorch/YOLO dependencies, and is thus approximately one zillion times smaller.
  
- ## Reasons you might not be looking for this package
- 
- ### If you are an ecologist...
- 
- If you are an ecologist looking to use MegaDetector to help you get through your camera trap images, you probably don't want this package, or at least you probably don't want to start at this page. We recommend starting with our "[Getting started with MegaDetector](https://github.com/agentmorris/MegaDetector/blob/main/getting-started.md)" page, then digging in to the [MegaDetector User Guide](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md), which will walk you through the process of using MegaDetector.
- 
- ### If you are a computer-vision-y type...
- 
- If you are a computer-vision-y person looking to run or fine-tune MegaDetector programmatically, you probably don't want this package. MegaDetector is just a fine-tuned version of [YOLOv5](https://github.com/ultralytics/yolov5), and the [ultralytics](https://github.com/ultralytics/ultralytics/) package (from the developers of YOLOv5) has a zillion bells and whistles for both inference and fine-tuning that this package doesn't.
- 
- ## Reasons you might want to use this package
- 
- If you want to programmatically interact with the postprocessing tools from the MegaDetector repo, or programmatically run MegaDetector in a way that produces [Timelapse](https://saul.cpsc.ucalgary.ca/timelapse)-friendly output (i.e., output in the standard [MegaDetector output format](https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#megadetector-batch-output-format)), this package might be for you.
- 
- ## If I haven't talked you out of using this package...
+ ## Installation
  
- To install:
+ Install with:
  
  `pip install megadetector`
  
- MegaDetector model weights aren't downloaded at pip-install time, but they will be (optionally) automatically downloaded the first time you run the model.
+ MegaDetector model weights aren't downloaded at the time you install the package, but they will be (optionally) automatically downloaded the first time you run the model.
  
  ## Package reference
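As a sketch of what "run the model" means in practice, using the package's batch inference API (per the MegaDetector repo's documented example; paths are illustrative): referring to a model by name rather than by file triggers the automatic weight download.

```python
from megadetector.detection.run_detector_batch import \
    load_and_run_detector_batch, write_results_to_file

# 'MDV5A' is a model name, not a path; the corresponding weights are
# downloaded automatically the first time this runs
results = load_and_run_detector_batch('MDV5A', ['camera/IMG_0001.JPG'])
write_results_to_file(results, 'md_results.json')
```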