megadetector 5.0.23-py3-none-any.whl → 5.0.24-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (38)
  1. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +2 -3
  2. megadetector/classification/merge_classification_detection_output.py +2 -2
  3. megadetector/data_management/coco_to_labelme.py +2 -1
  4. megadetector/data_management/databases/integrity_check_json_db.py +15 -14
  5. megadetector/data_management/databases/subset_json_db.py +49 -21
  6. megadetector/data_management/mewc_to_md.py +340 -0
  7. megadetector/data_management/wi_to_md.py +41 -0
  8. megadetector/data_management/yolo_output_to_md_output.py +15 -8
  9. megadetector/detection/process_video.py +24 -7
  10. megadetector/detection/pytorch_detector.py +841 -160
  11. megadetector/detection/run_detector.py +340 -146
  12. megadetector/detection/run_detector_batch.py +304 -68
  13. megadetector/detection/run_inference_with_yolov5_val.py +61 -4
  14. megadetector/detection/tf_detector.py +6 -1
  15. megadetector/postprocessing/{combine_api_outputs.py → combine_batch_outputs.py} +10 -13
  16. megadetector/postprocessing/compare_batch_results.py +68 -6
  17. megadetector/postprocessing/md_to_labelme.py +7 -7
  18. megadetector/postprocessing/md_to_wi.py +40 -0
  19. megadetector/postprocessing/merge_detections.py +1 -1
  20. megadetector/postprocessing/postprocess_batch_results.py +10 -3
  21. megadetector/postprocessing/separate_detections_into_folders.py +32 -4
  22. megadetector/postprocessing/validate_batch_results.py +9 -4
  23. megadetector/utils/ct_utils.py +165 -45
  24. megadetector/utils/gpu_test.py +107 -0
  25. megadetector/utils/md_tests.py +355 -108
  26. megadetector/utils/path_utils.py +9 -2
  27. megadetector/utils/wi_utils.py +1794 -0
  28. megadetector/visualization/visualization_utils.py +82 -16
  29. megadetector/visualization/visualize_db.py +25 -7
  30. megadetector/visualization/visualize_detector_output.py +60 -13
  31. {megadetector-5.0.23.dist-info → megadetector-5.0.24.dist-info}/METADATA +10 -24
  32. {megadetector-5.0.23.dist-info → megadetector-5.0.24.dist-info}/RECORD +35 -33
  33. megadetector/detection/detector_training/__init__.py +0 -0
  34. megadetector/detection/detector_training/model_main_tf2.py +0 -114
  35. megadetector/utils/torch_test.py +0 -32
  36. {megadetector-5.0.23.dist-info → megadetector-5.0.24.dist-info}/LICENSE +0 -0
  37. {megadetector-5.0.23.dist-info → megadetector-5.0.24.dist-info}/WHEEL +0 -0
  38. {megadetector-5.0.23.dist-info → megadetector-5.0.24.dist-info}/top_level.txt +0 -0
@@ -15,7 +15,7 @@ import os
 import cv2
 
 from io import BytesIO
-from PIL import Image, ImageFile, ImageFont, ImageDraw
+from PIL import Image, ImageFile, ImageFont, ImageDraw, ImageFilter
 from multiprocessing.pool import ThreadPool
 from multiprocessing.pool import Pool
 from tqdm import tqdm
@@ -25,6 +25,7 @@ from megadetector.utils.path_utils import find_images
 from megadetector.data_management.annotations import annotation_constants
 from megadetector.data_management.annotations.annotation_constants import \
     detector_bbox_category_id_to_name
+from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 
@@ -406,11 +407,46 @@ def crop_image(detections, image, confidence_threshold=0.15, expansion=0):
     return ret_images
 
 
+def blur_detections(image,detections,blur_radius=40):
+    """
+    Blur the regions in [image] corresponding to the MD-formatted list [detections].
+    [image] is modified in place.
+
+    Args:
+        image (PIL.Image.Image): image in which we should blur specific regions
+        detections (list): list of detections in the MD output format, see
+            render_detection_bounding_boxes for more detail.
+    """
+
+    img_width, img_height = image.size
+
+    for d in detections:
+
+        bbox = d['bbox']
+        x_norm, y_norm, width_norm, height_norm = bbox
+
+        # Calculate absolute pixel coordinates
+        x = int(x_norm * img_width)
+        y = int(y_norm * img_height)
+        width = int(width_norm * img_width)
+        height = int(height_norm * img_height)
+
+        # Calculate box boundaries
+        left = max(0, x)
+        top = max(0, y)
+        right = min(img_width, x + width)
+        bottom = min(img_height, y + height)
+
+        # Crop the region, blur it, and paste it back
+        region = image.crop((left, top, right, bottom))
+        blurred_region = region.filter(ImageFilter.GaussianBlur(radius=blur_radius))
+        image.paste(blurred_region, (left, top))
+
 
 def render_detection_bounding_boxes(detections,
                                     image,
                                     label_map='show_categories',
                                     classification_label_map=None,
-                                    confidence_threshold=0.15,
+                                    confidence_threshold=0,
                                     thickness=DEFAULT_BOX_THICKNESS,
                                     expansion=0,
                                     classification_confidence_threshold=0.3,
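The new blur_detections helper converts each normalized MD-format box to pixel coordinates, clips it to the image bounds, and Gaussian-blurs the cropped region in place. A minimal usage sketch (the image path and detection values below are hypothetical):

    from PIL import Image
    from megadetector.visualization.visualization_utils import blur_detections

    # A hypothetical image and one MD-formatted detection; 'bbox' is
    # normalized [x, y, width, height] relative to the image size
    image = Image.open('camera_trap_image.jpg')
    detections = [{'category': '2', 'conf': 0.94, 'bbox': [0.41, 0.30, 0.18, 0.45]}]

    # Modifies the image in place; a larger radius blurs more aggressively
    blur_detections(image, detections, blur_radius=40)
    image.save('camera_trap_image_blurred.jpg')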
@@ -419,7 +455,9 @@
                                     textalign=TEXTALIGN_LEFT,
                                     vtextalign=VTEXTALIGN_TOP,
                                     label_font_size=DEFAULT_LABEL_FONT_SIZE,
-                                    custom_strings=None):
+                                    custom_strings=None,
+                                    box_sort_order='confidence',
+                                    verbose=False):
     """
     Renders bounding boxes (with labels and confidence values) on an image for all
     detections above a threshold.
@@ -497,6 +535,9 @@
         label_font_size (float, optional): font size for labels
         custom_strings: optional set of strings to append to detection labels, should have the
             same length as [detections]. Appended before any classification labels.
+        box_sort_order (str, optional): sorting scheme for detection boxes, can be None, "confidence", or
+            "reverse_confidence".
+        verbose (bool, optional): enable additional debug output
     """
 
     # Input validation
@@ -516,6 +557,15 @@
     # for color selection
     classes = []
 
+    if box_sort_order is not None:
+
+        if box_sort_order == 'confidence':
+            detections = sort_list_of_dicts_by_key(detections,k='conf',reverse=False)
+        elif box_sort_order == 'reverse_confidence':
+            detections = sort_list_of_dicts_by_key(detections,k='conf',reverse=True)
+        else:
+            raise ValueError('Unrecognized sorting scheme {}'.format(box_sort_order))
+
     for i_detection,detection in enumerate(detections):
 
         score = detection['conf']
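Sorting ascending by confidence means the most confident boxes are drawn last, so they land on top of any overlapping lower-confidence boxes. The helper itself isn't shown in this diff; a sketch of its presumed behavior, assuming sort_list_of_dicts_by_key is a thin wrapper around Python's sorted():

    def sort_list_of_dicts_by_key(d, k, reverse=False):
        # Presumed behavior of megadetector.utils.ct_utils.sort_list_of_dicts_by_key:
        # a stable sort of a list of dicts by a single key
        return sorted(d, key=lambda x: x[k], reverse=reverse)

    detections = [{'conf': 0.2}, {'conf': 0.9}, {'conf': 0.5}]
    print(sort_list_of_dicts_by_key(detections, k='conf'))
    # [{'conf': 0.2}, {'conf': 0.5}, {'conf': 0.9}]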
@@ -536,8 +586,9 @@
         # category or on the most confident classification category.
         clss = detection['category']
 
-        # {} is the default, which means "show labels with no mapping", so don't use "if label_map" here
-        # if label_map:
+        # This will be a list of strings that should be rendered above/below this box
+        displayed_label = []
+
         if label_map is not None:
             label = label_map[clss] if clss in label_map else clss
             if score is not None:
@@ -545,17 +596,14 @@
             else:
                 displayed_label = ['{}'.format(label)]
         else:
-            displayed_label = ''
+            displayed_label = ['']
 
         if custom_strings is not None:
             custom_string = custom_strings[i_detection]
             if custom_string is not None and len(custom_string) > 0:
-                if isinstance(displayed_label,str):
-                    displayed_label += ' ' + custom_string
-                else:
-                    assert len(displayed_label) == 1
-                    displayed_label[0] += ' ' + custom_string
-
+                assert len(displayed_label) == 1
+                displayed_label[0] += ' ' + custom_string
+
         if ('classifications' in detection) and len(detection['classifications']) > 0:
 
             classifications = detection['classifications']
@@ -597,6 +645,7 @@
 
         # ...if we have classification results
 
+        # display_strs is a list of labels for each box
         display_strs.append(displayed_label)
         classes.append(clss)
 
@@ -606,6 +655,9 @@
 
     display_boxes = np.array(display_boxes)
 
+    if verbose:
+        print('Rendering {} of {} detections'.format(len(display_boxes),len(detections)))
+
     draw_bounding_boxes_on_image(image, display_boxes, classes,
                                  display_strs=display_strs, thickness=thickness,
                                  expansion=expansion, colormap=colormap,
@@ -953,7 +1005,8 @@
                              textalign=TEXTALIGN_LEFT,
                              vtextalign=VTEXTALIGN_TOP,
                              text_rotation=None,
-                             label_font_size=DEFAULT_LABEL_FONT_SIZE):
+                             label_font_size=DEFAULT_LABEL_FONT_SIZE,
+                             tags=None):
     """
     Render bounding boxes (with class labels) on an image. This is a wrapper for
     draw_bounding_boxes_on_image, allowing the caller to operate on a resized image
@@ -981,6 +1034,8 @@
         vtextalign (int, optional): VTEXTALIGN_TOP or VTEXTALIGN_BOTTOM
         text_rotation (float, optional): rotation to apply to text
         label_font_size (float, optional): font size for labels
+        tags (list, optional): list of strings of length len(boxes) that should be appended
+            after each class name (e.g. to show scores)
     """
 
     display_boxes = []
@@ -993,8 +1048,11 @@
 
     img_width, img_height = image_size
 
-    for box, clss in zip(boxes, classes):
+    for i_box in range(0,len(boxes)):
 
+        box = boxes[i_box]
+        clss = classes[i_box]
+
         x_min_abs, y_min_abs, width_abs, height_abs = box[0:4]
         ymin = y_min_abs / img_height
 
@@ -1008,9 +1066,17 @@
         if label_map:
             clss = label_map[int(clss)]
 
+        display_str = str(clss)
+
+        # Do we have a tag to append to the class string?
+        if tags is not None and tags[i_box] is not None and len(tags[i_box]) > 0:
+            display_str += ' ' + tags[i_box]
+
         # need to be a string here because PIL needs to iterate through chars
-        display_strs.append([str(clss)])
+        display_strs.append([display_str])
 
+    # ...for each box
+
     display_boxes = np.array(display_boxes)
 
     draw_bounding_boxes_on_image(image,
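Together with the index-based loop above, the new tags parameter lets callers append an arbitrary suffix (such as a score) to each box label. A usage sketch, with a hypothetical image, boxes, and label map; per the docstring above, boxes are absolute [x, y, width, height] pixel coordinates:

    from megadetector.visualization import visualization_utils as vis_utils

    image = vis_utils.open_image('example_image.jpg')  # hypothetical path
    boxes = [[100, 80, 250, 300]]  # absolute pixel coordinates
    classes = [1]
    label_map = {1: 'animal'}
    tags = ['(97%)']  # one (possibly empty) string per box

    # Draws on [image] in place; original_size is the size the box
    # coordinates refer to (here, the un-resized image)
    vis_utils.render_db_bounding_boxes(boxes=boxes, classes=classes,
                                       image=image, original_size=image.size,
                                       label_map=label_map, tags=tags)
    image.save('example_image_annotated.jpg')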
@@ -1706,7 +1772,7 @@ if False:
         TEXTALIGN_LEFT,TEXTALIGN_RIGHT,VTEXTALIGN_BOTTOM,VTEXTALIGN_TOP, \
         DEFAULT_LABEL_FONT_SIZE
 
-    fn = os.path.expanduser('~\AppData\Local\Temp\md-tests\md-test-images\ena24_7904.jpg')
+    fn = os.path.expanduser('~/AppData/Local/Temp/md-tests/md-test-images/ena24_7904.jpg')
    output_fn = r'g:\temp\test.jpg'
 
     image = load_image(fn)
@@ -167,7 +167,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         options (DbVizOptions, optional): See DbVizOptions for details
 
     Returns:
-        tuple: A length-two tuple containing (the html filename) and (the loaded database). 
+        tuple: A length-two tuple containing (the html filename) and (the loaded database).
     """
 
     if options is None:
@@ -315,6 +315,7 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
 
         bboxes = []
         box_classes = []
+        box_score_strings = []
 
         # All the class labels we've seen for this image (with or without bboxes)
         image_categories = set()
@@ -363,12 +364,19 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
             image_categories.add(category_name)
 
             if 'bbox' in anno:
-                bbox = anno['bbox'] 
+                bbox = anno['bbox']
                 if isinstance(bbox,float):
                     assert math.isnan(bbox), "I shouldn't see a bbox that's neither a box nor NaN"
                     continue
                 bboxes.append(bbox)
                 box_classes.append(anno['category_id'])
+
+                box_score_string = ''
+                if options.confidence_field_name is not None and \
+                   options.confidence_field_name in anno:
+                    score = anno[options.confidence_field_name]
+                    box_score_string = '({}%)'.format(round(100 * score))
+                box_score_strings.append(box_score_string)
 
         # ...for each of this image's annotations
 
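The score string uses round-to-nearest-percent formatting; for a hypothetical confidence value of 0.953:

    score = 0.953
    box_score_string = '({}%)'.format(round(100 * score))
    print(box_score_string)  # prints: (95%)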
@@ -382,8 +390,12 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         for c in illegal_characters:
             file_name = file_name.replace(c,'~')
 
-        rendering_info.append({'bboxes':bboxes, 'box_classes':box_classes, 'img_path':img_path,
-                               'output_file_name':file_name})
+        rendering_info_this_image = {'bboxes':bboxes,
+                                     'box_classes':box_classes,
+                                     'tags':box_score_strings,
+                                     'img_path':img_path,
+                                     'output_file_name':file_name}
+        rendering_info.append(rendering_info_this_image)
 
         label_level_string = ''
         if len(annotation_level_for_image) > 0:
@@ -442,6 +454,9 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
         img_path = rendering_info['img_path']
         bboxes = rendering_info['bboxes']
         bbox_classes = rendering_info['box_classes']
+        bbox_tags = None
+        if 'tags' in rendering_info:
+            bbox_tags = rendering_info['tags']
         output_file_name = rendering_info['output_file_name']
         output_full_path = os.path.join(output_dir, 'rendered_images', output_file_name)
 
@@ -467,11 +482,14 @@ def visualize_db(db_path, output_dir, image_base_dir, options=None):
             print('Image {} failed to open, error: {}'.format(img_path, e))
             return False
 
-        vis_utils.render_db_bounding_boxes(boxes=bboxes, classes=bbox_classes,
-                                           image=image, original_size=original_size,
+        vis_utils.render_db_bounding_boxes(boxes=bboxes,
+                                           classes=bbox_classes,
+                                           image=image,
+                                           original_size=original_size,
                                            label_map=label_map,
                                            thickness=options.box_thickness,
-                                           expansion=options.box_expansion)
+                                           expansion=options.box_expansion,
+                                           tags=bbox_tags)
 
         image.save(output_full_path)
 
@@ -22,6 +22,7 @@ from tqdm import tqdm
 
 from megadetector.data_management.annotations.annotation_constants import detector_bbox_category_id_to_name
 from megadetector.visualization import visualization_utils as vis_utils
+from megadetector.visualization.visualization_utils import blur_detections
 from megadetector.utils.ct_utils import get_max_conf
 from megadetector.utils import write_html_image_list
 from megadetector.detection.run_detector import get_typical_confidence_threshold_from_results
@@ -38,10 +39,17 @@ DEFAULT_DETECTOR_LABEL_MAP = {
 #%% Support functions
 
 def _render_image(entry,
-                  detector_label_map,classification_label_map,
-                  confidence_threshold,classification_confidence_threshold,
-                  render_detections_only,preserve_path_structure,out_dir,images_dir,
-                  output_image_width):
+                  detector_label_map,
+                  classification_label_map,
+                  confidence_threshold,
+                  classification_confidence_threshold,
+                  render_detections_only,
+                  preserve_path_structure,
+                  out_dir,
+                  images_dir,
+                  output_image_width,
+                  box_sort_order=None,
+                  category_names_to_blur=None):
     """
     Internal function for rendering a single image.
     """
@@ -65,22 +73,43 @@ def _render_image(entry,
         rendering_result['skipped_image'] = True
         return rendering_result
 
-    image_obj = os.path.join(images_dir, image_id)
-    if not os.path.exists(image_obj):
+    image_filename_in_abs = os.path.join(images_dir, image_id)
+    if not os.path.exists(image_filename_in_abs):
         print(f'Image {image_id} not found in images_dir')
         rendering_result['missing_image'] = True
         return rendering_result
 
+    # Load the image
+    image = vis_utils.open_image(image_filename_in_abs)
+
+    # Find categories we're supposed to blur
+    category_ids_to_blur = []
+    if category_names_to_blur is not None:
+        if isinstance(category_names_to_blur,str):
+            category_names_to_blur = [category_names_to_blur]
+        for category_id in detector_label_map:
+            if detector_label_map[category_id] in category_names_to_blur:
+                category_ids_to_blur.append(category_id)
+
+    detections_to_blur = []
+    for d in entry['detections']:
+        if d['conf'] >= confidence_threshold and d['category'] in category_ids_to_blur:
+            detections_to_blur.append(d)
+    if len(detections_to_blur) > 0:
+        blur_detections(image,detections_to_blur)
+
+    # Resize if necessary
+    #
     # If output_image_width is -1 or None, this will just return the original image
-    image = vis_utils.resize_image(
-        vis_utils.open_image(image_obj), output_image_width)
+    image = vis_utils.resize_image(image, output_image_width)
 
     vis_utils.render_detection_bounding_boxes(
         entry['detections'], image,
         label_map=detector_label_map,
         classification_label_map=classification_label_map,
         confidence_threshold=confidence_threshold,
-        classification_confidence_threshold=classification_confidence_threshold)
+        classification_confidence_threshold=classification_confidence_threshold,
+        box_sort_order=box_sort_order)
 
     if not preserve_path_structure:
         for char in ['/', '\\', ':']:
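The blur logic resolves category names to IDs by walking the detector label map rather than assuming fixed IDs. Under the standard MegaDetector category map (an assumption here, though it matches the usual contents of DEFAULT_DETECTOR_LABEL_MAP), blurring people resolves like this:

    # Assumed standard MegaDetector category map
    detector_label_map = {'1': 'animal', '2': 'person', '3': 'vehicle'}
    category_names_to_blur = ['person']

    category_ids_to_blur = []
    for category_id in detector_label_map:
        if detector_label_map[category_id] in category_names_to_blur:
            category_ids_to_blur.append(category_id)

    print(category_ids_to_blur)  # ['2']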
@@ -113,7 +142,9 @@ def visualize_detector_output(detector_output_path,
                               preserve_path_structure=False,
                               parallelize_rendering=False,
                               parallelize_rendering_n_cores=10,
-                              parallelize_rendering_with_threads=True):
+                              parallelize_rendering_with_threads=True,
+                              box_sort_order=None,
+                              category_names_to_blur=None):
 
     """
     Draws bounding boxes on images given the output of a detector.
@@ -145,6 +176,10 @@
         parallelize_rendering_with_threads (bool, optional): determines whether we use
             threads (True) or processes (False) for parallelization (ignored if parallelize_rendering
             is False)
+        box_sort_order (str, optional): sorting scheme for detection boxes, can be None, "confidence", or
+            "reverse_confidence"
+        category_names_to_blur (list of str, optional): category names for which we should blur detections,
+            most commonly ['person']
 
     Returns:
         list: list of paths to annotated images
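A sketch of calling the updated function with the new options. The paths below are hypothetical, and parameter names other than those visible in this diff (images_dir and confidence_threshold appear in the hunks above but not in the visible signature) are assumptions:

    from megadetector.visualization.visualize_detector_output import visualize_detector_output

    annotated_images = visualize_detector_output(
        detector_output_path='md_results.json',  # standard MD batch output
        out_dir='annotated_images',
        images_dir='camera_trap_images',
        confidence_threshold=0.2,
        box_sort_order='confidence',
        category_names_to_blur=['person'])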
@@ -235,7 +270,9 @@
                                  preserve_path_structure=preserve_path_structure,
                                  out_dir=out_dir,
                                  images_dir=images_dir,
-                                 output_image_width=output_image_width),
+                                 output_image_width=output_image_width,
+                                 box_sort_order=box_sort_order,
+                                 category_names_to_blur=category_names_to_blur),
                 images), total=len(images)))
 
     else:
@@ -245,7 +282,8 @@
             rendering_result = _render_image(entry,detector_label_map,classification_label_map,
                                              confidence_threshold,classification_confidence_threshold,
                                              render_detections_only,preserve_path_structure,out_dir,
-                                             images_dir,output_image_width)
+                                             images_dir,output_image_width,box_sort_order,
+                                             category_names_to_blur=category_names_to_blur)
             rendering_results.append(rendering_result)
 
     # ...for each image
@@ -334,12 +372,20 @@ def main():
     parser.add_argument(
         '-pps', '--preserve_path_structure', action='store_true',
         help='Preserve relative image paths (otherwise flattens and assigns unique file names)')
+    parser.add_argument(
+        '--category_names_to_blur', default=None, type=str,
+        help='Comma-separated list of category names to blur (or a single category name, typically "person")')
 
     if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
 
     args = parser.parse_args()
+
+    category_names_to_blur = args.category_names_to_blur
+    if category_names_to_blur is not None:
+        category_names_to_blur = category_names_to_blur.split(',')
+
     visualize_detector_output(
         detector_output_path=args.detector_output_path,
         out_dir=args.out_dir,
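The new flag arrives as a single comma-separated string and is split into the list passed to visualize_detector_output, as shown in the hunk above; for example:

    category_names_to_blur = 'person,vehicle'.split(',')
    print(category_names_to_blur)  # ['person', 'vehicle']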
@@ -350,7 +396,8 @@
         random_seed=args.random_seed,
         render_detections_only=args.detections_only,
         preserve_path_structure=args.preserve_path_structure,
-        html_output_file=args.html_output_file)
+        html_output_file=args.html_output_file,
+        category_names_to_blur=category_names_to_blur)
 
     if args.html_output_file is not None and args.open_html_output_file:
         from megadetector.utils.path_utils import open_file
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: megadetector
-Version: 5.0.23
+Version: 5.0.24
 Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
 Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
 Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
@@ -29,26 +29,27 @@ Project-URL: Documentation, https://megadetector.readthedocs.io
 Project-URL: Bug Reports, https://github.com/agentmorris/MegaDetector/issues
 Project-URL: Source, https://github.com/agentmorris/MegaDetector
 Keywords: camera traps,conservation,wildlife,ai,megadetector
-Classifier: Development Status :: 3 - Alpha
-Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Requires-Python: <=3.13,>=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: mkl==2024.0; sys_platform != "darwin"
+Requires-Dist: numpy<2.0,>=1.26.4
 Requires-Dist: Pillow>=9.5
 Requires-Dist: tqdm>=4.64.0
 Requires-Dist: jsonpickle>=3.0.2
 Requires-Dist: humanfriendly>=10.0
-Requires-Dist: numpy<2.0,>=1.26.4
 Requires-Dist: matplotlib>=3.8.0
 Requires-Dist: opencv-python>=4.8.0
 Requires-Dist: requests>=2.31.0
 Requires-Dist: pyqtree>=1.0.0
-Requires-Dist: seaborn>=0.12.2
 Requires-Dist: scikit-learn>=1.3.1
 Requires-Dist: pandas>=2.1.1
-Requires-Dist: PyYAML>=6.0.1
+Requires-Dist: python-dateutil
+Requires-Dist: send2trash
+Requires-Dist: dill
 Requires-Dist: ultralytics-yolov5==0.1.1
+Requires-Dist: yolov9pip==0.0.4
 Requires-Dist: python-dateutil
 
 # MegaDetector
@@ -57,28 +58,13 @@ This package is a pip-installable version of the support/inference code for [Meg
 
 If you aren't looking for the Python package specifically, and you just want to learn more about what MegaDetector is all about, head over to the [MegaDetector repo](https://github.com/agentmorris/MegaDetector/?tab=readme-ov-file#megadetector).
 
+## Installation
 
-## Reasons you might not be looking for this package
-
-### If you are an ecologist...
-
-If you are an ecologist looking to use MegaDetector to help you get through your camera trap images, you probably don't want this package, or at least you probably don't want to start at this page. We recommend starting with our "[Getting started with MegaDetector](https://github.com/agentmorris/MegaDetector/blob/main/getting-started.md)" page, then digging in to the [MegaDetector User Guide](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md), which will walk you through the process of using MegaDetector.
-
-### If you are a computer-vision-y type...
-
-If you are a computer-vision-y person looking to run or fine-tune MegaDetector programmatically, you probably don't want this package. MegaDetector is just a fine-tuned version of [YOLOv5](https://github.com/ultralytics/yolov5), and the [ultralytics](https://github.com/ultralytics/ultralytics/) package (from the developers of YOLOv5) has a zillion bells and whistles for both inference and fine-tuning that this package doesn't.
-
-## Reasons you might want to use this package
-
-If you want to programmatically interact with the postprocessing tools from the MegaDetector repo, or programmatically run MegaDetector in a way that produces [Timelapse](https://saul.cpsc.ucalgary.ca/timelapse)-friendly output (i.e., output in the standard [MegaDetector output format](https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#megadetector-batch-output-format)), this package might be for you.
-
-## If I haven't talked you out of using this package...
-
-To install:
+Install with:
 
 `pip install megadetector`
 
-MegaDetector model weights aren't downloaded at pip-install time, but they will be (optionally) automatically downloaded the first time you run the model.
+MegaDetector model weights aren't downloaded at the time you install the package, but they will be (optionally) automatically downloaded the first time you run the model.
 
 ## Package reference
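To make the rewritten installation section concrete, a minimal sketch of running the model programmatically after installation; the file names below are hypothetical, and exact signatures should be checked against the package reference:

    from megadetector.detection.run_detector_batch import \
        load_and_run_detector_batch, write_results_to_file

    # 'MDV5A' names a model release; weights are downloaded automatically
    # on first use, as described above
    results = load_and_run_detector_batch('MDV5A', ['image1.jpg', 'image2.jpg'])
    write_results_to_file(results, 'md_results.json', detector_file='MDV5A')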