megadetector 5.0.18__py3-none-any.whl → 5.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic. Click here for more details.

Files changed (26) hide show
  1. megadetector/data_management/cct_to_md.py +1 -1
  2. megadetector/data_management/importers/bellevue_to_json.py +0 -1
  3. megadetector/data_management/importers/osu-small-animals-to-json.py +364 -0
  4. megadetector/data_management/lila/generate_lila_per_image_labels.py +1 -1
  5. megadetector/data_management/lila/get_lila_annotation_counts.py +2 -0
  6. megadetector/data_management/lila/lila_common.py +28 -12
  7. megadetector/data_management/lila/test_lila_metadata_urls.py +17 -8
  8. megadetector/data_management/read_exif.py +73 -0
  9. megadetector/data_management/yolo_output_to_md_output.py +2 -2
  10. megadetector/detection/process_video.py +90 -19
  11. megadetector/detection/run_detector.py +36 -13
  12. megadetector/detection/run_detector_batch.py +105 -16
  13. megadetector/detection/run_inference_with_yolov5_val.py +20 -23
  14. megadetector/detection/video_utils.py +63 -38
  15. megadetector/postprocessing/validate_batch_results.py +186 -0
  16. megadetector/taxonomy_mapping/map_new_lila_datasets.py +8 -3
  17. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +3 -2
  18. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +3 -1
  19. megadetector/utils/ct_utils.py +20 -0
  20. megadetector/utils/md_tests.py +57 -7
  21. {megadetector-5.0.18.dist-info → megadetector-5.0.20.dist-info}/METADATA +2 -2
  22. {megadetector-5.0.18.dist-info → megadetector-5.0.20.dist-info}/RECORD +25 -24
  23. {megadetector-5.0.18.dist-info → megadetector-5.0.20.dist-info}/WHEEL +1 -1
  24. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +0 -677
  25. {megadetector-5.0.18.dist-info → megadetector-5.0.20.dist-info}/LICENSE +0 -0
  26. {megadetector-5.0.18.dist-info → megadetector-5.0.20.dist-info}/top_level.txt +0 -0
@@ -788,38 +788,35 @@ def run_inference_with_yolo_val(options):
788
788
  yolo_read_failures = []
789
789
 
790
790
  for line in yolo_console_output:
791
- # Lines look like:
791
+
792
+ #
793
+ # Lines indicating read failures look like:
792
794
  #
793
795
  # For ultralytics val:
794
796
  #
795
- # val: WARNING ⚠️ /a/b/c/d.jpg: ignoring corrupt image/label: [Errno 13] Permission denied: '/a/b/c/d.jpg'
796
797
  # line = "val: WARNING ⚠️ /a/b/c/d.jpg: ignoring corrupt image/label: [Errno 13] Permission denied: '/a/b/c/d.jpg'"
797
798
  #
798
799
  # For yolov5 val.py:
799
800
  #
800
- # test: WARNING: a/b/c/d.jpg: ignoring corrupt image/label: cannot identify image file '/a/b/c/d.jpg'
801
801
  # line = "test: WARNING: a/b/c/d.jpg: ignoring corrupt image/label: cannot identify image file '/a/b/c/d.jpg'"
802
- if 'cannot identify image file' in line:
803
- tokens = line.split('cannot identify image file')
804
- image_name = tokens[-1].strip()
805
- assert image_name[0] == "'" and image_name [-1] == "'"
806
- image_name = image_name[1:-1]
807
- yolo_read_failures.append(image_name)
808
- elif 'ignoring corrupt image/label' in line:
809
- assert 'WARNING' in line
810
- if '⚠️' in line:
811
- assert line.startswith('val'), \
812
- 'Unrecognized line in YOLO output: {}'.format(line)
813
- tokens = line.split('ignoring corrupt image/label')
814
- image_name = tokens[0].split('⚠️')[-1].strip()
815
- else:
816
- assert line.startswith('test'), \
817
- 'Unrecognized line in YOLO output: {}'.format(line)
818
- tokens = line.split('ignoring corrupt image/label')
819
- image_name = tokens[0].split('WARNING:')[-1].strip()
820
- assert image_name.endswith(':')
821
- image_name = image_name[0:-1]
802
+ #
803
+ # In both cases, when we are using symlinks, the first filename is the symlink name, the
804
+ # second filename is the target, e.g.:
805
+ #
806
+ # line = "test: WARNING: /tmp/md_to_yolo/md_to_yolo_xyz/symlinks/xyz/0000000004.jpg: ignoring corrupt image/label: cannot identify image file '/tmp/md-tests/md-test-images/corrupt-images/real-file.jpg'"
807
+ #
808
+ line = line.replace('⚠️',':')
809
+ if 'ignoring corrupt image/label' in line:
810
+
811
+ tokens = line.split('ignoring corrupt image/label')
812
+ assert len(tokens) == 2
813
+ tokens = tokens[0].split(':',maxsplit=3)
814
+ assert len(tokens) == 4
815
+ assert 'warning' in tokens[1].lower()
816
+ image_name = tokens[2].strip()
822
817
  yolo_read_failures.append(image_name)
818
+
819
+ # ...for each line in the console output
823
820
 
824
821
  # image_file = yolo_read_failures[0]
825
822
  for image_file in yolo_read_failures:
@@ -228,9 +228,10 @@ def run_callback_on_frames(input_video_file,
228
228
  input_video_file (str): video file to process
229
229
  frame_callback (function): callback to run on frames, should take an np.array and a string and
230
230
  return a single value. callback should expect PIL-formatted (RGB) images.
231
- every_n_frames (int, optional): sample every Nth frame starting from the first frame;
232
- if this is None or 1, every frame is processed. Mutually exclusive with
233
- frames_to_process.
231
+ every_n_frames (float, optional): sample every Nth frame starting from the first frame;
232
+ if this is None or 1, every frame is processed. If this is a negative value, that's
233
+ interpreted as a sampling rate in seconds, which is rounded to the nearest frame sampling
234
+ rate. Mutually exclusive with frames_to_process.
234
235
  verbose (bool, optional): enable additional debug console output
235
236
  frames_to_process (list of int, optional): process this specific set of frames;
236
237
  mutually exclusive with every_n_frames. If all values are beyond the length
@@ -263,6 +264,13 @@ def run_callback_on_frames(input_video_file,
263
264
  frame_filenames = []
264
265
  results = []
265
266
 
267
+ if every_n_frames is not None and every_n_frames < 0:
268
+ every_n_seconds = abs(every_n_frames)
269
+ every_n_frames = int(every_n_seconds * frame_rate)
270
+ if verbose:
271
+ print('Interpreting a time sampling rate of {} hz as a frame interval of {}'.format(
272
+ every_n_seconds,every_n_frames))
273
+
266
274
  # frame_number = 0
267
275
  for frame_number in range(0,n_frames):
268
276
 
@@ -776,12 +784,18 @@ class FrameToVideoOptions:
776
784
  #: for the whole video, i.e. "1" means "use the confidence value from the highest-confidence frame"
777
785
  self.nth_highest_confidence = 1
778
786
 
787
+ #: Should we include just a single representative frame result for each video (default), or
788
+ #: every frame that was processed?
789
+ self.include_all_processed_frames = False
790
+
779
791
  #: What to do if a file referred to in a .json results file appears not to be a
780
792
  #: video; can be 'error' or 'skip_with_warning'
781
793
  self.non_video_behavior = 'error'
782
794
 
783
795
 
784
- def frame_results_to_video_results(input_file,output_file,options=None,
796
+ def frame_results_to_video_results(input_file,
797
+ output_file,
798
+ options=None,
785
799
  video_filename_to_frame_rate=None):
786
800
  """
787
801
  Given an MD results file produced at the *frame* level, corresponding to a directory
@@ -801,7 +815,7 @@ def frame_results_to_video_results(input_file,output_file,options=None,
801
815
 
802
816
  if options is None:
803
817
  options = FrameToVideoOptions()
804
-
818
+
805
819
  # Load results
806
820
  with open(input_file,'r') as f:
807
821
  input_data = json.load(f)
@@ -854,47 +868,58 @@ def frame_results_to_video_results(input_file,output_file,options=None,
854
868
  # video_name = list(video_to_frame_info.keys())[0]
855
869
  for video_name in tqdm(video_to_frame_info):
856
870
 
857
- frames = video_to_frame_info[video_name]
858
-
859
- all_detections_this_video = []
860
-
861
- # frame = frames[0]
862
- for frame in frames:
863
- if ('detections' in frame) and (frame['detections'] is not None):
864
- all_detections_this_video.extend(frame['detections'])
865
-
866
- # At most one detection for each category for the whole video
867
- canonical_detections = []
868
-
869
- # category_id = list(detection_categories.keys())[0]
870
- for category_id in detection_categories:
871
-
872
- category_detections = [det for det in all_detections_this_video if \
873
- det['category'] == category_id]
874
-
875
- # Find the nth-highest-confidence video to choose a confidence value
876
- if len(category_detections) >= options.nth_highest_confidence:
877
-
878
- category_detections_by_confidence = sorted(category_detections,
879
- key = lambda i: i['conf'],reverse=True)
880
- canonical_detection = category_detections_by_confidence[options.nth_highest_confidence-1]
881
- canonical_detections.append(canonical_detection)
882
-
883
871
  # Prepare the output representation for this video
884
872
  im_out = {}
885
873
  im_out['file'] = video_name
886
- im_out['detections'] = canonical_detections
887
874
 
888
875
  if (video_filename_to_frame_rate is not None) and \
889
876
  (video_name in video_filename_to_frame_rate):
890
877
  im_out['frame_rate'] = video_filename_to_frame_rate[video_name]
891
878
 
892
- # 'max_detection_conf' is no longer included in output files by default
893
- if False:
894
- im_out['max_detection_conf'] = 0
895
- if len(canonical_detections) > 0:
896
- confidences = [d['conf'] for d in canonical_detections]
897
- im_out['max_detection_conf'] = max(confidences)
879
+ # Find all detections for this video
880
+ all_detections_this_video = []
881
+
882
+ frames = video_to_frame_info[video_name]
883
+
884
+ # frame = frames[0]
885
+ for frame in frames:
886
+ if ('detections' in frame) and (frame['detections'] is not None):
887
+ all_detections_this_video.extend(frame['detections'])
888
+
889
+ # Should we keep detections for all frames?
890
+ if (options.include_all_processed_frames):
891
+
892
+ im_out['detections'] = all_detections_this_video
893
+
894
+ # ...or should we keep just a canonical detection for each category?
895
+ else:
896
+
897
+ canonical_detections = []
898
+
899
+ # category_id = list(detection_categories.keys())[0]
900
+ for category_id in detection_categories:
901
+
902
+ category_detections = [det for det in all_detections_this_video if \
903
+ det['category'] == category_id]
904
+
905
+ # Find the nth-highest-confidence video to choose a confidence value
906
+ if len(category_detections) >= options.nth_highest_confidence:
907
+
908
+ category_detections_by_confidence = sorted(category_detections,
909
+ key = lambda i: i['conf'],reverse=True)
910
+ canonical_detection = category_detections_by_confidence[options.nth_highest_confidence-1]
911
+ canonical_detections.append(canonical_detection)
912
+
913
+ im_out['detections'] = canonical_detections
914
+
915
+ # 'max_detection_conf' is no longer included in output files by default
916
+ if False:
917
+ im_out['max_detection_conf'] = 0
918
+ if len(canonical_detections) > 0:
919
+ confidences = [d['conf'] for d in canonical_detections]
920
+ im_out['max_detection_conf'] = max(confidences)
921
+
922
+ # ...if we're keeping output for all frames / canonical frames
898
923
 
899
924
  output_images.append(im_out)
900
925
 
@@ -0,0 +1,186 @@
1
+ """
2
+
3
+ validate_batch_results.py
4
+
5
+ Given a .json file containing MD results, validate that it's compliant with the format spec:
6
+
7
+ https://lila.science/megadetector-output-format
8
+
9
+ """
10
+
11
+ #%% Constants and imports
12
+
13
+ import os
14
+ import sys
15
+ import json
16
+ import argparse
17
+
18
+ from megadetector.detection.video_utils import is_video_file
19
+ from megadetector.utils.ct_utils import args_to_object
20
+
21
+ typical_info_fields = ['detector','detection_completion_time',
22
+ 'classifier','classification_completion_time',
23
+ 'detection_metadata','classifier_metadata']
24
+ required_keys = ['info','images','detection_categories']
25
+ typical_keys = ['classification_categories']
26
+
27
+
28
+ #%% Classes
29
+
30
+ class ValidateBatchResultsOptions:
31
+ """
32
+ Options controlling the behavior of validate_batch_results()
33
+ """
34
+
35
+ def __init__(self):
36
+
37
+ #: Should we verify that images exist? If this is True, and the .json
38
+ #: file contains relative paths, relative_path_base needs to be specified.
39
+ self.check_image_existence = False
40
+
41
+ #: If check_image_existence is True, where do the images live?
42
+ #:
43
+ #: If None, assumes absolute paths.
44
+ self.relative_path_base = None
45
+
46
+ # ...class ValidateBatchResultsOptions
47
+
48
+
49
+
50
+ #%% Main function
51
+
52
+ def validate_batch_results(json_filename,options=None):
53
+ """
54
+ Verify that [json_filename] is a valid MD output file. Currently errors on invalid files.
55
+
56
+ Args:
57
+ json_filename (str): the filename to validate
58
+ options (ValidateBatchResultsOptions, optional): all the parameters used to control this
59
+ process, see ValidateBatchResultsOptions for details
60
+
61
+ Returns:
62
+ bool: reserved; currently always errors or returns True.
63
+ """
64
+
65
+ if options is None:
66
+ options = ValidateBatchResultsOptions()
67
+
68
+ with open(json_filename,'r') as f:
69
+ d = json.load(f)
70
+
71
+ ## Info validation
72
+
73
+ assert 'info' in d
74
+ info = d['info']
75
+
76
+ assert isinstance(info,dict)
77
+ assert 'format_version' in info
78
+ format_version = float(info['format_version'])
79
+ assert format_version >= 1.3, 'This validator can only be used with format version 1.3 or later'
80
+
81
+ print('Validating a .json results file with format version {}'.format(format_version))
82
+
83
+ ## Category validation
84
+
85
+ assert 'detection_categories' in d
86
+ for k in d['detection_categories'].keys():
87
+ # Categories should be string-formatted ints
88
+ assert isinstance(k,str)
89
+ _ = int(k)
90
+ assert isinstance(d['detection_categories'][k],str)
91
+
92
+ if 'classification_categories' in d:
93
+ for k in d['classification_categories'].keys():
94
+ # Categories should be string-formatted ints
95
+ assert isinstance(k,str)
96
+ _ = int(k)
97
+ assert isinstance(d['classification_categories'][k],str)
98
+
99
+
100
+ ## Image validation
101
+
102
+ assert 'images' in d
103
+ assert isinstance(d['images'],list)
104
+
105
+ # im = d['images'][0]
106
+ for im in d['images']:
107
+
108
+ assert isinstance(im,dict)
109
+ assert 'file' in im
110
+
111
+ file = im['file']
112
+
113
+ if options.check_image_existence:
114
+ if options.relative_path_base is None:
115
+ file_abs = file
116
+ else:
117
+ file_abs = os.path.join(options.relative_path_base,file)
118
+ assert os.path.isfile(file_abs), 'Cannot find file {}'.format(file_abs)
119
+
120
+ if 'detections' not in im or im['detections'] is None:
121
+ assert 'failure' in im and isinstance(im['failure'],str)
122
+ else:
123
+ assert isinstance(im['detections'],list)
124
+
125
+ if is_video_file(im['file']) and (format_version >= 1.4):
126
+ assert 'frame_rate' in im
127
+ if 'detections' in im and im['detections'] is not None:
128
+ for det in im['detections']:
129
+ assert 'frame_number' in det
130
+
131
+ # ...for each image
132
+
133
+
134
+ ## Checking on other keys
135
+
136
+ for k in d.keys():
137
+ if k not in typical_keys and k not in required_keys:
138
+ print('Warning: non-standard key {} present at file level'.format(k))
139
+
140
+ # ...def validate_batch_results(...)
141
+
142
+
143
+ #%% Interactive driver(s)
144
+
145
+ if False:
146
+
147
+ #%%
148
+
149
+ options = ValidateBatchResultsOptions()
150
+ # json_filename = r'g:\temp\format.json'
151
+ # json_filename = r'g:\temp\test-videos\video_results.json'
152
+ json_filename = r'g:\temp\test-videos\image_results.json'
153
+ options.check_image_existence = True
154
+ options.relative_path_base = r'g:\temp\test-videos'
155
+ validate_batch_results(json_filename,options)
156
+
157
+
158
+ #%% Command-line driver
159
+
160
+ def main():
161
+
162
+ options = ValidateBatchResultsOptions()
163
+
164
+ parser = argparse.ArgumentParser()
165
+ parser.add_argument(
166
+ 'json_filename',
167
+ help='path to .json file containing MegaDetector results')
168
+ parser.add_argument(
169
+ '--check_image_existence', action='store_true',
170
+ help='check that all images referred to in the results file exist')
171
+ parser.add_argument(
172
+ '--relative_path_base', default=None,
173
+ help='if --check_image_existence is specified and paths are relative, use this as the base folder')
174
+ if len(sys.argv[1:]) == 0:
175
+ parser.print_help()
176
+ parser.exit()
177
+
178
+ args = parser.parse_args()
179
+
180
+ args_to_object(args, options)
181
+
182
+ validate_batch_results(args.json_filename,options)
183
+
184
+
185
+ if __name__ == '__main__':
186
+ main()
@@ -15,10 +15,10 @@ import json
15
15
  # Created by get_lila_category_list.py
16
16
  input_lila_category_list_file = os.path.expanduser('~/lila/lila_categories_list/lila_dataset_to_categories.json')
17
17
 
18
- output_file = os.path.expanduser('~/lila/lila_additions_2024.07.16.csv')
18
+ output_file = os.path.expanduser('~/lila/lila_additions_2024.10.05.csv')
19
19
 
20
20
  datasets_to_map = [
21
- 'Desert Lion Conservation Camera Traps'
21
+ 'Ohio Small Animals'
22
22
  ]
23
23
 
24
24
 
@@ -127,13 +127,18 @@ output_df.to_csv(output_file, index=None, header=True)
127
127
  #%% Manual lookup
128
128
 
129
129
  if False:
130
+
131
+ #%%
132
+
133
+ from megadetector.utils.path_utils import open_file
134
+ open_file(output_file)
130
135
 
131
136
  #%%
132
137
 
133
138
  # q = 'white-throated monkey'
134
139
  # q = 'cingulata'
135
140
  # q = 'notamacropus'
136
- q = 'aves'
141
+ q = 'thamnophis saurita saurita'
137
142
  taxonomy_preference = 'inat'
138
143
  m = get_preferred_taxonomic_match(q,taxonomy_preference)
139
144
  # print(m.scientific_name); import clipboard; clipboard.copy(m.scientific_name)
@@ -68,7 +68,8 @@ if False:
68
68
 
69
69
  #%% Generate the final output file
70
70
 
71
- assert not os.path.isfile(release_taxonomy_file)
71
+ assert not os.path.isfile(release_taxonomy_file), \
72
+ 'File {} exists, delete it manually before proceeding'.format(release_taxonomy_file)
72
73
 
73
74
  known_levels = ['stateofmatter', #noqa
74
75
  'kingdom',
@@ -88,7 +89,7 @@ if False:
88
89
  'genus',
89
90
  'species','subspecies','variety']
90
91
 
91
- levels_to_exclude = ['stateofmatter','zoosection','parvorder']
92
+ levels_to_exclude = ['stateofmatter','zoosection','parvorder','complex']
92
93
 
93
94
  for s in levels_to_exclude:
94
95
  assert s not in levels_to_include
@@ -16,7 +16,7 @@ import os
16
16
  import pandas as pd
17
17
 
18
18
  # lila_taxonomy_file = r"c:\git\agentmorrisprivate\lila-taxonomy\lila-taxonomy-mapping.csv"
19
- lila_taxonomy_file = os.path.expanduser('~/lila/lila_additions_2024.07.16.csv')
19
+ lila_taxonomy_file = os.path.expanduser('~/lila/lila_additions_2024.10.05.csv')
20
20
 
21
21
  preview_base = os.path.expanduser('~/lila/lila_taxonomy_preview')
22
22
  os.makedirs(preview_base,exist_ok=True)
@@ -383,6 +383,8 @@ for i_row,row in df.iterrows():
383
383
 
384
384
  #%% Download sample images for all scientific names
385
385
 
386
+ # Takes ~1 minute per 10 rows
387
+
386
388
  remapped_queries = {'papio':'papio+baboon',
387
389
  'damaliscus lunatus jimela':'damaliscus lunatus',
388
390
  'mazama':'genus+mazama',
@@ -105,6 +105,26 @@ def args_to_object(args, obj):
105
105
  return obj
106
106
 
107
107
 
108
+ def dict_to_object(d, obj):
109
+ """
110
+ Copies all fields from a dict to an object. Skips fields starting with _.
111
+ Does not check existence in the target object.
112
+
113
+ Args:
114
+ d (dict): the dict to convert to an object
115
+ obj (object): object whose attributes will be updated
116
+
117
+ Returns:
118
+ object: the modified object (modified in place, but also returned)
119
+ """
120
+
121
+ for k in d.keys():
122
+ if not k.startswith('_'):
123
+ setattr(obj, k, d[k])
124
+
125
+ return obj
126
+
127
+
108
128
  def pretty_print_object(obj, b_print=True):
109
129
  """
110
130
  Converts an arbitrary object to .json, optionally printing the .json representation.
@@ -177,7 +177,8 @@ def get_expected_results_filename(gpu_is_available,
177
177
 
178
178
  def download_test_data(options=None):
179
179
  """
180
- Downloads the test zipfile if necessary, unzips if necessary.
180
+ Downloads the test zipfile if necessary, unzips if necessary. Initializes
181
+ temporary fields in [options], particularly [options.scratch_dir].
181
182
 
182
183
  Args:
183
184
  options (MDTestOptions, optional): see MDTestOptions for details
@@ -683,10 +684,16 @@ def run_python_tests(options):
683
684
 
684
685
  ## Verify results
685
686
 
687
+ # Verify format correctness
688
+ from megadetector.postprocessing.validate_batch_results import validate_batch_results
689
+ validate_batch_results(inference_output_file)
690
+
691
+ # Verify value correctness
686
692
  expected_results_file = get_expected_results_filename(is_gpu_available(verbose=False),
687
693
  options=options)
688
694
  compare_results(inference_output_file,expected_results_file,options)
689
-
695
+
696
+
690
697
  # Make note of this filename, we will use it again later
691
698
  inference_output_file_standard_inference = inference_output_file
692
699
 
@@ -1341,11 +1348,13 @@ if False:
1341
1348
  options.force_data_download = False
1342
1349
  options.force_data_unzip = False
1343
1350
  options.warning_mode = False
1344
- options.max_coord_error = 0.001
1345
- options.max_conf_error = 0.005
1346
- options.cli_working_dir = r'c:\git\MegaDetector'
1347
- options.yolo_working_dir = r'c:\git\yolov5-md'
1348
-
1351
+ options.max_coord_error = 0.01 # 0.001
1352
+ options.max_conf_error = 0.01 # 0.005
1353
+ # options.cli_working_dir = r'c:\git\MegaDetector'
1354
+ # options.yolo_working_dir = r'c:\git\yolov5-md'
1355
+ options.cli_working_dir = os.path.expanduser('~')
1356
+ options.yolo_working_dir = '/mnt/c/git/yolov5-md'
1357
+ options = download_test_data(options)
1349
1358
 
1350
1359
  #%%
1351
1360
 
@@ -1357,6 +1366,47 @@ if False:
1357
1366
 
1358
1367
  run_tests(options)
1359
1368
 
1369
+ #%%
1370
+
1371
+ yolo_inference_options_dict = {'input_folder': '/tmp/md-tests/md-test-images',
1372
+ 'image_filename_list': None,
1373
+ 'model_filename': 'MDV5A',
1374
+ 'output_file': '/tmp/md-tests/folder_inference_output_yolo_val.json',
1375
+ 'yolo_working_folder': '/mnt/c/git/yolov5-md',
1376
+ 'model_type': 'yolov5',
1377
+ 'image_size': None,
1378
+ 'conf_thres': 0.005,
1379
+ 'batch_size': 1,
1380
+ 'device_string': '0',
1381
+ 'augment': False,
1382
+ 'half_precision_enabled': None,
1383
+ 'symlink_folder': None,
1384
+ 'use_symlinks': True,
1385
+ 'unique_id_strategy': 'links',
1386
+ 'yolo_results_folder': None,
1387
+ 'remove_symlink_folder': True,
1388
+ 'remove_yolo_results_folder': True,
1389
+ 'yolo_category_id_to_name': {0: 'animal', 1: 'person', 2: 'vehicle'},
1390
+ 'overwrite_handling': 'overwrite',
1391
+ 'preview_yolo_command_only': False,
1392
+ 'treat_copy_failures_as_warnings': False,
1393
+ 'save_yolo_debug_output': False,
1394
+ 'recursive': True,
1395
+ 'checkpoint_frequency': None}
1396
+
1397
+ from megadetector.utils.ct_utils import dict_to_object
1398
+ from megadetector.detection.run_inference_with_yolov5_val import \
1399
+ YoloInferenceOptions, run_inference_with_yolo_val
1400
+
1401
+ yolo_inference_options = YoloInferenceOptions()
1402
+ yolo_inference_options = dict_to_object(yolo_inference_options_dict, yolo_inference_options)
1403
+
1404
+ os.makedirs(options.scratch_dir,exist_ok=True)
1405
+
1406
+ inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
1407
+
1408
+ run_inference_with_yolo_val(yolo_inference_options)
1409
+
1360
1410
 
1361
1411
  #%% Command-line driver
1362
1412
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: megadetector
3
- Version: 5.0.18
3
+ Version: 5.0.20
4
4
  Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
5
5
  Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
6
6
  Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
@@ -39,7 +39,7 @@ Requires-Dist: Pillow >=9.5
39
39
  Requires-Dist: tqdm >=4.64.0
40
40
  Requires-Dist: jsonpickle >=3.0.2
41
41
  Requires-Dist: humanfriendly >=10.0
42
- Requires-Dist: numpy >=1.26.0
42
+ Requires-Dist: numpy <1.24,>=1.22
43
43
  Requires-Dist: matplotlib >=3.8.0
44
44
  Requires-Dist: opencv-python >=4.8.0
45
45
  Requires-Dist: requests >=2.31.0