megadetector-5.0.19-py3-none-any.whl → megadetector-5.0.21-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (34):
  1. megadetector/data_management/importers/bellevue_to_json.py +0 -1
  2. megadetector/data_management/importers/osu-small-animals-to-json.py +364 -0
  3. megadetector/data_management/lila/generate_lila_per_image_labels.py +1 -1
  4. megadetector/data_management/lila/get_lila_annotation_counts.py +2 -0
  5. megadetector/data_management/lila/lila_common.py +28 -12
  6. megadetector/data_management/lila/test_lila_metadata_urls.py +17 -8
  7. megadetector/data_management/read_exif.py +73 -0
  8. megadetector/data_management/yolo_output_to_md_output.py +18 -5
  9. megadetector/detection/process_video.py +84 -16
  10. megadetector/detection/run_detector.py +36 -13
  11. megadetector/detection/run_detector_batch.py +104 -15
  12. megadetector/detection/run_inference_with_yolov5_val.py +20 -23
  13. megadetector/detection/video_utils.py +79 -44
  14. megadetector/postprocessing/combine_api_outputs.py +1 -1
  15. megadetector/postprocessing/detector_calibration.py +367 -0
  16. megadetector/postprocessing/md_to_coco.py +2 -1
  17. megadetector/postprocessing/postprocess_batch_results.py +32 -20
  18. megadetector/postprocessing/validate_batch_results.py +118 -58
  19. megadetector/taxonomy_mapping/map_new_lila_datasets.py +8 -3
  20. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +3 -2
  21. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +3 -1
  22. megadetector/utils/ct_utils.py +20 -0
  23. megadetector/utils/md_tests.py +63 -17
  24. megadetector/utils/path_utils.py +139 -30
  25. megadetector/utils/write_html_image_list.py +16 -5
  26. megadetector/visualization/visualization_utils.py +126 -23
  27. megadetector/visualization/visualize_db.py +104 -63
  28. {megadetector-5.0.19.dist-info → megadetector-5.0.21.dist-info}/METADATA +2 -2
  29. {megadetector-5.0.19.dist-info → megadetector-5.0.21.dist-info}/RECORD +32 -32
  30. {megadetector-5.0.19.dist-info → megadetector-5.0.21.dist-info}/WHEEL +1 -1
  31. megadetector/data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  32. megadetector/data_management/importers/snapshot_safari_importer_reprise.py +0 -677
  33. {megadetector-5.0.19.dist-info → megadetector-5.0.21.dist-info}/LICENSE +0 -0
  34. {megadetector-5.0.19.dist-info → megadetector-5.0.21.dist-info}/top_level.txt +0 -0
megadetector/postprocessing/postprocess_batch_results.py

@@ -92,16 +92,18 @@ class PostProcessingOptions:
         #: Optional .json file containing ground truth information
         self.ground_truth_json_file = ''
 
-        #: Classes we'll treat as negative
+        #: List of classes we'll treat as negative (defaults to "empty", typically includes
+        #: classes like "blank", "misfire", etc.).
         #:
         #: Include the token "#NO_LABELS#" to indicate that an image with no annotations
         #: should be considered empty.
         self.negative_classes = DEFAULT_NEGATIVE_CLASSES
 
-        #: Classes we'll treat as neither positive nor negative
+        #: List of classes we'll treat as neither positive nor negative (defaults to
+        #: "unknown", typically includes classes like "unidentifiable").
         self.unlabeled_classes = DEFAULT_UNKNOWN_CLASSES
 
-        #: A list of output sets that we should count, but not render images for.
+        #: List of output sets that we should count, but not render images for.
         #:
         #: Typically used to preview sets with lots of empties, where you don't want to
         #: subset but also don't want to render 100,000 empty images.
@@ -198,11 +200,16 @@ class PostProcessingOptions:
 
         #: When classification results are present, should we sort alphabetically by class name (False)
         #: or in descending order by frequency (True)?
-        self.sort_classification_results_by_count = False
+        self.sort_classification_results_by_count = False
 
         #: Should we split individual pages up into smaller pages if there are more than
         #: N images?
         self.max_figures_per_html_file = None
+
+        #: Footer text for the index page
+        # self.footer_text = '<br/><p style="font-size:80%;">Preview page created with the <a href="{}">MegaDetector Python package</a>.</p>'.\
+        #     format('https://megadetector.readthedocs.io')
+        self.footer_text = ''
 
     # ...__init__()
 
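The new footer_text option appends caller-supplied HTML just before the closing </body> tag of each generated index page (see the two index_page hunks below). A minimal usage sketch; PostProcessingOptions, process_batch_results, and footer_text all appear in this diff, but the input/output attribute names and paths here are hypothetical placeholders:

    from megadetector.postprocessing.postprocess_batch_results import \
        PostProcessingOptions, process_batch_results

    options = PostProcessingOptions()
    options.footer_text = '<p style="font-size:80%;">Generated by my survey workflow.</p>'

    # Hypothetical attribute names and paths for the results file and output folder;
    # check the PostProcessingOptions docstrings for the real field names
    options.api_output_file = '/path/to/md_results.json'
    options.output_dir = '/path/to/preview'

    process_batch_results(options)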
@@ -590,6 +597,7 @@ def _prepare_html_subpages(images_html, output_dir, options=None):
     html_image_list_options = {}
     html_image_list_options['maxFiguresPerHtmlFile'] = options.max_figures_per_html_file
     html_image_list_options['headerHtml'] = '<h1>{}</h1>'.format(res.upper())
+    html_image_list_options['pageTitle'] = '{}'.format(res.lower())
 
     # Don't write empty pages
     if len(array) == 0:
@@ -762,7 +770,7 @@ def _render_image_no_gt(file_info,detection_categories_to_results_name,
     if len(rendered_image_html_info) > 0:
 
         image_result = [[res, rendered_image_html_info]]
-
+        classes_rendered_this_image = set()
         max_conf = 0
 
         for det in detections:
@@ -782,11 +790,14 @@ def _render_image_no_gt(file_info,detection_categories_to_results_name,
                 # confidence threshold
                 if (options.classification_confidence_threshold < 0) or \
                    (top1_class_score >= options.classification_confidence_threshold):
-                    image_result.append(['class_{}'.format(top1_class_name),
-                                         rendered_image_html_info])
+                    class_string = 'class_{}'.format(top1_class_name)
                 else:
-                    image_result.append(['class_unreliable',
+                    class_string = 'class_unreliable'
+
+                if class_string not in classes_rendered_this_image:
+                    image_result.append([class_string,
                                          rendered_image_html_info])
+                    classes_rendered_this_image.add(class_string)
 
             # ...if this detection has classification info
 
@@ -1083,7 +1094,8 @@ def process_batch_results(options):
 
     output_html_file = ''
 
-    style_header = """<head>
+    style_header = """<head>
+        <title>Detection results preview</title>
         <style type="text/css">
         a { text-decoration: none; }
         body { font-family: segoe ui, calibri, "trebuchet ms", verdana, arial, sans-serif; }
@@ -1424,7 +1436,7 @@ def process_batch_results(options):
     else:
         confidence_threshold_string = str(options.confidence_threshold)
 
-    index_page = """<html>
+    index_page = """<html>
     {}
     <body>
     <h2>Evaluation</h2>
@@ -1509,7 +1521,7 @@ def process_batch_results(options):
     index_page += '</div>'
 
     # Close body and html tags
-    index_page += '</body></html>'
+    index_page += '{}</body></html>'.format(options.footer_text)
     output_html_file = os.path.join(output_dir, 'index.html')
     with open(output_html_file, 'w') as f:
         f.write(index_page)
@@ -1529,7 +1541,6 @@ def process_batch_results(options):
     # for each category
     images_html = collections.defaultdict(list)
 
-
     # Add default entries by accessing them for the first time
 
     # Maps sorted tuples of detection category IDs (string ints) - e.g. ("1"), ("1", "4", "7") - to
@@ -1637,14 +1648,15 @@ def process_batch_results(options):
                     files_to_render), total=len(files_to_render)))
         else:
             for file_info in tqdm(files_to_render):
-                rendering_results.append(_render_image_no_gt(file_info,
-                                                             detection_categories_to_results_name,
-                                                             detection_categories,
-                                                             classification_categories,
-                                                             options=options))
+                rendering_result = _render_image_no_gt(file_info,
+                                                       detection_categories_to_results_name,
+                                                       detection_categories,
+                                                       classification_categories,
+                                                       options=options)
+                rendering_results.append(rendering_result)
 
-        elapsed = time.time() - start_time
-
+        elapsed = time.time() - start_time
+
 
     # Do we have classification results in addition to detection results?
     has_classification_info = False
@@ -1793,7 +1805,7 @@ def process_batch_results(options):
             cname, cname.lower(), ccount)
         index_page += '</div>\n'
 
-        index_page += '</body></html>'
+        index_page += '{}</body></html>'.format(options.footer_text)
         output_html_file = os.path.join(output_dir, 'index.html')
         with open(output_html_file, 'w') as f:
             f.write(index_page)
megadetector/postprocessing/validate_batch_results.py

@@ -42,11 +42,13 @@ class ValidateBatchResultsOptions:
         #:
         #: If None, assumes absolute paths.
         self.relative_path_base = None
+
+        #: Should we return the loaded data, or just the validation results?
+        self.return_data = False
 
 # ...class ValidateBatchResultsOptions
 
 
-
 #%% Main function
 
 def validate_batch_results(json_filename,options=None):
@@ -55,11 +57,17 @@ def validate_batch_results(json_filename,options=None):
 
     Args:
         json_filename (str): the filename to validate
-        options (ValidateBatchResultsOptions, optionsl): all the parameters used to control this
+        options (ValidateBatchResultsOptions, optional): all the parameters used to control this
             process, see ValidateBatchResultsOptions for details
 
     Returns:
-        bool: reserved; currently always errors or returns True.
+        dict: a dict with a field called "validation_results", which is itself a dict. The reason
+        it's a dict inside a dict is that if return_data is True, the outer dict also contains all
+        the loaded data. The "validation_results" dict contains fields called "errors", "warnings",
+        and "filename". "errors" and "warnings" are lists of strings, although "errors" will never
+        be longer than N=1, since validation fails at the first error.
+
+
     """
 
     if options is None:
@@ -68,75 +76,127 @@ def validate_batch_results(json_filename,options=None):
     with open(json_filename,'r') as f:
         d = json.load(f)
 
-    ## Info validation
+    validation_results = {}
+    validation_results['filename'] = json_filename
+    validation_results['warnings'] = []
+    validation_results['errors'] = []
 
-    assert 'info' in d
-    info = d['info']
+    if not isinstance(d,dict):
+
+        validation_results['errors'].append('Input data is not a dict')
+        to_return = {}
+        to_return['validation_results'] = validation_results
+        return to_return
 
-    assert isinstance(info,dict)
-    assert 'format_version' in info
-    format_version = float(info['format_version'])
-    assert format_version >= 1.3, 'This validator can only be used with format version 1.3 or later'
+    try:
+
+        ## Info validation
+
+        if not 'info' in d:
+            raise ValueError('Input does not contain info field')
 
-    print('Validating a .json results file with format version {}'.format(format_version))
-
-    ## Category validation
-
-    assert 'detection_categories' in d
-    for k in d['detection_categories'].keys():
-        # Categories should be string-formatted ints
-        assert isinstance(k,str)
-        _ = int(k)
-        assert isinstance(d['detection_categories'][k],str)
+        info = d['info']
+
+        if not isinstance(info,dict):
+            raise ValueError('Input contains invalid info field')
+
+        if 'format_version' not in info:
+            raise ValueError('Input does not specify format version')
+
+        format_version = float(info['format_version'])
+        if format_version < 1.3:
+            raise ValueError('This validator can only be used with format version 1.3 or later')
 
-    if 'classification_categories' in d:
-        for k in d['classification_categories'].keys():
-            # Categories should be string-formatted ints
-            assert isinstance(k,str)
+
+        ## Category validation
+
+        if 'detection_categories' not in d:
+            raise ValueError('Input does not contain detection_categories field')
+
+        for k in d['detection_categories'].keys():
+            # Category IDs should be string-formatted ints
+            if not isinstance(k,str):
+                raise ValueError('Invalid detection category ID: {}'.format(k))
             _ = int(k)
-            assert isinstance(d['classification_categories'][k],str)
-
-
-    ## Image validation
-
-    assert 'images' in d
-    assert isinstance(d['images'],list)
-
-    # im = d['images'][0]
-    for im in d['images']:
+            if not isinstance(d['detection_categories'][k],str):
+                raise ValueError('Invalid detection category name: {}'.format(
+                    d['detection_categories'][k]))
+
+        if 'classification_categories' in d:
+            for k in d['classification_categories'].keys():
+                # Categories should be string-formatted ints
+                if not isinstance(k,str):
+                    raise ValueError('Invalid classification category ID: {}'.format(k))
+                _ = int(k)
+                if not isinstance(d['classification_categories'][k],str):
+                    raise ValueError('Invalid classification category name: {}'.format(
+                        d['classification_categories'][k]))
 
-        assert isinstance(im,dict)
-        assert 'file' in im
 
-        file = im['file']
+        ## Image validation
 
-        if options.check_image_existence:
-            if options.relative_path_base is None:
-                file_abs = file
-            else:
-                file_abs = os.path.join(options.relative_path_base,file)
-            assert os.path.isfile(file_abs), 'Cannot find file {}'.format(file_abs)
+        if 'images' not in d:
+            raise ValueError('images field not present')
+        if not isinstance(d['images'],list):
+            raise ValueError('Invalid images field')
+
+        # im = d['images'][0]
+        for i_im,im in enumerate(d['images']):
 
-        if 'detections' not in im or im['detections'] is None:
-            assert 'failure' in im and isinstance(im['failure'],str)
-        else:
-            assert isinstance(im['detections'],list)
+            if not isinstance(im,dict):
+                raise ValueError('Invalid image at index {}'.format(i_im))
+            if 'file' not in im:
+                raise ValueError('Image without filename at index {}'.format(i_im))
 
-        if is_video_file(im['file']) and (format_version >= 1.4):
-            assert 'frame_rate' in im
-            if 'detections' in im and im['detections'] is not None:
-                for det in im['detections']:
-                    assert 'frame_number' in det
+            file = im['file']
 
-        # ...for each image
+            if options.check_image_existence:
+                if options.relative_path_base is None:
+                    file_abs = file
+                else:
+                    file_abs = os.path.join(options.relative_path_base,file)
+                if not os.path.isfile(file_abs):
+                    raise ValueError('Cannot find file {}'.format(file_abs))
+
+            if ('detections' not in im) or (im['detections'] is None):
+                if not ('failure' in im and isinstance(im['failure'],str)):
+                    raise ValueError('Image {} has no detections and no failure'.format(im['file']))
+            else:
+                if not isinstance(im['detections'],list):
+                    raise ValueError('Invalid detections list for image {}'.format(im['file']))
+
+            if is_video_file(im['file']) and (format_version >= 1.4):
+                if 'frame_rate' not in im:
+                    raise ValueError('Video without frame rate: {}'.format(im['file']))
+                if 'detections' in im and im['detections'] is not None:
+                    for det in im['detections']:
+                        if 'frame_number' not in det:
+                            raise ValueError('Frame without frame number in video {}'.format(
+                                im['file']))
+
+        # ...for each image
+
+
+        ## Checking on other keys
+
+        for k in d.keys():
+            if (k not in typical_keys) and (k not in required_keys):
+                validation_results['warnings'].append(
+                    'Warning: non-standard key {} present at file level'.format(k))
 
+    except Exception as e:
+
+        validation_results['errors'].append(str(e))
+
+    if options.return_data:
+        to_return = d
+    else:
+        to_return = {}
 
-    ## Checking on other keys
+    to_return['validation_results'] = validation_results
 
-    for k in d.keys():
-        if k not in typical_keys and k not in required_keys:
-            print('Warning: non-standard key {} present at file level'.format(k))
-
+    return to_return
+
 # ...def validate_batch_results(...)
 
 
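With this rewrite, validate_batch_results no longer fails on the first assertion; it collects problems and returns them. A minimal sketch of consuming the new return value, using only names defined in this diff (the input path is a hypothetical placeholder):

    from megadetector.postprocessing.validate_batch_results import \
        ValidateBatchResultsOptions, validate_batch_results

    options = ValidateBatchResultsOptions()
    options.check_image_existence = False
    options.return_data = False

    d = validate_batch_results('/path/to/md_results.json', options)
    validation_results = d['validation_results']

    # "errors" has at most one element, since validation stops at the first error
    if len(validation_results['errors']) > 0:
        print('Validation failed: {}'.format(validation_results['errors'][0]))
    for w in validation_results['warnings']:
        print(w)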
megadetector/taxonomy_mapping/map_new_lila_datasets.py

@@ -15,10 +15,10 @@ import json
 # Created by get_lila_category_list.py
 input_lila_category_list_file = os.path.expanduser('~/lila/lila_categories_list/lila_dataset_to_categories.json')
 
-output_file = os.path.expanduser('~/lila/lila_additions_2024.07.16.csv')
+output_file = os.path.expanduser('~/lila/lila_additions_2024.10.05.csv')
 
 datasets_to_map = [
-    'Desert Lion Conservation Camera Traps'
+    'Ohio Small Animals'
 ]
 
 
@@ -127,13 +127,18 @@ output_df.to_csv(output_file, index=None, header=True)
 #%% Manual lookup
 
 if False:
+
+    #%%
+
+    from megadetector.utils.path_utils import open_file
+    open_file(output_file)
 
     #%%
 
     # q = 'white-throated monkey'
     # q = 'cingulata'
     # q = 'notamacropus'
-    q = 'aves'
+    q = 'thamnophis saurita saurita'
     taxonomy_preference = 'inat'
    m = get_preferred_taxonomic_match(q,taxonomy_preference)
    # print(m.scientific_name); import clipboard; clipboard.copy(m.scientific_name)
megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py

@@ -68,7 +68,8 @@ if False:
 
     #%% Generate the final output file
 
-    assert not os.path.isfile(release_taxonomy_file)
+    assert not os.path.isfile(release_taxonomy_file), \
+        'File {} exists, delete it manually before proceeding'.format(release_taxonomy_file)
 
     known_levels = ['stateofmatter', #noqa
                     'kingdom',
@@ -88,7 +89,7 @@ if False:
                     'genus',
                     'species','subspecies','variety']
 
-    levels_to_exclude = ['stateofmatter','zoosection','parvorder']
+    levels_to_exclude = ['stateofmatter','zoosection','parvorder','complex']
 
     for s in levels_to_exclude:
         assert s not in levels_to_include
megadetector/taxonomy_mapping/preview_lila_taxonomy.py

@@ -16,7 +16,7 @@ import os
 import pandas as pd
 
 # lila_taxonomy_file = r"c:\git\agentmorrisprivate\lila-taxonomy\lila-taxonomy-mapping.csv"
-lila_taxonomy_file = os.path.expanduser('~/lila/lila_additions_2024.07.16.csv')
+lila_taxonomy_file = os.path.expanduser('~/lila/lila_additions_2024.10.05.csv')
 
 preview_base = os.path.expanduser('~/lila/lila_taxonomy_preview')
 os.makedirs(preview_base,exist_ok=True)
@@ -383,6 +383,8 @@ for i_row,row in df.iterrows():
 
 #%% Download sample images for all scientific names
 
+# Takes ~1 minute per 10 rows
+
 remapped_queries = {'papio':'papio+baboon',
                     'damaliscus lunatus jimela':'damaliscus lunatus',
                     'mazama':'genus+mazama',
megadetector/utils/ct_utils.py

@@ -105,6 +105,26 @@ def args_to_object(args, obj):
     return obj
 
 
+def dict_to_object(d, obj):
+    """
+    Copies all fields from a dict to an object. Skips fields starting with _.
+    Does not check existence in the target object.
+
+    Args:
+        d (dict): the dict to convert to an object
+        obj (object): the object whose attributes will be updated
+
+    Returns:
+        object: the modified object (modified in place, but also returned)
+    """
+
+    for k in d.keys():
+        if not k.startswith('_'):
+            setattr(obj, k, d[k])
+
+    return obj
+
+
 def pretty_print_object(obj, b_print=True):
     """
     Converts an arbitrary object to .json, optionally printing the .json representation.
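dict_to_object mirrors the existing args_to_object helper, but for plain dicts; the md_tests hunk at the end of this diff uses it to populate a YoloInferenceOptions instance. A minimal sketch against a toy options class (DemoOptions is hypothetical):

    from megadetector.utils.ct_utils import dict_to_object

    class DemoOptions:
        def __init__(self):
            self.batch_size = 1
            self.device_string = None

    options = dict_to_object({'batch_size': 8, 'device_string': '0', '_private': True},
                             DemoOptions())
    assert options.batch_size == 8 and options.device_string == '0'
    assert not hasattr(options, '_private')  # keys starting with _ are skipped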
megadetector/utils/md_tests.py

@@ -29,10 +29,6 @@ import subprocess
 import argparse
 import inspect
 
-#: IoU threshold used to determine whether boxes in two detection files likely correspond
-#: to the same box.
-iou_threshold_for_file_comparison = 0.9
-
 
 #%% Classes
 
@@ -106,6 +102,10 @@ class MDTestOptions:
         #: PYTHONPATH to set for CLI tests; if None, inherits from the parent process. Only
         #: impacts the called functions, not the parent process.
         self.cli_test_pythonpath = None
+
+        #: IoU threshold used to determine whether boxes in two detection files likely correspond
+        #: to the same box.
+        self.iou_threshold_for_file_comparison = 0.85
 
 # ...class MDTestOptions()
 
@@ -177,7 +177,8 @@ def get_expected_results_filename(gpu_is_available,
 
 def download_test_data(options=None):
     """
-    Downloads the test zipfile if necessary, unzips if necessary.
+    Downloads the test zipfile if necessary, unzips if necessary. Initializes
+    temporary fields in [options], particularly [options.scratch_dir].
 
     Args:
         options (MDTestOptions, optional): see MDTestOptions for details
@@ -409,7 +410,7 @@ def compare_detection_lists(detections_a,detections_b,options,bidirectional_comp
                 iou = get_iou(det_a['bbox'],b_det['bbox'])
 
                 # Is this likely the same detection as det_a?
-                if iou >= iou_threshold_for_file_comparison and iou > highest_iou:
+                if iou >= options.iou_threshold_for_file_comparison and iou > highest_iou:
                     matching_det_b = b_det
                     highest_iou = iou
 
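For context, this comparison keys on intersection-over-union: two boxes are treated as the same detection when their IoU meets options.iou_threshold_for_file_comparison (now 0.85 by default, and moved onto MDTestOptions so it can be tuned per run). A minimal sketch of the computation, assuming MegaDetector-style normalized [x_min, y_min, width, height] boxes; this is an illustration, not necessarily the package's get_iou implementation:

    def iou_xywh(box_a, box_b):
        ax, ay, aw, ah = box_a
        bx, by, bw, bh = box_b
        # Intersection rectangle (zero if the boxes don't overlap)
        iw = max(0.0, min(ax + aw, bx + bw) - max(ax, bx))
        ih = max(0.0, min(ay + ah, by + bh) - max(ay, by))
        intersection = iw * ih
        union = aw * ah + bw * bh - intersection
        return intersection / union if union > 0 else 0.0

    # Two near-identical boxes clear the 0.85 threshold...
    assert iou_xywh([0.1, 0.1, 0.4, 0.4], [0.11, 0.1, 0.4, 0.4]) >= 0.85
    # ...but a half-width shift does not
    assert iou_xywh([0.1, 0.1, 0.4, 0.4], [0.3, 0.1, 0.4, 0.4]) < 0.85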
@@ -528,12 +529,14 @@ def compare_results(inference_output_file,expected_results_file,options):
     if not options.warning_mode:
 
         assert max_conf_error <= options.max_conf_error, \
-            'Confidence error {} is greater than allowable ({}), on file:\n{}'.format(
-            max_conf_error,options.max_conf_error,max_conf_error_file)
+            'Confidence error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
+            max_conf_error,options.max_conf_error,max_conf_error_file,
+            inference_output_file,expected_results_file)
 
         assert max_coord_error <= options.max_coord_error, \
-            'Coord error {} is greater than allowable ({}), on file:\n{}'.format(
-            max_coord_error,options.max_coord_error,max_coord_error_file)
+            'Coord error {} is greater than allowable ({}), on file:\n{} ({},{})'.format(
+            max_coord_error,options.max_coord_error,max_coord_error_file,
+            inference_output_file,expected_results_file)
 
     print('Max conf error: {} (file {})'.format(
         max_conf_error,max_conf_error_file))
@@ -846,7 +849,7 @@ def run_python_tests(options):
     video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
     video_options.render_output_video = True
     # video_options.keep_rendered_frames = False
-    # video_options.keep_rendered_frames = False
+    # video_options.keep_extracted_frames = False
     video_options.force_extracted_frame_folder_deletion = True
     video_options.force_rendered_frame_folder_deletion = True
     # video_options.reuse_results_if_available = False
@@ -886,7 +889,7 @@ def run_python_tests(options):
     video_options.frame_rendering_folder = os.path.join(options.scratch_dir,'video_scratch/rendered_frame_folder')
     video_options.render_output_video = False
     video_options.keep_rendered_frames = False
-    video_options.keep_rendered_frames = False
+    video_options.keep_extracted_frames = False
     video_options.force_extracted_frame_folder_deletion = True
     video_options.force_rendered_frame_folder_deletion = True
     video_options.reuse_results_if_available = False
@@ -1347,11 +1350,13 @@ if False:
     options.force_data_download = False
     options.force_data_unzip = False
     options.warning_mode = False
-    options.max_coord_error = 0.001
-    options.max_conf_error = 0.005
-    options.cli_working_dir = r'c:\git\MegaDetector'
-    options.yolo_working_dir = r'c:\git\yolov5-md'
-
+    options.max_coord_error = 0.01 # 0.001
+    options.max_conf_error = 0.01 # 0.005
+    # options.cli_working_dir = r'c:\git\MegaDetector'
+    # options.yolo_working_dir = r'c:\git\yolov5-md'
+    options.cli_working_dir = os.path.expanduser('~')
+    # options.yolo_working_dir = '/mnt/c/git/yolov5-md'
+    options = download_test_data(options)
 
 
     #%%
@@ -1363,6 +1368,47 @@ if False:
 
     run_tests(options)
 
+    #%%
+
+    yolo_inference_options_dict = {'input_folder': '/tmp/md-tests/md-test-images',
+        'image_filename_list': None,
+        'model_filename': 'MDV5A',
+        'output_file': '/tmp/md-tests/folder_inference_output_yolo_val.json',
+        'yolo_working_folder': '/mnt/c/git/yolov5-md',
+        'model_type': 'yolov5',
+        'image_size': None,
+        'conf_thres': 0.005,
+        'batch_size': 1,
+        'device_string': '0',
+        'augment': False,
+        'half_precision_enabled': None,
+        'symlink_folder': None,
+        'use_symlinks': True,
+        'unique_id_strategy': 'links',
+        'yolo_results_folder': None,
+        'remove_symlink_folder': True,
+        'remove_yolo_results_folder': True,
+        'yolo_category_id_to_name': {0: 'animal', 1: 'person', 2: 'vehicle'},
+        'overwrite_handling': 'overwrite',
+        'preview_yolo_command_only': False,
+        'treat_copy_failures_as_warnings': False,
+        'save_yolo_debug_output': False,
+        'recursive': True,
+        'checkpoint_frequency': None}
+
+    from megadetector.utils.ct_utils import dict_to_object
+    from megadetector.detection.run_inference_with_yolov5_val import \
+        YoloInferenceOptions, run_inference_with_yolo_val
+
+    yolo_inference_options = YoloInferenceOptions()
+    yolo_inference_options = dict_to_object(yolo_inference_options_dict, yolo_inference_options)
+
+    os.makedirs(options.scratch_dir,exist_ok=True)
+
+    inference_output_file_yolo_val = os.path.join(options.scratch_dir,'folder_inference_output_yolo_val.json')
+
+    run_inference_with_yolo_val(yolo_inference_options)
+
 
 #%% Command-line driver
 