megadetector 5.0.26__py3-none-any.whl → 5.0.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic; see the package registry's advisory page for more details.

Files changed (26) hide show
  1. megadetector/data_management/mewc_to_md.py +1 -1
  2. megadetector/data_management/read_exif.py +2 -0
  3. megadetector/detection/process_video.py +1 -1
  4. megadetector/detection/pytorch_detector.py +4 -4
  5. megadetector/detection/run_detector.py +10 -3
  6. megadetector/detection/run_detector_batch.py +4 -3
  7. megadetector/detection/run_tiled_inference.py +65 -13
  8. megadetector/detection/video_utils.py +2 -2
  9. megadetector/postprocessing/classification_postprocessing.py +517 -20
  10. megadetector/postprocessing/create_crop_folder.py +1 -1
  11. megadetector/postprocessing/generate_csv_report.py +499 -0
  12. megadetector/postprocessing/load_api_results.py +4 -4
  13. megadetector/postprocessing/postprocess_batch_results.py +6 -4
  14. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +0 -3
  15. megadetector/taxonomy_mapping/taxonomy_graph.py +1 -1
  16. megadetector/utils/ct_utils.py +3 -2
  17. megadetector/utils/path_utils.py +75 -29
  18. megadetector/utils/split_locations_into_train_val.py +16 -3
  19. megadetector/utils/wi_utils.py +68 -410
  20. megadetector/visualization/visualization_utils.py +25 -9
  21. megadetector/visualization/visualize_detector_output.py +50 -28
  22. {megadetector-5.0.26.dist-info → megadetector-5.0.28.dist-info}/METADATA +132 -132
  23. {megadetector-5.0.26.dist-info → megadetector-5.0.28.dist-info}/RECORD +26 -25
  24. {megadetector-5.0.26.dist-info → megadetector-5.0.28.dist-info}/WHEEL +1 -1
  25. {megadetector-5.0.26.dist-info → megadetector-5.0.28.dist-info}/licenses/LICENSE +0 -0
  26. {megadetector-5.0.26.dist-info → megadetector-5.0.28.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,499 @@
1
+ """
2
+
3
+ generate_csv_report.py
4
+
5
+ Generates a .csv report from a MD-formatted .json file with the following columns:
6
+
7
+ * filename
8
+ * datetime (if images or EXIF information is supplied)
9
+ * detection_category
10
+ * max_detection_confidence
11
+ * classification_category
12
+ * max_classification_confidence
13
+ * count
14
+
15
+ One row is generated per category pair per image. For example, these would be unique rows:
16
+
17
+ image0001.jpg,animal,deer,4
18
+ image0001.jpg,animal,lion,4
19
+ image0001.jpg,animal,[none],4
20
+ image0001.jpg,person,[none],2
21
+
22
+ Images with no above-threshold detections will have a single row:
23
+
24
+ image0001.jpg,empty,[none],-1
25
+
26
+ Images with processing errors will have a single row:
27
+
28
+ image0001.jpg,error,error_string,-1
29
+
30
+ """
31
+
32
+ #%% Constants and imports
33
+
34
+ import os
35
+ import json
36
+ import tempfile
37
+ import uuid
38
+ import pandas as pd
39
+
40
+ from copy import deepcopy
41
+
42
+ from megadetector.utils.wi_utils import load_md_or_speciesnet_file
43
+ from megadetector.utils.ct_utils import get_max_conf
44
+ from megadetector.utils.ct_utils import is_list_sorted
45
+
46
+ from megadetector.detection.run_detector import \
47
+ get_typical_confidence_threshold_from_results
48
+
49
+ from megadetector.data_management.read_exif import \
50
+ read_exif_from_folder, ReadExifOptions, minimal_exif_tags
51
+
52
+ default_classification_threshold = 0.3
53
+ unknown_datetime_tag = ''
54
+
55
+
56
+ #%% Functions
57
+
58
def _read_datetime_info(datetime_source, results, verbose=True):
    """
    Reads datetime information from [datetime_source] for the images in [results].

    Args:
        datetime_source (str): a folder of images, a MD results .json file, or an
            exif_info.json file created with read_exif()
        results (dict): MD-formatted results dict; used only to cross-check filenames
        verbose (bool, optional): enable additional debug output

    Returns:
        dict: maps each filename found in [datetime_source] to a datetime string
        (the empty string if no datetime information was available for that file)
    """

    all_exif_results = None

    if os.path.isdir(datetime_source):

        # Read EXIF info directly from a folder of images, caching results in a
        # uniquely-named file in the system temp folder
        read_exif_options = ReadExifOptions()
        read_exif_options.tags_to_include = minimal_exif_tags
        read_exif_options.byte_handling = 'delete'
        exif_cache_file = os.path.join(tempfile.gettempdir(),
                                       'md-exif-data',
                                       str(uuid.uuid1())+'.json')
        if verbose:
            print('Reading EXIF datetime info from {}, writing to {}'.format(
                datetime_source,exif_cache_file))
        os.makedirs(os.path.dirname(exif_cache_file),exist_ok=True)

        all_exif_results = read_exif_from_folder(input_folder=datetime_source,
                                                 output_file=exif_cache_file,
                                                 options=read_exif_options,
                                                 recursive=True)

    else:

        assert os.path.isfile(datetime_source), \
            'datetime source {} is neither a folder nor a file'.format(datetime_source)

        # Load this, and decide whether it's a MD results file or an exif_info file
        with open(datetime_source,'r') as f:
            d = json.load(f)

        if isinstance(d,list):

            # exif_info.json files are lists of per-image dicts
            all_exif_results = d

        else:

            # MD results files are dicts with an 'images' list
            assert isinstance(d,dict), 'Unrecognized file format supplied as datetime source'
            assert 'images' in d,\
                'The datetime source you provided doesn\'t look like a valid source .json file'
            all_exif_results = []
            found_datetime = False
            for im in d['images']:
                exif_result = {'file_name':im['file']}
                if 'datetime' in im:
                    found_datetime = True
                    exif_result['exif_tags'] = {'DateTimeOriginal':im['datetime']}
                all_exif_results.append(exif_result)
            if not found_datetime:
                print('Warning: a MD results file was supplied as the datetime source, but it does not appear '
                      'to contain datetime information.')

    assert all_exif_results is not None

    filename_to_datetime_string = {}

    for exif_result in all_exif_results:
        datetime_string = unknown_datetime_tag
        if ('exif_tags' in exif_result) and \
           (exif_result['exif_tags'] is not None) and \
           ('DateTimeOriginal' in exif_result['exif_tags']):
            datetime_string = exif_result['exif_tags']['DateTimeOriginal']
            if datetime_string is None:
                datetime_string = ''
            else:
                assert isinstance(datetime_string,str), 'Unrecognized datetime format'
        filename_to_datetime_string[exif_result['file_name']] = datetime_string

    # Report (but tolerate) inconsistencies between the EXIF data and the MD results
    image_files_set = set(im['file'] for im in results['images'])

    files_in_exif_but_not_in_results = []
    files_in_results_but_not_in_exif = []
    files_with_no_datetime_info = []

    for fn in filename_to_datetime_string:
        dts = filename_to_datetime_string[fn]
        if (dts is None) or (dts == unknown_datetime_tag) or (len(dts) == 0):
            files_with_no_datetime_info.append(fn)
        if fn not in image_files_set:
            files_in_exif_but_not_in_results.append(fn)

    for fn in image_files_set:
        if fn not in filename_to_datetime_string:
            files_in_results_but_not_in_exif.append(fn)

    if verbose:

        print('{} files (of {}) in EXIF info not found in MD results'.format(
            len(files_in_exif_but_not_in_results),len(filename_to_datetime_string)))

        print('{} files (of {}) in MD results not found in MD EXIF info'.format(
            len(files_in_results_but_not_in_exif),len(image_files_set)))

        print('Failed to read datetime information for {} files (of {}) in EXIF info'.format(
            len(files_with_no_datetime_info),len(filename_to_datetime_string)))

    return filename_to_datetime_string

# ..._read_datetime_info(...)


def generate_csv_report(md_results_file,
                        output_file=None,
                        datetime_source=None,
                        folder_level_columns=None,
                        detection_confidence_threshold=None,
                        classification_confidence_threshold=None,
                        verbose=True):
    """
    Generates a .csv report from a MD-formatted .json file.

    One row is generated per (detection category, classification category) pair per
    image.  Images with no above-threshold detections get a single "empty" row, and
    images with processing errors get a single "error" row.

    Args:
        md_results_file (str): MD results .json file for which we should generate a report
        output_file (str, optional): .csv file to write; if this is None, we'll use
            [md_results_file].csv
        datetime_source (str, optional): if datetime information is required, this should
            point to a folder of images, a MD results .json file (can be the same as the
            input file), or an exif_info.json file created with read_exif().
        folder_level_columns (list of int, optional): list of folder levels (where zero is
            the top-level folder in a path name) for which we should create separate columns.
            Should be zero-indexed ints, or a comma-delimited list of zero-indexed
            int-strings.
        detection_confidence_threshold (float, optional): detections below this confidence
            threshold will not be included in the output data.  Defaults to the recommended
            value based on the .json file.
        classification_confidence_threshold (float, optional): classifications below this
            confidence threshold will not be included in the output data (i.e., detections
            will be considered "animal").
        verbose (bool, optional): enable debug output.

    Returns:
        str: the .csv filename that was written
    """

    ##%% Load results file

    results = load_md_or_speciesnet_file(md_results_file)

    if verbose:
        print('Loaded results for {} images'.format(len(results['images'])))

    detection_category_id_to_name = results['detection_categories']
    classification_category_id_to_name = None
    if 'classification_categories' in results:
        classification_category_id_to_name = results['classification_categories']

    if output_file is None:
        output_file = md_results_file + '.csv'


    ##%% Read datetime information if necessary

    filename_to_datetime_string = None

    if datetime_source is not None:
        filename_to_datetime_string = \
            _read_datetime_info(datetime_source, results, verbose=verbose)


    ##%% Parse folder level column specifier

    if folder_level_columns is not None:
        if isinstance(folder_level_columns,str):
            folder_level_columns = [int(s) for s in folder_level_columns.split(',')]
        for folder_level in folder_level_columns:
            if (not isinstance(folder_level,int)) or (folder_level < 0):
                raise ValueError('Illegal folder level specifier {}'.format(
                    str(folder_level_columns)))


    ##%% Fill in default thresholds

    if classification_confidence_threshold is None:
        classification_confidence_threshold = default_classification_threshold
    if detection_confidence_threshold is None:
        detection_confidence_threshold = \
            get_typical_confidence_threshold_from_results(results)

    assert detection_confidence_threshold is not None


    ##%% Fill in output records

    output_records = []

    for im in results['images']:

        # Columns: filename, [datetime], detection_category, max_detection_confidence,
        # classification_category, max_classification_confidence, count,
        # [folder_level_nn...]

        base_record = {}

        # Normalize path separators in the output
        base_record['filename'] = im['file'].replace('\\','/')

        # Datetime (if available)
        if filename_to_datetime_string is not None:
            if im['file'] in filename_to_datetime_string:
                datetime_string = filename_to_datetime_string[im['file']]
            else:
                datetime_string = ''
            base_record['datetime'] = datetime_string

        for s in ['detection_category','max_detection_confidence',
                  'classification_category','max_classification_confidence',
                  'count']:
            base_record[s] = ''

        # Folder level columns
        #
        # Fix: the original code iterated folder_level_columns unconditionally,
        # crashing whenever the (default-None) argument was omitted.
        if folder_level_columns is not None:
            # Split the normalized filename, so Windows-style paths also work
            tokens = base_record['filename'].split('/')
            for folder_level in folder_level_columns:
                folder_level_column_name = 'folder_level_' + str(folder_level).zfill(2)
                if folder_level >= len(tokens):
                    folder_level_value = ''
                else:
                    folder_level_value = tokens[folder_level]
                base_record[folder_level_column_name] = folder_level_value

        records_this_image = []

        # Create one output row if this image failed
        if 'failure' in im and im['failure'] is not None and len(im['failure']) > 0:

            record = deepcopy(base_record)
            record['detection_category'] = 'error'
            record['classification_category'] = im['failure']
            records_this_image.append(record)
            assert 'detections' not in im or im['detections'] is None

        else:

            assert 'detections' in im and im['detections'] is not None

            # Count above-threshold detections
            detections_above_threshold = \
                [det for det in im['detections'] if det['conf'] >= detection_confidence_threshold]
            max_detection_conf = get_max_conf(im)

            # Create one output row if this image is empty (i.e., has no
            # above-threshold detections)
            if len(detections_above_threshold) == 0:

                record = deepcopy(base_record)
                record['detection_category'] = 'empty'
                record['max_detection_confidence'] = max_detection_conf
                records_this_image.append(record)

            # ...if this image is empty

            else:

                # Maps a string of the form:
                #
                # detection_category:classification_category
                #
                # ...to a dict with fields ['max_detection_confidence',
                # 'max_classification_confidence','count','detection_category',
                # 'classification_category']
                category_info_string_to_record = {}

                for det in detections_above_threshold:

                    detection_category_name = detection_category_id_to_name[det['category']]
                    detection_confidence = det['conf']
                    classification_category_name = ''
                    # Fix: this variable was previously misspelled
                    # ("classificaition_confidence"), causing a NameError for any
                    # detection without an above-threshold classification
                    classification_confidence = 0.0

                    if ('classifications' in det) and (len(det['classifications']) > 0):

                        # Classifications should always be sorted by confidence. Not
                        # technically required, but always true in practice.
                        assert is_list_sorted([c[1] for c in det['classifications']]), \
                            'This script does not yet support unsorted classifications'
                        assert classification_category_id_to_name is not None, \
                            'If classifications are present, category mappings should be present'

                        # Only use the first (highest-confidence) classification
                        classification = det['classifications'][0]
                        if classification[1] >= classification_confidence_threshold:
                            classification_category_name = \
                                classification_category_id_to_name[classification[0]]
                            classification_confidence = classification[1]

                    # ...if classifications are present

                    # E.g. "animal:rodent", or "vehicle:"
                    category_info_string = \
                        detection_category_name + ':' + classification_category_name

                    if category_info_string not in category_info_string_to_record:
                        category_info_string_to_record[category_info_string] = {
                            'max_detection_confidence':0.0,
                            'max_classification_confidence':0.0,
                            'count':0,
                            'detection_category':detection_category_name,
                            'classification_category':classification_category_name
                        }

                    record = category_info_string_to_record[category_info_string]
                    record['count'] += 1
                    if detection_confidence > record['max_detection_confidence']:
                        record['max_detection_confidence'] = detection_confidence
                    if classification_confidence > record['max_classification_confidence']:
                        record['max_classification_confidence'] = classification_confidence

                # ...for each detection

                for record_in in category_info_string_to_record.values():
                    assert record_in['count'] > 0
                    record_out = deepcopy(base_record)
                    for k in record_in.keys():
                        assert k in record_out.keys()
                        record_out[k] = record_in[k]
                    records_this_image.append(record_out)

            # ...is this empty/non-empty?

        # ...if this image failed/didn't fail

        output_records.extend(records_this_image)

    # ...for each image

    # Make sure every record has the same columns
    #
    # Fix: the original code indexed output_records[0] unconditionally, crashing
    # on a results file with zero images.
    if len(output_records) > 0:
        column_names = output_records[0].keys()
        for record in output_records:
            assert record.keys() == column_names

    # Write to .csv
    df = pd.DataFrame(output_records)
    df.to_csv(output_file,header=True,index=False)

    if verbose:
        print('Wrote report to {}'.format(output_file))

    return output_file

# ...generate_csv_report(...)
390
+
391
+
392
+ #%% Interactive driver
393
+
394
if False:

    pass

    #%% Configure options

    """
    python run_detector_batch.py MDV5A "g:\temp\md-test-images" "g:\temp\md-test-images\md_results_with_datetime.json" --recursive --output_relative_filenames --include_image_timestamp --include_exif_data
    """

    # Input/output locations
    md_results_file = 'g:/temp/csv-report-test/md-results.json'
    datetime_source = 'g:/temp/csv-report-test/exif_data.json'
    output_file = None

    # Alternative test inputs
    #
    # datetime_source = 'g:/temp/md-test-images'
    # datetime_source = 'g:/temp/md-test-images/md_results_with_datetime.json'
    # md_results_file = 'g:/temp/md-test-images/md_results_with_datetime.json'
    # md_results_file = 'g:/temp/md-test-images/speciesnet_results_md_format.json'

    # Report options
    folder_level_columns = [0,1,2,3]
    detection_confidence_threshold = None
    classification_confidence_threshold = None
    verbose = True


    #%% Programmatic execution

    report_kwargs = {
        'md_results_file':md_results_file,
        'output_file':output_file,
        'datetime_source':datetime_source,
        'folder_level_columns':folder_level_columns,
        'detection_confidence_threshold':detection_confidence_threshold,
        'classification_confidence_threshold':classification_confidence_threshold,
        'verbose':verbose
    }

    generate_csv_report(**report_kwargs)
428
+
429
+
430
+ #%% Command-line driver
431
+
432
import sys
import argparse

def main():
    """
    Command-line entry point; parses arguments and generates the report.
    """

    parser = argparse.ArgumentParser(
        description='Generates a .csv report from a MD-formatted .json file')

    parser.add_argument(
        'md_results_file', type=str,
        help='Path to MD results file (.json)')
    parser.add_argument(
        '--output_file', type=str,
        help='Output filename (.csv) (if omitted, will append .csv to the input file)')
    parser.add_argument(
        '--datetime_source', type=str, default=None,
        help='Image folder, exif_info.json file, or MD results file from which we should read datetime information')
    parser.add_argument(
        '--folder_level_columns', type=str, default=None,
        help='Comma-separated list of zero-indexed folder levels that should become columns in the output file')
    parser.add_argument(
        '--detection_confidence_threshold', type=float, default=None,
        help='Detection threshold (if omitted, chooses a reasonable default based on the .json file)')
    parser.add_argument(
        '--classification_confidence_threshold', type=float, default=None,
        help='Classification threshold (default {})'.format(default_classification_threshold))
    parser.add_argument(
        '--verbose', action='store_true',
        help='Enable additional debug output')

    # With no arguments at all, show help rather than failing on the
    # missing positional argument
    if not sys.argv[1:]:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # The parser's option names match generate_csv_report's parameter
    # names exactly, so we can forward them directly.
    generate_csv_report(**vars(args))

if __name__ == '__main__':
    main()
@@ -4,7 +4,7 @@ load_api_results.py
4
4
 
5
5
  DEPRECATED
6
6
 
7
- As of 2023.12, this module is used in postprocessing and RDE. Not recommended
7
+ As of 2023.12, this module is still used in postprocessing and RDE, but it's not recommended
8
8
  for new code.
9
9
 
10
10
  Loads the output of the batch processing API (json) into a Pandas dataframe.
@@ -23,6 +23,7 @@ from typing import Dict, Mapping, Optional, Tuple
23
23
  import pandas as pd
24
24
 
25
25
  from megadetector.utils import ct_utils
26
+ from megadetector.utils.wi_utils import load_md_or_speciesnet_file
26
27
 
27
28
 
28
29
  #%% Functions for loading .json results into a Pandas DataFrame, and writing back to .json
@@ -50,9 +51,8 @@ def load_api_results(api_output_path: str, normalize_paths: bool = True,
50
51
 
51
52
  print('Loading results from {}'.format(api_output_path))
52
53
 
53
- with open(api_output_path) as f:
54
- detection_results = json.load(f)
55
-
54
+ detection_results = load_md_or_speciesnet_file(api_output_path)
55
+
56
56
  # Validate that this is really a detector output file
57
57
  for s in ['info', 'detection_categories', 'images']:
58
58
  assert s in detection_results, 'Missing field {} in detection results'.format(s)
@@ -48,6 +48,7 @@ from tqdm import tqdm
48
48
  from megadetector.visualization import visualization_utils as vis_utils
49
49
  from megadetector.visualization import plot_utils
50
50
  from megadetector.utils.write_html_image_list import write_html_image_list
51
+ from megadetector.utils.wi_utils import load_md_or_speciesnet_file
51
52
  from megadetector.utils import path_utils
52
53
  from megadetector.utils.ct_utils import args_to_object
53
54
  from megadetector.utils.ct_utils import sets_overlap
@@ -89,7 +90,9 @@ class PostProcessingOptions:
89
90
  ### Options
90
91
 
91
92
  #: Folder where images live (filenames in [md_results_file] should be relative to this folder)
92
- self.image_base_dir = '.'
93
+ #:
94
+ #: Can be '' if [md_results_file] uses absolute paths.
95
+ self.image_base_dir = ''
93
96
 
94
97
  ## These apply only when we're doing ground-truth comparisons
95
98
 
@@ -493,7 +496,7 @@ def _render_bounding_boxes(
493
496
  original_size=original_size,label_map=label_map,
494
497
  thickness=4,expansion=4)
495
498
 
496
- # Preprare per-category confidence thresholds
499
+ # Prepare per-category confidence thresholds
497
500
  if isinstance(options.confidence_threshold,float):
498
501
  rendering_confidence_threshold = options.confidence_threshold
499
502
  else:
@@ -1873,8 +1876,7 @@ def process_batch_results(options):
1873
1876
 
1874
1877
  print('Generating classification category report')
1875
1878
 
1876
- with open(options.md_results_file,'r') as f:
1877
- d = json.load(f)
1879
+ d = load_md_or_speciesnet_file(options.md_results_file)
1878
1880
 
1879
1881
  classification_category_to_count = {}
1880
1882
 
@@ -66,9 +66,6 @@ df = pd.read_csv(lila_taxonomy_file)
66
66
  from megadetector.taxonomy_mapping.species_lookup import \
67
67
  initialize_taxonomy_lookup, get_preferred_taxonomic_match
68
68
 
69
- # from taxonomy_mapping.species_lookup import (
70
- # get_taxonomic_info, print_taxonomy_matche)
71
-
72
69
  initialize_taxonomy_lookup()
73
70
 
74
71
 
@@ -303,7 +303,7 @@ def dag_to_tree(graph: nx.DiGraph,
303
303
  component separately.
304
304
 
305
305
  Args:
306
- graph: nx.DiGraph, DAG representation of taxonomy hieararchy
306
+ graph: nx.DiGraph, DAG representation of taxonomy hierarchy
307
307
  taxon_to_node: dict, maps (taxon_level, taxon_name) to a TaxonNode
308
308
 
309
309
  Returns: nx.DiGraph, a tree-structured graph
@@ -193,7 +193,7 @@ def write_json(path, content, indent=1):
193
193
  indent (int, optional): indentation depth passed to json.dump
194
194
  """
195
195
 
196
- with open(path, 'w') as f:
196
+ with open(path, 'w', newline='\n') as f:
197
197
  json.dump(content, f, indent=indent)
198
198
 
199
199
 
@@ -299,7 +299,8 @@ def _get_max_conf_from_detections(detections):
299
299
  def get_max_conf(im):
300
300
  """
301
301
  Given an image dict in the MD output format, computes the maximum detection confidence for any
302
- class. Returns 0.0 (rather than None) if there was a failure or 'detections' isn't present.
302
+ class. Returns 0.0 if there were no detections, if there was a failure, or if 'detections' isn't
303
+ present.
303
304
 
304
305
  Args:
305
306
  im (dict): image dictionary in the MD output format (with a 'detections' field)