megadetector 5.0.24__py3-none-any.whl → 5.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of megadetector might be problematic.

@@ -138,6 +138,9 @@ class BatchComparisonOptions:
         #: List of filenames to include in the comparison, or None to use all files
         self.filenames_to_include = None

+        #: List of category names to include in the comparison, or None to use all categories
+        self.category_names_to_include = None
+
         #: Compare only detections/non-detections, ignore categories (still renders categories)
         self.class_agnostic_comparison = False

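For context, a minimal usage sketch of the new option. The module path and the surrounding comparison workflow are assumptions based on where this class lives in the package; only category_names_to_include itself comes from this diff.

from megadetector.postprocessing.compare_batch_results import BatchComparisonOptions

options = BatchComparisonOptions()
# Compare only 'animal' detections; 'person' and 'vehicle' detections are ignored
options.category_names_to_include = ['animal']
# A bare string is also accepted; the new code wraps it into a one-element list
# options.category_names_to_include = 'animal'
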
@@ -986,7 +989,32 @@ def _pairwise_compare_batch_results(options,output_index,pairwise_options):
         if invalid_category_error:

             continue
-
+
+        # Should we be restricting the comparison to only certain categories?
+        if options.category_names_to_include is not None:
+
+            # Just in case the user provided a single category instead of a list
+            if isinstance(options.category_names_to_include,str):
+                options.category_names_to_include = [options.category_names_to_include]
+
+            category_name_to_id_a = invert_dictionary(detection_categories_a)
+            category_name_to_id_b = invert_dictionary(detection_categories_b)
+            category_ids_to_include_a = []
+            category_ids_to_include_b = []
+
+            for category_name in options.category_names_to_include:
+                if category_name in category_name_to_id_a:
+                    category_ids_to_include_a.append(category_name_to_id_a[category_name])
+                if category_name in category_name_to_id_b:
+                    category_ids_to_include_b.append(category_name_to_id_b[category_name])
+
+            # Restrict the categories we treat as above-threshold to the set we're supposed
+            # to be using
+            categories_above_threshold_a = [category_id for category_id in categories_above_threshold_a if \
+                category_id in category_ids_to_include_a]
+            categories_above_threshold_b = [category_id for category_id in categories_above_threshold_b if \
+                category_id in category_ids_to_include_b]
+
         detection_a = (len(categories_above_threshold_a) > 0)
         detection_b = (len(categories_above_threshold_b) > 0)

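The category filtering above relies on invert_dictionary to turn MD's ID-to-name category map into a name-to-ID map. A minimal illustration of the assumed behavior; the real helper lives elsewhere in the package, and this snippet is only for orientation.

detection_categories = {'1': 'animal', '2': 'person', '3': 'vehicle'}

def invert_dictionary(d):
    # Swap keys and values; MD category names are unique, so this is lossless
    return {v: k for k, v in d.items()}

category_name_to_id = invert_dictionary(detection_categories)
assert category_name_to_id['animal'] == '1'
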
@@ -1609,7 +1637,72 @@ def n_way_comparison(filenames,
 # ...def n_way_comparison(...)


-def find_equivalent_threshold(results_a,results_b,threshold_a=0.2):
+def find_image_level_detections_above_threshold(results,threshold=0.2,category_names=None):
+    """
+    Returns images in the set of MD results [results] with detections above
+    a threshold confidence level, optionally only counting certain categories.
+
+    Args:
+        results (str or dict): the set of results, either a .json filename or a results
+            dict
+        threshold (float, optional): the threshold used to determine the target number of
+            detections in [results]
+        category_names (list or str, optional): the list of category names to consider (defaults
+            to using all categories), or the name of a single category.
+
+    Returns:
+        list: the images with above-threshold detections
+    """
+    if isinstance(results,str):
+        with open(results,'r') as f:
+            results = json.load(f)
+
+    category_ids_to_consider = None
+
+    if category_names is not None:
+
+        if isinstance(category_names,str):
+            category_names = [category_names]
+
+        category_id_to_name = results['detection_categories']
+        category_name_to_id = invert_dictionary(category_id_to_name)
+
+        category_ids_to_consider = []
+
+        # category_name = category_names[0]
+        for category_name in category_names:
+            category_id = category_name_to_id[category_name]
+            category_ids_to_consider.append(category_id)
+
+        assert len(category_ids_to_consider) > 0, \
+            'Category name list did not map to any category IDs'
+
+    images_above_threshold = []
+
+    for im in results['images']:
+
+        if ('detections' in im) and (im['detections'] is not None) and (len(im['detections']) > 0):
+            confidence_values_this_image = [0]
+            for det in im['detections']:
+                if category_ids_to_consider is not None:
+                    if det['category'] not in category_ids_to_consider:
+                        continue
+                confidence_values_this_image.append(det['conf'])
+            if max(confidence_values_this_image) >= threshold:
+                images_above_threshold.append(im)
+
+    # ...for each image
+
+    return images_above_threshold
+
+# ...def find_image_level_detections_above_threshold(...)
+
+
+def find_equivalent_threshold(results_a,
+                              results_b,
+                              threshold_a=0.2,
+                              category_names=None,
+                              verbose=False):
     """
     Given two sets of detector results, finds the confidence threshold for results_b
     that produces the same fraction of *images* with detections as threshold_a does for
@@ -1622,6 +1715,9 @@ def find_equivalent_threshold(results_a,results_b,threshold_a=0.2):
             dict
         threshold_a (float, optional): the threshold used to determine the target number of
             detections in results_a
+        category_names (list or str, optional): the list of category names to consider (defaults
+            to using all categories), or the name of a single category.
+        verbose (bool, optional): enable additional debug output

     Returns:
         float: the threshold that - when applied to results_b - produces the same number
@@ -1629,35 +1725,106 @@ def find_equivalent_threshold(results_a,results_b,threshold_a=0.2):
     """

     if isinstance(results_a,str):
+        if verbose:
+            print('Loading results from {}'.format(results_a))
         with open(results_a,'r') as f:
             results_a = json.load(f)

     if isinstance(results_b,str):
+        if verbose:
+            print('Loading results from {}'.format(results_b))
         with open(results_b,'r') as f:
             results_b = json.load(f)
+
+    category_ids_to_consider_a = None
+    category_ids_to_consider_b = None
+
+    if category_names is not None:
+
+        if isinstance(category_names,str):
+            category_names = [category_names]
+
+        categories_a = results_a['detection_categories']
+        categories_b = results_b['detection_categories']
+        category_name_to_id_a = invert_dictionary(categories_a)
+        category_name_to_id_b = invert_dictionary(categories_b)
+
+        category_ids_to_consider_a = []
+        category_ids_to_consider_b = []
+
+        # category_name = category_names[0]
+        for category_name in category_names:
+            category_id_a = category_name_to_id_a[category_name]
+            category_id_b = category_name_to_id_b[category_name]
+            category_ids_to_consider_a.append(category_id_a)
+            category_ids_to_consider_b.append(category_id_b)

-    def get_confidence_values_for_results(images):
+        assert len(category_ids_to_consider_a) > 0 and len(category_ids_to_consider_b) > 0, \
+            'Category name list did not map to any category IDs in one or both detection sets'
+
+    def _get_confidence_values_for_results(images,category_ids_to_consider,threshold):
+        """
+        Return a list of the maximum confidence value for each image in [images].
+        Returns zero confidence for images with no detections (or no detections
+        in the specified categories). Does not return anything for invalid images.
+        """
+
         confidence_values = []
+        images_above_threshold = []
+
         for im in images:
             if 'detections' in im and im['detections'] is not None:
                 if len(im['detections']) == 0:
                     confidence_values.append(0)
                 else:
-                    confidence_values_this_image = [det['conf'] for det in im['detections']]
-                    confidence_values.append(max(confidence_values_this_image))
-        return confidence_values
+                    confidence_values_this_image = []
+                    for det in im['detections']:
+                        if category_ids_to_consider is not None:
+                            if det['category'] not in category_ids_to_consider:
+                                continue
+                        confidence_values_this_image.append(det['conf'])
+                    if len(confidence_values_this_image) == 0:
+                        confidence_values.append(0)
+                    else:
+                        max_conf_value = max(confidence_values_this_image)
+
+                        if threshold is not None and max_conf_value >= threshold:
+                            images_above_threshold.append(im)
+                        confidence_values.append(max_conf_value)
+        # ...for each image
+
+        return confidence_values, images_above_threshold

-    confidence_values_a = get_confidence_values_for_results(results_a['images'])
+    confidence_values_a,images_above_threshold_a = \
+        _get_confidence_values_for_results(results_a['images'],
+                                           category_ids_to_consider_a,
+                                           threshold_a)
+
+    # ...def _get_confidence_values_for_results(...)
+
+    if verbose:
+        print('For result set A, considering {} of {} images'.format(
+            len(confidence_values_a),len(results_a['images'])))
     confidence_values_a_above_threshold = [c for c in confidence_values_a if c >= threshold_a]

-    confidence_values_b = get_confidence_values_for_results(results_b['images'])
-    confidence_values_b = sorted(confidence_values_b)
+    confidence_values_b,_ = _get_confidence_values_for_results(results_b['images'],
+                                                               category_ids_to_consider_b,
+                                                               threshold=None)
+    if verbose:
+        print('For result set B, considering {} of {} images'.format(
+            len(confidence_values_b),len(results_b['images'])))
+    confidence_values_b = sorted(confidence_values_b)

     target_detection_fraction = len(confidence_values_a_above_threshold) / len(confidence_values_a)

     detection_cutoff_index = round((1.0-target_detection_fraction) * len(confidence_values_b))
     threshold_b = confidence_values_b[detection_cutoff_index]

+    if verbose:
+        print('{} confidence values above threshold (A)'.format(len(confidence_values_a_above_threshold)))
+        confidence_values_b_above_threshold = [c for c in confidence_values_b if c >= threshold_b]
+        print('{} confidence values above threshold (B)'.format(len(confidence_values_b_above_threshold)))
+
     return threshold_b

 # ...def find_equivalent_threshold(...)
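A hedged usage sketch of the new and updated helpers above; the module path is assumed to be megadetector.postprocessing.compare_batch_results, and the .json filenames are placeholders.

from megadetector.postprocessing.compare_batch_results import \
    find_image_level_detections_above_threshold, find_equivalent_threshold

# Images with at least one 'animal' detection at conf >= 0.2
images = find_image_level_detections_above_threshold('results_a.json',
                                                     threshold=0.2,
                                                     category_names='animal')
print('{} images with above-threshold animal detections'.format(len(images)))

# Threshold for results_b.json that yields the same fraction of detection-positive
# images as a 0.2 threshold does for results_a.json, considering only 'animal'
threshold_b = find_equivalent_threshold('results_a.json',
                                        'results_b.json',
                                        threshold_a=0.2,
                                        category_names=['animal'],
                                        verbose=True)
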
@@ -0,0 +1,358 @@
+"""
+
+create_crop_folder.py
+
+Given a MegaDetector .json file and a folder of images, creates a new folder
+of images representing all above-threshold crops from the original folder.
+
+"""
+
+#%% Constants and imports
+
+import os
+import json
+from tqdm import tqdm
+
+from multiprocessing.pool import Pool, ThreadPool
+from collections import defaultdict
+from functools import partial
+
+from megadetector.utils.path_utils import insert_before_extension
+from megadetector.visualization.visualization_utils import crop_image
+from megadetector.visualization.visualization_utils import exif_preserving_save
+
+
+#%% Support classes
+
+class CreateCropFolderOptions:
+    """
+    Options used to parameterize create_crop_folder().
+    """
+
+    def __init__(self):
+
+        #: Confidence threshold determining which detections get written
+        self.confidence_threshold = 0.1
+
+        #: Number of pixels to expand each crop
+        self.expansion = 0
+
+        #: JPEG quality to use for saving crops (None for default)
+        self.quality = 95
+
+        #: Whether to overwrite existing images
+        self.overwrite = True
+
+        #: Number of concurrent workers
+        self.n_workers = 8
+
+        #: Whether to use processes ('process') or threads ('thread') for parallelization
+        self.pool_type = 'thread'
+
+
+#%% Support functions
+
+def _get_crop_filename(image_fn,crop_id):
+    """
+    Generate crop filenames in a consistent way.
+    """
+    if isinstance(crop_id,int):
+        crop_id = str(crop_id).zfill(3)
+    assert isinstance(crop_id,str)
+    return insert_before_extension(image_fn,'crop_' + crop_id)
+
+
+def _generate_crops_for_single_image(crops_this_image,
+                                     input_folder,
+                                     output_folder,
+                                     options):
+    """
+    Generate all the crops required for a single image.
+    """
+    if len(crops_this_image) == 0:
+        return
+
+    image_fn_relative = crops_this_image[0]['image_fn_relative']
+    input_fn_abs = os.path.join(input_folder,image_fn_relative)
+    assert os.path.isfile(input_fn_abs)
+
+    detections_to_crop = [c['detection'] for c in crops_this_image]
+
+    cropped_images = crop_image(detections_to_crop,
+                                input_fn_abs,
+                                confidence_threshold=0,
+                                expansion=options.expansion)
+
+    assert len(cropped_images) == len(crops_this_image)
+
+    # i_crop = 0; crop_info = crops_this_image[0]
+    for i_crop,crop_info in enumerate(crops_this_image):
+
+        assert crop_info['image_fn_relative'] == image_fn_relative
+        crop_filename_relative = _get_crop_filename(image_fn_relative, crop_info['crop_id'])
+        crop_filename_abs = os.path.join(output_folder,crop_filename_relative).replace('\\','/')
+
+        if os.path.isfile(crop_filename_abs) and not options.overwrite:
+            continue
+
+        cropped_image = cropped_images[i_crop]
+        os.makedirs(os.path.dirname(crop_filename_abs),exist_ok=True)
+        exif_preserving_save(cropped_image,crop_filename_abs,quality=options.quality)
+
+    # ...for each crop
+
+
+#%% Main function
+
+def crop_results_to_image_results(image_results_file_with_crop_ids,
+                                  crop_results_file,
+                                  output_file):
+    """
+    This function is intended to be run after you have:
+
+    1. Run MegaDetector on a folder
+    2. Generated a crop folder using create_crop_folder
+    3. Run a species classifier on those crops
+
+    This function will take the crop-level results and transform them back
+    to the original images.
+
+    Args:
+        image_results_file_with_crop_ids (str): results file for the original images,
+            containing crop IDs, likely generated via create_crop_folder.
+        crop_results_file (str): results file for the crop folder
+        output_file (str): output .json file, containing crop-level classifications
+            mapped back to the image level
+    """
+
+    ##%% Validate inputs
+
+    assert os.path.isfile(image_results_file_with_crop_ids), \
+        'Could not find image-level input file {}'.format(image_results_file_with_crop_ids)
+    assert os.path.isfile(crop_results_file), \
+        'Could not find crop results file {}'.format(crop_results_file)
+    os.makedirs(os.path.dirname(output_file),exist_ok=True)
+
+
+    ##%% Read input files
+
+    with open(image_results_file_with_crop_ids,'r') as f:
+        image_results_with_crop_ids = json.load(f)
+    with open(crop_results_file,'r') as f:
+        crop_results = json.load(f)
+
+    assert crop_results['detection_categories'] == \
+        image_results_with_crop_ids['detection_categories'], \
+        'Crop results and image-level results use different detection categories'
+
+    crop_filename_to_results = {}
+
+    # im = crop_results['images'][0]
+    for im in crop_results['images']:
+        crop_filename_to_results[im['file']] = im
+
+    image_results_with_crop_ids['classification_categories'] = \
+        crop_results['classification_categories']
+
+
+    ##%% Read classifications from crop results
+
+    # im = image_results_with_crop_ids['images'][0]
+    for im in tqdm(image_results_with_crop_ids['images']):
+
+        if 'detections' not in im or im['detections'] is None:
+            continue
+
+        for det in im['detections']:
+
+            if 'classifications' in det:
+                del det['classifications']
+
+            if 'crop_id' in det:
+                crop_filename_relative = det['crop_filename_relative']
+                assert crop_filename_relative in crop_filename_to_results, \
+                    'Crop lookup error'
+                crop_results_this_detection = crop_filename_to_results[crop_filename_relative]
+                assert crop_results_this_detection['file'] == crop_filename_relative
+                assert len(crop_results_this_detection['detections']) == 1
+                assert crop_results_this_detection['detections'][0]['conf'] == det['conf']
+                assert crop_results_this_detection['detections'][0]['category'] == det['category']
+                assert crop_results_this_detection['detections'][0]['bbox'] == [0,0,1,1]
+                det['classifications'] = crop_results_this_detection['detections'][0]['classifications']
+
+        # ...for each detection
+
+    # ...for each image
+
+
+    ##%% Write output file
+
+    with open(output_file,'w') as f:
+        json.dump(image_results_with_crop_ids,f,indent=1)
+
+# ...def crop_results_to_image_results(...)
+
+
+def create_crop_folder(input_file,
+                       input_folder,
+                       output_folder,
+                       output_file=None,
+                       crops_output_file=None,
+                       options=None):
+    """
+    Given a MegaDetector .json file and a folder of images, creates a new folder
+    of images representing all above-threshold crops from the original folder.
+
+    Optionally writes a new .json file that attaches unique IDs to each detection.
+
+    Args:
+        input_file (str): MD-formatted .json file to process
+        input_folder (str): Input image folder
+        output_folder (str): Output (cropped) image folder
+        output_file (str, optional): new .json file that attaches unique IDs to each detection.
+        crops_output_file (str, optional): new .json file that includes whole-image detections
+            for each of the crops, using confidence values from the original results
+        options (CreateCropFolderOptions, optional): crop parameters
+    """
+
+    ## Validate options, prepare output folders
+
+    if options is None:
+        options = CreateCropFolderOptions()
+
+    assert os.path.isfile(input_file), 'Input file {} not found'.format(input_file)
+    assert os.path.isdir(input_folder), 'Input folder {} not found'.format(input_folder)
+    os.makedirs(output_folder,exist_ok=True)
+    os.makedirs(os.path.dirname(output_file),exist_ok=True)
+
+
+    ##%% Read input
+
+    with open(input_file,'r') as f:
+        detection_results = json.load(f)
+
+
+    ##%% Make a list of crops that we need to create
+
+    # Maps input images to list of dicts, with keys 'crop_id','detection'
+    image_fn_relative_to_crops = defaultdict(list)
+    n_crops = 0
+
+    # im = detection_results['images'][0]
+    for i_image,im in enumerate(detection_results['images']):
+
+        if 'detections' not in im or im['detections'] is None or len(im['detections']) == 0:
+            continue
+
+        detections_this_image = im['detections']
+
+        image_fn_relative = im['file']
+
+        for i_detection,det in enumerate(detections_this_image):
+
+            if det['conf'] > options.confidence_threshold:
+
+                det['crop_id'] = i_detection
+
+                crop_info = {'image_fn_relative':image_fn_relative,
+                             'crop_id':i_detection,
+                             'detection':det}
+
+                crop_filename_relative = _get_crop_filename(image_fn_relative,
+                                                            crop_info['crop_id'])
+                det['crop_filename_relative'] = crop_filename_relative
+
+                image_fn_relative_to_crops[image_fn_relative].append(crop_info)
+                n_crops += 1
+
+    # ...for each input image
+
+    print('Prepared a list of {} crops from {} of {} input images'.format(
+        n_crops,len(image_fn_relative_to_crops),len(detection_results['images'])))
+
+
+    ##%% Generate crops
+
+    if options.n_workers <= 1:
+
+        # image_fn_relative = next(iter(image_fn_relative_to_crops))
+        for image_fn_relative in tqdm(image_fn_relative_to_crops.keys()):
+            crops_this_image = image_fn_relative_to_crops[image_fn_relative]
+            _generate_crops_for_single_image(crops_this_image=crops_this_image,
+                                             input_folder=input_folder,
+                                             output_folder=output_folder,
+                                             options=options)
+
+    else:
+
+        print('Creating a {} pool with {} workers'.format(options.pool_type,options.n_workers))
+
+        if options.pool_type == 'thread':
+            pool = ThreadPool(options.n_workers)
+        else:
+            assert options.pool_type == 'process'
+            pool = Pool(options.n_workers)
+
+        # Each element in this list is the list of crops for a single image
+        crop_lists = list(image_fn_relative_to_crops.values())
+
+        with tqdm(total=len(image_fn_relative_to_crops)) as pbar:
+            for i,_ in enumerate(pool.imap_unordered(partial(
+                    _generate_crops_for_single_image,
+                    input_folder=input_folder,
+                    output_folder=output_folder,
+                    options=options),
+                    crop_lists)):
+                pbar.update()
+
+    # ...if we're using parallel processing
+
+    ##%% Write output file
+
+    if output_file is not None:
+        with open(output_file,'w') as f:
+            json.dump(detection_results,f,indent=1)
+
+    if crops_output_file is not None:
+
+        original_images = detection_results['images']
+
+        detection_results_cropped = detection_results
+        detection_results_cropped['images'] = []
+
+        # im = original_images[0]
+        for im in original_images:
+
+            if 'detections' not in im or im['detections'] is None or len(im['detections']) == 0:
+                continue
+
+            detections_this_image = im['detections']
+            image_fn_relative = im['file']
+
+            for i_detection,det in enumerate(detections_this_image):
+
+                if 'crop_id' in det:
+                    im_out = {}
+                    im_out['file'] = det['crop_filename_relative']
+                    det_out = {}
+                    det_out['category'] = det['category']
+                    det_out['conf'] = det['conf']
+                    det_out['bbox'] = [0, 0, 1, 1]
+                    im_out['detections'] = [det_out]
+                    detection_results_cropped['images'].append(im_out)
+
+                # ...if we need to include this crop in the new .json file
+
+            # ...for each crop
+
+        # ...for each original image
+
+        with open(crops_output_file,'w') as f:
+            json.dump(detection_results_cropped,f,indent=1)
+
+# ...def create_crop_folder()
+
+
+#%% Command-line driver
+
+# TODO
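Since the command-line driver is still a TODO, here is a hedged sketch of the workflow the module docstrings describe. The module path megadetector.postprocessing.create_crop_folder and all file/folder paths are assumptions, and the classifier step in the middle is outside this module.

from megadetector.postprocessing.create_crop_folder import \
    CreateCropFolderOptions, create_crop_folder, crop_results_to_image_results

options = CreateCropFolderOptions()
options.confidence_threshold = 0.15

# 1. Crop all above-threshold detections out of the original images
create_crop_folder(input_file='/data/out/md_results.json',
                   input_folder='/data/camera-trap-images',
                   output_folder='/data/camera-trap-crops',
                   output_file='/data/out/md_results_with_crop_ids.json',
                   crops_output_file='/data/out/crop_level_detections.json',
                   options=options)

# 2. Run a species classifier over /data/camera-trap-crops, producing MD-formatted
#    results (with 'classifications' fields) in /data/out/crop_level_classifications.json

# 3. Map the crop-level classifications back onto the original image-level results
crop_results_to_image_results(
    image_results_file_with_crop_ids='/data/out/md_results_with_crop_ids.json',
    crop_results_file='/data/out/crop_level_classifications.json',
    output_file='/data/out/image_level_classifications.json')
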
@@ -1722,7 +1722,7 @@ def process_batch_results(options):
     <p>Model version: {}</p>
     </div>

-    <h3>Sample images</h3>\n
+    <h3>Detection results</h3>\n
     <div class="contentdiv">\n""".format(
     style_header, job_name_string, image_count, len(detections_df), confidence_threshold_string,
     almost_detection_string, model_version_string)
@@ -1784,7 +1784,7 @@ def process_batch_results(options):
 
     if has_classification_info:

-        index_page += '<h3>Images of detected classes</h3>'
+        index_page += '<h3>Species classification results</h3>'
         index_page += '<p>The same image might appear under multiple classes ' + \
             'if multiple species were detected.</p>\n'
         index_page += '<p>Classifications with confidence less than {:.1%} confidence are considered "unreliable".</p>\n'.format(