megadetector 5.0.27__py3-none-any.whl → 5.0.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic; see the package registry's advisory page for more details.

Files changed (176)
  1. megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
  2. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
  3. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  7. megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
  8. megadetector/classification/aggregate_classifier_probs.py +3 -3
  9. megadetector/classification/analyze_failed_images.py +5 -5
  10. megadetector/classification/cache_batchapi_outputs.py +5 -5
  11. megadetector/classification/create_classification_dataset.py +11 -12
  12. megadetector/classification/crop_detections.py +10 -10
  13. megadetector/classification/csv_to_json.py +8 -8
  14. megadetector/classification/detect_and_crop.py +13 -15
  15. megadetector/classification/evaluate_model.py +7 -7
  16. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  17. megadetector/classification/json_to_azcopy_list.py +1 -1
  18. megadetector/classification/json_validator.py +29 -32
  19. megadetector/classification/map_classification_categories.py +9 -9
  20. megadetector/classification/merge_classification_detection_output.py +12 -9
  21. megadetector/classification/prepare_classification_script.py +19 -19
  22. megadetector/classification/prepare_classification_script_mc.py +23 -23
  23. megadetector/classification/run_classifier.py +4 -4
  24. megadetector/classification/save_mislabeled.py +6 -6
  25. megadetector/classification/train_classifier.py +1 -1
  26. megadetector/classification/train_classifier_tf.py +9 -9
  27. megadetector/classification/train_utils.py +10 -10
  28. megadetector/data_management/annotations/annotation_constants.py +1 -1
  29. megadetector/data_management/camtrap_dp_to_coco.py +45 -45
  30. megadetector/data_management/cct_json_utils.py +101 -101
  31. megadetector/data_management/cct_to_md.py +49 -49
  32. megadetector/data_management/cct_to_wi.py +33 -33
  33. megadetector/data_management/coco_to_labelme.py +75 -75
  34. megadetector/data_management/coco_to_yolo.py +189 -189
  35. megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
  36. megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
  37. megadetector/data_management/databases/integrity_check_json_db.py +202 -188
  38. megadetector/data_management/databases/subset_json_db.py +33 -33
  39. megadetector/data_management/generate_crops_from_cct.py +38 -38
  40. megadetector/data_management/get_image_sizes.py +54 -49
  41. megadetector/data_management/labelme_to_coco.py +130 -124
  42. megadetector/data_management/labelme_to_yolo.py +78 -72
  43. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  44. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  45. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  46. megadetector/data_management/lila/download_lila_subset.py +21 -24
  47. megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
  48. megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
  49. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  50. megadetector/data_management/lila/lila_common.py +70 -70
  51. megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
  52. megadetector/data_management/mewc_to_md.py +339 -340
  53. megadetector/data_management/ocr_tools.py +258 -252
  54. megadetector/data_management/read_exif.py +232 -223
  55. megadetector/data_management/remap_coco_categories.py +26 -26
  56. megadetector/data_management/remove_exif.py +31 -20
  57. megadetector/data_management/rename_images.py +187 -187
  58. megadetector/data_management/resize_coco_dataset.py +41 -41
  59. megadetector/data_management/speciesnet_to_md.py +41 -41
  60. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  61. megadetector/data_management/yolo_output_to_md_output.py +117 -120
  62. megadetector/data_management/yolo_to_coco.py +195 -188
  63. megadetector/detection/change_detection.py +831 -0
  64. megadetector/detection/process_video.py +341 -338
  65. megadetector/detection/pytorch_detector.py +308 -266
  66. megadetector/detection/run_detector.py +186 -166
  67. megadetector/detection/run_detector_batch.py +366 -364
  68. megadetector/detection/run_inference_with_yolov5_val.py +328 -325
  69. megadetector/detection/run_tiled_inference.py +312 -253
  70. megadetector/detection/tf_detector.py +24 -24
  71. megadetector/detection/video_utils.py +291 -283
  72. megadetector/postprocessing/add_max_conf.py +15 -11
  73. megadetector/postprocessing/categorize_detections_by_size.py +44 -44
  74. megadetector/postprocessing/classification_postprocessing.py +808 -311
  75. megadetector/postprocessing/combine_batch_outputs.py +20 -21
  76. megadetector/postprocessing/compare_batch_results.py +528 -517
  77. megadetector/postprocessing/convert_output_format.py +97 -97
  78. megadetector/postprocessing/create_crop_folder.py +220 -147
  79. megadetector/postprocessing/detector_calibration.py +173 -168
  80. megadetector/postprocessing/generate_csv_report.py +508 -0
  81. megadetector/postprocessing/load_api_results.py +25 -22
  82. megadetector/postprocessing/md_to_coco.py +129 -98
  83. megadetector/postprocessing/md_to_labelme.py +89 -83
  84. megadetector/postprocessing/md_to_wi.py +40 -40
  85. megadetector/postprocessing/merge_detections.py +87 -114
  86. megadetector/postprocessing/postprocess_batch_results.py +319 -302
  87. megadetector/postprocessing/remap_detection_categories.py +36 -36
  88. megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
  89. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  90. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  91. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
  92. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  93. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  94. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  95. megadetector/postprocessing/validate_batch_results.py +70 -70
  96. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  97. megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
  98. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
  99. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
  100. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  101. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  102. megadetector/taxonomy_mapping/species_lookup.py +33 -33
  103. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  104. megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
  105. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  106. megadetector/utils/azure_utils.py +22 -22
  107. megadetector/utils/ct_utils.py +1019 -200
  108. megadetector/utils/directory_listing.py +21 -77
  109. megadetector/utils/gpu_test.py +22 -22
  110. megadetector/utils/md_tests.py +541 -518
  111. megadetector/utils/path_utils.py +1511 -406
  112. megadetector/utils/process_utils.py +41 -41
  113. megadetector/utils/sas_blob_utils.py +53 -49
  114. megadetector/utils/split_locations_into_train_val.py +73 -60
  115. megadetector/utils/string_utils.py +147 -26
  116. megadetector/utils/url_utils.py +463 -173
  117. megadetector/utils/wi_utils.py +2629 -2868
  118. megadetector/utils/write_html_image_list.py +137 -137
  119. megadetector/visualization/plot_utils.py +21 -21
  120. megadetector/visualization/render_images_with_thumbnails.py +37 -73
  121. megadetector/visualization/visualization_utils.py +424 -404
  122. megadetector/visualization/visualize_db.py +197 -190
  123. megadetector/visualization/visualize_detector_output.py +126 -98
  124. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
  125. megadetector-5.0.29.dist-info/RECORD +163 -0
  126. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
  127. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  128. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  129. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  130. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  131. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  132. megadetector/data_management/importers/awc_to_json.py +0 -191
  133. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  134. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  135. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  136. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  137. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  138. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  139. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  140. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  141. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  142. megadetector/data_management/importers/ena24_to_json.py +0 -276
  143. megadetector/data_management/importers/filenames_to_json.py +0 -386
  144. megadetector/data_management/importers/helena_to_cct.py +0 -283
  145. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  146. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  147. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  148. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  149. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  150. megadetector/data_management/importers/missouri_to_json.py +0 -490
  151. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  152. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  153. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  154. megadetector/data_management/importers/pc_to_json.py +0 -365
  155. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  156. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  157. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  158. megadetector/data_management/importers/rspb_to_json.py +0 -356
  159. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  160. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  161. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  162. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  163. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  164. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  165. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  166. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  167. megadetector/data_management/importers/ubc_to_json.py +0 -399
  168. megadetector/data_management/importers/umn_to_json.py +0 -507
  169. megadetector/data_management/importers/wellington_to_json.py +0 -263
  170. megadetector/data_management/importers/wi_to_json.py +0 -442
  171. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  172. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  173. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  174. megadetector-5.0.27.dist-info/RECORD +0 -208
  175. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
  176. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
@@ -12,7 +12,7 @@ Given a .json or .csv file containing MD results, do one or more of the followin
12
12
  truth)
13
13
 
14
14
  Ground truth, if available, must be in COCO Camera Traps format:
15
-
15
+
16
16
  https://github.com/agentmorris/MegaDetector/blob/main/megadetector/data_management/README.md#coco-camera-traps-format
17
17
 
18
18
  """
@@ -30,7 +30,6 @@ import time
30
30
  import uuid
31
31
  import warnings
32
32
  import random
33
- import json
34
33
 
35
34
  from enum import IntEnum
36
35
  from multiprocessing.pool import ThreadPool
@@ -48,6 +47,7 @@ from tqdm import tqdm
48
47
  from megadetector.visualization import visualization_utils as vis_utils
49
48
  from megadetector.visualization import plot_utils
50
49
  from megadetector.utils.write_html_image_list import write_html_image_list
50
+ from megadetector.utils.wi_utils import load_md_or_speciesnet_file
51
51
  from megadetector.utils import path_utils
52
52
  from megadetector.utils.ct_utils import args_to_object
53
53
  from megadetector.utils.ct_utils import sets_overlap
@@ -75,38 +75,40 @@ class PostProcessingOptions:
75
75
  """
76
76
  Options used to parameterize process_batch_results().
77
77
  """
78
-
78
+
79
79
  def __init__(self):
80
-
80
+
81
81
  ### Required inputs
82
-
82
+
83
83
  #: MD results .json file to process
84
84
  self.md_results_file = ''
85
-
85
+
86
86
  #: Folder to which we should write HTML output
87
87
  self.output_dir = ''
88
-
88
+
89
89
  ### Options
90
-
90
+
91
91
  #: Folder where images live (filenames in [md_results_file] should be relative to this folder)
92
- self.image_base_dir = '.'
93
-
92
+ #:
93
+ #: Can be '' if [md_results_file] uses absolute paths.
94
+ self.image_base_dir = ''
95
+
94
96
  ## These apply only when we're doing ground-truth comparisons
95
-
97
+
96
98
  #: Optional .json file containing ground truth information
97
99
  self.ground_truth_json_file = ''
98
-
99
- #: List of classes we'll treat as negative (defaults to "empty", typically includes
100
+
101
+ #: List of classes we'll treat as negative (defaults to "empty", typically includes
100
102
  #: classes like "blank", "misfire", etc.).
101
103
  #:
102
104
  #: Include the token "#NO_LABELS#" to indicate that an image with no annotations
103
105
  #: should be considered empty.
104
106
  self.negative_classes = DEFAULT_NEGATIVE_CLASSES
105
-
106
- #: List of classes we'll treat as neither positive nor negative (defaults to
107
+
108
+ #: List of classes we'll treat as neither positive nor negative (defaults to
107
109
  #: "unknown", typically includes classes like "unidentifiable").
108
110
  self.unlabeled_classes = DEFAULT_UNKNOWN_CLASSES
109
-
111
+
110
112
  #: List of output sets that we should count, but not render images for.
111
113
  #:
112
114
  #: Typically used to preview sets with lots of empties, where you don't want to
@@ -115,120 +117,122 @@ class PostProcessingOptions:
115
117
  #: detections, non_detections
116
118
  #: detections_animal, detections_person, detections_vehicle
117
119
  self.rendering_bypass_sets = []
118
-
120
+
119
121
  #: If this is None, choose a confidence threshold based on the detector version.
120
122
  #:
121
- #: This can either be a float or a dictionary mapping category names (not IDs) to
122
- #: thresholds. The category "default" can be used to specify thresholds for
123
- #: other categories. Currently the use of a dict here is not supported when
123
+ #: This can either be a float or a dictionary mapping category names (not IDs) to
124
+ #: thresholds. The category "default" can be used to specify thresholds for
125
+ #: other categories. Currently the use of a dict here is not supported when
124
126
  #: ground truth is supplied.
125
127
  self.confidence_threshold = None
126
-
128
+
127
129
  #: Confidence threshold to apply to classification (not detection) results
128
130
  #:
129
131
  #: Only a float is supported here (unlike the "confidence_threshold" parameter, which
130
- #: can be a dict).
132
+ #: can be a dict).
131
133
  self.classification_confidence_threshold = 0.5
132
-
134
+
133
135
  #: Used for summary statistics only
134
136
  self.target_recall = 0.9
135
-
137
+
136
138
  #: Number of images to sample, -1 for "all images"
137
139
  self.num_images_to_sample = 500
138
-
140
+
139
141
  #: Random seed for sampling, or None
140
142
  self.sample_seed = 0 # None
141
-
143
+
142
144
  #: Image width for images in the HTML output
143
145
  self.viz_target_width = 800
144
-
146
+
145
147
  #: Line width (in pixels) for rendering detections
146
148
  self.line_thickness = 4
147
-
149
+
148
150
  #: Box expansion (in pixels) for rendering detections
149
151
  self.box_expansion = 0
150
-
152
+
151
153
  #: Job name to include in big letters in the output HTML
152
154
  self.job_name_string = None
153
-
155
+
154
156
  #: Model version string to include in the output HTML
155
157
  self.model_version_string = None
156
-
158
+
157
159
  #: Sort order for the output, should be one of "filename", "confidence", or "random"
158
160
  self.html_sort_order = 'filename'
159
-
161
+
160
162
  #: If True, images in the output HTML will be links back to the original images
161
163
  self.link_images_to_originals = True
162
-
164
+
163
165
  #: Optionally separate detections into categories (animal/vehicle/human)
164
- #:
166
+ #:
165
167
  #: Currently only supported when ground truth is unavailable
166
168
  self.separate_detections_by_category = True
167
-
169
+
168
170
  #: Optionally replace one or more strings in filenames with other strings;
169
171
  #: useful for taking a set of results generated for one folder structure
170
172
  #: and applying them to a slightly different folder structure.
171
173
  self.api_output_filename_replacements = {}
172
-
174
+
173
175
  #: Optionally replace one or more strings in filenames with other strings;
174
176
  #: useful for taking a set of results generated for one folder structure
175
177
  #: and applying them to a slightly different folder structure.
176
178
  self.ground_truth_filename_replacements = {}
177
-
179
+
178
180
  #: Allow bypassing API output loading when operating on previously-loaded
179
181
  #: results. If present, this is a Pandas DataFrame. Almost never useful.
180
182
  self.api_detection_results = None
181
-
183
+
182
184
  #: Allow bypassing API output loading when operating on previously-loaded
183
185
  #: results. If present, this is a str --> obj dict. Almost never useful.
184
186
  self.api_other_fields = None
185
-
187
+
186
188
  #: Should we also split out a separate report about the detections that were
187
189
  #: just below our main confidence threshold?
188
190
  #:
189
191
  #: Currently only supported when ground truth is unavailable.
190
192
  self.include_almost_detections = False
191
-
193
+
192
194
  #: Only a float is supported here (unlike the "confidence_threshold" parameter, which
193
195
  #: can be a dict).
194
196
  self.almost_detection_confidence_threshold = None
195
-
196
- #: Enable/disable rendering parallelization
197
+
198
+ #: Enable/disable rendering parallelization
197
199
  self.parallelize_rendering = False
198
-
200
+
199
201
  #: Number of threads/processes to use for rendering parallelization
200
202
  self.parallelize_rendering_n_cores = 25
201
-
203
+
202
204
  #: Whether to use threads (True) or processes (False) for rendering parallelization
203
205
  self.parallelize_rendering_with_threads = True
204
-
206
+
205
207
  #: When classification results are present, should be sort alphabetically by class name (False)
206
208
  #: or in descending order by frequency (True)?
207
209
  self.sort_classification_results_by_count = False
208
-
210
+
209
211
  #: Should we split individual pages up into smaller pages if there are more than
210
212
  #: N images?
211
213
  self.max_figures_per_html_file = None
212
-
214
+
213
215
  #: Footer text for the index page
214
- # self.footer_text = '<br/><p style="font-size:80%;">Preview page created with the <a href="{}">MegaDetector Python package</a>.</p>'.\
216
+ # self.footer_text = \
217
+ # '<br/><p style="font-size:80%;">Preview page created with the ' + \
218
+ # <a href="{}">MegaDetector Python package</a>.</p>'.\
215
219
  # format('https://megadetector.readthedocs.io')
216
220
  self.footer_text = ''
217
221
 
218
222
  #: Character encoding to use when writing the index HTML html
219
223
  self.output_html_encoding = None
220
-
224
+
221
225
  #: Additional image fields to display in image headers. If this is a list,
222
226
  #: we'll include those fields; if this is a dict, we'll use that dict to choose
223
227
  #: alternative display names for each field.
224
228
  self.additional_image_fields_to_display = None
225
-
226
- #: If classification results are present, should we include a summary of
229
+
230
+ #: If classification results are present, should we include a summary of
227
231
  #: classification categories?
228
232
  self.include_classification_category_report = True
229
-
233
+
230
234
  # ...__init__()
231
-
235
+
232
236
  # ...PostProcessingOptions
233
237
 
234
238
 
@@ -236,15 +240,15 @@ class PostProcessingResults:
236
240
  """
237
241
  Return format from process_batch_results
238
242
  """
239
-
243
+
240
244
  def __init__(self):
241
-
245
+
242
246
  #: HTML file to which preview information was written
243
247
  self.output_html_file = ''
244
-
248
+
245
249
  #: Pandas Dataframe containing detection results
246
250
  self.api_detection_results = None
247
-
251
+
248
252
  #: str --> obj dictionary containing other information loaded from the results file
249
253
  self.api_other_fields = None
250
254
 
@@ -255,10 +259,10 @@ class DetectionStatus(IntEnum):
255
259
  """
256
260
  Flags used to mark images as positive or negative for P/R analysis
257
261
  (according to ground truth and/or detector output)
258
-
262
+
259
263
  :meta private:
260
264
  """
261
-
265
+
262
266
  DS_NEGATIVE = 0
263
267
  DS_POSITIVE = 1
264
268
 
@@ -291,7 +295,7 @@ def _mark_detection_status(indexed_db,
291
295
 
292
296
  returns (n_negative, n_positive, n_unknown, n_ambiguous)
293
297
  """
294
-
298
+
295
299
  negative_classes = set(negative_classes)
296
300
  unknown_classes = set(unknown_classes)
297
301
 
@@ -320,7 +324,7 @@ def _mark_detection_status(indexed_db,
320
324
 
321
325
  # If there are no image annotations...
322
326
  if len(categories) == 0:
323
-
327
+
324
328
  if '#NO_LABELS#' in negative_classes:
325
329
  n_negative += 1
326
330
  im['_detection_status'] = DetectionStatus.DS_NEGATIVE
@@ -376,10 +380,10 @@ def is_sas_url(s) -> bool:
376
380
  """
377
381
  Placeholder for a more robust way to verify that a link is a SAS URL.
378
382
  99.999% of the time this will suffice for what we're using it for right now.
379
-
383
+
380
384
  :meta private:
381
385
  """
382
-
386
+
383
387
  return (s.startswith(('http://', 'https://')) and ('core.windows.net' in s)
384
388
  and ('?' in s))
385
389
 
@@ -388,10 +392,10 @@ def relative_sas_url(folder_url, relative_path):
388
392
  """
389
393
  Given a container-level or folder-level SAS URL, create a SAS URL to the
390
394
  specified relative path.
391
-
395
+
392
396
  :meta private:
393
397
  """
394
-
398
+
395
399
  relative_path = relative_path.replace('%','%25')
396
400
  relative_path = relative_path.replace('#','%23')
397
401
  relative_path = relative_path.replace(' ','%20')
@@ -419,8 +423,8 @@ def _render_bounding_boxes(
419
423
  options=None):
420
424
  """
421
425
  Renders detection bounding boxes on a single image.
422
-
423
- This is an internal function; if you want tools for rendering boxes on images, see
426
+
427
+ This is an internal function; if you want tools for rendering boxes on images, see
424
428
  visualization.visualization_utils.
425
429
 
426
430
  The source image is:
@@ -429,18 +433,18 @@ def _render_bounding_boxes(
429
433
 
430
434
  The target image is, for example:
431
435
 
432
- [options.output_dir] /
433
- ['detections' or 'non_detections'] /
436
+ [options.output_dir] /
437
+ ['detections' or 'non_detections'] /
434
438
  [filename with slashes turned into tildes]
435
439
 
436
440
  "res" is a result type, e.g. "detections", "non-detections"; this determines the
437
441
  output folder for the rendered image.
438
-
442
+
439
443
  Only very preliminary support is provided for ground truth box rendering.
440
-
444
+
441
445
  Returns the html info struct for this image in the format that's used for
442
446
  write_html_image_list.
443
-
447
+
444
448
  :meta private:
445
449
  """
446
450
 
@@ -448,7 +452,7 @@ def _render_bounding_boxes(
448
452
  options = PostProcessingOptions()
449
453
 
450
454
  image_full_path = None
451
-
455
+
452
456
  if res in options.rendering_bypass_sets:
453
457
 
454
458
  sample_name = res + '_' + path_utils.flatten_path(image_relative_path)
@@ -464,26 +468,26 @@ def _render_bounding_boxes(
464
468
  # to just try/except on the image open.
465
469
  try:
466
470
  image = vis_utils.open_image(image_full_path)
467
- except:
468
- print('Warning: could not open image file {}'.format(image_full_path))
471
+ except Exception as e:
472
+ print('Warning: could not open image file {}: {}'.format(image_full_path,str(e)))
469
473
  image = None
470
474
  # return ''
471
-
475
+
472
476
  # Render images to a flat folder
473
477
  sample_name = res + '_' + path_utils.flatten_path(image_relative_path)
474
478
  fullpath = os.path.join(options.output_dir, res, sample_name)
475
479
 
476
480
  if image is not None:
477
-
481
+
478
482
  original_size = image.size
479
-
483
+
480
484
  # Resize the image if necessary
481
485
  if options.viz_target_width is not None:
482
486
  image = vis_utils.resize_image(image, options.viz_target_width)
483
-
487
+
484
488
  # Render ground truth boxes if necessary
485
489
  if ground_truth_boxes is not None and len(ground_truth_boxes) > 0:
486
-
490
+
487
491
  # Create class labels like "gt_1" or "gt_27"
488
492
  gt_classes = [0] * len(ground_truth_boxes)
489
493
  label_map = {0:'ground truth'}
@@ -492,8 +496,8 @@ def _render_bounding_boxes(
492
496
  vis_utils.render_db_bounding_boxes(ground_truth_boxes, gt_classes, image,
493
497
  original_size=original_size,label_map=label_map,
494
498
  thickness=4,expansion=4)
495
-
496
- # Preprare per-category confidence thresholds
499
+
500
+ # Prepare per-category confidence thresholds
497
501
  if isinstance(options.confidence_threshold,float):
498
502
  rendering_confidence_threshold = options.confidence_threshold
499
503
  else:
@@ -504,7 +508,7 @@ def _render_bounding_boxes(
504
508
  for category_id in category_ids:
505
509
  rendering_confidence_threshold[category_id] = \
506
510
  _get_threshold_for_category_id(category_id, options, detection_categories)
507
-
511
+
508
512
  # Render detection boxes
509
513
  vis_utils.render_detection_bounding_boxes(
510
514
  detections, image,
@@ -514,7 +518,7 @@ def _render_bounding_boxes(
514
518
  classification_confidence_threshold=options.classification_confidence_threshold,
515
519
  thickness=options.line_thickness,
516
520
  expansion=options.box_expansion)
517
-
521
+
518
522
  try:
519
523
  image.save(fullpath)
520
524
  except OSError as e:
@@ -536,18 +540,18 @@ def _render_bounding_boxes(
536
540
  'textStyle':\
537
541
  'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
538
542
  }
539
-
543
+
540
544
  # Optionally add links back to the original images
541
545
  if options.link_images_to_originals and (image_full_path is not None):
542
-
546
+
543
547
  # Handling special characters in links has been pushed down into
544
548
  # write_html_image_list
545
549
  #
546
550
  # link_target = image_full_path.replace('\\','/')
547
551
  # link_target = urllib.parse.quote(link_target)
548
552
  link_target = image_full_path
549
- info['linkTarget'] = link_target
550
-
553
+ info['linkTarget'] = link_target
554
+
551
555
  return info
552
556
 
553
557
  # ..._render_bounding_boxes
@@ -558,12 +562,12 @@ def _prepare_html_subpages(images_html, output_dir, options=None):
558
562
  Write out a series of html image lists, e.g. the "detections" or "non-detections"
559
563
  pages.
560
564
 
561
- image_html is a dictionary mapping an html page name (e.g. "detections_animal") to
565
+ image_html is a dictionary mapping an html page name (e.g. "detections_animal") to
562
566
  a list of image structs friendly to write_html_image_list.
563
-
567
+
564
568
  Returns a dictionary mapping category names to image counts.
565
569
  """
566
-
570
+
567
571
  if options is None:
568
572
  options = PostProcessingOptions()
569
573
 
@@ -579,19 +583,20 @@ def _prepare_html_subpages(images_html, output_dir, options=None):
579
583
  sorted_array = sorted(array, key=lambda x: x['filename'])
580
584
  images_html_sorted[res] = sorted_array
581
585
  images_html = images_html_sorted
582
-
586
+
583
587
  # Optionally sort by confidence before writing to html
584
588
  elif options.html_sort_order == 'confidence':
585
589
  images_html_sorted = {}
586
590
  for res, array in images_html.items():
587
-
591
+
588
592
  if not all(['max_conf' in d for d in array]):
589
- print("Warning: some elements in the {} page don't have confidence values, can't sort by confidence".format(res))
593
+ print(f"Warning: some elements in the {res} page don't have confidence " + \
594
+ "values, can't sort by confidence")
590
595
  else:
591
596
  sorted_array = sorted(array, key=lambda x: x['max_conf'], reverse=True)
592
597
  images_html_sorted[res] = sorted_array
593
598
  images_html = images_html_sorted
594
-
599
+
595
600
  else:
596
601
  assert options.html_sort_order == 'random',\
597
602
  'Unrecognized sort order {}'.format(options.html_sort_order)
@@ -600,15 +605,15 @@ def _prepare_html_subpages(images_html, output_dir, options=None):
600
605
  sorted_array = random.sample(array,len(array))
601
606
  images_html_sorted[res] = sorted_array
602
607
  images_html = images_html_sorted
603
-
608
+
604
609
  # Write the individual HTML files
605
610
  for res, array in images_html.items():
606
-
607
- html_image_list_options = {}
611
+
612
+ html_image_list_options = {}
608
613
  html_image_list_options['maxFiguresPerHtmlFile'] = options.max_figures_per_html_file
609
614
  html_image_list_options['headerHtml'] = '<h1>{}</h1>'.format(res.upper())
610
615
  html_image_list_options['pageTitle'] = '{}'.format(res.lower())
611
-
616
+
612
617
  # Don't write empty pages
613
618
  if len(array) == 0:
614
619
  continue
@@ -627,49 +632,49 @@ def _get_threshold_for_category_name(category_name,options):
627
632
  """
628
633
  Determines the confidence threshold we should use for a specific category name.
629
634
  """
630
-
635
+
631
636
  if isinstance(options.confidence_threshold,float):
632
637
  return options.confidence_threshold
633
638
  else:
634
639
  assert isinstance(options.confidence_threshold,dict), \
635
640
  'confidence_threshold must either be a float or a dict'
636
-
641
+
637
642
  if category_name in options.confidence_threshold:
638
-
643
+
639
644
  return options.confidence_threshold[category_name]
640
-
645
+
641
646
  else:
642
647
  assert 'default' in options.confidence_threshold, \
643
648
  'category {} not in confidence_threshold dict, and no default supplied'.format(
644
649
  category_name)
645
650
  return options.confidence_threshold['default']
646
651
 
647
-
652
+
648
653
  def _get_threshold_for_category_id(category_id,options,detection_categories):
649
654
  """
650
655
  Determines the confidence threshold we should use for a specific category ID.
651
-
656
+
652
657
  [detection_categories] is a dict mapping category IDs to names.
653
658
  """
654
-
655
- if isinstance(options.confidence_threshold,float):
659
+
660
+ if isinstance(options.confidence_threshold,float):
656
661
  return options.confidence_threshold
657
-
662
+
658
663
  assert category_id in detection_categories, \
659
664
  'Invalid category ID {}'.format(category_id)
660
-
665
+
661
666
  category_name = detection_categories[category_id]
662
-
667
+
663
668
  return _get_threshold_for_category_name(category_name,options)
664
-
665
-
669
+
670
+
666
671
  def _get_positive_categories(detections,options,detection_categories):
667
672
  """
668
673
  Gets a sorted list of unique categories (as string IDs) above the threshold for this image
669
-
674
+
670
675
  [detection_categories] is a dict mapping category IDs to names.
671
676
  """
672
-
677
+
673
678
  positive_categories = set()
674
679
  for d in detections:
675
680
  threshold = _get_threshold_for_category_id(d['category'], options, detection_categories)
@@ -682,8 +687,8 @@ def _has_positive_detection(detections,options,detection_categories):
682
687
  """
683
688
  Determines whether any positive detections are present in the detection list
684
689
  [detections].
685
- """
686
-
690
+ """
691
+
687
692
  found_positive_detection = False
688
693
  for d in detections:
689
694
  threshold = _get_threshold_for_category_id(d['category'], options, detection_categories)
@@ -691,51 +696,51 @@ def _has_positive_detection(detections,options,detection_categories):
691
696
  found_positive_detection = True
692
697
  break
693
698
  return found_positive_detection
694
-
699
+
695
700
 
696
701
  def _render_image_no_gt(file_info,
697
702
  detection_categories_to_results_name,
698
703
  detection_categories,
699
704
  classification_categories,
700
705
  options):
701
- """
706
+ r"""
702
707
  Renders an image (with no ground truth information)
703
-
704
- Returns a list of rendering structs, where the first item is a category (e.g. "detections_animal"),
708
+
709
+ Returns a list of rendering structs, where the first item is a category (e.g. "detections_animal"),
705
710
  and the second is a dict of information needed for rendering. E.g.:
706
-
707
- [['detections_animal',
711
+
712
+ [['detections_animal',
708
713
  {
709
- 'filename': 'detections_animal/detections_animal_blah~01060415.JPG',
710
- 'title': '<b>Result type</b>: detections_animal,
714
+ 'filename': 'detections_animal/detections_animal_blah~01060415.JPG',
715
+ 'title': '<b>Result type</b>: detections_animal,
711
716
  <b>Image</b>: blah\\01060415.JPG,
712
717
  <b>Max conf</b>: 0.897',
713
718
  'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5',
714
719
  'linkTarget': 'full_path_to_%5C01060415.JPG'
715
720
  }]]
716
-
721
+
717
722
  When no classification data is present, this list will always be length-1. When
718
723
  classification data is present, an image may appear in multiple categories.
719
-
724
+
720
725
  Populates the 'max_conf' field of the first element of the list.
721
-
726
+
722
727
  Returns None if there are any errors.
723
728
  """
724
-
729
+
725
730
  image_relative_path = file_info['file']
726
-
731
+
727
732
  # Useful debug snippet
728
733
  #
729
734
  # if 'filename' in image_relative_path:
730
735
  # import pdb; pdb.set_trace()
731
-
736
+
732
737
  max_conf = file_info['max_detection_conf']
733
738
  detections = file_info['detections']
734
739
 
735
740
  # Determine whether any positive detections are present (using a threshold that
736
741
  # may vary by category)
737
742
  found_positive_detection = _has_positive_detection(detections,options,detection_categories)
738
-
743
+
739
744
  detection_status = DetectionStatus.DS_UNASSIGNED
740
745
  if found_positive_detection:
741
746
  detection_status = DetectionStatus.DS_POSITIVE
@@ -769,17 +774,17 @@ def _render_image_no_gt(file_info,
769
774
 
770
775
  # Are there any bonus fields we need to include in each image header?
771
776
  if options.additional_image_fields_to_display is not None:
772
-
777
+
773
778
  for field_name in options.additional_image_fields_to_display:
774
-
779
+
775
780
  if field_name in file_info:
776
-
781
+
777
782
  field_value = file_info[field_name]
778
-
783
+
779
784
  if (field_value is None) or \
780
785
  (isinstance(field_value,float) and np.isnan(field_value)):
781
786
  continue
782
-
787
+
783
788
  # Optionally use a display name that's different from the field name
784
789
  if isinstance(options.additional_image_fields_to_display,dict):
785
790
  field_display_name = \
@@ -788,12 +793,12 @@ def _render_image_no_gt(file_info,
788
793
  field_display_name = field_name
789
794
  field_string = '<b>{}</b>: {}'.format(field_display_name,field_value)
790
795
  display_name += ', {}'.format(field_string)
791
-
796
+
792
797
  rendering_options = copy.copy(options)
793
798
  if detection_status == DetectionStatus.DS_ALMOST:
794
799
  rendering_options.confidence_threshold = \
795
800
  rendering_options.almost_detection_confidence_threshold
796
-
801
+
797
802
  rendered_image_html_info = _render_bounding_boxes(
798
803
  image_base_dir=options.image_base_dir,
799
804
  image_relative_path=image_relative_path,
@@ -812,9 +817,9 @@ def _render_image_no_gt(file_info,
812
817
  image_result = [[res, rendered_image_html_info]]
813
818
  classes_rendered_this_image = set()
814
819
  max_conf = 0
815
-
820
+
816
821
  for det in detections:
817
-
822
+
818
823
  if det['conf'] > max_conf:
819
824
  max_conf = det['conf']
820
825
 
@@ -824,7 +829,7 @@ def _render_image_no_gt(file_info,
824
829
  _get_threshold_for_category_id(det['category'], options, detection_categories)
825
830
  if det['conf'] < detection_threshold:
826
831
  continue
827
-
832
+
828
833
  if ('classifications' in det) and (len(det['classifications']) > 0) and \
829
834
  (res != 'non_detections'):
830
835
 
@@ -834,14 +839,14 @@ def _render_image_no_gt(file_info,
834
839
  top1_class_name = classification_categories[top1_class_id]
835
840
  top1_class_score = classifications[0][1]
836
841
 
837
- # If we either don't have a classification confidence threshold, or
842
+ # If we either don't have a classification confidence threshold, or
838
843
  # we've met our classification confidence threshold
839
844
  if (options.classification_confidence_threshold < 0) or \
840
845
  (top1_class_score >= options.classification_confidence_threshold):
841
- class_string = 'class_{}'.format(top1_class_name)
846
+ class_string = 'class_{}'.format(top1_class_name)
842
847
  else:
843
848
  class_string = 'class_unreliable'
844
-
849
+
845
850
  if class_string not in classes_rendered_this_image:
846
851
  image_result.append([class_string,
847
852
  rendered_image_html_info])
@@ -852,13 +857,13 @@ def _render_image_no_gt(file_info,
852
857
  # ...for each detection
853
858
 
854
859
  image_result[0][1]['max_conf'] = max_conf
855
-
860
+
856
861
  # ...if we got valid rendering info back from _render_bounding_boxes()
857
-
862
+
858
863
  return image_result
859
864
 
860
865
  # ...def _render_image_no_gt()
861
-
866
+
862
867
 
863
868
  def _render_image_with_gt(file_info,ground_truth_indexed_db,
864
869
  detection_categories,classification_categories,options):
@@ -866,7 +871,7 @@ def _render_image_with_gt(file_info,ground_truth_indexed_db,
866
871
  Render an image with ground truth information. See _render_image_no_gt for return
867
872
  data format.
868
873
  """
869
-
874
+
870
875
  image_relative_path = file_info['file']
871
876
  max_conf = file_info['max_detection_conf']
872
877
  detections = file_info['detections']
@@ -887,7 +892,7 @@ def _render_image_with_gt(file_info,ground_truth_indexed_db,
887
892
  ground_truth_box = [x for x in ann['bbox']]
888
893
  ground_truth_box.append(ann['category_id'])
889
894
  ground_truth_boxes.append(ground_truth_box)
890
-
895
+
891
896
  gt_status = image['_detection_status']
892
897
 
893
898
  gt_presence = bool(gt_status)
@@ -917,7 +922,7 @@ def _render_image_with_gt(file_info,ground_truth_indexed_db,
917
922
  else:
918
923
  res = 'tn'
919
924
 
920
- display_name = '<b>Result type</b>: {}, <b>Presence</b>: {}, <b>Class</b>: {}, <b>Max conf</b>: {:0.3f}%, <b>Image</b>: {}'.format(
925
+ display_name = '<b>Result type</b>: {}, <b>Presence</b>: {}, <b>Class</b>: {}, <b>Max conf</b>: {:0.3f}%, <b>Image</b>: {}'.format( # noqa
921
926
  res.upper(), str(gt_presence), gt_class_summary,
922
927
  max_conf * 100, image_relative_path)
923
928
 
@@ -942,7 +947,7 @@ def _render_image_with_gt(file_info,ground_truth_indexed_db,
942
947
 
943
948
  # ...def _render_image_with_gt()
944
949
 
945
-
950
+
946
951
  #%% Main function
947
952
 
948
953
  def process_batch_results(options):
@@ -957,15 +962,15 @@ def process_batch_results(options):
957
962
  truth)
958
963
 
959
964
  Ground truth, if available, must be in COCO Camera Traps format:
960
-
965
+
961
966
  https://github.com/agentmorris/MegaDetector/blob/main/megadetector/data_management/README.md#coco-camera-traps-format
962
967
 
963
968
  Args:
964
969
  options (PostProcessingOptions): everything we need to render a preview/analysis for
965
970
  this set of results; see the PostProcessingOptions class for details.
966
-
971
+
967
972
  Returns:
968
- PostProcessingResults: information about the results/preview, most importantly the
973
+ PostProcessingResults: information about the results/preview, most importantly the
969
974
  HTML filename of the output. See the PostProcessingResults class for details.
970
975
  """
971
976
  ppresults = PostProcessingResults()
@@ -987,7 +992,7 @@ def process_batch_results(options):
987
992
  if (options.ground_truth_json_file is not None) and (len(options.ground_truth_json_file) > 0):
988
993
  assert (options.confidence_threshold is None) or (isinstance(options.confidence_threshold,float)), \
989
994
  'Variable confidence thresholds are not supported when supplying ground truth'
990
-
995
+
991
996
  if (options.ground_truth_json_file is not None) and (len(options.ground_truth_json_file) > 0):
992
997
 
993
998
  if options.separate_detections_by_category:
@@ -1024,7 +1029,7 @@ def process_batch_results(options):
1024
1029
  options.md_results_file, force_forward_slashes=True,
1025
1030
  filename_replacements=options.api_output_filename_replacements)
1026
1031
  ppresults.api_detection_results = detections_df
1027
- ppresults.api_other_fields = other_fields
1032
+ ppresults.api_other_fields = other_fields
1028
1033
 
1029
1034
  else:
1030
1035
  print('Bypassing detection results loading...')
@@ -1033,13 +1038,13 @@ def process_batch_results(options):
1033
1038
  other_fields = options.api_other_fields
1034
1039
 
1035
1040
  # Determine confidence thresholds if necessary
1036
-
1041
+
1037
1042
  if options.confidence_threshold is None:
1038
1043
  options.confidence_threshold = \
1039
1044
  get_typical_confidence_threshold_from_results(other_fields)
1040
1045
  print('Choosing default confidence threshold of {} based on MD version'.format(
1041
- options.confidence_threshold))
1042
-
1046
+ options.confidence_threshold))
1047
+
1043
1048
  if options.almost_detection_confidence_threshold is None and options.include_almost_detections:
1044
1049
  assert isinstance(options.confidence_threshold,float), \
1045
1050
  'If you are using a dictionary of confidence thresholds and almost-detections are enabled, ' + \
@@ -1047,7 +1052,7 @@ def process_batch_results(options):
1047
1052
  options.almost_detection_confidence_threshold = options.confidence_threshold - 0.05
1048
1053
  if options.almost_detection_confidence_threshold < 0:
1049
1054
  options.almost_detection_confidence_threshold = 0
1050
-
1055
+
1051
1056
  # Remove rows with inference failures (typically due to corrupt images)
1052
1057
  n_failures = 0
1053
1058
  if 'failure' in detections_df.columns:
@@ -1056,11 +1061,11 @@ def process_batch_results(options):
1056
1061
  # Explicitly forcing a copy() operation here to suppress "trying to be set
1057
1062
  # on a copy" warnings (and associated risks) below.
1058
1063
  detections_df = detections_df[detections_df['failure'].isna()].copy()
1059
-
1064
+
1060
1065
  assert other_fields is not None
1061
1066
 
1062
1067
  detection_categories = other_fields['detection_categories']
1063
-
1068
+
1064
1069
  # Convert keys and values to lowercase
1065
1070
  classification_categories = other_fields.get('classification_categories', {})
1066
1071
  if classification_categories is not None:
@@ -1072,19 +1077,19 @@ def process_batch_results(options):
1072
1077
  # Count detections and almost-detections for reporting purposes
1073
1078
  n_positives = 0
1074
1079
  n_almosts = 0
1075
-
1080
+
1076
1081
  print('Assigning images to rendering categories')
1077
-
1082
+
1078
1083
  for i_row,row in tqdm(detections_df.iterrows(),total=len(detections_df)):
1079
-
1084
+
1080
1085
  detections = row['detections']
1081
1086
  max_conf = row['max_detection_conf']
1082
1087
  if _has_positive_detection(detections, options, detection_categories):
1083
1088
  n_positives += 1
1084
1089
  elif (options.almost_detection_confidence_threshold is not None) and \
1085
1090
  (max_conf >= options.almost_detection_confidence_threshold):
1086
- n_almosts += 1
1087
-
1091
+ n_almosts += 1
1092
+
1088
1093
  print(f'Finished loading and preprocessing {len(detections_df)} rows '
1089
1094
  f'from detector output, predicted {n_positives} positives.')
1090
1095
 
@@ -1103,18 +1108,18 @@ def process_batch_results(options):
1103
1108
  job_name_string = 'unknown'
1104
1109
  else:
1105
1110
  job_name_string = os.path.basename(options.md_results_file)
1106
-
1111
+
1107
1112
  if options.model_version_string is not None:
1108
1113
  model_version_string = options.model_version_string
1109
1114
  else:
1110
-
1115
+
1111
1116
  if 'info' not in other_fields or 'detector' not in other_fields['info']:
1112
1117
  print('No model metadata supplied, assuming MDv4')
1113
1118
  model_version_string = 'MDv4 (assumed)'
1114
- else:
1119
+ else:
1115
1120
  model_version_string = other_fields['info']['detector']
1116
-
1117
-
1121
+
1122
+
1118
1123
  ##%% If we have ground truth, remove images we can't match to ground truth
1119
1124
 
1120
1125
  if ground_truth_indexed_db is not None:
@@ -1144,7 +1149,7 @@ def process_batch_results(options):
1144
1149
 
1145
1150
  output_html_file = ''
1146
1151
 
1147
- style_header = """<head>
1152
+ style_header = """<head>
1148
1153
  <title>Detection results preview</title>
1149
1154
  <style type="text/css">
1150
1155
  a { text-decoration: none; }
@@ -1173,9 +1178,9 @@ def process_batch_results(options):
1173
1178
 
1174
1179
  n_positive = 0
1175
1180
  n_negative = 0
1176
-
1181
+
1177
1182
  for i_detection, fn in enumerate(detector_files):
1178
-
1183
+
1179
1184
  image_id = ground_truth_indexed_db.filename_to_id[fn]
1180
1185
  image = ground_truth_indexed_db.image_id_to_image[image_id]
1181
1186
  detection_status = image['_detection_status']
@@ -1191,7 +1196,7 @@ def process_batch_results(options):
1191
1196
 
1192
1197
  print('Of {} ground truth values, found {} positives and {} negatives'.format(
1193
1198
  len(detections_df),n_positive,n_negative))
1194
-
1199
+
1195
1200
  # Don't include ambiguous/unknown ground truth in precision/recall analysis
1196
1201
  b_valid_ground_truth = gt_detections >= 0.0
1197
1202
 
@@ -1218,16 +1223,16 @@ def process_batch_results(options):
1218
1223
 
1219
1224
  # Thresholds go up throughout precisions/recalls/thresholds; find the last
1220
1225
  # value where recall is at or above target. That's our precision @ target recall.
1221
-
1226
+
1222
1227
  i_above_target_recall = (np.where(recalls >= options.target_recall))
1223
-
1224
- # np.where returns a tuple of arrays, but in this syntax where we're
1228
+
1229
+ # np.where returns a tuple of arrays, but in this syntax where we're
1225
1230
  # comparing an array with a scalar, there will only be one element.
1226
1231
  assert len (i_above_target_recall) == 1
1227
-
1232
+
1228
1233
  # Convert back to a list
1229
1234
  i_above_target_recall = i_above_target_recall[0].tolist()
1230
-
1235
+
1231
1236
  if len(i_above_target_recall) == 0:
1232
1237
  precision_at_target_recall = 0.0
1233
1238
  else:
@@ -1384,7 +1389,7 @@ def process_batch_results(options):
1384
1389
  t = 'Precision-Recall curve: AP={:0.1%}, P@{:0.1%}={:0.1%}'.format(
1385
1390
  average_precision, options.target_recall, precision_at_target_recall)
1386
1391
  fig = plot_utils.plot_precision_recall_curve(precisions, recalls, t)
1387
-
1392
+
1388
1393
  pr_figure_relative_filename = 'prec_recall.png'
1389
1394
  pr_figure_filename = os.path.join(output_dir, pr_figure_relative_filename)
1390
1395
  fig.savefig(pr_figure_filename)
@@ -1399,7 +1404,7 @@ def process_batch_results(options):
1399
1404
  # Accumulate html image structs (in the format expected by write_html_image_lists)
1400
1405
  # for each category, e.g. 'tp', 'fp', ..., 'class_bird', ...
1401
1406
  images_html = collections.defaultdict(list)
1402
-
1407
+
1403
1408
  # Add default entries by accessing them for the first time
1404
1409
  [images_html[res] for res in ['tp', 'tpc', 'tpi', 'fp', 'tn', 'fn']]
1405
1410
  for res in images_html.keys():
@@ -1423,28 +1428,35 @@ def process_batch_results(options):
1423
1428
 
1424
1429
  start_time = time.time()
1425
1430
  if options.parallelize_rendering:
1426
- if options.parallelize_rendering_n_cores is None:
1427
- if options.parallelize_rendering_with_threads:
1428
- pool = ThreadPool()
1429
- else:
1430
- pool = Pool()
1431
- else:
1432
- if options.parallelize_rendering_with_threads:
1433
- pool = ThreadPool(options.parallelize_rendering_n_cores)
1434
- worker_string = 'threads'
1431
+ pool = None
1432
+ try:
1433
+ if options.parallelize_rendering_n_cores is None:
1434
+ if options.parallelize_rendering_with_threads:
1435
+ pool = ThreadPool()
1436
+ else:
1437
+ pool = Pool()
1435
1438
  else:
1436
- pool = Pool(options.parallelize_rendering_n_cores)
1437
- worker_string = 'processes'
1438
- print('Rendering images with {} {}'.format(options.parallelize_rendering_n_cores,
1439
- worker_string))
1440
-
1441
- rendering_results = list(tqdm(pool.imap(
1442
- partial(_render_image_with_gt,
1443
- ground_truth_indexed_db=ground_truth_indexed_db,
1444
- detection_categories=detection_categories,
1445
- classification_categories=classification_categories,
1446
- options=options),
1447
- files_to_render), total=len(files_to_render)))
1439
+ if options.parallelize_rendering_with_threads:
1440
+ pool = ThreadPool(options.parallelize_rendering_n_cores)
1441
+ worker_string = 'threads'
1442
+ else:
1443
+ pool = Pool(options.parallelize_rendering_n_cores)
1444
+ worker_string = 'processes'
1445
+ print('Rendering images with {} {}'.format(options.parallelize_rendering_n_cores,
1446
+ worker_string))
1447
+
1448
+ rendering_results = list(tqdm(pool.imap(
1449
+ partial(_render_image_with_gt,
1450
+ ground_truth_indexed_db=ground_truth_indexed_db,
1451
+ detection_categories=detection_categories,
1452
+ classification_categories=classification_categories,
1453
+ options=options),
1454
+ files_to_render), total=len(files_to_render)))
1455
+ finally:
1456
+ if pool is not None:
1457
+ pool.close()
1458
+ pool.join()
1459
+ print("Pool closed and joined for GT rendering")
1448
1460
  else:
1449
1461
  for file_info in tqdm(files_to_render):
1450
1462
  rendering_results.append(_render_image_with_gt(
@@ -1485,19 +1497,19 @@ def process_batch_results(options):
1485
1497
  confidence_threshold_string = '{:.2%}'.format(options.confidence_threshold)
1486
1498
  else:
1487
1499
  confidence_threshold_string = str(options.confidence_threshold)
1488
-
1489
- index_page = """<html>
1500
+
1501
+ index_page = """<html>
1490
1502
  {}
1491
1503
  <body>
1492
1504
  <h2>Evaluation</h2>
1493
1505
 
1494
1506
  <h3>Job metadata</h3>
1495
-
1507
+
1496
1508
  <div class="contentdiv">
1497
1509
  <p>Job name: {}<br/>
1498
1510
  <p>Model version: {}</p>
1499
1511
  </div>
1500
-
1512
+
1501
1513
  <h3>Sample images</h3>
1502
1514
  <div class="contentdiv">
1503
1515
  <p>A sample of {} images, annotated with detections above confidence {}.</p>
@@ -1573,12 +1585,12 @@ def process_batch_results(options):
1573
1585
  # Write custom footer if it was provided
1574
1586
  if (options.footer_text is not None) and (len(options.footer_text) > 0):
1575
1587
  index_page += '{}\n'.format(options.footer_text)
1576
-
1588
+
1577
1589
  # Close open html tags
1578
1590
  index_page += '\n</body></html>\n'
1579
-
1591
+
1580
1592
  output_html_file = os.path.join(output_dir, 'index.html')
1581
- with open(output_html_file, 'w',
1593
+ with open(output_html_file, 'w',
1582
1594
  encoding=options.output_html_encoding) as f:
1583
1595
  f.write(index_page)
1584
1596
 
@@ -1596,34 +1608,34 @@ def process_batch_results(options):
1596
1608
  # Accumulate html image structs (in the format expected by write_html_image_list)
1597
1609
  # for each category
1598
1610
  images_html = collections.defaultdict(list)
1599
-
1611
+
1600
1612
  # Add default entries by accessing them for the first time
1601
1613
 
1602
- # Maps sorted tuples of detection category IDs (string ints) - e.g. ("1"), ("1", "4", "7") - to
1614
+ # Maps sorted tuples of detection category IDs (string ints) - e.g. ("1"), ("1", "4", "7") - to
1603
1615
  # result set names, e.g. "detections_human", "detections_cat_truck".
1604
1616
  detection_categories_to_results_name = {}
1605
-
1617
+
1606
1618
  # Keep track of which categories are single-class (e.g. "animal") and which are
1607
1619
  # combinations (e.g. "animal_vehicle")
1608
1620
  detection_categories_to_category_count = {}
1609
-
1621
+
1610
1622
  # For the creation of a "non-detections" category
1611
1623
  images_html['non_detections']
1612
1624
  detection_categories_to_category_count['non_detections'] = 0
1613
-
1614
-
1625
+
1626
+
1615
1627
  if not options.separate_detections_by_category:
1616
1628
  # For the creation of a "detections" category
1617
1629
  images_html['detections']
1618
- detection_categories_to_category_count['detections'] = 0
1630
+ detection_categories_to_category_count['detections'] = 0
1619
1631
  else:
1620
1632
  # Add a set of results for each category and combination of categories, e.g.
1621
1633
  # "detections_animal_vehicle". When we're using this script for non-MegaDetector
1622
1634
  # results, this can generate lots of categories, e.g. detections_bear_bird_cat_dog_pig.
1623
1635
  # We'll keep that huge set of combinations in this map, but we'll only write
1624
- # out links for the ones that are non-empty.
1636
+ # out links for the ones that are non-empty.
1625
1637
  used_combinations = set()
1626
-
1638
+
1627
1639
  # row = images_to_visualize.iloc[0]
1628
1640
  for i_row, row in images_to_visualize.iterrows():
1629
1641
  detections_this_row = row['detections']
@@ -1636,7 +1648,7 @@ def process_batch_results(options):
1636
1648
  continue
1637
1649
  sorted_categories_this_row = tuple(sorted(above_threshold_category_ids_this_row))
1638
1650
  used_combinations.add(sorted_categories_this_row)
1639
-
1651
+
1640
1652
  for sorted_subset in used_combinations:
1641
1653
  assert len(sorted_subset) > 0
1642
1654
  results_name = 'detections'
@@ -1644,7 +1656,7 @@ def process_batch_results(options):
1644
1656
  results_name = results_name + '_' + detection_categories[category_id]
1645
1657
  images_html[results_name]
1646
1658
  detection_categories_to_results_name[sorted_subset] = results_name
1647
- detection_categories_to_category_count[results_name] = len(sorted_subset)
1659
+ detection_categories_to_category_count[results_name] = len(sorted_subset)
1648
1660
 
1649
1661
  if options.include_almost_detections:
1650
1662
  images_html['almost_detections']
@@ -1655,7 +1667,7 @@ def process_batch_results(options):
1655
1667
  os.makedirs(os.path.join(output_dir, res), exist_ok=True)
1656
1668
 
1657
1669
  image_count = len(images_to_visualize)
1658
-
1670
+
1659
1671
  # Each element will be a list of 2-tuples, with elements [collection name,html info struct]
1660
1672
  rendering_results = []
1661
1673
 
@@ -1668,38 +1680,44 @@ def process_batch_results(options):
1668
1680
  for _, row in images_to_visualize.iterrows():
1669
1681
 
1670
1682
  assert isinstance(row['detections'],list)
1671
-
1683
+
1672
1684
  # Filenames should already have been normalized to either '/' or '\'
1673
1685
  files_to_render.append(row.to_dict())
1674
1686
 
1675
1687
  start_time = time.time()
1676
1688
  if options.parallelize_rendering:
1677
-
1678
- if options.parallelize_rendering_n_cores is None:
1679
- if options.parallelize_rendering_with_threads:
1680
- pool = ThreadPool()
1681
- else:
1682
- pool = Pool()
1683
- else:
1684
- if options.parallelize_rendering_with_threads:
1685
- pool = ThreadPool(options.parallelize_rendering_n_cores)
1686
- worker_string = 'threads'
1689
+ pool = None
1690
+ try:
1691
+ if options.parallelize_rendering_n_cores is None:
1692
+ if options.parallelize_rendering_with_threads:
1693
+ pool = ThreadPool()
1694
+ else:
1695
+ pool = Pool()
1687
1696
  else:
1688
- pool = Pool(options.parallelize_rendering_n_cores)
1689
- worker_string = 'processes'
1690
- print('Rendering images with {} {}'.format(options.parallelize_rendering_n_cores,
1691
- worker_string))
1692
-
1693
- # _render_image_no_gt(file_info,detection_categories_to_results_name,
1694
- # detection_categories,classification_categories)
1695
-
1696
- rendering_results = list(tqdm(pool.imap(
1697
- partial(_render_image_no_gt,
1698
- detection_categories_to_results_name=detection_categories_to_results_name,
1699
- detection_categories=detection_categories,
1700
- classification_categories=classification_categories,
1701
- options=options),
1702
- files_to_render), total=len(files_to_render)))
1697
+ if options.parallelize_rendering_with_threads:
1698
+ pool = ThreadPool(options.parallelize_rendering_n_cores)
1699
+ worker_string = 'threads'
1700
+ else:
1701
+ pool = Pool(options.parallelize_rendering_n_cores)
1702
+ worker_string = 'processes'
1703
+ print('Rendering images with {} {}'.format(options.parallelize_rendering_n_cores,
1704
+ worker_string))
1705
+
1706
+ # _render_image_no_gt(file_info,detection_categories_to_results_name,
1707
+ # detection_categories,classification_categories)
1708
+
1709
+ rendering_results = list(tqdm(pool.imap(
1710
+ partial(_render_image_no_gt,
1711
+ detection_categories_to_results_name=detection_categories_to_results_name,
1712
+ detection_categories=detection_categories,
1713
+ classification_categories=classification_categories,
1714
+ options=options),
1715
+ files_to_render), total=len(files_to_render)))
1716
+ finally:
1717
+ if pool is not None:
1718
+ pool.close()
1719
+ pool.join()
1720
+ print("Pool closed and joined for non-GT rendering")
1703
1721
  else:
1704
1722
  for file_info in tqdm(files_to_render):
1705
1723
  rendering_result = _render_image_no_gt(file_info,
@@ -1708,12 +1726,12 @@ def process_batch_results(options):
1708
1726
  classification_categories,
1709
1727
  options=options)
1710
1728
  rendering_results.append(rendering_result)
1711
-
1712
- elapsed = time.time() - start_time
1713
-
1729
+
1730
+ elapsed = time.time() - start_time
1731
+
1714
1732
  # Do we have classification results in addition to detection results?
1715
1733
  has_classification_info = False
1716
-
1734
+
1717
1735
  # Map all the rendering results in the list rendering_results into the
1718
1736
  # dictionary images_html
1719
1737
  image_rendered_count = 0
@@ -1728,7 +1746,7 @@ def process_batch_results(options):
1728
1746
 
1729
1747
  # Prepare the individual html image files
1730
1748
  image_counts = _prepare_html_subpages(images_html, output_dir, options)
1731
-
1749
+
1732
1750
  if image_rendered_count == 0:
1733
1751
  seconds_per_image = 0.0
1734
1752
  else:
@@ -1741,7 +1759,7 @@ def process_batch_results(options):
1741
1759
  # Write index.html
1742
1760
 
1743
1761
  # We can't just sum these, because image_counts includes images in both their
1744
- # detection and classification classes
1762
+ # detection and classification classes
1745
1763
  total_images = 0
1746
1764
  for k in image_counts.keys():
1747
1765
  v = image_counts[k]
@@ -1751,7 +1769,7 @@ def process_batch_results(options):
1751
1769
 
1752
1770
  if total_images != image_count:
1753
1771
  print('Warning, missing images: image_count is {}, total_images is {}'.format(total_images,image_count))
1754
-
1772
+
1755
1773
  almost_detection_string = ''
1756
1774
  if options.include_almost_detections:
1757
1775
  almost_detection_string = ' (&ldquo;almost detection&rdquo; threshold at {:.1%})'.format(
@@ -1762,15 +1780,15 @@ def process_batch_results(options):
1762
1780
  confidence_threshold_string = '{:.2%}'.format(options.confidence_threshold)
1763
1781
  else:
1764
1782
  confidence_threshold_string = str(options.confidence_threshold)
1765
-
1783
+
1766
1784
  index_page = """<html>\n{}\n<body>\n
1767
1785
  <h2>Visualization of results for {}</h2>\n
1768
1786
  <p>A sample of {} images (of {} total)FAILURE_PLACEHOLDER, annotated with detections above confidence {}{}.</p>\n
1769
-
1787
+
1770
1788
  <div class="contentdiv">
1771
1789
  <p>Model version: {}</p>
1772
1790
  </div>
1773
-
1791
+
1774
1792
  <h3>Detection results</h3>\n
1775
1793
  <div class="contentdiv">\n""".format(
1776
1794
  style_header, job_name_string, image_count, len(detections_df), confidence_threshold_string,
@@ -1778,9 +1796,9 @@ def process_batch_results(options):
1778
1796
 
1779
1797
  failure_string = ''
1780
1798
  if n_failures is not None:
1781
- failure_string = ' ({} failures)'.format(n_failures)
1799
+ failure_string = ' ({} failures)'.format(n_failures)
1782
1800
  index_page = index_page.replace('FAILURE_PLACEHOLDER',failure_string)
1783
-
1801
+
1784
1802
  def result_set_name_to_friendly_name(result_set_name):
1785
1803
  friendly_name = ''
1786
1804
  friendly_name = result_set_name.replace('_','-')
@@ -1790,7 +1808,7 @@ def process_batch_results(options):
1790
1808
  return friendly_name
1791
1809
 
1792
1810
  sorted_result_set_names = sorted(list(images_html.keys()))
1793
-
1811
+
1794
1812
  result_set_name_to_count = {}
1795
1813
  for result_set_name in sorted_result_set_names:
1796
1814
  image_count = image_counts[result_set_name]
@@ -1798,7 +1816,7 @@ def process_batch_results(options):
1798
1816
  sorted_result_set_names = sorted(sorted_result_set_names,
1799
1817
  key=lambda x: result_set_name_to_count[x],
1800
1818
  reverse=True)
1801
-
1819
+
1802
1820
  for result_set_name in sorted_result_set_names:
1803
1821
 
1804
1822
  # Don't print classification classes here; we'll do that later with a slightly
@@ -1809,17 +1827,17 @@ def process_batch_results(options):
1809
1827
  filename = result_set_name + '.html'
1810
1828
  label = result_set_name_to_friendly_name(result_set_name)
1811
1829
  image_count = image_counts[result_set_name]
1812
-
1830
+
1813
1831
  # Don't include line items for empty multi-category pages
1814
1832
  if image_count == 0 and \
1815
1833
  detection_categories_to_category_count[result_set_name] > 1:
1816
1834
  continue
1817
-
1835
+
1818
1836
  if total_images == 0:
1819
1837
  image_fraction = -1
1820
1838
  else:
1821
1839
  image_fraction = image_count / total_images
1822
-
1840
+
1823
1841
  # Write the line item for this category, including a link only if the
1824
1842
  # category is non-empty
1825
1843
  if image_count == 0:
@@ -1828,17 +1846,17 @@ def process_batch_results(options):
1828
1846
  else:
1829
1847
  index_page += '<a href="{}">{}</a> ({}, {:.1%})<br/>\n'.format(
1830
1848
  filename,label,image_count,image_fraction)
1831
-
1849
+
1832
1850
  # ...for each result set
1833
-
1851
+
1834
1852
  index_page += '</div>\n'
1835
1853
 
1836
1854
  # If classification information is present and we're supposed to create
1837
1855
  # a summary of classifications, we'll put it here
1838
1856
  category_count_footer = None
1839
-
1857
+
1840
1858
  if has_classification_info:
1841
-
1859
+
1842
1860
  index_page += '<h3>Species classification results</h3>'
1843
1861
  index_page += '<p>The same image might appear under multiple classes ' + \
1844
1862
  'if multiple species were detected.</p>\n'
@@ -1852,12 +1870,12 @@ def process_batch_results(options):
1852
1870
  class_names.append('unreliable')
1853
1871
 
1854
1872
  if options.sort_classification_results_by_count:
1855
- class_name_to_count = {}
1873
+ class_name_to_count = {}
1856
1874
  for cname in class_names:
1857
1875
  ccount = len(images_html['class_{}'.format(cname)])
1858
1876
  class_name_to_count[cname] = ccount
1859
- class_names = sorted(class_names,key=lambda x: class_name_to_count[x],reverse=True)
1860
-
1877
+ class_names = sorted(class_names,key=lambda x: class_name_to_count[x],reverse=True)
1878
+
1861
1879
  for cname in class_names:
1862
1880
  ccount = len(images_html['class_{}'.format(cname)])
1863
1881
  if ccount > 0:
@@ -1870,19 +1888,18 @@ def process_batch_results(options):
1870
1888
  # TODO: it's only for silly historical reasons that we re-read
1871
1889
  # the input file in this case; we're not currently carrying the json
1872
1890
  # representation around, only the Pandas representation.
1873
-
1891
+
1874
1892
  print('Generating classification category report')
1875
-
1876
- with open(options.md_results_file,'r') as f:
1877
- d = json.load(f)
1878
-
1893
+
1894
+ d = load_md_or_speciesnet_file(options.md_results_file)
1895
+
1879
1896
  classification_category_to_count = {}
1880
1897
 
1881
1898
  # im = d['images'][0]
1882
1899
  for im in d['images']:
1883
1900
  if 'detections' in im and im['detections'] is not None:
1884
1901
  for det in im['detections']:
1885
- if 'classifications' in det:
1902
+ if ('classifications' in det) and (len(det['classifications']) > 0):
1886
1903
  class_id = det['classifications'][0][0]
1887
1904
  if class_id not in classification_category_to_count:
1888
1905
  classification_category_to_count[class_id] = 0
@@ -1908,31 +1925,31 @@ def process_batch_results(options):
1908
1925
 
1909
1926
  for category_name in category_name_to_count.keys():
1910
1927
  count = category_name_to_count[category_name]
1911
- category_count_html = '{}: {}<br>\n'.format(category_name,count)
1928
+ category_count_html = '{}: {}<br>\n'.format(category_name,count)
1912
1929
  category_count_footer += category_count_html
1913
1930
 
1914
1931
  category_count_footer += '</div>\n'
1915
-
1932
+
1916
1933
  # ...if we're generating a classification category report
1917
-
1934
+
1918
1935
  # ...if classification info is present
1919
-
1936
+
1920
1937
  if category_count_footer is not None:
1921
1938
  index_page += category_count_footer + '\n'
1922
-
1939
+
1923
1940
  # Write custom footer if it was provided
1924
1941
  if (options.footer_text is not None) and (len(options.footer_text) > 0):
1925
1942
  index_page += options.footer_text + '\n'
1926
-
1943
+
1927
1944
  # Close open html tags
1928
1945
  index_page += '\n</body></html>\n'
1929
-
1946
+
1930
1947
  output_html_file = os.path.join(output_dir, 'index.html')
1931
- with open(output_html_file, 'w',
1948
+ with open(output_html_file, 'w',
1932
1949
  encoding=options.output_html_encoding) as f:
1933
1950
  f.write(index_page)
1934
1951
 
1935
- print('Finished writing html to {}'.format(output_html_file))
1952
+ print('Finished writing html to {}'.format(output_html_file))
1936
1953
 
1937
1954
  # ...if we do/don't have ground truth
1938
1955
 
@@ -1963,8 +1980,8 @@ if False:
1963
1980
 
1964
1981
  #%% Command-line driver
1965
1982
 
1966
- def main():
1967
-
1983
+ def main(): # noqa
1984
+
1968
1985
  options = PostProcessingOptions()
1969
1986
 
1970
1987
  parser = argparse.ArgumentParser()
@@ -2013,42 +2030,42 @@ def main():
2013
2030
  '--n_cores', type=int, default=1,
2014
2031
  help='Number of threads to use for rendering (default: 1)')
2015
2032
  parser.add_argument(
2016
- '--parallelize_rendering_with_processes',
2033
+ '--parallelize_rendering_with_processes',
2017
2034
  action='store_true',
2018
2035
  help='Should we use processes (instead of threads) for parallelization?')
2019
2036
  parser.add_argument(
2020
- '--no_separate_detections_by_category',
2037
+ '--no_separate_detections_by_category',
2021
2038
  action='store_true',
2022
- help='Collapse all categories into just "detections" and "non-detections"')
2039
+ help='Collapse all categories into just "detections" and "non-detections"')
2023
2040
  parser.add_argument(
2024
- '--open_output_file',
2041
+ '--open_output_file',
2025
2042
  action='store_true',
2026
- help='Open the HTML output file when finished')
2043
+ help='Open the HTML output file when finished')
2027
2044
  parser.add_argument(
2028
- '--max_figures_per_html_file',
2045
+ '--max_figures_per_html_file',
2029
2046
  type=int, default=None,
2030
2047
  help='Maximum number of images to put on a single HTML page')
2031
-
2048
+
2032
2049
  if len(sys.argv[1:]) == 0:
2033
2050
  parser.print_help()
2034
2051
  parser.exit()
2035
2052
 
2036
2053
  args = parser.parse_args()
2037
-
2054
+
2038
2055
  if args.n_cores != 1:
2039
2056
  assert (args.n_cores > 1), 'Illegal number of cores: {}'.format(args.n_cores)
2040
2057
  if args.parallelize_rendering_with_processes:
2041
2058
  args.parallelize_rendering_with_threads = False
2042
2059
  args.parallelize_rendering = True
2043
- args.parallelize_rendering_n_cores = args.n_cores
2060
+ args.parallelize_rendering_n_cores = args.n_cores
2044
2061
 
2045
- args_to_object(args, options)
2062
+ args_to_object(args, options)
2046
2063
 
2047
2064
  if args.no_separate_detections_by_category:
2048
2065
  options.separate_detections_by_category = False
2049
-
2066
+
2050
2067
  ppresults = process_batch_results(options)
2051
-
2068
+
2052
2069
  if options.open_output_file:
2053
2070
  path_utils.open_file(ppresults.output_html_file)
2054
2071