megadetector-5.0.10-py3-none-any.whl → megadetector-5.0.11-py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of megadetector might be problematic.

Files changed (226)
  1. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/LICENSE +0 -0
  2. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/METADATA +12 -11
  3. megadetector-5.0.11.dist-info/RECORD +5 -0
  4. megadetector-5.0.11.dist-info/top_level.txt +1 -0
  5. api/__init__.py +0 -0
  6. api/batch_processing/__init__.py +0 -0
  7. api/batch_processing/api_core/__init__.py +0 -0
  8. api/batch_processing/api_core/batch_service/__init__.py +0 -0
  9. api/batch_processing/api_core/batch_service/score.py +0 -439
  10. api/batch_processing/api_core/server.py +0 -294
  11. api/batch_processing/api_core/server_api_config.py +0 -98
  12. api/batch_processing/api_core/server_app_config.py +0 -55
  13. api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  14. api/batch_processing/api_core/server_job_status_table.py +0 -152
  15. api/batch_processing/api_core/server_orchestration.py +0 -360
  16. api/batch_processing/api_core/server_utils.py +0 -92
  17. api/batch_processing/api_core_support/__init__.py +0 -0
  18. api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  19. api/batch_processing/api_support/__init__.py +0 -0
  20. api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  21. api/batch_processing/data_preparation/__init__.py +0 -0
  22. api/batch_processing/data_preparation/manage_local_batch.py +0 -2391
  23. api/batch_processing/data_preparation/manage_video_batch.py +0 -327
  24. api/batch_processing/integration/digiKam/setup.py +0 -6
  25. api/batch_processing/integration/digiKam/xmp_integration.py +0 -465
  26. api/batch_processing/integration/eMammal/test_scripts/config_template.py +0 -5
  27. api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +0 -126
  28. api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +0 -55
  29. api/batch_processing/postprocessing/__init__.py +0 -0
  30. api/batch_processing/postprocessing/add_max_conf.py +0 -64
  31. api/batch_processing/postprocessing/categorize_detections_by_size.py +0 -163
  32. api/batch_processing/postprocessing/combine_api_outputs.py +0 -249
  33. api/batch_processing/postprocessing/compare_batch_results.py +0 -958
  34. api/batch_processing/postprocessing/convert_output_format.py +0 -397
  35. api/batch_processing/postprocessing/load_api_results.py +0 -195
  36. api/batch_processing/postprocessing/md_to_coco.py +0 -310
  37. api/batch_processing/postprocessing/md_to_labelme.py +0 -330
  38. api/batch_processing/postprocessing/merge_detections.py +0 -401
  39. api/batch_processing/postprocessing/postprocess_batch_results.py +0 -1904
  40. api/batch_processing/postprocessing/remap_detection_categories.py +0 -170
  41. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +0 -661
  42. api/batch_processing/postprocessing/repeat_detection_elimination/find_repeat_detections.py +0 -211
  43. api/batch_processing/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +0 -82
  44. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +0 -1631
  45. api/batch_processing/postprocessing/separate_detections_into_folders.py +0 -731
  46. api/batch_processing/postprocessing/subset_json_detector_output.py +0 -696
  47. api/batch_processing/postprocessing/top_folders_to_bottom.py +0 -223
  48. api/synchronous/__init__.py +0 -0
  49. api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  50. api/synchronous/api_core/animal_detection_api/api_backend.py +0 -152
  51. api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -266
  52. api/synchronous/api_core/animal_detection_api/config.py +0 -35
  53. api/synchronous/api_core/animal_detection_api/data_management/annotations/annotation_constants.py +0 -47
  54. api/synchronous/api_core/animal_detection_api/detection/detector_training/copy_checkpoints.py +0 -43
  55. api/synchronous/api_core/animal_detection_api/detection/detector_training/model_main_tf2.py +0 -114
  56. api/synchronous/api_core/animal_detection_api/detection/process_video.py +0 -543
  57. api/synchronous/api_core/animal_detection_api/detection/pytorch_detector.py +0 -304
  58. api/synchronous/api_core/animal_detection_api/detection/run_detector.py +0 -627
  59. api/synchronous/api_core/animal_detection_api/detection/run_detector_batch.py +0 -1029
  60. api/synchronous/api_core/animal_detection_api/detection/run_inference_with_yolov5_val.py +0 -581
  61. api/synchronous/api_core/animal_detection_api/detection/run_tiled_inference.py +0 -754
  62. api/synchronous/api_core/animal_detection_api/detection/tf_detector.py +0 -165
  63. api/synchronous/api_core/animal_detection_api/detection/video_utils.py +0 -495
  64. api/synchronous/api_core/animal_detection_api/md_utils/azure_utils.py +0 -174
  65. api/synchronous/api_core/animal_detection_api/md_utils/ct_utils.py +0 -262
  66. api/synchronous/api_core/animal_detection_api/md_utils/directory_listing.py +0 -251
  67. api/synchronous/api_core/animal_detection_api/md_utils/matlab_porting_tools.py +0 -97
  68. api/synchronous/api_core/animal_detection_api/md_utils/path_utils.py +0 -416
  69. api/synchronous/api_core/animal_detection_api/md_utils/process_utils.py +0 -110
  70. api/synchronous/api_core/animal_detection_api/md_utils/sas_blob_utils.py +0 -509
  71. api/synchronous/api_core/animal_detection_api/md_utils/string_utils.py +0 -59
  72. api/synchronous/api_core/animal_detection_api/md_utils/url_utils.py +0 -144
  73. api/synchronous/api_core/animal_detection_api/md_utils/write_html_image_list.py +0 -226
  74. api/synchronous/api_core/animal_detection_api/md_visualization/visualization_utils.py +0 -841
  75. api/synchronous/api_core/tests/__init__.py +0 -0
  76. api/synchronous/api_core/tests/load_test.py +0 -110
  77. classification/__init__.py +0 -0
  78. classification/aggregate_classifier_probs.py +0 -108
  79. classification/analyze_failed_images.py +0 -227
  80. classification/cache_batchapi_outputs.py +0 -198
  81. classification/create_classification_dataset.py +0 -627
  82. classification/crop_detections.py +0 -516
  83. classification/csv_to_json.py +0 -226
  84. classification/detect_and_crop.py +0 -855
  85. classification/efficientnet/__init__.py +0 -9
  86. classification/efficientnet/model.py +0 -415
  87. classification/efficientnet/utils.py +0 -610
  88. classification/evaluate_model.py +0 -520
  89. classification/identify_mislabeled_candidates.py +0 -152
  90. classification/json_to_azcopy_list.py +0 -63
  91. classification/json_validator.py +0 -695
  92. classification/map_classification_categories.py +0 -276
  93. classification/merge_classification_detection_output.py +0 -506
  94. classification/prepare_classification_script.py +0 -194
  95. classification/prepare_classification_script_mc.py +0 -228
  96. classification/run_classifier.py +0 -286
  97. classification/save_mislabeled.py +0 -110
  98. classification/train_classifier.py +0 -825
  99. classification/train_classifier_tf.py +0 -724
  100. classification/train_utils.py +0 -322
  101. data_management/__init__.py +0 -0
  102. data_management/annotations/__init__.py +0 -0
  103. data_management/annotations/annotation_constants.py +0 -34
  104. data_management/camtrap_dp_to_coco.py +0 -238
  105. data_management/cct_json_utils.py +0 -395
  106. data_management/cct_to_md.py +0 -176
  107. data_management/cct_to_wi.py +0 -289
  108. data_management/coco_to_labelme.py +0 -272
  109. data_management/coco_to_yolo.py +0 -662
  110. data_management/databases/__init__.py +0 -0
  111. data_management/databases/add_width_and_height_to_db.py +0 -33
  112. data_management/databases/combine_coco_camera_traps_files.py +0 -206
  113. data_management/databases/integrity_check_json_db.py +0 -477
  114. data_management/databases/subset_json_db.py +0 -115
  115. data_management/generate_crops_from_cct.py +0 -149
  116. data_management/get_image_sizes.py +0 -188
  117. data_management/importers/add_nacti_sizes.py +0 -52
  118. data_management/importers/add_timestamps_to_icct.py +0 -79
  119. data_management/importers/animl_results_to_md_results.py +0 -158
  120. data_management/importers/auckland_doc_test_to_json.py +0 -372
  121. data_management/importers/auckland_doc_to_json.py +0 -200
  122. data_management/importers/awc_to_json.py +0 -189
  123. data_management/importers/bellevue_to_json.py +0 -273
  124. data_management/importers/cacophony-thermal-importer.py +0 -796
  125. data_management/importers/carrizo_shrubfree_2018.py +0 -268
  126. data_management/importers/carrizo_trail_cam_2017.py +0 -287
  127. data_management/importers/cct_field_adjustments.py +0 -57
  128. data_management/importers/channel_islands_to_cct.py +0 -913
  129. data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  130. data_management/importers/eMammal/eMammal_helpers.py +0 -249
  131. data_management/importers/eMammal/make_eMammal_json.py +0 -223
  132. data_management/importers/ena24_to_json.py +0 -275
  133. data_management/importers/filenames_to_json.py +0 -385
  134. data_management/importers/helena_to_cct.py +0 -282
  135. data_management/importers/idaho-camera-traps.py +0 -1407
  136. data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  137. data_management/importers/jb_csv_to_json.py +0 -150
  138. data_management/importers/mcgill_to_json.py +0 -250
  139. data_management/importers/missouri_to_json.py +0 -489
  140. data_management/importers/nacti_fieldname_adjustments.py +0 -79
  141. data_management/importers/noaa_seals_2019.py +0 -181
  142. data_management/importers/pc_to_json.py +0 -365
  143. data_management/importers/plot_wni_giraffes.py +0 -123
  144. data_management/importers/prepare-noaa-fish-data-for-lila.py +0 -359
  145. data_management/importers/prepare_zsl_imerit.py +0 -131
  146. data_management/importers/rspb_to_json.py +0 -356
  147. data_management/importers/save_the_elephants_survey_A.py +0 -320
  148. data_management/importers/save_the_elephants_survey_B.py +0 -332
  149. data_management/importers/snapshot_safari_importer.py +0 -758
  150. data_management/importers/snapshot_safari_importer_reprise.py +0 -665
  151. data_management/importers/snapshot_serengeti_lila.py +0 -1067
  152. data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  153. data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  154. data_management/importers/sulross_get_exif.py +0 -65
  155. data_management/importers/timelapse_csv_set_to_json.py +0 -490
  156. data_management/importers/ubc_to_json.py +0 -399
  157. data_management/importers/umn_to_json.py +0 -507
  158. data_management/importers/wellington_to_json.py +0 -263
  159. data_management/importers/wi_to_json.py +0 -441
  160. data_management/importers/zamba_results_to_md_results.py +0 -181
  161. data_management/labelme_to_coco.py +0 -548
  162. data_management/labelme_to_yolo.py +0 -272
  163. data_management/lila/__init__.py +0 -0
  164. data_management/lila/add_locations_to_island_camera_traps.py +0 -97
  165. data_management/lila/add_locations_to_nacti.py +0 -147
  166. data_management/lila/create_lila_blank_set.py +0 -557
  167. data_management/lila/create_lila_test_set.py +0 -151
  168. data_management/lila/create_links_to_md_results_files.py +0 -106
  169. data_management/lila/download_lila_subset.py +0 -177
  170. data_management/lila/generate_lila_per_image_labels.py +0 -515
  171. data_management/lila/get_lila_annotation_counts.py +0 -170
  172. data_management/lila/get_lila_image_counts.py +0 -111
  173. data_management/lila/lila_common.py +0 -300
  174. data_management/lila/test_lila_metadata_urls.py +0 -132
  175. data_management/ocr_tools.py +0 -874
  176. data_management/read_exif.py +0 -681
  177. data_management/remap_coco_categories.py +0 -84
  178. data_management/remove_exif.py +0 -66
  179. data_management/resize_coco_dataset.py +0 -189
  180. data_management/wi_download_csv_to_coco.py +0 -246
  181. data_management/yolo_output_to_md_output.py +0 -441
  182. data_management/yolo_to_coco.py +0 -676
  183. detection/__init__.py +0 -0
  184. detection/detector_training/__init__.py +0 -0
  185. detection/detector_training/model_main_tf2.py +0 -114
  186. detection/process_video.py +0 -703
  187. detection/pytorch_detector.py +0 -337
  188. detection/run_detector.py +0 -779
  189. detection/run_detector_batch.py +0 -1219
  190. detection/run_inference_with_yolov5_val.py +0 -917
  191. detection/run_tiled_inference.py +0 -935
  192. detection/tf_detector.py +0 -188
  193. detection/video_utils.py +0 -606
  194. docs/source/conf.py +0 -43
  195. md_utils/__init__.py +0 -0
  196. md_utils/azure_utils.py +0 -174
  197. md_utils/ct_utils.py +0 -612
  198. md_utils/directory_listing.py +0 -246
  199. md_utils/md_tests.py +0 -968
  200. md_utils/path_utils.py +0 -1044
  201. md_utils/process_utils.py +0 -157
  202. md_utils/sas_blob_utils.py +0 -509
  203. md_utils/split_locations_into_train_val.py +0 -228
  204. md_utils/string_utils.py +0 -92
  205. md_utils/url_utils.py +0 -323
  206. md_utils/write_html_image_list.py +0 -225
  207. md_visualization/__init__.py +0 -0
  208. md_visualization/plot_utils.py +0 -293
  209. md_visualization/render_images_with_thumbnails.py +0 -275
  210. md_visualization/visualization_utils.py +0 -1537
  211. md_visualization/visualize_db.py +0 -551
  212. md_visualization/visualize_detector_output.py +0 -406
  213. megadetector-5.0.10.dist-info/RECORD +0 -224
  214. megadetector-5.0.10.dist-info/top_level.txt +0 -8
  215. taxonomy_mapping/__init__.py +0 -0
  216. taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +0 -491
  217. taxonomy_mapping/map_new_lila_datasets.py +0 -154
  218. taxonomy_mapping/prepare_lila_taxonomy_release.py +0 -142
  219. taxonomy_mapping/preview_lila_taxonomy.py +0 -591
  220. taxonomy_mapping/retrieve_sample_image.py +0 -71
  221. taxonomy_mapping/simple_image_download.py +0 -218
  222. taxonomy_mapping/species_lookup.py +0 -834
  223. taxonomy_mapping/taxonomy_csv_checker.py +0 -159
  224. taxonomy_mapping/taxonomy_graph.py +0 -346
  225. taxonomy_mapping/validate_lila_category_mappings.py +0 -83
  226. {megadetector-5.0.10.dist-info → megadetector-5.0.11.dist-info}/WHEEL +0 -0
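Reading the listing as a whole: the 5.0.11 wheel removes every code module that 5.0.10 shipped under its flat top-level packages (api, classification, data_management, detection, md_utils, md_visualization, taxonomy_mapping, plus docs/source/conf.py), the old eight-entry top_level.txt is replaced by a single-entry file, and the new RECORD lists only five files, which suggests the 5.0.11 wheel ships metadata only. As a hedged sketch of what that means for downstream code, the guard below uses the import path implied by the 5.0.10 layout shown above; the fallback message is only a suggestion, not part of the package:

# Guarded import for code written against the flat 5.0.10 wheel layout.
try:
    from api.batch_processing.postprocessing.subset_json_detector_output import (
        SubsetJsonDetectorOutputOptions,
        subset_json_detector_output,
    )
except ImportError as e:
    # The 5.0.11 wheel no longer ships these modules; pinning the previous
    # release is one way to keep the old import path working (hypothetical advice).
    raise ImportError(
        'megadetector 5.0.11 removed the flat module layout; '
        'pin megadetector==5.0.10 or migrate to the restructured package.'
    ) from e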
api/batch_processing/postprocessing/subset_json_detector_output.py
@@ -1,696 +0,0 @@
- r"""
-
- subset_json_detector_output.py
-
- Creates one or more subsets of a detector results file (.json), doing either
- or both of the following (if both are requested, they happen in this order):
-
- 1) Retrieve all elements where filenames contain a specified query string,
- optionally replacing that query with a replacement token. If the query is blank,
- can also be used to prepend content to all filenames.
-
- Does not support regex's, but supports a special case of ^string to indicate "must start with
- to match".
-
- 2) Create separate .jsons for each unique path, optionally making the filenames
- in those .json's relative paths. In this case, you specify an output directory,
- rather than an output path. All images in the folder blah/foo/bar will end up
- in a .json file called blah_foo_bar.json.
-
- Can also apply a confidence threshold.
-
- Can also subset by categories above a threshold (programmatic invocation only, this is
- not supported at the command line yet).
-
- To subset a COCO Camera Traps .json database, see subset_json_db.py
-
- **Sample invocation (splitting into multiple json's)**
-
- Read from "1800_idfg_statewide_wolf_detections_w_classifications.json", split up into
- individual .jsons in 'd:/temp/idfg/output', making filenames relative to their individual
- folders:
-
- python subset_json_detector_output.py "d:/temp/idfg/1800_idfg_statewide_wolf_detections_w_classifications.json" "d:/temp/idfg/output" --split_folders --make_folder_relative
-
- Now do the same thing, but instead of writing .json's to d:/temp/idfg/output, write them to *subfolders*
- corresponding to the subfolders for each .json file.
-
- python subset_json_detector_output.py "d:/temp/idfg/1800_detections_S2.json" "d:/temp/idfg/output_to_folders" --split_folders --make_folder_relative --copy_jsons_to_folders
-
- **Sample invocation (creating a single subset matching a query)**
-
- Read from "1800_detections.json", write to "1800_detections_2017.json"
-
- Include only images matching "2017", and change "2017" to "blah"
-
- python subset_json_detector_output.py "d:/temp/1800_detections.json" "d:/temp/1800_detections_2017_blah.json" --query 2017 --replacement blah
-
- Include all images, prepend with "prefix/"
-
- python subset_json_detector_output.py "d:/temp/1800_detections.json" "d:/temp/1800_detections_prefix.json" --replacement "prefix/"
-
- """
-
- #%% Constants and imports
-
- import argparse
- import sys
- import copy
- import json
- import os
- import re
-
- from tqdm import tqdm
-
- from md_utils.ct_utils import args_to_object, get_max_conf, invert_dictionary
- from md_utils.path_utils import top_level_folder
-
-
- #%% Helper classes
-
- class SubsetJsonDetectorOutputOptions:
-     """
-     Options used to parameterize subset_json_detector_output()
-     """
-
-     #: Only process files containing the token 'query'
-     query = None
-
-     #: Replace 'query' with 'replacement' if 'replacement' is not None. If 'query' is None,
-     #: prepend 'replacement'
-     replacement = None
-
-     #: Should we split output into individual .json files for each folder?
-     split_folders = False
-
-     #: Folder level to use for splitting ['bottom','top','n_from_bottom','n_from_top','dict']
-     #:
-     #: 'dict' requires 'split_folder_param' to be a dictionary mapping each filename
-     #: to a token.
-     split_folder_mode = 'bottom' # 'top'
-
-     #: When using the 'n_from_bottom' parameter to define folder splitting, this
-     #: defines the number of directories from the bottom. 'n_from_bottom' with
-     #: a parameter of zero is the same as 'bottom'.
-     #:
-     #: Same story with 'n_from_top'.
-     #:
-     #: When 'split_folder_mode' is 'dict', this should be a dictionary mapping each filename
-     #: to a token.
-     split_folder_param = 0
-
-     #: Only meaningful if split_folders is True: should we convert pathnames to be relative
-     #: the folder for each .json file?
-     make_folder_relative = False
-
-     #: Only meaningful if split_folders and make_folder_relative are True: if not None,
-     #: will copy .json files to their corresponding output directories, relative to
-     #: output_filename
-     copy_jsons_to_folders = False
-
-     #: Should we over-write .json files?
-     overwrite_json_files = False
-
-     #: If copy_jsons_to_folders is true, do we require that directories already exist?
-     copy_jsons_to_folders_directories_must_exist = True
-
-     #: Optional confidence threshold; if not None, detections below this confidence won't be
-     #: included in the output.
-     confidence_threshold = None
-
-     #: Should we remove failed images?
-     remove_failed_images = False
-
-     #: Either a list of category IDs (as string-ints) (not names), or a dictionary mapping category *IDs*
-     #: (as string-ints) (not names) to thresholds. Removes non-matching detections, does not
-     #: remove images. Not technically mutually exclusize with category_names_to_keep, but it's an esoteric
-     #: scenario indeed where you would want to specify both.
-     categories_to_keep = None
-
-     #: Either a list of category names (not IDs), or a dictionary mapping category *names* (not IDs) to thresholds.
-     #: Removes non-matching detections, does not remove images. Not technically mutually exclusize with
-     #: category_ids_to_keep, but it's an esoteric scenario indeed where you would want to specify both.
-     category_names_to_keep = None
-
-     #: Set to >0 during testing to limit the number of images that get processed.
-     debug_max_images = -1
-
-
- #%% Main function
-
- def _write_detection_results(data, output_filename, options):
-     """
-     Writes the detector-output-formatted dict *data* to *output_filename*.
-     """
-
-     if (not options.overwrite_json_files) and os.path.isfile(output_filename):
-         raise ValueError('File {} exists'.format(output_filename))
-
-     basedir = os.path.dirname(output_filename)
-
-     if options.copy_jsons_to_folders and options.copy_jsons_to_folders_directories_must_exist:
-         if not os.path.isdir(basedir):
-             raise ValueError('Directory {} does not exist'.format(basedir))
-     else:
-         os.makedirs(basedir, exist_ok=True)
-
-     print('Writing detection output to {}'.format(output_filename))
-     with open(output_filename, 'w') as f:
-         json.dump(data,f,indent=1)
-
- # ..._write_detection_results()
-
-
- def subset_json_detector_output_by_confidence(data, options):
-     """
-     Removes all detections below options.confidence_threshold.
-
-     Args:
-         data (dict): data loaded from a MD results file
-         options (SubsetJsonDetectorOutputOptions): parameters for subsetting
-
-     Returns:
-         dict: Possibly-modified version of data (also modifies in place)
-     """
-
-     if options.confidence_threshold is None:
-         return data
-
-     images_in = data['images']
-     images_out = []
-
-     print('Subsetting by confidence >= {}'.format(options.confidence_threshold))
-
-     n_max_changes = 0
-
-     # im = images_in[0]
-     for i_image, im in tqdm(enumerate(images_in), total=len(images_in)):
-
-         # Always keep failed images; if the caller wants to remove these, they
-         # will use remove_failed_images
-         if ('detections' not in im) or (im['detections'] is None):
-             images_out.append(im)
-             continue
-
-         p_orig = get_max_conf(im)
-
-         # Find all detections above threshold for this image
-         detections = [d for d in im['detections'] if d['conf'] >= options.confidence_threshold]
-
-         # If there are no detections above threshold, set the max probability
-         # to -1, unless it already had a negative probability.
-         if len(detections) == 0:
-             if p_orig <= 0:
-                 p = p_orig
-             else:
-                 p = -1
-
-         # Otherwise find the max confidence
-         else:
-             p = max([d['conf'] for d in detections])
-
-         im['detections'] = detections
-
-         # Did this thresholding result in a max-confidence change?
-         if abs(p_orig - p) > 0.00001:
-
-             # We should only be *lowering* max confidence values (i.e., making them negative)
-             assert (p_orig <= 0) or (p < p_orig), \
-                 'Confidence changed from {} to {}'.format(p_orig, p)
-             n_max_changes += 1
-
-         if 'max_detection_conf' in im:
-             im['max_detection_conf'] = p
-
-         images_out.append(im)
-
-     # ...for each image
-
-     data['images'] = images_out
-     print('done, found {} matches (of {}), {} max conf changes'.format(
-         len(data['images']),len(images_in),n_max_changes))
-
-     return data
-
- # ...subset_json_detector_output_by_confidence()
-
-
- def subset_json_detector_output_by_categories(data, options):
-     """
-     Removes all detections without detections above a threshold for specific categories.
-
-     Args:
-         data (dict): data loaded from a MD results file
-         options (SubsetJsonDetectorOutputOptions): parameters for subsetting
-
-     Returns:
-         dict: Possibly-modified version of data (also modifies in place)
-     """
-
-     # If categories_to_keep is supplied as a list, convert to a dict
-     if options.categories_to_keep is not None:
-         if not isinstance(options.categories_to_keep, dict):
-             dict_categories_to_keep = {}
-             for category_id in options.categories_to_keep:
-                 # Set unspecified thresholds to a silly negative value
-                 dict_categories_to_keep[category_id] = -100000.0
-             options.categories_to_keep = dict_categories_to_keep
-
-     # If category_names_to_keep is supplied as a list, convert to a dict
-     if options.category_names_to_keep is not None:
-         if not isinstance(options.category_names_to_keep, dict):
-             dict_category_names_to_keep = {}
-             for category_name in options.category_names_to_keep:
-                 # Set unspecified thresholds to a silly negative value
-                 dict_category_names_to_keep[category_name] = -100000.0
-             options.category_names_to_keep = dict_category_names_to_keep
-
-     category_name_to_category_id = invert_dictionary(data['detection_categories'])
-
-     # If some categories are supplied as names, convert all to IDs and add to "categories_to_keep"
-     if options.category_names_to_keep is not None:
-         if options.categories_to_keep is None:
-             options.categories_to_keep = {}
-         for category_name in options.category_names_to_keep:
-             assert category_name in category_name_to_category_id, \
-                 'Category {} not in detection categories'.format(category_name)
-             category_id = category_name_to_category_id[category_name]
-             assert category_id not in options.categories_to_keep, \
-                 'Category {} ({}) specified as both a name and an ID'.format(
-                     category_name,category_id)
-             options.categories_to_keep[category_id] = options.category_names_to_keep[category_name]
-
-     if options.categories_to_keep is None:
-         return data
-
-     images_in = data['images']
-     images_out = []
-
-     print('Subsetting by categories (keeping {} categories):'.format(
-         len(options.categories_to_keep)))
-
-     for category_id in sorted(list(options.categories_to_keep.keys())):
-         if category_id not in data['detection_categories']:
-             print('Warning: category ID {} not in category map in this file'.format(category_id))
-         else:
-             print('{} ({}) (threshold {})'.format(
-                 category_id,
-                 data['detection_categories'][category_id],
-                 options.categories_to_keep[category_id]))
-
-     n_detections_in = 0
-     n_detections_kept = 0
-
-     # im = images_in[0]
-     for i_image, im in tqdm(enumerate(images_in), total=len(images_in)):
-
-         # Always keep failed images; if the caller wants to remove these, they
-         # will use remove_failed_images
-         if ('detections' not in im) or (im['detections'] is None):
-             images_out.append(im)
-             continue
-
-         n_detections_in += len(im['detections'])
-
-         # Find all matching detections for this image
-         detections = []
-         for d in im['detections']:
-             if (d['category'] in options.categories_to_keep) and \
-                 (d['conf'] > options.categories_to_keep[d['category']]):
-                 detections.append(d)
-
-         im['detections'] = detections
-
-         if 'max_detection_conf' in im:
-             if len(detections) == 0:
-                 p = 0
-             else:
-                 p = max([d['conf'] for d in detections])
-             im['max_detection_conf'] = p
-
-         n_detections_kept += len(im['detections'])
-
-         images_out.append(im)
-
-     # ...for each image
-
-     data['images'] = images_out
-     print('done, kept {} detections (of {})'.format(
-         n_detections_kept,n_detections_in))
-
-     return data
-
- # ...subset_json_detector_output_by_categories()
-
-
- def remove_failed_images(data,options):
-     """
-     Removed failed images from [data]
-
-     Args:
-         data (dict): data loaded from a MD results file
-         options (SubsetJsonDetectorOutputOptions): parameters for subsetting
-
-     Returns:
-         dict: Possibly-modified version of data (also modifies in place)
-     """
-
-     images_in = data['images']
-     images_out = []
-
-     if not options.remove_failed_images:
-         return data
-
-     print('Removing failed images...', end='')
-
-     # i_image = 0; im = images_in[0]
-     for i_image, im in tqdm(enumerate(images_in), total=len(images_in)):
-
-         if 'failure' in im and isinstance(im['failure'],str):
-             continue
-         else:
-             images_out.append(im)
-
-     # ...for each image
-
-     data['images'] = images_out
-     n_removed = len(images_in) - len(data['images'])
-     print('Done, removed {} of {}'.format(n_removed, len(images_in)))
-
-     return data
-
- # ...remove_failed_images()
-
-
- def subset_json_detector_output_by_query(data, options):
-     """
-     Subsets to images whose filename matches options.query; replace all instances of
-     options.query with options.replacement. No-op if options.query_string is None or ''.
-
-     Args:
-         data (dict): data loaded from a MD results file
-         options (SubsetJsonDetectorOutputOptions): parameters for subsetting
-
-     Returns:
-         dict: Possibly-modified version of data (also modifies in place)
-     """
-
-     images_in = data['images']
-     images_out = []
-
-     print('Subsetting by query {}, replacement {}...'.format(options.query, options.replacement), end='')
-
-     query_string = options.query
-     query_starts_with = False
-
-     # Support a special case regex-like notation for "starts with"
-     if query_string is not None and query_string.startswith('^'):
-         query_string = query_string[1:]
-         query_starts_with = True
-
-     # i_image = 0; im = images_in[0]
-     for i_image, im in tqdm(enumerate(images_in), total=len(images_in)):
-
-         fn = im['file']
-
-         # Only take images that match the query
-         if query_string is not None:
-             if query_starts_with:
-                 if (not fn.startswith(query_string)):
-                     continue
-             else:
-                 if query_string not in fn:
-                     continue
-
-         if options.replacement is not None:
-             if query_string is not None:
-                 fn = fn.replace(query_string, options.replacement)
-             else:
-                 fn = options.replacement + fn
-
-         im['file'] = fn
-
-         images_out.append(im)
-
-     # ...for each image
-
-     data['images'] = images_out
-     print('done, found {} matches (of {})'.format(len(data['images']), len(images_in)))
-
-     return data
-
- # ...subset_json_detector_output_by_query()
-
-
- def subset_json_detector_output(input_filename, output_filename, options, data=None):
-     """
-     Main entry point; creates one or more subsets of a detector results file. See the
-     module header comment for more information about the available subsetting approaches.
-
-     Makes a copy of [data] before modifying if a data dictionary is supplied.
-
-     Args:
-         input_filename (str): filename to load and subset; can be None if [data] is supplied
-         output_filename (str): file or folder name (depending on [options]) to which we should
-             write subset results.
-         options (SubsetJsonDetectorOutputOptions): parameters for .json splitting/subsetting;
-             see SubsetJsonDetectorOutputOptions for details.
-         data (dict, optional): data loaded from a .json file; if this is not None, [input_filename]
-             will be ignored. If supplied, this will be copied before it's modified.
-
-     Returns:
-         dict: Results that are either loaded from [input_filename] and processed, or copied
-         from [data] and processed.
-
-     """
-
-     if options is None:
-         options = SubsetJsonDetectorOutputOptions()
-
-     # Input validation
-     if options.copy_jsons_to_folders:
-         assert options.split_folders and options.make_folder_relative, \
-             'copy_jsons_to_folders set without make_folder_relative and split_folders'
-
-     if options.split_folders:
-         if os.path.isfile(output_filename):
-             raise ValueError('When splitting by folders, output must be a valid directory name, you specified an existing file')
-
-     if data is None:
-         print('Reading json...', end='')
-         with open(input_filename) as f:
-             data = json.load(f)
-         print(' ...done, read {} images'.format(len(data['images'])))
-         if options.debug_max_images > 0:
-             print('Trimming to {} images'.format(options.debug_max_images))
-             data['images'] = data['images'][:options.debug_max_images]
-     else:
-         print('Copying data')
-         data = copy.deepcopy(data)
-         print('...done')
-
-     if options.query is not None:
-
-         data = subset_json_detector_output_by_query(data, options)
-
-     if options.remove_failed_images:
-
-         data = remove_failed_images(data, options)
-
-     if options.confidence_threshold is not None:
-
-         data = subset_json_detector_output_by_confidence(data, options)
-
-     if (options.categories_to_keep is not None) or (options.category_names_to_keep is not None):
-
-         data = subset_json_detector_output_by_categories(data, options)
-
-     if not options.split_folders:
-
-         _write_detection_results(data, output_filename, options)
-         return data
-
-     else:
-
-         # Map images to unique folders
-         print('Finding unique folders')
-
-         folders_to_images = {}
-
-         # im = data['images'][0]
-         for im in tqdm(data['images']):
-
-             fn = im['file']
-
-             if options.split_folder_mode == 'bottom':
-
-                 dirname = os.path.dirname(fn)
-
-             elif options.split_folder_mode == 'n_from_bottom':
-
-                 dirname = os.path.dirname(fn)
-                 for n in range(0, options.split_folder_param):
-                     dirname = os.path.dirname(dirname)
-
-             elif options.split_folder_mode == 'n_from_top':
-
-                 # Split string into folders, keeping delimiters
-
-                 # Don't use this, it removes delimiters
-                 # tokens = _split_path(fn)
-                 tokens = re.split(r'([\\/])',fn)
-
-                 n_tokens_to_keep = ((options.split_folder_param + 1) * 2) - 1;
-
-                 if n_tokens_to_keep > len(tokens):
-                     raise ValueError('Cannot walk {} folders from the top in path {}'.format(
-                         options.split_folder_param, fn))
-                 dirname = ''.join(tokens[0:n_tokens_to_keep])
-
-             elif options.split_folder_mode == 'top':
-
-                 dirname = top_level_folder(fn)
-
-             elif options.split_folder_mode == 'dict':
-
-                 assert isinstance(options.split_folder_param, dict)
-                 dirname = options.split_folder_param[fn]
-
-             else:
-
-                 raise ValueError('Unrecognized folder split mode {}'.format(options.split_folder_mode))
-
-             folders_to_images.setdefault(dirname, []).append(im)
-
-         # ...for each image
-
-         print('Found {} unique folders'.format(len(folders_to_images)))
-
-         # Optionally make paths relative
-         # dirname = list(folders_to_images.keys())[0]
-         if options.make_folder_relative:
-
-             print('Converting database-relative paths to individual-json-relative paths...')
-
-             for dirname in tqdm(folders_to_images):
-                 # im = folders_to_images[dirname][0]
-                 for im in folders_to_images[dirname]:
-                     fn = im['file']
-                     relfn = os.path.relpath(fn, dirname).replace('\\', '/')
-                     im['file'] = relfn
-
-         # ...if we need to convert paths to be folder-relative
-
-         print('Finished converting to json-relative paths, writing output')
-
-         os.makedirs(output_filename, exist_ok=True)
-         all_images = data['images']
-
-         # dirname = list(folders_to_images.keys())[0]
-         for dirname in tqdm(folders_to_images):
-
-             json_fn = dirname.replace('/', '_').replace('\\', '_') + '.json'
-
-             if options.copy_jsons_to_folders:
-                 json_fn = os.path.join(output_filename, dirname, json_fn)
-             else:
-                 json_fn = os.path.join(output_filename, json_fn)
-
-             # Recycle the 'data' struct, replacing 'images' every time... medium-hacky, but
-             # forward-compatible in that I don't take dependencies on the other fields
-             dir_data = data
-             dir_data['images'] = folders_to_images[dirname]
-             _write_detection_results(dir_data, json_fn, options)
-             print('Wrote {} images to {}'.format(len(dir_data['images']), json_fn))
-
-         # ...for each directory
-
-         data['images'] = all_images
-
-         return data
-
-     # ...if we're splitting folders
-
- # ...subset_json_detector_output()
-
-
- #%% Interactive driver
-
- if False:
-
-     #%%
-
-     #%% Subset a file without splitting
-
-     input_filename = r"c:\temp\sample.json"
-     output_filename = r"c:\temp\output.json"
-
-     options = SubsetJsonDetectorOutputOptions()
-     options.replacement = None
-     options.query = 'S2'
-
-     data = subset_json_detector_output(input_filename,output_filename,options,None)
-
-
-     #%% Subset and split, but don't copy to individual folders
-
-     input_filename = r"C:\temp\xxx-20201028_detections.filtered_rde_0.60_0.85_10_0.05_r2_export\xxx-20201028_detections.filtered_rde_0.60_0.85_10_0.05_r2_export.json"
-     output_filename = r"c:\temp\out"
-
-     options = SubsetJsonDetectorOutputOptions()
-     options.split_folders = True
-     options.make_folder_relative = True
-     options.split_folder_mode = 'n_from_top'
-     options.split_folder_param = 1
-
-     data = subset_json_detector_output(input_filename,output_filename,options,None)
-
-
-     #%% Subset and split, copying to individual folders
-
-     input_filename = r"c:\temp\sample.json"
-     output_filename = r"c:\temp\out"
-
-     options = SubsetJsonDetectorOutputOptions()
-     options.split_folders = True
-     options.make_folder_relative = True
-     options.copy_jsons_to_folders = True
-
-     data = subset_json_detector_output(input_filename,output_filename,options,data)
-
-
- #%% Command-line driver
-
- def main():
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument('input_file', type=str, help='Input .json filename')
-     parser.add_argument('output_file', type=str, help='Output .json filename')
-     parser.add_argument('--query', type=str, default=None, help='Query string to search for (omitting this matches all)')
-     parser.add_argument('--replacement', type=str, default=None, help='Replace [query] with this')
-     parser.add_argument('--confidence_threshold', type=float, default=None, help='Remove detections below this confidence level')
-     parser.add_argument('--split_folders', action='store_true', help='Split .json files by leaf-node folder')
-     parser.add_argument('--split_folder_param', type=int, help='Directory level count for n_from_bottom and n_from_top splitting')
-     parser.add_argument('--split_folder_mode', type=str, help='Folder level to use for splitting ("top" or "bottom")')
-     parser.add_argument('--make_folder_relative', action='store_true', help='Make image paths relative to their containing folder (only meaningful with split_folders)')
-     parser.add_argument('--overwrite_json_files', action='store_true', help='Overwrite output files')
-     parser.add_argument('--copy_jsons_to_folders', action='store_true', help='When using split_folders and make_folder_relative, copy jsons to their corresponding folders (relative to output_file)')
-     parser.add_argument('--create_folders', action='store_true', help='When using copy_jsons_to_folders, create folders that don''t exist')
-
-     if len(sys.argv[1:]) == 0:
-         parser.print_help()
-         parser.exit()
-
-     args = parser.parse_args()
-
-     # Convert to an options object
-     options = SubsetJsonDetectorOutputOptions()
-     if args.create_folders:
-         options.copy_jsons_to_folders_directories_must_exist = False
-
-     args_to_object(args, options)
-
-     subset_json_detector_output(args.input_file, args.output_file, options)
-
- if __name__ == '__main__':
-     main()
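For reference, the removed module's docstring and interactive driver above show how it was driven programmatically. The following is a minimal sketch against the 5.0.10 layout, with placeholder file paths and an illustrative confidence threshold; it simply exercises the API shown in the hunk above and is not part of either wheel:

from api.batch_processing.postprocessing.subset_json_detector_output import (
    SubsetJsonDetectorOutputOptions,
    subset_json_detector_output,
)

# Keep images whose filenames contain "2017", rewrite that token to "blah",
# and drop detections below 0.2 confidence (paths and values are placeholders).
options = SubsetJsonDetectorOutputOptions()
options.query = '2017'
options.replacement = 'blah'
options.confidence_threshold = 0.2
options.overwrite_json_files = True

data = subset_json_detector_output('detections.json', 'detections_2017_blah.json', options)
print('Wrote {} images'.format(len(data['images'])))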