megadetector 5.0.27-py3-none-any.whl → 5.0.29-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (176)
  1. megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
  2. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
  3. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
  7. megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
  8. megadetector/classification/aggregate_classifier_probs.py +3 -3
  9. megadetector/classification/analyze_failed_images.py +5 -5
  10. megadetector/classification/cache_batchapi_outputs.py +5 -5
  11. megadetector/classification/create_classification_dataset.py +11 -12
  12. megadetector/classification/crop_detections.py +10 -10
  13. megadetector/classification/csv_to_json.py +8 -8
  14. megadetector/classification/detect_and_crop.py +13 -15
  15. megadetector/classification/evaluate_model.py +7 -7
  16. megadetector/classification/identify_mislabeled_candidates.py +6 -6
  17. megadetector/classification/json_to_azcopy_list.py +1 -1
  18. megadetector/classification/json_validator.py +29 -32
  19. megadetector/classification/map_classification_categories.py +9 -9
  20. megadetector/classification/merge_classification_detection_output.py +12 -9
  21. megadetector/classification/prepare_classification_script.py +19 -19
  22. megadetector/classification/prepare_classification_script_mc.py +23 -23
  23. megadetector/classification/run_classifier.py +4 -4
  24. megadetector/classification/save_mislabeled.py +6 -6
  25. megadetector/classification/train_classifier.py +1 -1
  26. megadetector/classification/train_classifier_tf.py +9 -9
  27. megadetector/classification/train_utils.py +10 -10
  28. megadetector/data_management/annotations/annotation_constants.py +1 -1
  29. megadetector/data_management/camtrap_dp_to_coco.py +45 -45
  30. megadetector/data_management/cct_json_utils.py +101 -101
  31. megadetector/data_management/cct_to_md.py +49 -49
  32. megadetector/data_management/cct_to_wi.py +33 -33
  33. megadetector/data_management/coco_to_labelme.py +75 -75
  34. megadetector/data_management/coco_to_yolo.py +189 -189
  35. megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
  36. megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
  37. megadetector/data_management/databases/integrity_check_json_db.py +202 -188
  38. megadetector/data_management/databases/subset_json_db.py +33 -33
  39. megadetector/data_management/generate_crops_from_cct.py +38 -38
  40. megadetector/data_management/get_image_sizes.py +54 -49
  41. megadetector/data_management/labelme_to_coco.py +130 -124
  42. megadetector/data_management/labelme_to_yolo.py +78 -72
  43. megadetector/data_management/lila/create_lila_blank_set.py +81 -83
  44. megadetector/data_management/lila/create_lila_test_set.py +32 -31
  45. megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
  46. megadetector/data_management/lila/download_lila_subset.py +21 -24
  47. megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
  48. megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
  49. megadetector/data_management/lila/get_lila_image_counts.py +22 -22
  50. megadetector/data_management/lila/lila_common.py +70 -70
  51. megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
  52. megadetector/data_management/mewc_to_md.py +339 -340
  53. megadetector/data_management/ocr_tools.py +258 -252
  54. megadetector/data_management/read_exif.py +232 -223
  55. megadetector/data_management/remap_coco_categories.py +26 -26
  56. megadetector/data_management/remove_exif.py +31 -20
  57. megadetector/data_management/rename_images.py +187 -187
  58. megadetector/data_management/resize_coco_dataset.py +41 -41
  59. megadetector/data_management/speciesnet_to_md.py +41 -41
  60. megadetector/data_management/wi_download_csv_to_coco.py +55 -55
  61. megadetector/data_management/yolo_output_to_md_output.py +117 -120
  62. megadetector/data_management/yolo_to_coco.py +195 -188
  63. megadetector/detection/change_detection.py +831 -0
  64. megadetector/detection/process_video.py +341 -338
  65. megadetector/detection/pytorch_detector.py +308 -266
  66. megadetector/detection/run_detector.py +186 -166
  67. megadetector/detection/run_detector_batch.py +366 -364
  68. megadetector/detection/run_inference_with_yolov5_val.py +328 -325
  69. megadetector/detection/run_tiled_inference.py +312 -253
  70. megadetector/detection/tf_detector.py +24 -24
  71. megadetector/detection/video_utils.py +291 -283
  72. megadetector/postprocessing/add_max_conf.py +15 -11
  73. megadetector/postprocessing/categorize_detections_by_size.py +44 -44
  74. megadetector/postprocessing/classification_postprocessing.py +808 -311
  75. megadetector/postprocessing/combine_batch_outputs.py +20 -21
  76. megadetector/postprocessing/compare_batch_results.py +528 -517
  77. megadetector/postprocessing/convert_output_format.py +97 -97
  78. megadetector/postprocessing/create_crop_folder.py +220 -147
  79. megadetector/postprocessing/detector_calibration.py +173 -168
  80. megadetector/postprocessing/generate_csv_report.py +508 -0
  81. megadetector/postprocessing/load_api_results.py +25 -22
  82. megadetector/postprocessing/md_to_coco.py +129 -98
  83. megadetector/postprocessing/md_to_labelme.py +89 -83
  84. megadetector/postprocessing/md_to_wi.py +40 -40
  85. megadetector/postprocessing/merge_detections.py +87 -114
  86. megadetector/postprocessing/postprocess_batch_results.py +319 -302
  87. megadetector/postprocessing/remap_detection_categories.py +36 -36
  88. megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
  89. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
  90. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
  91. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
  92. megadetector/postprocessing/separate_detections_into_folders.py +226 -211
  93. megadetector/postprocessing/subset_json_detector_output.py +265 -262
  94. megadetector/postprocessing/top_folders_to_bottom.py +45 -45
  95. megadetector/postprocessing/validate_batch_results.py +70 -70
  96. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
  97. megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
  98. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
  99. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -69
  100. megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
  101. megadetector/taxonomy_mapping/simple_image_download.py +8 -8
  102. megadetector/taxonomy_mapping/species_lookup.py +33 -33
  103. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
  104. megadetector/taxonomy_mapping/taxonomy_graph.py +11 -11
  105. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
  106. megadetector/utils/azure_utils.py +22 -22
  107. megadetector/utils/ct_utils.py +1019 -200
  108. megadetector/utils/directory_listing.py +21 -77
  109. megadetector/utils/gpu_test.py +22 -22
  110. megadetector/utils/md_tests.py +541 -518
  111. megadetector/utils/path_utils.py +1511 -406
  112. megadetector/utils/process_utils.py +41 -41
  113. megadetector/utils/sas_blob_utils.py +53 -49
  114. megadetector/utils/split_locations_into_train_val.py +73 -60
  115. megadetector/utils/string_utils.py +147 -26
  116. megadetector/utils/url_utils.py +463 -173
  117. megadetector/utils/wi_utils.py +2629 -2868
  118. megadetector/utils/write_html_image_list.py +137 -137
  119. megadetector/visualization/plot_utils.py +21 -21
  120. megadetector/visualization/render_images_with_thumbnails.py +37 -73
  121. megadetector/visualization/visualization_utils.py +424 -404
  122. megadetector/visualization/visualize_db.py +197 -190
  123. megadetector/visualization/visualize_detector_output.py +126 -98
  124. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/METADATA +6 -3
  125. megadetector-5.0.29.dist-info/RECORD +163 -0
  126. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
  127. megadetector/data_management/importers/add_nacti_sizes.py +0 -52
  128. megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
  129. megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
  130. megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
  131. megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
  132. megadetector/data_management/importers/awc_to_json.py +0 -191
  133. megadetector/data_management/importers/bellevue_to_json.py +0 -272
  134. megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
  135. megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
  136. megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
  137. megadetector/data_management/importers/cct_field_adjustments.py +0 -58
  138. megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
  139. megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
  140. megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
  141. megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
  142. megadetector/data_management/importers/ena24_to_json.py +0 -276
  143. megadetector/data_management/importers/filenames_to_json.py +0 -386
  144. megadetector/data_management/importers/helena_to_cct.py +0 -283
  145. megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
  146. megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
  147. megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
  148. megadetector/data_management/importers/jb_csv_to_json.py +0 -150
  149. megadetector/data_management/importers/mcgill_to_json.py +0 -250
  150. megadetector/data_management/importers/missouri_to_json.py +0 -490
  151. megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
  152. megadetector/data_management/importers/noaa_seals_2019.py +0 -181
  153. megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
  154. megadetector/data_management/importers/pc_to_json.py +0 -365
  155. megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
  156. megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
  157. megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
  158. megadetector/data_management/importers/rspb_to_json.py +0 -356
  159. megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
  160. megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
  161. megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
  162. megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
  163. megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
  164. megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
  165. megadetector/data_management/importers/sulross_get_exif.py +0 -65
  166. megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
  167. megadetector/data_management/importers/ubc_to_json.py +0 -399
  168. megadetector/data_management/importers/umn_to_json.py +0 -507
  169. megadetector/data_management/importers/wellington_to_json.py +0 -263
  170. megadetector/data_management/importers/wi_to_json.py +0 -442
  171. megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
  172. megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
  173. megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
  174. megadetector-5.0.27.dist-info/RECORD +0 -208
  175. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
  176. {megadetector-5.0.27.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
megadetector/postprocessing/md_to_labelme.py

@@ -3,11 +3,11 @@
 md_to_labelme.py

 "Converts" a MegaDetector output .json file to labelme format (one .json per image
-file). "Convert" is in quotes because this is an opinionated transformation that
+file). "Convert" is in quotes because this is an opinionated transformation that
 requires a confidence threshold.

-TODO:
-
+TODO: # noqa
+
 * support variable confidence thresholds across classes
 * support classification data

@@ -17,6 +17,8 @@ TODO:

 import os
 import json
+import sys
+import argparse

 from tqdm import tqdm

@@ -39,10 +41,10 @@ def get_labelme_dict_for_image(im,image_base_name=None,category_id_to_name=None,
 """
 For the given image struct in MD results format, reformat the detections into
 labelme format.
-
+
 Args:
 im (dict): MegaDetector-formatted results dict, must include 'height' and 'width' fields
-image_base_name (str, optional): written directly to the 'imagePath' field in the output;
+image_base_name (str, optional): written directly to the 'imagePath' field in the output;
 defaults to os.path.basename(im['file']).
 category_id_to_name (dict, optional): maps string-int category IDs to category names, defaults
 to the standard MD categories
@@ -50,20 +52,20 @@ def get_labelme_dict_for_image(im,image_base_name=None,category_id_to_name=None,
 dict
 confidence_threshold (float, optional): only detections at or above this confidence threshold
 will be included in the output dict
-
+
 Return:
 dict: labelme-formatted dictionary, suitable for writing directly to a labelme-formatted .json file
 """
-
+
 if image_base_name is None:
 image_base_name = os.path.basename(im['file'])
-
+
 if category_id_to_name:
 category_id_to_name = DEFAULT_DETECTOR_LABEL_MAP
-
-if confidence_threshold is None:
+
+if confidence_threshold is None:
 confidence_threshold = -1.0
-
+
 output_dict = {}
 if info is not None:
 output_dict['detector_info'] = info
@@ -75,22 +77,22 @@ def get_labelme_dict_for_image(im,image_base_name=None,category_id_to_name=None,
 output_dict['imageWidth'] = im['width']
 output_dict['imageData'] = None
 output_dict['detections'] = im['detections']
-
+
 # det = im['detections'][1]
 for det in im['detections']:
-
+
 if det['conf'] < confidence_threshold:
 continue
-
+
 shape = {}
 shape['conf'] = det['conf']
-shape['label'] = category_id_to_name[det['category']]
+shape['label'] = category_id_to_name[det['category']]
 shape['shape_type'] = 'rectangle'
 shape['description'] = ''
 shape['group_id'] = None
-
+
 # MD boxes are [x_min, y_min, width_of_box, height_of_box] (relative)
-#
+#
 # labelme boxes are [[x0,y0],[x1,y1]] (absolute)
 x0 = round_float(det['bbox'][0] * im['width'],output_precision)
 y0 = round_float(det['bbox'][1] * im['height'],output_precision)
@@ -98,9 +100,9 @@ def get_labelme_dict_for_image(im,image_base_name=None,category_id_to_name=None,
 y1 = round_float(y0 + det['bbox'][3] * im['height'],output_precision)
 shape['points'] = [[x0,y0],[x1,y1]]
 output_dict['shapes'].append(shape)
-
+
 # ...for each detection
-
+
 return output_dict

 # ...def get_labelme_dict_for_image()
@@ -109,17 +111,17 @@ def get_labelme_dict_for_image(im,image_base_name=None,category_id_to_name=None,
 def _write_output_for_image(im,image_base,extension_prefix,info,
 confidence_threshold,category_id_to_name,overwrite,
 verbose=False):
-
+
 if 'failure' in im and im['failure'] is not None:
 assert 'detections' not in im or im['detections'] is None
 if verbose:
 print('Skipping labelme file generation for failed image {}'.format(
 im['file']))
 return
-
+
 im_full_path = os.path.join(image_base,im['file'])
 json_path = os.path.splitext(im_full_path)[0] + extension_prefix + '.json'
-
+
 if (not overwrite) and (os.path.isfile(json_path)):
 if verbose:
 print('Skipping existing file {}'.format(json_path))
@@ -130,10 +132,10 @@ def _write_output_for_image(im,image_base,extension_prefix,info,
 category_id_to_name=category_id_to_name,
 info=info,
 confidence_threshold=confidence_threshold)
-
+
 with open(json_path,'w') as f:
 json.dump(output_dict,f,indent=1)
-
+
 # ...def write_output_for_image(...)


@@ -145,7 +147,7 @@ def md_to_labelme(results_file,image_base,confidence_threshold=None,
 """
 For all the images in [results_file], write a .json file in labelme format alongside the
 corresponding relative path within image_base.
-
+
 Args:
 results_file (str): MD results .json file to convert to Labelme format
 image_base (str): folder of images; filenames in [results_file] should be relative to
@@ -154,19 +156,19 @@ def md_to_labelme(results_file,image_base,confidence_threshold=None,
 will be included in the output dict
 overwrite (bool, optional): whether to overwrite existing output files; if this is False
 and the output file for an image exists, we'll skip that image
-extension_prefix (str, optional): if non-empty, "extension_prefix" will be inserted before the .json
+extension_prefix (str, optional): if non-empty, "extension_prefix" will be inserted before the .json
 extension
 n_workers (int, optional): enables multiprocessing if > 1
 use_threads (bool, optional): if [n_workers] > 1, determines whether we parallelize via threads (True)
 or processes (False)
 bypass_image_size_read (bool, optional): if True, skips reading image sizes and trusts whatever is in
 the MD results file (don't set this to "True" if your MD results file doesn't contain image sizes)
-verbose (bool, optional): enables additionald ebug output
+verbose (bool, optional): enables additionald ebug output
 """
-
+
 if extension_prefix is None:
 extension_prefix = ''
-
+
 # Load MD results if necessary
 if isinstance(results_file,dict):
 md_results = results_file
@@ -174,34 +176,34 @@ def md_to_labelme(results_file,image_base,confidence_threshold=None,
 print('Loading MD results...')
 with open(results_file,'r') as f:
 md_results = json.load(f)
-
-# Read image sizes if necessary
-if bypass_image_size_read:
-
+
+# Read image sizes if necessary
+if bypass_image_size_read:
+
 print('Bypassing image size read')
-
+
 else:
-
+
 # TODO: parallelize this loop
-
+
 print('Reading image sizes...')
-
+
 # im = md_results['images'][0]
 for im in tqdm(md_results['images']):
-
+
 # Make sure this file exists
 im_full_path = os.path.join(image_base,im['file'])
 assert os.path.isfile(im_full_path), 'Image file {} does not exist'.format(im_full_path)
-
+
 json_path = os.path.splitext(im_full_path)[0] + extension_prefix + '.json'
-
+
 # Don't even bother reading sizes for files we're not going to generate
 if (not overwrite) and (os.path.isfile(json_path)):
 continue
-
+
 # Load w/h information if necessary
 if 'height' not in im or 'width' not in im:
-
+
 try:
 pil_im = open_image(im_full_path)
 im['width'] = pil_im.width
@@ -211,88 +213,92 @@ def md_to_labelme(results_file,image_base,confidence_threshold=None,
 im_full_path))
 if 'failure' not in im:
 im['failure'] = FAILURE_IMAGE_OPEN
-
+
 # ...if we need to read w/h information
-
+
 # ...for each image
-
-# ...if we're not bypassing image size read
-
+
+# ...if we're not bypassing image size read
+
 print('\nGenerating labelme files...')
-
+
 # Write output
 if n_workers <= 1:
-for im in tqdm(md_results['images']):
+for im in tqdm(md_results['images']):
 _write_output_for_image(im,image_base,extension_prefix,md_results['info'],confidence_threshold,
 md_results['detection_categories'],overwrite,verbose)
 else:
-if use_threads:
-print('Starting parallel thread pool with {} workers'.format(n_workers))
-pool = ThreadPool(n_workers)
-else:
-print('Starting parallel process pool with {} workers'.format(n_workers))
-pool = Pool(n_workers)
-_ = list(tqdm(pool.imap(
-partial(_write_output_for_image,
-image_base=image_base,extension_prefix=extension_prefix,
-info=md_results['info'],confidence_threshold=confidence_threshold,
-category_id_to_name=md_results['detection_categories'],
-overwrite=overwrite,verbose=verbose),
-md_results['images']),
-total=len(md_results['images'])))
-
+pool = None
+try:
+if use_threads:
+print('Starting parallel thread pool with {} workers'.format(n_workers))
+pool = ThreadPool(n_workers)
+else:
+print('Starting parallel process pool with {} workers'.format(n_workers))
+pool = Pool(n_workers)
+_ = list(tqdm(pool.imap(
+partial(_write_output_for_image,
+image_base=image_base,extension_prefix=extension_prefix,
+info=md_results['info'],confidence_threshold=confidence_threshold,
+category_id_to_name=md_results['detection_categories'],
+overwrite=overwrite,verbose=verbose),
+md_results['images']),
+total=len(md_results['images'])))
+finally:
+pool.close()
+pool.join()
+print("Pool closed and joined for labelme file writes")
+
 # ...for each image
-
+
 # ...def md_to_labelme()


 #%% Interactive driver

 if False:
-
+
 pass

 #%% Configure options
-
+
 md_results_file = os.path.expanduser('~/data/md-test.json')
 coco_output_file = os.path.expanduser('~/data/md-test-coco.json')
-image_folder = os.path.expanduser('~/data/md-test')
+image_folder = os.path.expanduser('~/data/md-test')
 confidence_threshold = 0.2
-overwrite = True
-
-
+overwrite = True
+
+
 #%% Programmatic execution
-
+
 md_to_labelme(results_file=md_results_file,
 image_base=image_folder,
 confidence_threshold=confidence_threshold,
 overwrite=overwrite)

-
+
 #%% Command-line execution
-
+
 s = 'python md_to_labelme.py {} {} --confidence_threshold {}'.format(md_results_file,
 image_folder,
 confidence_threshold)
 if overwrite:
 s += ' --overwrite'
-
+
 print(s)
 import clipboard; clipboard.copy(s)


 #%% Opening labelme
-
+
 s = 'python labelme {}'.format(image_folder)
 print(s)
 import clipboard; clipboard.copy(s)
-

-#%% Command-line driver

-import sys,argparse
+#%% Command-line driver

-def main():
+def main(): # noqa

 parser = argparse.ArgumentParser(
 description='Convert MD output to labelme annotation format')
@@ -300,19 +306,19 @@ def main():
 'results_file',
 type=str,
 help='Path to MD results file (.json)')
-
+
 parser.add_argument(
 'image_base',
 type=str,
 help='Path to images (also the output folder)')
-
+
 parser.add_argument(
 '--confidence_threshold',
 type=float,
 default=default_confidence_threshold,
 help='Confidence threshold (default {})'.format(default_confidence_threshold)
 )
-
+
 parser.add_argument(
 '--overwrite',
 action='store_true',
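
For readers unfamiliar with the two box formats involved, the loop in get_labelme_dict_for_image above converts MegaDetector's relative [x_min, y_min, width_of_box, height_of_box] boxes into labelme's absolute [[x0,y0],[x1,y1]] corner points. Below is a minimal standalone sketch of that conversion with a made-up image size and detection; the real code also rounds coordinates via round_float/output_precision, which is omitted here.

# Sketch of the MD-box -> labelme-shape conversion shown in the diff above.
# The image size and detection are made up for illustration.
im = {'file': 'example.jpg', 'width': 1920, 'height': 1080}
det = {'category': '1', 'conf': 0.91, 'bbox': [0.25, 0.40, 0.10, 0.20]}  # relative [x_min, y_min, w, h]

# MD boxes are relative to image size; labelme points are absolute pixel coordinates
x0 = det['bbox'][0] * im['width']        # 480.0
y0 = det['bbox'][1] * im['height']       # 432.0
x1 = x0 + det['bbox'][2] * im['width']   # 672.0
y1 = y0 + det['bbox'][3] * im['height']  # 648.0

shape = {
    'label': 'animal',   # the real code uses category_id_to_name[det['category']]; '1' is 'animal' in the default MD map
    'conf': det['conf'],
    'shape_type': 'rectangle',
    'description': '',
    'group_id': None,
    'points': [[x0, y0], [x1, y1]],
}
print(shape['points'])  # [[480.0, 432.0], [672.0, 648.0]]
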
megadetector/postprocessing/md_to_wi.py

@@ -1,40 +1,40 @@
-"""
-
-md_to_wi.py
-
-Converts the MD .json format to the WI predictions.json format.
-
-"""
-
-#%% Imports and constants
-
-import sys
-import argparse
-from megadetector.utils.wi_utils import generate_predictions_json_from_md_results
-
-
-#%% Command-line driver
-
-def main():
-
-parser = argparse.ArgumentParser()
-parser.add_argument('md_results_file', action='store', type=str,
-help='output file in MD format to convert')
-parser.add_argument('predictions_json_file', action='store', type=str,
-help='.json file to write in predictions.json format')
-parser.add_argument('--base_folder', action='store', type=str, default=None,
-help='folder name to prepend to each path in md_results_file, ' + \
-'to convert relative paths to absolute paths.')
-
-if len(sys.argv[1:]) == 0:
-parser.print_help()
-parser.exit()
-
-args = parser.parse_args()
-
-generate_predictions_json_from_md_results(args.md_results_file,
-args.predictions_json_file,
-base_folder=None)
-
-if __name__ == '__main__':
-main()
+"""
+
+md_to_wi.py
+
+Converts the MD .json format to the WI predictions.json format.
+
+"""
+
+#%% Imports and constants
+
+import sys
+import argparse
+from megadetector.utils.wi_utils import generate_predictions_json_from_md_results
+
+
+#%% Command-line driver
+
+def main(): # noqa
+
+parser = argparse.ArgumentParser()
+parser.add_argument('md_results_file', action='store', type=str,
+help='output file in MD format to convert')
+parser.add_argument('predictions_json_file', action='store', type=str,
+help='.json file to write in predictions.json format')
+parser.add_argument('--base_folder', action='store', type=str, default=None,
+help='folder name to prepend to each path in md_results_file, ' + \
+'to convert relative paths to absolute paths.')
+
+if len(sys.argv[1:]) == 0:
+parser.print_help()
+parser.exit()
+
+args = parser.parse_args()
+
+generate_predictions_json_from_md_results(args.md_results_file,
+args.predictions_json_file,
+base_folder=None)
+
+if __name__ == '__main__':
+main()
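
For context, the rewritten md_to_wi.py above remains a thin argparse wrapper around generate_predictions_json_from_md_results from megadetector.utils.wi_utils. A minimal programmatic sketch of the same conversion, using hypothetical file names, would look roughly like this:

# Programmatic sketch of what md_to_wi.py's main() does, per the diff above.
# The file names here are hypothetical placeholders.
from megadetector.utils.wi_utils import generate_predictions_json_from_md_results

md_results_file = 'md_results.json'          # MegaDetector-format results to convert
predictions_json_file = 'predictions.json'   # WI predictions.json output path

generate_predictions_json_from_md_results(md_results_file,
                                           predictions_json_file,
                                           base_folder=None)

The command-line equivalent, per the positional arguments defined in the script above, would be along the lines of python md_to_wi.py md_results.json predictions.json, optionally with --base_folder to convert relative paths to absolute paths.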