megadetector 5.0.7__py3-none-any.whl → 5.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of megadetector might be problematic; see the package registry page for more details.

Files changed (48):
  1. api/batch_processing/data_preparation/manage_local_batch.py +28 -14
  2. api/batch_processing/postprocessing/combine_api_outputs.py +2 -2
  3. api/batch_processing/postprocessing/compare_batch_results.py +1 -1
  4. api/batch_processing/postprocessing/convert_output_format.py +24 -6
  5. api/batch_processing/postprocessing/load_api_results.py +1 -3
  6. api/batch_processing/postprocessing/md_to_labelme.py +118 -51
  7. api/batch_processing/postprocessing/merge_detections.py +30 -5
  8. api/batch_processing/postprocessing/postprocess_batch_results.py +24 -12
  9. api/batch_processing/postprocessing/remap_detection_categories.py +163 -0
  10. api/batch_processing/postprocessing/render_detection_confusion_matrix.py +15 -12
  11. api/batch_processing/postprocessing/repeat_detection_elimination/repeat_detections_core.py +2 -2
  12. data_management/cct_json_utils.py +7 -2
  13. data_management/coco_to_labelme.py +263 -0
  14. data_management/coco_to_yolo.py +7 -4
  15. data_management/databases/integrity_check_json_db.py +68 -59
  16. data_management/databases/subset_json_db.py +1 -1
  17. data_management/get_image_sizes.py +44 -26
  18. data_management/importers/animl_results_to_md_results.py +1 -3
  19. data_management/importers/noaa_seals_2019.py +1 -1
  20. data_management/labelme_to_coco.py +252 -143
  21. data_management/labelme_to_yolo.py +95 -52
  22. data_management/lila/create_lila_blank_set.py +106 -23
  23. data_management/lila/download_lila_subset.py +133 -65
  24. data_management/lila/generate_lila_per_image_labels.py +1 -1
  25. data_management/lila/lila_common.py +8 -38
  26. data_management/read_exif.py +65 -16
  27. data_management/remap_coco_categories.py +84 -0
  28. data_management/resize_coco_dataset.py +3 -22
  29. data_management/wi_download_csv_to_coco.py +239 -0
  30. data_management/yolo_to_coco.py +283 -83
  31. detection/run_detector_batch.py +12 -3
  32. detection/run_inference_with_yolov5_val.py +10 -3
  33. detection/run_tiled_inference.py +2 -2
  34. detection/tf_detector.py +2 -1
  35. detection/video_utils.py +1 -1
  36. md_utils/ct_utils.py +22 -3
  37. md_utils/md_tests.py +11 -2
  38. md_utils/path_utils.py +206 -32
  39. md_utils/url_utils.py +66 -1
  40. md_utils/write_html_image_list.py +12 -3
  41. md_visualization/visualization_utils.py +363 -72
  42. md_visualization/visualize_db.py +33 -10
  43. {megadetector-5.0.7.dist-info → megadetector-5.0.8.dist-info}/METADATA +10 -12
  44. {megadetector-5.0.7.dist-info → megadetector-5.0.8.dist-info}/RECORD +47 -44
  45. {megadetector-5.0.7.dist-info → megadetector-5.0.8.dist-info}/WHEEL +1 -1
  46. md_visualization/visualize_megadb.py +0 -183
  47. {megadetector-5.0.7.dist-info → megadetector-5.0.8.dist-info}/LICENSE +0 -0
  48. {megadetector-5.0.7.dist-info → megadetector-5.0.8.dist-info}/top_level.txt +0 -0
@@ -2,7 +2,7 @@
2
2
  #
3
3
  # labelme_to_coco.py
4
4
  #
5
- # Converts a folder of labelme-formatted .json files to COCO.
5
+ # Converts a folder of labelme-formatted .json files to COCO format.
6
6
  #
7
7
  ########
8
8
 
@@ -15,10 +15,177 @@ import uuid
15
15
  from md_utils import path_utils
16
16
  from md_visualization.visualization_utils import open_image
17
17
 
18
+ from multiprocessing.pool import Pool, ThreadPool
19
+ from functools import partial
20
+
18
21
  from tqdm import tqdm
19
22
 
20
23
 
21
- #%% Functions
24
+ #%% Support functions
25
+
26
+ def add_category(category_name,category_name_to_id,candidate_category_id=0):
27
+ """
28
+ Add the category [category_name] to the dict [category_name_to_id], by default
29
+ using the next available integer index.
30
+ """
31
+
32
+ if category_name in category_name_to_id:
33
+ return category_name_to_id[category_name]
34
+ while candidate_category_id in category_name_to_id.values():
35
+ candidate_category_id += 1
36
+ category_name_to_id[category_name] = candidate_category_id
37
+ return candidate_category_id
38
+
39
+
40
+ def _process_labelme_file(image_fn_relative,input_folder,use_folders_as_labels,
41
+ no_json_handling,validate_image_sizes,
42
+ category_name_to_id,allow_new_categories=True):
43
+ """
44
+ Internal function for processing each image; this support function facilitates parallelization.
45
+ """
46
+
47
+ result = {}
48
+ result['im'] = None
49
+ result['annotations_this_image'] = None
50
+ result['status'] = None
51
+
52
+ image_fn_abs = os.path.join(input_folder,image_fn_relative)
53
+ json_fn_abs = os.path.splitext(image_fn_abs)[0] + '.json'
54
+
55
+ im = {}
56
+ im['id'] = image_fn_relative
57
+ im['file_name'] = image_fn_relative
58
+
59
+ # If there's no .json file for this image...
60
+ if not os.path.isfile(json_fn_abs):
61
+
62
+ # Either skip it...
63
+ if no_json_handling == 'skip':
64
+ print('Skipping image {} (no .json file)'.format(image_fn_relative))
65
+ result['status'] = 'skipped (no .json file)'
66
+ return result
67
+
68
+ # ...or error
69
+ elif no_json_handling == 'error':
70
+ raise ValueError('Image file {} has no corresponding .json file'.format(
71
+ image_fn_relative))
72
+
73
+ # ...or treat it as empty.
74
+ elif no_json_handling == 'empty':
75
+ try:
76
+ pil_im = open_image(image_fn_abs)
77
+ except Exception:
78
+ print('Warning: error opening image {}, skipping'.format(image_fn_abs))
79
+ result['status'] = 'image load error'
80
+ return result
81
+ im['width'] = pil_im.width
82
+ im['height'] = pil_im.height
83
+
84
+ # Just in case we need to differentiate between "no .json file" and "a .json file with no annotations"
85
+ im['no_labelme_json'] = True
86
+ shapes = []
87
+ else:
88
+ raise ValueError('Unrecognized specifier {} for handling images with no .json files'.format(
89
+ no_json_handling))
90
+
91
+ # If we found a .json file for this image...
92
+ else:
93
+
94
+ # Read the .json file
95
+ with open(json_fn_abs,'r') as f:
96
+ labelme_data = json.load(f)
97
+ im['width'] = labelme_data['imageWidth']
98
+ im['height'] = labelme_data['imageHeight']
99
+
100
+ if validate_image_sizes:
101
+ try:
102
+ pil_im = open_image(image_fn_abs)
103
+ except Exception:
104
+ print('Warning: error opening image {} for size validation, skipping'.format(image_fn_abs))
105
+ result['status'] = 'skipped (size validation error)'
106
+ return result
107
+ if not (im['width'] == pil_im.width and im['height'] == pil_im.height):
108
+ print('Warning: image size validation error for file {}'.format(image_fn_relative))
109
+ im['width'] = pil_im.width
110
+ im['height'] = pil_im.height
111
+ im['labelme_width'] = labelme_data['imageWidth']
112
+ im['labelme_height'] = labelme_data['imageHeight']
113
+
114
+ shapes = labelme_data['shapes']
115
+
116
+ if ('flags' in labelme_data) and (len(labelme_data['flags']) > 0):
117
+ im['flags'] = labelme_data['flags']
118
+
119
+ annotations_this_image = []
120
+
121
+ if len(shapes) == 0:
122
+
123
+ if allow_new_categories:
124
+ category_id = add_category('empty',category_name_to_id)
125
+ else:
126
+ assert 'empty' in category_name_to_id
127
+ category_id = category_name_to_id['empty']
128
+
129
+ ann = {}
130
+ ann['id'] = str(uuid.uuid1())
131
+ ann['image_id'] = im['id']
132
+ ann['category_id'] = category_id
133
+ ann['sequence_level_annotation'] = False
134
+ annotations_this_image.append(ann)
135
+
136
+ else:
137
+
138
+ for shape in shapes:
139
+
140
+ if shape['shape_type'] != 'rectangle':
141
+ print('Only rectangles are supported, skipping an annotation of type {} in {}'.format(
142
+ shape['shape_type'],image_fn_relative))
143
+ continue
144
+
145
+ if use_folders_as_labels:
146
+ category_name = os.path.basename(os.path.dirname(image_fn_abs))
147
+ else:
148
+ category_name = shape['label']
149
+
150
+ if allow_new_categories:
151
+ category_id = add_category(category_name,category_name_to_id)
152
+ else:
153
+ assert category_name in category_name_to_id
154
+ category_id = category_name_to_id[category_name]
155
+
156
+ points = shape['points']
157
+ if len(points) != 2:
158
+ print('Warning: illegal rectangle with {} points for {}'.format(
159
+ len(points),image_fn_relative))
160
+ continue
161
+
162
+ p0 = points[0]
163
+ p1 = points[1]
164
+ x0 = min(p0[0],p1[0])
165
+ x1 = max(p0[0],p1[0])
166
+ y0 = min(p0[1],p1[1])
167
+ y1 = max(p0[1],p1[1])
168
+
169
+ bbox = [x0,y0,abs(x1-x0),abs(y1-y0)]
170
+ ann = {}
171
+ ann['id'] = str(uuid.uuid1())
172
+ ann['image_id'] = im['id']
173
+ ann['category_id'] = category_id
174
+ ann['sequence_level_annotation'] = False
175
+ ann['bbox'] = bbox
176
+ annotations_this_image.append(ann)
177
+
178
+ # ...for each shape
179
+
180
+ result['im'] = im
181
+ result['annotations_this_image'] = annotations_this_image
182
+
183
+ return result
184
+
185
+ # ...def _process_labelme_file(...)
186
+
187
+
188
+ #%% Main function
22
189
 
23
190
  def labelme_to_coco(input_folder,
24
191
  output_file=None,
@@ -32,12 +199,17 @@ def labelme_to_coco(input_folder,
32
199
  recursive=True,
33
200
  no_json_handling='skip',
34
201
  validate_image_sizes=True,
35
- right_edge_quantization_threshold=None):
202
+ max_workers=1,
203
+ use_threads=True):
36
204
  """
37
205
  Find all images in [input_folder] that have corresponding .json files, and convert
38
206
  to a COCO .json file.
39
207
 
40
- Currently only supports bounding box annotations.
208
+ Currently only supports bounding box annotations and image-level flags (i.e., does not
209
+ support point or general polygon annotations).
210
+
211
+ Labelme's image-level flags don't quite fit the COCO annotations format, so they are attached
212
+ to image objects, rather than annotation objects.
41
213
 
42
214
  If output_file is None, just returns the resulting dict, does not write to file.
43
215
 
@@ -56,38 +228,59 @@ def labelme_to_coco(input_folder,
56
228
 
57
229
  * 'skip': ignore image files with no corresponding .json files
58
230
  * 'empty': treat image files with no corresponding .json files as empty
59
- * 'error': throw an error when an image file has no corresponding .json file
60
-
61
- right_edge_quantization_threshold is an off-by-default hack to handle cases where
62
- boxes that really should be running off the right side of the image only extend like 99%
63
- of the way there, due to what appears to be a slight bias inherent to MD. If a box extends
64
- within [right_edge_quantization_threshold] (a small number, from 0 to 1, but probably around
65
- 0.02) of the right edge of the image, it will be extended to the far right edge.
231
+ * 'error': throw an error when an image file has no corresponding .json file
66
232
  """
67
233
 
234
+ if max_workers > 1:
235
+ assert category_id_to_category_name is not None, \
236
+ 'When parallelizing labelme --> COCO conversion, you must supply a category mapping'
237
+
68
238
  if category_id_to_category_name is None:
69
239
  category_name_to_id = {}
70
240
  else:
71
241
  category_name_to_id = {v: k for k, v in category_id_to_category_name.items()}
72
-
73
242
  for category_name in category_name_to_id:
74
243
  try:
75
244
  category_name_to_id[category_name] = int(category_name_to_id[category_name])
76
245
  except ValueError:
77
246
  raise ValueError('Category IDs must be ints or string-formatted ints')
247
+
248
+ # If the user supplied an explicit empty category ID, and the empty category
249
+ # name is already in category_name_to_id, make sure they match.
250
+ if empty_category_id is not None:
251
+ if empty_category_name in category_name_to_id:
252
+ assert category_name_to_id[empty_category_name] == empty_category_id, \
253
+ 'Ambiguous empty category specification'
254
+ if empty_category_id in category_id_to_category_name:
255
+ assert category_id_to_category_name[empty_category_id] == empty_category_name, \
256
+ 'Ambiguous empty category specification'
257
+ else:
258
+ if empty_category_name in category_name_to_id:
259
+ empty_category_id = category_name_to_id[empty_category_name]
78
260
 
261
+ del category_id_to_category_name
262
+
79
263
  # Enumerate images
264
+ print('Enumerating images in {}'.format(input_folder))
80
265
  image_filenames_relative = path_utils.find_images(input_folder,recursive=recursive,
81
- return_relative_paths=True)
82
-
83
- def add_category(category_name,candidate_category_id=0):
84
- if category_name in category_name_to_id:
85
- return category_name_to_id[category_name]
86
- while candidate_category_id in category_name_to_id.values():
87
- candidate_category_id += 1
88
- category_name_to_id[category_name] = candidate_category_id
89
- return candidate_category_id
266
+ return_relative_paths=True,
267
+ convert_slashes=True)
268
+
269
+ # Remove any images we're supposed to skip
270
+ if (relative_paths_to_include is not None) or (relative_paths_to_exclude is not None):
271
+ image_filenames_relative_to_process = []
272
+ for image_fn_relative in image_filenames_relative:
273
+ if relative_paths_to_include is not None and image_fn_relative not in relative_paths_to_include:
274
+ continue
275
+ if relative_paths_to_exclude is not None and image_fn_relative in relative_paths_to_exclude:
276
+ continue
277
+ image_filenames_relative_to_process.append(image_fn_relative)
278
+ print('Processing {} of {} images'.format(
279
+ len(image_filenames_relative_to_process),
280
+ len(image_filenames_relative)))
281
+ image_filenames_relative = image_filenames_relative_to_process
90
282
 
283
+ # If the user supplied a category ID to use for empty images...
91
284
  if empty_category_id is not None:
92
285
  try:
93
286
  empty_category_id = int(empty_category_id)
@@ -95,136 +288,52 @@ def labelme_to_coco(input_folder,
95
288
  raise ValueError('Category IDs must be ints or string-formatted ints')
96
289
 
97
290
  if empty_category_id is None:
98
- empty_category_id = add_category(empty_category_name)
99
-
100
- images = []
101
- annotations = []
102
-
103
- n_edges_quantized = 0
104
-
105
- # image_fn_relative = image_filenames_relative[0]
106
- for image_fn_relative in tqdm(image_filenames_relative):
107
-
108
- if relative_paths_to_include is not None and image_fn_relative not in relative_paths_to_include:
109
- continue
110
- if relative_paths_to_exclude is not None and image_fn_relative in relative_paths_to_exclude:
111
- continue
112
-
113
- image_fn_abs = os.path.join(input_folder,image_fn_relative)
114
- json_fn_abs = os.path.splitext(image_fn_abs)[0] + '.json'
115
-
116
- im = {}
117
- im['id'] = image_fn_relative
118
- im['file_name'] = image_fn_relative
119
-
120
- # If there's no .json file for this image...
121
- if not os.path.isfile(json_fn_abs):
122
-
123
- # Either skip it...
124
- if no_json_handling == 'skip':
125
- continue
126
-
127
- # ...or error
128
- elif no_json_handling == 'error':
129
- raise ValueError('Image file {} has no corresponding .json file'.format(
130
- image_fn_relative))
291
+ empty_category_id = add_category(empty_category_name,category_name_to_id)
131
292
 
132
- # ...or treat it as empty.
133
- elif no_json_handling == 'empty':
134
- try:
135
- pil_im = open_image(image_fn_abs)
136
- except Exception:
137
- print('Warning: error opening image {}, skipping'.format(image_fn_abs))
138
- continue
139
- im['width'] = pil_im.width
140
- im['height'] = pil_im.height
141
- shapes = []
142
- else:
143
- raise ValueError('Unrecognized specifier {} for handling images with no .json files'.format(
144
- no_json_handling))
293
+ if max_workers <= 1:
145
294
 
146
- # If we found a .json file for this image...
147
- else:
295
+ image_results = []
296
+ for image_fn_relative in tqdm(image_filenames_relative):
148
297
 
149
- # Read the .json file
150
- with open(json_fn_abs,'r') as f:
151
- labelme_data = json.load(f)
152
- im['width'] = labelme_data['imageWidth']
153
- im['height'] = labelme_data['imageHeight']
298
+ result = _process_labelme_file(image_fn_relative,input_folder,use_folders_as_labels,
299
+ no_json_handling,validate_image_sizes,
300
+ category_name_to_id,allow_new_categories=True)
301
+ image_results.append(result)
154
302
 
155
- if validate_image_sizes:
156
- try:
157
- pil_im = open_image(image_fn_abs)
158
- except Exception:
159
- print('Warning: error opening image {}, skipping'.format(image_fn_abs))
160
- continue
161
- assert im['width'] == pil_im.width and im['height'] == pil_im.height, \
162
- 'Image size validation error for file {}'.format(image_fn_relative)
163
-
164
- shapes = labelme_data['shapes']
303
+ else:
165
304
 
166
- if len(shapes) == 0:
167
-
168
- category_id = add_category('empty')
169
- ann = {}
170
- ann['id'] = str(uuid.uuid1())
171
- ann['image_id'] = im['id']
172
- ann['category_id'] = category_id
173
- ann['sequence_level_annotation'] = False
174
- annotations.append(ann)
175
-
305
+ n_workers = min(max_workers,len(image_filenames_relative))
306
+ assert category_name_to_id is not None
307
+
308
+ if use_threads:
309
+ pool = ThreadPool(n_workers)
176
310
  else:
177
-
178
- for shape in shapes:
179
- if shape['shape_type'] != 'rectangle':
180
- print('Only rectangles are supported, skipping an annotation of type {} in {}'.format(
181
- shape['shape_type'],image_fn_relative))
182
- continue
183
-
184
- if use_folders_as_labels:
185
- category_name = os.path.basename(os.path.dirname(image_fn_abs))
186
- else:
187
- category_name = shape['label']
188
-
189
- category_id = add_category(category_name)
190
-
191
- points = shape['points']
192
- assert len(points) == 2, 'Illegal rectangle with {} points'.format(
193
- len(points))
194
-
195
- p0 = points[0]
196
- p1 = points[1]
197
- x0 = min(p0[0],p1[0])
198
- x1 = max(p0[0],p1[0])
199
- y0 = min(p0[1],p1[1])
200
- y1 = max(p0[1],p1[1])
201
-
202
- if right_edge_quantization_threshold is not None:
203
- x1_rel = x1 / (im['width'] - 1)
204
- right_edge_distance = 1.0 - x1_rel
205
- if right_edge_distance < right_edge_quantization_threshold:
206
- n_edges_quantized += 1
207
- x1 = im['width'] - 1
208
-
209
- bbox = [x0,y0,abs(x1-x0),abs(y1-y0)]
210
- ann = {}
211
- ann['id'] = str(uuid.uuid1())
212
- ann['image_id'] = im['id']
213
- ann['category_id'] = category_id
214
- ann['sequence_level_annotation'] = False
215
- ann['bbox'] = bbox
216
- annotations.append(ann)
217
-
218
- # ...for each shape
219
-
220
- images.append(im)
221
-
222
- # ..for each image
311
+ pool = Pool(n_workers)
312
+
313
+ image_results = list(tqdm(pool.imap(
314
+ partial(_process_labelme_file,
315
+ input_folder=input_folder,
316
+ use_folders_as_labels=use_folders_as_labels,
317
+ no_json_handling=no_json_handling,
318
+ validate_image_sizes=validate_image_sizes,
319
+ category_name_to_id=category_name_to_id,
320
+ allow_new_categories=False
321
+ ),image_filenames_relative), total=len(image_filenames_relative)))
322
+
323
+ images = []
324
+ annotations = []
223
325
 
224
- if n_edges_quantized > 0:
225
- print('Quantized the right edge in {} of {} images'.format(
226
- n_edges_quantized,len(image_filenames_relative)))
326
+ # Flatten the lists of images and annotations
327
+ for result in image_results:
328
+ im = result['im']
329
+ annotations_this_image = result['annotations_this_image']
227
330
 
331
+ if im is None:
332
+ assert annotations_this_image is None
333
+ else:
334
+ images.append(im)
335
+ annotations.extend(annotations_this_image)
336
+
228
337
  output_dict = {}
229
338
  output_dict['images'] = images
230
339
  output_dict['annotations'] = annotations