megadetector 10.0.9__py3-none-any.whl → 10.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (84)
  1. megadetector/data_management/animl_to_md.py +5 -2
  2. megadetector/data_management/cct_json_utils.py +4 -2
  3. megadetector/data_management/cct_to_md.py +5 -4
  4. megadetector/data_management/cct_to_wi.py +5 -1
  5. megadetector/data_management/coco_to_yolo.py +3 -2
  6. megadetector/data_management/databases/combine_coco_camera_traps_files.py +4 -4
  7. megadetector/data_management/databases/integrity_check_json_db.py +2 -2
  8. megadetector/data_management/databases/subset_json_db.py +0 -3
  9. megadetector/data_management/generate_crops_from_cct.py +6 -4
  10. megadetector/data_management/get_image_sizes.py +5 -35
  11. megadetector/data_management/labelme_to_coco.py +10 -6
  12. megadetector/data_management/labelme_to_yolo.py +19 -28
  13. megadetector/data_management/lila/create_lila_test_set.py +22 -2
  14. megadetector/data_management/lila/generate_lila_per_image_labels.py +7 -5
  15. megadetector/data_management/lila/lila_common.py +2 -2
  16. megadetector/data_management/lila/test_lila_metadata_urls.py +0 -1
  17. megadetector/data_management/ocr_tools.py +6 -10
  18. megadetector/data_management/read_exif.py +69 -13
  19. megadetector/data_management/remap_coco_categories.py +1 -1
  20. megadetector/data_management/remove_exif.py +10 -5
  21. megadetector/data_management/rename_images.py +20 -13
  22. megadetector/data_management/resize_coco_dataset.py +10 -4
  23. megadetector/data_management/speciesnet_to_md.py +3 -3
  24. megadetector/data_management/yolo_output_to_md_output.py +3 -1
  25. megadetector/data_management/yolo_to_coco.py +28 -19
  26. megadetector/detection/change_detection.py +26 -18
  27. megadetector/detection/process_video.py +1 -1
  28. megadetector/detection/pytorch_detector.py +5 -5
  29. megadetector/detection/run_detector.py +34 -10
  30. megadetector/detection/run_detector_batch.py +60 -42
  31. megadetector/detection/run_inference_with_yolov5_val.py +3 -1
  32. megadetector/detection/run_md_and_speciesnet.py +282 -110
  33. megadetector/detection/run_tiled_inference.py +7 -7
  34. megadetector/detection/tf_detector.py +4 -6
  35. megadetector/detection/video_utils.py +9 -6
  36. megadetector/postprocessing/add_max_conf.py +4 -4
  37. megadetector/postprocessing/categorize_detections_by_size.py +3 -2
  38. megadetector/postprocessing/classification_postprocessing.py +19 -21
  39. megadetector/postprocessing/combine_batch_outputs.py +3 -2
  40. megadetector/postprocessing/compare_batch_results.py +49 -27
  41. megadetector/postprocessing/convert_output_format.py +8 -6
  42. megadetector/postprocessing/create_crop_folder.py +13 -4
  43. megadetector/postprocessing/generate_csv_report.py +22 -8
  44. megadetector/postprocessing/load_api_results.py +8 -4
  45. megadetector/postprocessing/md_to_coco.py +2 -3
  46. megadetector/postprocessing/md_to_labelme.py +12 -8
  47. megadetector/postprocessing/md_to_wi.py +2 -1
  48. megadetector/postprocessing/merge_detections.py +4 -6
  49. megadetector/postprocessing/postprocess_batch_results.py +4 -3
  50. megadetector/postprocessing/remap_detection_categories.py +6 -3
  51. megadetector/postprocessing/render_detection_confusion_matrix.py +18 -10
  52. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +1 -1
  53. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +5 -3
  54. megadetector/postprocessing/separate_detections_into_folders.py +10 -4
  55. megadetector/postprocessing/subset_json_detector_output.py +1 -1
  56. megadetector/postprocessing/top_folders_to_bottom.py +22 -7
  57. megadetector/postprocessing/validate_batch_results.py +1 -1
  58. megadetector/taxonomy_mapping/map_new_lila_datasets.py +59 -3
  59. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +1 -1
  60. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +26 -17
  61. megadetector/taxonomy_mapping/species_lookup.py +51 -2
  62. megadetector/utils/ct_utils.py +9 -4
  63. megadetector/utils/directory_listing.py +3 -0
  64. megadetector/utils/extract_frames_from_video.py +4 -0
  65. megadetector/utils/gpu_test.py +6 -6
  66. megadetector/utils/md_tests.py +21 -21
  67. megadetector/utils/path_utils.py +171 -36
  68. megadetector/utils/split_locations_into_train_val.py +0 -4
  69. megadetector/utils/string_utils.py +21 -0
  70. megadetector/utils/url_utils.py +5 -3
  71. megadetector/utils/wi_platform_utils.py +168 -24
  72. megadetector/utils/wi_taxonomy_utils.py +38 -8
  73. megadetector/utils/write_html_image_list.py +1 -2
  74. megadetector/visualization/plot_utils.py +31 -19
  75. megadetector/visualization/render_images_with_thumbnails.py +3 -0
  76. megadetector/visualization/visualization_utils.py +18 -7
  77. megadetector/visualization/visualize_db.py +9 -26
  78. megadetector/visualization/visualize_detector_output.py +1 -0
  79. megadetector/visualization/visualize_video_output.py +14 -2
  80. {megadetector-10.0.9.dist-info → megadetector-10.0.11.dist-info}/METADATA +1 -1
  81. {megadetector-10.0.9.dist-info → megadetector-10.0.11.dist-info}/RECORD +84 -84
  82. {megadetector-10.0.9.dist-info → megadetector-10.0.11.dist-info}/WHEEL +0 -0
  83. {megadetector-10.0.9.dist-info → megadetector-10.0.11.dist-info}/licenses/LICENSE +0 -0
  84. {megadetector-10.0.9.dist-info → megadetector-10.0.11.dist-info}/top_level.txt +0 -0
@@ -210,6 +210,8 @@ def read_pil_exif(im,options=None):
  if exif_info is None:
  return exif_tags
 
+ # Read all standard EXIF tags; if necessary, we'll filter later to a restricted
+ # list of tags.
  for k, v in exif_info.items():
  assert isinstance(k,str) or isinstance(k,int), \
  'Invalid EXIF key {}'.format(str(k))
@@ -221,6 +223,7 @@ def read_pil_exif(im,options=None):
 
  exif_ifd_tags = _get_exif_ifd(exif_info)
 
+ # Read tags that are only available via offset
  for k in exif_ifd_tags.keys():
  v = exif_ifd_tags[k]
  if k in exif_tags:
@@ -266,7 +269,7 @@ def read_pil_exif(im,options=None):
 
  # Convert to strings, e.g. 'GPSTimeStamp'
  gps_info = {}
- for int_tag,v in enumerate(gps_info_raw.keys()):
+ for int_tag,v in gps_info_raw.items():
  assert isinstance(int_tag,int)
  if int_tag in ExifTags.GPSTAGS:
  gps_info[ExifTags.GPSTAGS[int_tag]] = v
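
Note on the hunk above: enumerate(gps_info_raw.keys()) pairs a running index with each key, so the numeric GPS tag IDs never lined up with their values; .items() yields the (tag ID, value) pairs that the GPSTAGS lookup expects. A minimal illustration with made-up tag values:

    # Hypothetical GPS tag dict mapping numeric EXIF tag IDs to values
    gps_info_raw = {1: 'N', 2: (47.0, 36.0, 0.0)}

    # Old pattern: int_tag is 0, 1, ... and v is the key itself
    assert list(enumerate(gps_info_raw.keys())) == [(0, 1), (1, 2)]

    # New pattern: int_tag is the actual tag ID and v is the tag's value
    assert list(gps_info_raw.items()) == [(1, 'N'), (2, (47.0, 36.0, 0.0))]
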
@@ -276,11 +279,15 @@ def read_pil_exif(im,options=None):
  exif_tags['GPSInfo'] = gps_info
 
  except Exception as e:
+
  if options.verbose:
  print('Warning: error reading GPS info: {}'.format(str(e)))
 
  # ...if we think there might be GPS tags in this image
 
+ # Filter tags if necessary
+ exif_tags = _filter_tags(exif_tags,options)
+
  return exif_tags
 
  # ...read_pil_exif()
@@ -332,25 +339,68 @@ def _filter_tags(tags,options):
  dict.
  """
 
+ ## No-op cases
+
  if options is None:
  return tags
- if options.tags_to_include is None and options.tags_to_exclude is None:
+
+ if (options.tags_to_include is None) and (options.tags_to_exclude is None):
  return tags
+
+ ## If we're including specific tags
+
  if options.tags_to_include is not None:
- assert options.tags_to_exclude is None, "tags_to_include and tags_to_exclude are incompatible"
+
+ assert options.tags_to_exclude is None, \
+ "tags_to_include and tags_to_exclude are incompatible"
+
+ tags_to_include = options.tags_to_include
+
+ if isinstance(tags_to_include,str):
+
+ # Special case:return all tags
+ if tags_to_include == 'all':
+ return tags
+
+ # Otherwise convert string to list
+ tags_to_include = tags_to_include.split(',')
+
+ # Case-insensitive matching
+ tags_to_include = [s.lower().strip() for s in tags_to_include]
+
  tags_to_return = {}
+
  for tag_name in tags.keys():
- if tag_name in options.tags_to_include:
+ if str(tag_name).strip().lower() in tags_to_include:
  tags_to_return[tag_name] = tags[tag_name]
+
  return tags_to_return
+
+ ## If we're excluding specific tags
+
  if options.tags_to_exclude is not None:
- assert options.tags_to_include is None, "tags_to_include and tags_to_exclude are incompatible"
+
+ assert options.tags_to_include is None, \
+ "tags_to_include and tags_to_exclude are incompatible"
+
+ tags_to_exclude = options.tags_to_exclude
+
+ # Convert string to list
+ if isinstance(tags_to_exclude,str):
+ tags_to_exclude = tags_to_exclude.split(',')
+
+ # Case-insensitive matching
+ tags_to_exclude = [s.lower().strip() for s in tags_to_exclude]
+
  tags_to_return = {}
  for tag_name in tags.keys():
- if tag_name not in options.tags_to_exclude:
+ if str(tag_name).strip().lower() not in tags_to_exclude:
  tags_to_return[tag_name] = tags[tag_name]
+
  return tags_to_return
 
+ # ...def _filter_tags(...)
+
 
  def read_exif_tags_for_image(file_path,options=None):
  """
@@ -381,8 +431,10 @@ def read_exif_tags_for_image(file_path,options=None):
 
  except Exception as e:
  if options.verbose:
- print('Read failure for image {}: {}'.format(
+ print('PIL EXIF read failure for image {}: {}'.format(
  file_path,str(e)))
+ import traceback
+ traceback.print_exc()
  result['status'] = 'read_failure'
  result['error'] = str(e)
 
@@ -582,9 +634,10 @@ def _populate_exif_for_images(image_base,images,options=None):
  results = list(tqdm(pool.imap(partial(_populate_exif_data,image_base=image_base,
  options=options),images),total=len(images)))
  finally:
- pool.close()
- pool.join()
- print("Pool closed and joined for EXIF extraction")
+ if pool is not None:
+ pool.close()
+ pool.join()
+ print("Pool closed and joined for EXIF extraction")
 
  return results
 
@@ -691,6 +744,7 @@ def read_exif_from_folder(input_folder,
  list: list of dicts, each of which contains EXIF information for one images. Fields include at least:
  * 'file_name': the relative path to the image
  * 'exif_tags': a dict of EXIF tags whose exact format depends on [options.processing_library].
+ * 'status' and 'error': only populated when reading EXIF information for an image fails
  """
 
  if options is None:
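
Per the docstring addition above, 'status' and 'error' appear only in entries where EXIF reading failed, so callers can detect failures by checking for the key. A sketch using placeholder results shaped like the documented output:

    # Placeholder result list in the shape documented above
    results = [
        {'file_name': 'img_0001.jpg', 'exif_tags': {'Make': 'ACME'}},
        {'file_name': 'img_0002.jpg', 'exif_tags': {},
         'status': 'read_failure', 'error': 'truncated file'},
    ]

    for r in results:
        # 'status' and 'error' are only present when EXIF reading failed
        if 'status' in r:
            print('EXIF read failed for {}: {}'.format(r['file_name'], r.get('error')))
        else:
            exif_tags = r['exif_tags']
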
@@ -698,9 +752,11 @@ def read_exif_from_folder(input_folder,
 
  # Validate options
  if options.tags_to_include is not None:
- assert options.tags_to_exclude is None, "tags_to_include and tags_to_exclude are incompatible"
+ assert options.tags_to_exclude is None, \
+ "tags_to_include and tags_to_exclude are incompatible"
  if options.tags_to_exclude is not None:
- assert options.tags_to_include is None, "tags_to_include and tags_to_exclude are incompatible"
+ assert options.tags_to_include is None, \
+ "tags_to_include and tags_to_exclude are incompatible"
 
  if input_folder is None:
  input_folder = ''
@@ -724,7 +780,7 @@ def read_exif_from_folder(input_folder,
  print('Could not write to file {}'.format(output_file))
  raise
 
- if options.processing_library == 'exif':
+ if options.processing_library == 'exiftool':
  assert is_executable(options.exiftool_command_name), 'exiftool not available'
 
  if filenames is None:
@@ -126,7 +126,7 @@ def remap_coco_categories(input_data,
  with open(output_file,'w') as f:
  json.dump(output_data,f,indent=1)
 
- return input_data
+ return output_data
 
  # ...def remap_coco_categories(...)
 
@@ -13,9 +13,10 @@ included in package-level dependency lists. YMMV.
  #%% Imports and constants
 
  import os
- import glob
  import argparse
 
+ from megadetector.utils.path_utils import recursive_file_list
+
  from multiprocessing.pool import Pool as Pool
  from tqdm import tqdm
 
@@ -77,7 +78,10 @@ def remove_exif(image_base_folder,recursive=True,n_processes=1):
 
  assert os.path.isdir(image_base_folder), \
  'Could not find folder {}'.format(image_base_folder)
- all_files = [f for f in glob.glob(image_base_folder+ "*/**", recursive=recursive)]
+ all_files = recursive_file_list(image_base_folder,
+ recursive=True,
+ return_relative_paths=False,
+ convert_slashes=True)
  image_files = [s for s in all_files if \
  (s.lower().endswith('.jpg') or s.lower().endswith('.jpeg'))]
 
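
The old glob pattern was sensitive to whether image_base_folder ended in a path separator; the new code enumerates files with recursive_file_list from path_utils instead. A sketch of the same enumeration with a placeholder folder, using the arguments shown above:

    from megadetector.utils.path_utils import recursive_file_list

    # 'path/to/images' is a placeholder folder
    all_files = recursive_file_list('path/to/images',
                                    recursive=True,
                                    return_relative_paths=False,
                                    convert_slashes=True)
    image_files = [s for s in all_files if s.lower().endswith(('.jpg', '.jpeg'))]
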
@@ -98,9 +102,10 @@ def remove_exif(image_base_folder,recursive=True,n_processes=1):
  pool = Pool(n_processes)
  _ = list(tqdm(pool.imap(remove_exif_from_image,image_files),total=len(image_files)))
  finally:
- pool.close()
- pool.join()
- print("Pool closed and joined for EXIF removal")
+ if pool is not None:
+ pool.close()
+ pool.join()
+ print("Pool closed and joined for EXIF removal")
 
  # ...remove_exif(...)
 
@@ -28,8 +28,9 @@ def rename_images(input_folder,
  read_exif_options=None,
  n_copy_workers=8):
  """
- For the given image struct in COCO format and associated list of annotations, reformats the
- detections into labelme format.
+ Copies images from a possibly-nested folder structure to a flat folder structure,
+ including EXIF timestamps in each filename. Loosely equivalent to camtrapR's
+ imageRename() function.
 
  Args:
  input_folder (str): the folder to search for images, always recursive
@@ -56,8 +57,9 @@ def rename_images(input_folder,
  if read_exif_options is None:
  read_exif_options = ReadExifOptions()
 
- read_exif_options.tags_to_include = ['DateTime','Model','Make','ExifImageWidth','ExifImageHeight','DateTime',
- 'DateTimeOriginal']
+ read_exif_options.tags_to_include = ['DateTime','Model',
+ 'Make','ExifImageWidth',
+ 'ExifImageHeight','DateTimeOriginal']
  read_exif_options.verbose = False
 
  exif_info = read_exif_from_folder(input_folder=input_folder,
@@ -104,17 +106,21 @@ def rename_images(input_folder,
  if not dry_run:
 
  input_fn_abs_to_output_fn_abs = {}
+
  for input_fn_relative in input_fn_relative_to_output_fn_relative:
+
  output_fn_relative = input_fn_relative_to_output_fn_relative[input_fn_relative]
  input_fn_abs = os.path.join(input_folder,input_fn_relative)
  output_fn_abs = os.path.join(output_folder,output_fn_relative)
  input_fn_abs_to_output_fn_abs[input_fn_abs] = output_fn_abs
 
- parallel_copy_files(input_file_to_output_file=input_fn_abs_to_output_fn_abs,
- max_workers=n_copy_workers,
- use_threads=True,
- overwrite=True,
- verbose=verbose)
+ parallel_copy_files(input_file_to_output_file=input_fn_abs_to_output_fn_abs,
+ max_workers=n_copy_workers,
+ use_threads=True,
+ overwrite=True,
+ verbose=verbose)
+
+ # ...if this is not a dry run
 
  return input_fn_relative_to_output_fn_relative
 
@@ -129,13 +135,14 @@ if False:
 
  #%% Configure options
 
- input_folder = r'G:\camera_traps\camera_trap_videos\2024.05.25\cam3'
- output_folder = r'G:\camera_traps\camera_trap_videos\2024.05.25\cam3_flat'
+ input_folder = r'G:\camera_traps\camera_trap_images\2018.05.04'
+ output_folder = r'g:\temp\rename-test-out'
  dry_run = False
  verbose = True
  read_exif_options = ReadExifOptions()
- read_exif_options.tags_to_include = ['DateTime','Model','Make','ExifImageWidth','ExifImageHeight','DateTime',
- 'DateTimeOriginal']
+ read_exif_options.tags_to_include = ['DateTime','Model','Make',
+ 'ExifImageWidth','ExifImageHeight',
+ 'DateTimeOriginal']
  read_exif_options.n_workers = 8
  read_exif_options.verbose = verbose
  n_copy_workers = 8
@@ -65,7 +65,9 @@ def _process_single_image_for_resize(image_data,
  return None, None
 
  output_fn_abs = os.path.join(output_folder, input_fn_relative)
- os.makedirs(os.path.dirname(output_fn_abs), exist_ok=True)
+ output_dir = os.path.dirname(output_fn_abs)
+ if len(output_dir) > 0:
+ os.makedirs(output_dir, exist_ok=True)
 
  if verbose:
  print('Resizing {} to {}'.format(input_fn_abs,output_fn_abs))
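
Context for the guard above: os.path.dirname() returns an empty string when a path has no directory component, and os.makedirs('') raises FileNotFoundError, so the length check avoids a spurious failure when output files land in the current directory. A minimal illustration:

    import os

    # A bare filename has no directory component
    assert os.path.dirname('image.jpg') == ''

    # os.makedirs('') raises FileNotFoundError, hence the guard
    output_dir = os.path.dirname('image.jpg')
    if len(output_dir) > 0:
        os.makedirs(output_dir, exist_ok=True)
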
@@ -235,6 +237,9 @@ def resize_coco_dataset(input_folder,
  processed_results.append(result)
 
  else:
+
+ pool = None
+
  try:
 
  assert pool_type in ('process', 'thread'), f'Illegal pool type {pool_type}'
@@ -257,9 +262,10 @@ def resize_coco_dataset(input_folder,
  desc=f"Resizing images with {pool_type} pool"))
 
  finally:
- pool.close()
- pool.join()
- print(f"{pool_type.capitalize()} pool closed and joined.")
+ if pool is not None:
+ pool.close()
+ pool.join()
+ print('Pool closed and joined for COCO dataset resizing')
 
  new_images_list = []
  new_annotations_list = []
@@ -33,9 +33,9 @@ def main(): # noqa
 
  args = parser.parse_args()
 
- generate_md_results_from_predictions_json(args.predictions_json_file,
- args.md_results_file,
- args.base_folder)
+ generate_md_results_from_predictions_json(predictions_json_file=args.predictions_json_file,
+ md_results_file=args.md_results_file,
+ base_folder=args.base_folder)
 
  if __name__ == '__main__':
  main()
@@ -110,6 +110,8 @@ def read_classes_from_yolo_dataset_file(fn):
 
  return category_id_to_name
 
+ # ...def def read_classes_from_yolo_dataset_file(...)
+
 
  def yolo_json_output_to_md_output(yolo_json_file,
  image_folder,
@@ -187,7 +189,7 @@ def yolo_json_output_to_md_output(yolo_json_file,
  for image_id in image_id_to_relative_path:
  relative_path = image_id_to_relative_path[image_id]
  assert relative_path not in image_file_relative_to_image_id, \
- 'Duplication image IDs in YOLO output conversion for image {}'.format(relative_path)
+ 'Duplicate image IDs in YOLO output conversion for image {}'.format(relative_path)
  image_file_relative_to_image_id[relative_path] = image_id
 
  with open(yolo_json_file,'r') as f:
@@ -23,6 +23,7 @@ from megadetector.utils.path_utils import find_images
  from megadetector.utils.path_utils import recursive_file_list
  from megadetector.utils.path_utils import find_image_strings
  from megadetector.utils.ct_utils import invert_dictionary
+ from megadetector.utils.ct_utils import write_json
  from megadetector.visualization.visualization_utils import open_image
  from megadetector.data_management.yolo_output_to_md_output import \
  read_classes_from_yolo_dataset_file
@@ -175,8 +176,8 @@ def load_yolo_class_list(class_name_file):
 
  with open(class_name_file,'r') as f:
  lines = f.readlines()
+ lines = [s.strip() for s in lines]
  assert len(lines) > 0, 'Empty class name file {}'.format(class_name_file)
- class_names = [s.strip() for s in lines]
  assert len(lines[0]) > 0, 'Empty class name file {} (empty first line)'.format(class_name_file)
 
  # Blank lines should only appear at the end
@@ -189,7 +190,7 @@ def load_yolo_class_list(class_name_file):
  class_name_file))
 
  category_id_to_name = {}
- for i_category_id,category_name in enumerate(class_names):
+ for i_category_id,category_name in enumerate(lines):
  assert len(category_name) > 0
  category_id_to_name[i_category_id] = category_name
 
@@ -387,9 +388,10 @@ def validate_yolo_dataset(input_folder,
  label_results = list(tqdm(pool.imap(p, label_files),
  total=len(label_files)))
  finally:
- pool.close()
- pool.join()
- print("Pool closed and joined for label file validation")
+ if pool is not None:
+ pool.close()
+ pool.join()
+ print("Pool closed and joined for label file validation")
 
  assert len(label_results) == len(label_files)
 
@@ -470,7 +472,8 @@ def yolo_to_coco(input_folder,
  input_folder = input_folder.replace('\\','/')
 
  assert os.path.isdir(input_folder)
- assert os.path.isfile(class_name_file)
+ if isinstance(class_name_file,str):
+ assert os.path.isfile(class_name_file)
 
  assert empty_image_handling in \
  ('no_annotations','empty_annotations','skip','error'), \
@@ -591,19 +594,26 @@ def yolo_to_coco(input_folder,
 
  assert pool_type in ('process','thread'), 'Illegal pool type {}'.format(pool_type)
 
- if pool_type == 'thread':
- pool = ThreadPool(n_workers)
- else:
- pool = Pool(n_workers)
+ pool = None
+ try:
+ if pool_type == 'thread':
+ pool = ThreadPool(n_workers)
+ else:
+ pool = Pool(n_workers)
 
- print('Starting a {} pool of {} workers'.format(pool_type,n_workers))
+ print('Starting a {} pool of {} workers'.format(pool_type,n_workers))
 
- p = partial(_process_image,
- input_folder=input_folder,
- category_id_to_name=category_id_to_name,
- label_folder=label_folder)
- image_results = list(tqdm(pool.imap(p, image_files_abs),
- total=len(image_files_abs)))
+ p = partial(_process_image,
+ input_folder=input_folder,
+ category_id_to_name=category_id_to_name,
+ label_folder=label_folder)
+ image_results = list(tqdm(pool.imap(p, image_files_abs),
+ total=len(image_files_abs)))
+ finally:
+ if pool is not None:
+ pool.close()
+ pool.join()
+ print('Pool closed and joined for YOLO to COCO conversion')
 
 
  assert len(image_results) == len(image_files_abs)
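
Several hunks in this release apply the same cleanup pattern shown above: create the pool inside a try block, initialize the variable to None first, and guard the finally clause so that a failure during pool creation doesn't trigger an unbound-name error on close(). A generic sketch with placeholder work:

    from multiprocessing.pool import ThreadPool

    def process_item(x):
        # Placeholder worker
        return x * 2

    pool = None
    try:
        pool = ThreadPool(4)
        results = pool.map(process_item, range(10))
    finally:
        # Safe even if ThreadPool() raised before 'pool' was assigned
        if pool is not None:
            pool.close()
            pool.join()
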
@@ -674,8 +684,7 @@ def yolo_to_coco(input_folder,
 
  if output_file is not None:
  print('Writing to {}'.format(output_file))
- with open(output_file,'w') as f:
- json.dump(d,f,indent=1)
+ write_json(output_file,d)
 
  return d
 
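
write_json from megadetector.utils.ct_utils (imported earlier in this diff) replaces the open()/json.dump() pair; based on the call above it takes the output path first and the object second. A hedged sketch with placeholder data:

    from megadetector.utils.ct_utils import write_json

    # Placeholder COCO-style dict
    d = {'images': [], 'annotations': [], 'categories': []}

    # Roughly equivalent to the removed open()/json.dump() pattern;
    # 'out.json' is a placeholder path
    write_json('out.json', d)
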
@@ -262,7 +262,7 @@ def detect_motion(prev_image_path,
  curr_image = cv2.imread(str(curr_image_path))
 
  if curr_image is None:
- print(f"Could not read image: {curr_image_path}")
+ print(f'Could not read image: {curr_image_path}')
  return to_return, motion_state
 
  # Read previous image if available (used for frame diff mode)
@@ -270,7 +270,7 @@ def detect_motion(prev_image_path,
  if prev_image_path is not None:
  prev_image = cv2.imread(str(prev_image_path))
  if prev_image is None:
- print(f"Could not read image: {prev_image_path}")
+ print(f'Could not read image: {prev_image_path}')
  return to_return, motion_state
 
 
@@ -470,10 +470,10 @@ def process_camera_folder(folder_path, options=None):
  image_files = find_images(folder_path, recursive=True, return_relative_paths=False)
 
  if len(image_files) == 0:
- print(f'"No images found in {folder_path}"')
+ print(f'No images found in {folder_path}')
  return pd.DataFrame()
 
- print(f"Processing {len(image_files)} images in {camera_name}")
+ print(f'Processing {len(image_files)} images in {camera_name}')
 
  # Initialize results
  results = []
@@ -527,7 +527,7 @@ def process_camera_folder(folder_path, options=None):
  diff_percentage = motion_result['diff_percentage']
 
  # Format regions as semicolon-separated list of "x,y,w,h"
- regions_str = ';'.join([f"{x},{y},{w},{h}" for x, y, w, h in regions])
+ regions_str = ';'.join([f'{x},{y},{w},{h}' for x, y, w, h in regions])
 
  # Add result for current image
  results.append({
@@ -587,9 +587,9 @@ def process_folders(folders, options=None, output_csv=None):
  try:
  folder_results = future.result()
  all_results.append(folder_results)
- print(f"Finished processing {folder}")
+ print(f'Finished processing {folder}')
  except Exception as e:
- print(f"Error processing {folder}: {e}")
+ print(f'Error processing {folder}: {e}')
 
  # Combine all results
  if all_results:
@@ -598,7 +598,7 @@ def process_folders(folders, options=None, output_csv=None):
  # Save to CSV if requested
  if output_csv:
  combined_results.to_csv(output_csv, index=False)
- print(f"Results saved to {output_csv}")
+ print(f'Results saved to {output_csv}')
 
  return combined_results
  else:
@@ -656,7 +656,7 @@ def create_change_previews(motion_results, output_folder, num_samples=10, random
  prev_image = cv2.imread(prev_image_path)
 
  if curr_image is None or prev_image is None:
- print(f"Could not read images: {prev_image_path} or {curr_image_path}")
+ print(f'Could not read images: {prev_image_path} or {curr_image_path}')
  continue
 
  # Ensure that both images have the same dimensions
@@ -675,7 +675,7 @@ def create_change_previews(motion_results, output_folder, num_samples=10, random
  # Add details at the bottom
  camera = row['camera']
  diff_pct = row['diff_percentage']
- info_text = f"Camera: {camera} | Change: {diff_pct:.2f}%"
+ info_text = f'Camera: {camera} | Change: {diff_pct:.2f}%'
  cv2.putText(combined, info_text, (10, combined.shape[0] - 10), font, 0.7, (0, 255, 0), 2)
 
  # Draw bounding boxes on the 'after' image if regions exist
@@ -690,12 +690,12 @@ def create_change_previews(motion_results, output_folder, num_samples=10, random
  (curr_image.shape[1] + x + w, y + h),
  (0, 0, 255), 2)
  except ValueError:
- print(f"Invalid region format: {region}")
+ print(f'Invalid region format: {region}')
 
  # Save the combined image
  camera_name = Path(curr_image_path).parent.name
  image_name = Path(curr_image_path).name
- output_path = output_folder / f"preview_{camera_name}_{image_name}"
+ output_path = output_folder / f'preview_{camera_name}_{image_name}'
  cv2.imwrite(str(output_path), combined)
 
  preview_paths.append(str(output_path))
@@ -708,7 +708,11 @@ def create_change_previews(motion_results, output_folder, num_samples=10, random
 
  #%% Command-line driver
 
- def main(): # noqa
+ def main():
+ """
+ Command-line driver
+ """
+
  parser = argparse.ArgumentParser(description='Detect motion in timelapse camera images')
  parser.add_argument('--root_dir', required=True, help='Root directory containing camera folders')
  parser.add_argument('--output_csv', default=None, help='Optional output CSV file')
@@ -801,7 +805,7 @@ def main(): # noqa
  root_dir = Path(args.root_dir)
  camera_folders = [f for f in root_dir.iterdir() if f.is_dir()]
 
- print(f"Found {len(camera_folders)} camera folders")
+ print(f'Found {len(camera_folders)} camera folders')
 
  # Process all folders
  results = process_folders(
@@ -817,16 +821,20 @@ def main(): # noqa
  args.preview_folder,
  num_samples=args.num_previews
  )
- print(f"Created {len(preview_paths)} preview images in {args.preview_folder}")
+ print(f'Created {len(preview_paths)} preview images in {args.preview_folder}')
 
- print("Motion detection completed")
+ print('Motion detection completed')
 
  # Display summary
  motion_detected_count = results['motion_detected'].sum()
  total_images = len(results)
- print(f"Motion detected in {motion_detected_count} out of {total_images} images "
- f"({motion_detected_count/total_images*100:.2f}%)")
+ if total_images > 0:
+ print(f'Motion detected in {motion_detected_count} out of {total_images} images '
+ f'({motion_detected_count/total_images*100:.2f}%)')
+ else:
+ print('No images were processed')
 
+ # ...def main(...)
 
  if __name__ == "__main__":
  main()
@@ -303,7 +303,7 @@ def options_to_command(options):
  if options.verbose:
  cmd += ' --verbose'
  if options.detector_options is not None and len(options.detector_options) > 0:
- cmd += '--detector_options {}'.format(dict_to_kvp_list(options.detector_options))
+ cmd += ' --detector_options {}'.format(dict_to_kvp_list(options.detector_options))
 
  return cmd
 
@@ -112,9 +112,9 @@ def _get_model_type_for_model(model_file,
  print('Warning: model type from model version is {}, from file metadata is {}'.format(
  model_type_from_model_version,model_type_from_model_file_metadata))
  if prefer_model_type_source == 'table':
- model_type = model_type_from_model_file_metadata
- else:
  model_type = model_type_from_model_version
+ else:
+ model_type = model_type_from_model_file_metadata
 
  elif model_type_from_model_file_metadata is not None:
 
@@ -373,7 +373,7 @@ def _initialize_yolo_imports(model_type='yolov5',
  except Exception:
 
  print('It looks like you are trying to run a model that requires the ultralytics package, '
- 'but the ultralytics package is not installed, but . For licensing reasons, this '
+ 'but the ultralytics package is not installed. For licensing reasons, this '
  'is not installed by default with the MegaDetector Python package. Run '
  '"pip install ultralytics" to install it, and try again.')
  raise
@@ -1314,8 +1314,8 @@ class PTDetector:
 
  assert isinstance(pred, list)
  assert len(pred) == len(batch_metadata), \
- print('Mismatch between prediction length {} and batch size {}'.format(
- len(pred),len(batch_metadata)))
+ 'Mismatch between prediction length {} and batch size {}'.format(
+ len(pred),len(batch_metadata))
 
  # Process each image's detections
  for i_image, det in enumerate(pred):
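
Note on the hunk above: in an assert statement, the second expression is evaluated only when the condition is false; wrapping the message in print() sent the text to stdout and left the AssertionError carrying None. Passing the formatted string directly puts the message in the exception, as in this minimal illustration:

    pred = [1, 2, 3]
    batch_metadata = [1, 2]

    try:
        assert len(pred) == len(batch_metadata), \
            'Mismatch between prediction length {} and batch size {}'.format(
                len(pred), len(batch_metadata))
    except AssertionError as e:
        print(e)  # Mismatch between prediction length 3 and batch size 2
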