megadetector 10.0.6__py3-none-any.whl → 10.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/data_management/cct_json_utils.py +16 -6
- megadetector/data_management/databases/subset_json_db.py +57 -2
- megadetector/detection/pytorch_detector.py +29 -15
- megadetector/detection/run_inference_with_yolov5_val.py +3 -1
- megadetector/detection/run_tiled_inference.py +5 -2
- megadetector/detection/video_utils.py +23 -7
- megadetector/postprocessing/classification_postprocessing.py +218 -69
- megadetector/postprocessing/convert_output_format.py +81 -87
- megadetector/postprocessing/subset_json_detector_output.py +3 -0
- megadetector/utils/directory_listing.py +19 -13
- megadetector/utils/path_utils.py +58 -8
- megadetector/utils/url_utils.py +91 -1
- megadetector/utils/wi_taxonomy_utils.py +44 -26
- megadetector/visualization/visualize_video_output.py +16 -6
- {megadetector-10.0.6.dist-info → megadetector-10.0.8.dist-info}/METADATA +134 -134
- {megadetector-10.0.6.dist-info → megadetector-10.0.8.dist-info}/RECORD +19 -19
- {megadetector-10.0.6.dist-info → megadetector-10.0.8.dist-info}/licenses/LICENSE +0 -0
- {megadetector-10.0.6.dist-info → megadetector-10.0.8.dist-info}/top_level.txt +0 -0
- {megadetector-10.0.6.dist-info → megadetector-10.0.8.dist-info}/WHEEL +0 -0
megadetector/data_management/cct_json_utils.py

@@ -305,6 +305,9 @@ class SequenceOptions:
         #: How to handle invalid datetimes: 'error' or 'none'
         self.datetime_conversion_failure_behavior = 'none'
 
+        #: Enable additional debug output
+        self.verbose = False
+
 
 #%% Functions
 
@@ -331,7 +334,9 @@ def write_object_with_serialized_datetimes(d,json_fn):
         json.dump(d,f,indent=1,default=json_serialize_datetime)
 
 
-def parse_datetimes_from_cct_image_list(images,conversion_failure_behavior='error'):
+def parse_datetimes_from_cct_image_list(images,
+                                        conversion_failure_behavior='error',
+                                        verbose=False):
     """
     Given the "images" field from a COCO camera traps dictionary, converts all
     string-formatted datetime fields to Python datetimes, making reasonable assumptions
@@ -342,6 +347,7 @@ def parse_datetimes_from_cct_image_list(images,conversion_failure_behavior='error'):
         conversion_failure_behavior (str, optional): determines what happens on a failed
             conversion; can be "error" (raise an error), "str" (leave as a string), or
             "none" (convert to None)
+        verbose (bool, optional): enable additional debug output
 
     Returns:
         images: the input list, with datetimes converted (after modifying in place)
@@ -359,14 +365,17 @@ def parse_datetimes_from_cct_image_list(images,conversion_failure_behavior='error'):
             dt = dateutil.parser.parse(im['datetime'])
             im['datetime'] = dt
         except Exception as e:
-            s = 'could not parse datetime {}: {}'.format(
+            s = 'could not parse datetime {} from {}: {}'.format(
+                str(im['datetime']),im['file_name'],str(e))
             if conversion_failure_behavior == 'error':
                 raise ValueError(s)
             elif conversion_failure_behavior == 'str':
-
+                if verbose:
+                    print('Warning: {}'.format(s))
                 pass
             elif conversion_failure_behavior == 'none':
-
+                if verbose:
+                    print('Warning: {}'.format(s))
                 im['datetime'] = None
 
     # ...for each image
@@ -450,7 +459,8 @@ def create_sequences(image_info,options=None):
 
     # Modifies the images in place
     _ = parse_datetimes_from_cct_image_list(image_info,
-        conversion_failure_behavior=options.datetime_conversion_failure_behavior)
+        conversion_failure_behavior=options.datetime_conversion_failure_behavior,
+        verbose=options.verbose)
 
     n_invalid_datetimes = 0
     for im in image_info:
@@ -505,7 +515,7 @@ def create_sequences(image_info,options=None):
             delta = (im['datetime'] - previous_datetime).total_seconds()
 
             # Start a new sequence if necessary, including the case where this datetime is invalid
-            if delta is None or delta > options.episode_interval_seconds or invalid_datetime:
+            if (delta is None) or (delta > options.episode_interval_seconds) or (invalid_datetime):
                 next_frame_number = 0
                 current_sequence_id = 'location_{}_sequence_index_{}'.format(
                     location,str(next_sequence_number).zfill(5))
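Taken together, the cct_json_utils.py changes thread a verbose flag from SequenceOptions down into the datetime parser. A minimal usage sketch of the new plumbing; the input filename is a placeholder and the option values are illustrative:

import json
from megadetector.data_management.cct_json_utils import SequenceOptions, create_sequences

# Load a COCO camera traps database (placeholder filename)
with open('camera_traps.json','r') as f:
    cct_data = json.load(f)

options = SequenceOptions()

# Convert unparseable datetimes to None rather than raising
options.datetime_conversion_failure_behavior = 'none'

# New in 10.0.8: print a warning for each datetime that fails to parse
options.verbose = True

# Modifies the image list in place, grouping images into sequences
create_sequences(cct_data['images'], options=options)
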
megadetector/data_management/databases/subset_json_db.py

@@ -18,13 +18,20 @@ import json
 import argparse
 
 from tqdm import tqdm
-from megadetector.utils import ct_utils
 from copy import copy
 
+from megadetector.utils import ct_utils
+from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
+
 
 #%% Functions
 
-def subset_json_db(input_json, query, output_json=None, ignore_case=False, verbose=False):
+def subset_json_db(input_json,
+                   query,
+                   output_json=None,
+                   ignore_case=False,
+                   remap_categories=True,
+                   verbose=False):
     """
     Given a json file (or dictionary already loaded from a json file), produce a new
     database containing only the images whose filenames contain the string 'query',
@@ -36,6 +43,8 @@ def subset_json_db(input_json, query, output_json=None, ignore_case=False, verbose=False):
             contain this string. If this is a list, test for exact matches.
         output_json (str, optional): file to write the resulting .json file to
         ignore_case (bool, optional): whether to perform a case-insensitive search for [query]
+        remap_categories (bool, optional): trim the category list to only the categores used
+            in the subset
         verbose (bool, optional): enable additional debug output
 
     Returns:
@@ -92,6 +101,52 @@ def subset_json_db(input_json, query, output_json=None, ignore_case=False, verbose=False):
     output_data['images'] = images
     output_data['annotations'] = annotations
 
+    # Remap categories if necessary
+    if remap_categories:
+
+        category_ids_used = set()
+        for ann in annotations:
+            category_ids_used.add(ann['category_id'])
+
+        if verbose:
+            print('Keeping {} of {} categories'.format(
+                len(category_ids_used),len(input_data['categories'])))
+
+        input_category_id_to_output_category_id = {}
+
+        next_category_id = 0
+
+        # Build mappings from old to new category IDs
+        for input_category_id in category_ids_used:
+            assert isinstance(input_category_id,int), \
+                'Illegal category ID {}'.format(input_category_id)
+            output_category_id = next_category_id
+            next_category_id = next_category_id + 1
+            input_category_id_to_output_category_id[input_category_id] = output_category_id
+
+        # Modify the annotations
+        for ann in annotations:
+            assert ann['category_id'] in input_category_id_to_output_category_id
+            ann['category_id'] = input_category_id_to_output_category_id[ann['category_id']]
+
+        output_categories = []
+
+        # Re-write the category table
+        for cat in input_data['categories']:
+
+            if cat['id'] in input_category_id_to_output_category_id:
+
+                # There may be non-required fields, so don't just create an empty dict
+                # and copy the name/id field, keep the original dict other than "id"
+                output_category = copy(cat)
+                output_category['id'] = input_category_id_to_output_category_id[cat['id']]
+                output_categories.append(output_category)
+
+        output_categories = sort_list_of_dicts_by_key(output_categories,'id')
+        output_data['categories'] = output_categories
+
+    # ...if we need to remap categories
+
     # Write the output file if requested
     if output_json is not None:
         if verbose:
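The new remap_categories behavior compacts the category table so a subset database carries only the categories its annotations actually use, with IDs renumbered from zero. A minimal usage sketch of the updated signature; filenames and the query string are placeholders:

from megadetector.data_management.databases.subset_json_db import subset_json_db

# Keep only images whose filenames contain '2023-06', writing a new database
# whose category list contains just the categories present in the subset
subset_data = subset_json_db('all_annotations.json',
                             query='2023-06',
                             output_json='subset_annotations.json',
                             ignore_case=True,
                             remap_categories=True,
                             verbose=True)
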
megadetector/detection/pytorch_detector.py

@@ -328,7 +328,8 @@ def _initialize_yolo_imports(model_type='yolov5',
     if try_yolov5_import and not utils_imported:
 
         try:
-            from yolov5.utils.general import non_max_suppression
+            # from yolov5.utils.general import non_max_suppression # type: ignore
+            from yolov5.utils.general import xyxy2xywh # noqa
             from yolov5.utils.augmentations import letterbox # noqa
             try:
                 from yolov5.utils.general import scale_boxes as scale_coords
@@ -348,7 +349,8 @@ def _initialize_yolo_imports(model_type='yolov5',
 
     try:
 
-        from yolov9.utils.general import non_max_suppression
+        # from yolov9.utils.general import non_max_suppression # noqa
+        from yolov9.utils.general import xyxy2xywh # noqa
         from yolov9.utils.augmentations import letterbox # noqa
         from yolov9.utils.general import scale_boxes as scale_coords # noqa
         utils_imported = True
@@ -378,7 +380,12 @@ def _initialize_yolo_imports(model_type='yolov5',
 
     try:
 
-        from ultralytics.utils.ops import non_max_suppression # type: ignore # noqa
+        # The non_max_suppression() function moved from the ops module to the nms module
+        # in mid-2025
+        try:
+            from ultralytics.utils.ops import non_max_suppression # type: ignore # noqa
+        except Exception:
+            from ultralytics.utils.nms import non_max_suppression # type: ignore # noqa
         from ultralytics.utils.ops import xyxy2xywh # type: ignore # noqa
 
         # In the ultralytics package, scale_boxes and scale_coords both exist;
@@ -444,9 +451,9 @@ def _initialize_yolo_imports(model_type='yolov5',
         if verbose:
             print('Imported utils from ultralytics package')
 
-    except Exception:
+    except Exception as e:
 
-
+        print('Ultralytics module import failed: {}'.format(str(e)))
         pass
 
     # If we haven't succeeded yet, assume the YOLOv5 repo is on our PYTHONPATH.
@@ -455,7 +462,8 @@ def _initialize_yolo_imports(model_type='yolov5',
     try:
 
         # import pre- and post-processing functions from the YOLOv5 repo
-        from utils.general import non_max_suppression
+        # from utils.general import non_max_suppression # type: ignore
+        from utils.general import xyxy2xywh # type: ignore
         from utils.augmentations import letterbox # type: ignore
 
         # scale_coords() is scale_boxes() in some YOLOv5 versions
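These four hunks all apply the same pattern: drop the unconditional non_max_suppression import, import xyxy2xywh instead, and, for the ultralytics package, tolerate the mid-2025 move of non_max_suppression from ultralytics.utils.ops to ultralytics.utils.nms. A condensed sketch of the resulting import cascade; the real _initialize_yolo_imports() also handles scale_boxes/scale_coords aliasing and per-model-type preferences not shown here, and the return values below are simplified:

def _import_yolo_utils_sketch():
    """Returns the name of the first package that provides the YOLO utilities."""
    try:
        from yolov5.utils.general import xyxy2xywh  # noqa
        from yolov5.utils.augmentations import letterbox  # noqa
        return 'yolov5'
    except Exception:
        pass
    try:
        from yolov9.utils.general import xyxy2xywh  # noqa
        from yolov9.utils.augmentations import letterbox  # noqa
        return 'yolov9'
    except Exception:
        pass
    try:
        # non_max_suppression moved from the ops module to the nms module in mid-2025
        try:
            from ultralytics.utils.ops import non_max_suppression  # noqa
        except Exception:
            from ultralytics.utils.nms import non_max_suppression  # noqa
        from ultralytics.utils.ops import xyxy2xywh  # noqa
        return 'ultralytics'
    except Exception as e:
        print('Ultralytics module import failed: {}'.format(str(e)))
    # Last resort: assume a YOLOv5 repo clone is on the PYTHONPATH
    from utils.general import xyxy2xywh  # noqa
    from utils.augmentations import letterbox  # noqa
    return 'yolov5'
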
@@ -1283,17 +1291,23 @@ class PTDetector:
         else:
             nms_iou_thres = 0.6
 
-
-
-
+        use_library_nms = False
+
+        # Model output format changed in recent ultralytics packages, and the nms implementation
+        # in this module hasn't been updated to handle that format yet.
+        if (yolo_model_type_imported is not None) and (yolo_model_type_imported == 'ultralytics'):
+            use_library_nms = True
 
-
-        if False:
+        if use_library_nms:
             pred = non_max_suppression(prediction=pred,
-
-
-
-
+                                       conf_thres=detection_threshold,
+                                       iou_thres=nms_iou_thres,
+                                       agnostic=False,
+                                       multi_label=False)
+        else:
+            pred = nms(prediction=pred,
+                       conf_thres=detection_threshold,
+                       iou_thres=nms_iou_thres)
 
         assert isinstance(pred, list)
         assert len(pred) == len(batch_metadata), \
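The in-module nms() function that the new else branch calls is not shown in this diff. For orientation only, here is a minimal sketch of what a class-agnostic, YOLO-style NMS wrapper over raw [batch, boxes, 5+classes] predictions typically looks like, built on torchvision; this is illustrative and not the package's implementation:

import torch
import torchvision

def nms_sketch(prediction, conf_thres=0.25, iou_thres=0.45):
    """Class-agnostic NMS over raw YOLO output shaped [batch, boxes, 5+classes].

    Returns a list with one [n, 6] tensor (x1, y1, x2, y2, conf, class) per image.
    """
    output = []
    for x in prediction:
        # Class confidence = objectness * per-class score; keep the best class
        conf, cls = (x[:, 5:] * x[:, 4:5]).max(1)
        keep_mask = conf > conf_thres
        x, conf, cls = x[keep_mask], conf[keep_mask], cls[keep_mask]
        # Convert (cx, cy, w, h) to (x1, y1, x2, y2)
        boxes = torch.empty_like(x[:, :4])
        boxes[:, 0] = x[:, 0] - x[:, 2] / 2
        boxes[:, 1] = x[:, 1] - x[:, 3] / 2
        boxes[:, 2] = x[:, 0] + x[:, 2] / 2
        boxes[:, 3] = x[:, 1] + x[:, 3] / 2
        keep = torchvision.ops.nms(boxes, conf, iou_thres)
        output.append(torch.cat(
            [boxes[keep], conf[keep, None], cls[keep, None].float()], dim=1))
    return output
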
megadetector/detection/run_inference_with_yolov5_val.py

@@ -159,7 +159,9 @@ class YoloInferenceOptions:
         #: These are deliberately offset from the standard MD categories; YOLOv5
         #: needs categories IDs to start at 0.
         #:
-        #: This can also be a string that points to
+        #: This can also be a string that points to any class mapping file supported
+        #: by read_classes_from_yolo_dataset_file(): a YOLO dataset.yaml file, a text
+        #: file with a list of classes, or a .json file with an ID --> name dict
         self.yolo_category_id_to_name = {0:'animal',1:'person',2:'vehicle'}
 
         #: What should we do if the output file already exists?
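The expanded comment names three accepted class-mapping formats. A hypothetical sketch of how such files might be parsed into the ID-to-name dict; the package's actual loader is read_classes_from_yolo_dataset_file(), whose behavior may differ:

import json
import os

def read_class_mapping_sketch(fn):
    """Returns an int ID -> name dict from a .yaml, .txt, or .json class file."""
    ext = os.path.splitext(fn)[1].lower()
    if ext == '.json':
        # .json: a dict mapping IDs (as strings or ints) to names
        with open(fn,'r') as f:
            return {int(k): v for k, v in json.load(f).items()}
    if ext in ('.yaml','.yml'):
        # dataset.yaml: class names live under the "names" key, either as a
        # dict ({0: 'animal'}) or a list (['animal', ...])
        import yaml  # pyyaml
        with open(fn,'r') as f:
            names = yaml.safe_load(f)['names']
        if isinstance(names, list):
            return dict(enumerate(names))
        return {int(k): v for k, v in names.items()}
    # Plain text: one class name per line, IDs assigned in file order
    with open(fn,'r') as f:
        lines = [s.strip() for s in f if s.strip()]
    return dict(enumerate(lines))
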
megadetector/detection/run_tiled_inference.py

@@ -907,7 +907,10 @@ if False:
 
 #%% Command-line driver
 
-def main(): # noqa
+def main():
+    """
+    Command-line driver for run_tiled_inference
+    """
 
     parser = argparse.ArgumentParser(
         description='Chop a folder of images up into tiles, run MD on the tiles, and stitch the results together')
@@ -956,7 +959,7 @@ def main(): # noqa
         '--detector_options',
         type=str,
         default=None,
-        help=('A list of detector options (key-value pairs)
+        help=('A list of detector options (key-value pairs)'))
 
     # detector_options = parse_kvp_list(args.detector_options)
 
megadetector/detection/video_utils.py

@@ -22,6 +22,7 @@ from functools import partial
 from inspect import signature
 
 from megadetector.utils import path_utils
+from megadetector.utils.path_utils import clean_path
 from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
 from megadetector.visualization import visualization_utils as vis_utils
 
@@ -592,7 +593,7 @@ def video_to_frames(input_video_file,
                     quality=None,
                     max_width=None,
                     frames_to_extract=None,
-                    allow_empty_videos=
+                    allow_empty_videos=True):
     """
     Renders frames from [input_video_file] to .jpg files in [output_folder].
 
@@ -618,8 +619,8 @@ def video_to_frames(input_video_file,
             a single frame number. In the special case where frames_to_extract
             is [], this function still reads video frame rates and verifies that videos
             are readable, but no frames are extracted.
-        allow_empty_videos (bool, optional): Just print a warning if a video appears to have
-            frames (by default, this is an error).
+        allow_empty_videos (bool, optional): Just print a warning if a video appears to have
+            no frames (by default, this is an error).
 
     Returns:
         tuple: length-2 tuple containing (list of frame filenames,frame rate)
@@ -883,7 +884,14 @@ def _video_to_frames_for_folder(relative_fn,input_folder,output_folder_base,
 
     # Create the target output folder
     output_folder_video = os.path.join(output_folder_base,relative_fn)
-    os.makedirs(output_folder_video,exist_ok=True)
+    try:
+        os.makedirs(output_folder_video,exist_ok=True)
+    except Exception:
+        output_folder_clean = clean_path(output_folder_video)
+        print('Warning: failed to create folder {}, trying {}'.format(
+            output_folder_video,output_folder_clean))
+        output_folder_video = output_folder_clean
+        os.makedirs(output_folder_video,exist_ok=True)
 
     # Render frames
     # input_video_file = input_fn_absolute; output_folder = output_folder_video
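clean_path() comes from megadetector.utils.path_utils (which gains 58 lines in this release) and is not shown here; presumably it sanitizes characters that prevent folder creation. A naive, hypothetical sketch of that idea only; the real function's rules are unknown:

import re

def clean_path_sketch(path, replacement='_'):
    """Replaces characters that are typically illegal in file and folder names.

    Naive sketch: ignores Windows drive letters, where the colon is legal.
    """
    return re.sub(r'[<>:"|?*\x00-\x1f]', replacement, path)
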
@@ -1090,6 +1098,9 @@ class FrameToVideoOptions:
         #: Are frame rates required?
         self.frame_rates_are_required = False
 
+        #: Enable additional debug output
+        self.verbose = False
+
 
 def frame_results_to_video_results(input_file,
                                    output_file,
@@ -1176,9 +1187,14 @@ def frame_results_to_video_results(input_file,
 
     if (video_filename_to_frame_rate is not None):
 
-        if
-
-
+        if video_name not in video_filename_to_frame_rate:
+
+            s = 'Could not determine frame rate for {}'.format(video_name)
+            if options.frame_rates_are_required:
+                raise ValueError(s)
+            elif options.verbose:
+                print('Warning: {}'.format(s))
+
         if video_name in video_filename_to_frame_rate:
             im_out['frame_rate'] = video_filename_to_frame_rate[video_name]
 
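Finally, a minimal usage sketch of the new warning path in frame_results_to_video_results(); the options keyword is assumed from the hunks above (the full signature is not visible in this diff), and the filenames are placeholders:

from megadetector.detection.video_utils import (FrameToVideoOptions,
                                                frame_results_to_video_results)

options = FrameToVideoOptions()

# Warn (rather than raise) when a video's frame rate can't be determined...
options.frame_rates_are_required = False

# ...and actually print those warnings
options.verbose = True

frame_results_to_video_results('frame_level_results.json',
                               'video_level_results.json',
                               options=options)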