megadetector 5.0.16__py3-none-any.whl → 5.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- megadetector/data_management/importers/snapshot_safari_importer_reprise.py +28 -16
- megadetector/detection/process_video.py +20 -10
- megadetector/detection/run_detector_batch.py +1 -1
- megadetector/detection/video_utils.py +15 -4
- megadetector/postprocessing/postprocess_batch_results.py +4 -4
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +5 -2
- megadetector/utils/ct_utils.py +48 -0
- megadetector/utils/md_tests.py +43 -15
- megadetector/utils/torch_test.py +32 -0
- megadetector/utils/url_utils.py +94 -3
- megadetector/visualization/visualization_utils.py +21 -6
- {megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/METADATA +4 -6
- {megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/RECORD +16 -15
- {megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/WHEEL +1 -1
- {megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/LICENSE +0 -0
- {megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/top_level.txt +0 -0
megadetector/data_management/importers/snapshot_safari_importer_reprise.py
CHANGED

@@ -24,7 +24,7 @@ from collections import defaultdict
 
 from megadetector.utils import path_utils
 
-input_base = '
+input_base = 'e:/'
 output_base = os.path.expanduser('~/data/snapshot-safari-metadata')
 file_list_cache_file = os.path.join(output_base,'file_list.json')
 
@@ -76,23 +76,16 @@ print('Found a total of {} files, {} of which are images'.format(
     len(all_files_relative),len(all_image_files)))
 
 
-#%% Copy all csv files to the annotation cache folder
+#%% Copy all .csv files to the annotation cache folder
 
 # fn = csv_files[0]
-for fn in csv_files:
+for fn in tqdm(csv_files):
+
     target_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
     source_file = os.path.join(input_base,fn)
     shutil.copyfile(source_file,target_file)
 
-
-"""
-Later cells will ask to read a .csv file from the original hard drive;
-read from the annotation cache instead.
-"""
-
-cached_csv_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
-df = pd.read_csv(cached_csv_file)
-return df
+print('Copied {} .csv files to cache folder'.format(len(csv_files)))
 
 
 #%% List project folders
@@ -123,6 +116,21 @@ project_folder_to_project_code = {v: k for k, v in project_code_to_project_folde
 project_codes = sorted(list(project_code_to_project_folder.keys()))
 project_folders = sorted(list(project_code_to_project_folder.values()))
 
+print('Eumerated {} project folders'.format(len(project_folders)))
+
+
+#%% Support functions
+
+def read_cached_csv_file(fn):
+    """
+    Later cells will ask to read a .csv file from the original hard drive;
+    read from the annotation cache instead.
+    """
+
+    cached_csv_file = os.path.join(annotation_cache_dir,os.path.basename(fn))
+    df = pd.read_csv(cached_csv_file)
+    return df
+
 def file_to_project_folder(fn):
     """
     For a given filename relative to the drive root, return the corresponding
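The new read_cached_csv_file helper above replaces the stray cell code removed earlier in this file; later cells call the helper instead of re-reading the original drive. A hypothetical call (the report path is borrowed from the example later in this diff; annotation_cache_dir is defined earlier in the script):

fn = 'Snapshot South Africa/DHP/LILA_Reports/DHP_S1_report_lila.csv'
df = read_cached_csv_file(fn)
print('Read {} rows from {}'.format(len(df), fn))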
@@ -138,7 +146,6 @@ def file_to_project_folder(fn):
     assert project_folder in project_folders
     return project_folder
 
-
 def file_to_project_code(fn):
     """
     For a given filename relative to the drive root, return the corresponding
@@ -147,6 +154,9 @@ def file_to_project_code(fn):
 
     return project_folder_to_project_code[file_to_project_folder(fn)]
 
+
+#%% Consistency checking
+
 assert file_to_project_folder(
     'APN/APN_S2/DW/DW_R5/APN_S2_DW_R5_IMAG0003.JPG') == 'APN'
 assert file_to_project_folder(
@@ -163,9 +173,11 @@ assert file_to_project_code(
 #
 # E.g.:
 #
-# 'DHP': [
-#
-#
+# 'DHP': [
+#     'Snapshot South Africa/DHP/LILA_Reports/DHP_S1_report_lila.csv',
+#     'Snapshot South Africa/DHP/LILA_Reports/DHP_S2_report_lila.csv',
+#     'Snapshot South Africa/DHP/LILA_Reports/DHP_S3_report_lila.csv'
+# ]
 #
 project_code_to_report_files = defaultdict(list)
 
megadetector/detection/process_video.py
CHANGED

@@ -345,7 +345,8 @@ def process_video(options):
     caller_provided_rendering_output_folder = (options.frame_rendering_folder is not None)
 
     frame_output_folder = None
-
+    frame_filenames = None
+
     # If we should re-use existing results, and the output file exists, don't bother running MD
     if (options.reuse_results_if_available and os.path.isfile(options.output_json_file)):
 
@@ -383,7 +384,7 @@ def process_video(options):
         verbose=options.verbose,
         frames_to_process=options.frames_to_extract)
 
-    _add_frame_numbers_to_results(frame_results['results'])
+    frame_results['results'] = _add_frame_numbers_to_results(frame_results['results'])
 
     run_detector_batch.write_results_to_file(
         frame_results['results'],
@@ -444,7 +445,7 @@ def process_video(options):
         augment=options.augment,
         image_size=options.image_size)
 
-    _add_frame_numbers_to_results(results)
+    results = _add_frame_numbers_to_results(results)
 
     run_detector_batch.write_results_to_file(
         results,
@@ -545,6 +546,7 @@ def process_video_folder(options):
 
     frame_output_folder = None
     image_file_names = None
+    video_filename_to_fs = {}
 
     # Run MD in memory if we don't need to generate frames
     #
@@ -575,6 +577,10 @@ def process_video_folder(options):
 
         video_results = md_results['results']
 
+        for i_video,video_filename in enumerate(md_results['video_filenames']):
+            assert video_filename not in video_filename_to_fs
+            video_filename_to_fs[video_filename] = md_results['frame_rates'][i_video]
+
         all_frame_results = []
 
         # r = video_results[0]
@@ -586,13 +592,15 @@ def process_video_folder(options):
             all_frame_results,
             frames_json,
             relative_path_base=None,
-            detector_file=options.model_file,
-            custom_metadata={'video_frame_rate':md_results['frame_rates']})
+            detector_file=options.model_file)
 
     else:
 
         ## Split every video into frames
 
+        if options.verbose:
+            print('Extracting frames for folder {}'.format(options.input_video_file))
+
         if caller_provided_frame_output_folder:
             frame_output_folder = options.frame_folder
         else:
@@ -600,8 +608,6 @@ def process_video_folder(options):
 
         os.makedirs(frame_output_folder, exist_ok=True)
 
-        print('Extracting frames')
-
         frame_filenames, Fs, video_filenames = \
             video_folder_to_frames(input_folder=options.input_video_file,
                                    output_folder_base=frame_output_folder,
@@ -615,6 +621,10 @@ def process_video_folder(options):
                                    frames_to_extract=options.frames_to_extract,
                                    allow_empty_videos=options.allow_empty_videos)
 
+        for i_video,video_filename in enumerate(video_filenames):
+            assert video_filename not in video_filename_to_fs
+            video_filename_to_fs[video_filename] = Fs[i_video]
+
         print('Extracted frames for {} videos'.format(len(set(video_filenames))))
         image_file_names = list(itertools.chain.from_iterable(frame_filenames))
 
@@ -660,15 +670,15 @@ def process_video_folder(options):
         results,
         frames_json,
         relative_path_base=frame_output_folder,
-        detector_file=options.model_file,
-        custom_metadata={'video_frame_rate':Fs})
+        detector_file=options.model_file)
 
     # ...if we're running MD on in-memory frames vs. extracting frames to disk
 
     ## Convert frame-level results to video-level results
 
     print('Converting frame-level results to video-level results')
-    frame_results_to_video_results(frames_json,video_json)
+    frame_results_to_video_results(frames_json,video_json,
+                                   video_filename_to_frame_rate=video_filename_to_fs)
 
 
     ## (Optionally) render output videos
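Taken together, these process_video.py changes mean per-video frame rates now travel with the results themselves rather than in a file-level custom_metadata blob. A hedged usage sketch, using only option fields that appear in this diff (paths and the model specifier are illustrative, not taken from the package):

from megadetector.detection.process_video import ProcessVideoOptions, process_video_folder

options = ProcessVideoOptions()
options.model_file = 'MDV5A'                       # illustrative model specifier
options.input_video_file = '/data/videos'          # a folder, despite the attribute name
options.output_json_file = '/data/videos-md.json'  # example output path
options.verbose = True
process_video_folder(options)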
megadetector/detection/run_detector_batch.py
CHANGED

@@ -846,7 +846,7 @@ def write_results_to_file(results,
     https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#batch-processing-api-output-format
 
     Args:
-        results (list):
+        results (list): list of dict, each dict represents detections on one image
         output_file (str): path to JSON output file, should end in '.json'
         relative_path_base (str, optional): path to a directory as the base for relative paths, can
             be None if the paths in [results] are absolute
megadetector/detection/video_utils.py
CHANGED

@@ -22,6 +22,7 @@ from functools import partial
 from inspect import signature
 
 from megadetector.utils import path_utils
+from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
 from megadetector.visualization import visualization_utils as vis_utils
 
 default_fourcc = 'h264'
@@ -197,7 +198,7 @@ def _filename_to_frame_number(filename):
 def _add_frame_numbers_to_results(results):
     """
     Given the 'images' list from a set of MD results that was generated on video frames,
-    add a 'frame_number' field to each image.
+    add a 'frame_number' field to each image, and return the list, sorted by frame number.
 
     Args:
         results (list): list of image dicts
@@ -208,6 +209,9 @@ def _add_frame_numbers_to_results(results):
         fn = im['file']
         frame_number = _filename_to_frame_number(fn)
         im['frame_number'] = frame_number
+
+    results = sort_list_of_dicts_by_key(results,'frame_number')
+    return results
 
 
 def run_callback_on_frames(input_video_file,
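Because _add_frame_numbers_to_results() now returns a sorted copy rather than only mutating in place, its callers (updated above in process_video.py) must capture the return value. A minimal illustration of the underlying behavior, with invented frame data:

images = [{'file': 'frame000010.jpg', 'frame_number': 10},
          {'file': 'frame000002.jpg', 'frame_number': 2}]
images_sorted = sorted(images, key=lambda im: im['frame_number'])
assert [im['frame_number'] for im in images_sorted] == [2, 10]
assert [im['frame_number'] for im in images] == [10, 2]  # original list order unchanged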
@@ -332,8 +336,8 @@ def run_callback_on_frames_for_folder(input_video_folder,
         recursive (bool, optional): recurse into [input_video_folder]
 
     Returns:
-        dict: dict with keys 'video_filenames' (list), 'frame_rates' (list of floats),
-        video_filenames will
+        dict: dict with keys 'video_filenames' (list of str), 'frame_rates' (list of floats),
+        'results' (list of list of dicts). 'video_filenames' will contain *relative* filenames.
     """
 
     to_return = {'video_filenames':[],'frame_rates':[],'results':[]}
@@ -777,7 +781,8 @@ class FrameToVideoOptions:
         self.non_video_behavior = 'error'
 
 
-def frame_results_to_video_results(input_file,output_file,options=None):
+def frame_results_to_video_results(input_file,output_file,options=None,
+                                   video_filename_to_frame_rate=None):
     """
     Given an MD results file produced at the *frame* level, corresponding to a directory
     created with video_folder_to_frames, maps those frame-level results back to the
@@ -790,6 +795,8 @@ def frame_results_to_video_results(input_file,output_file,options=None):
         output_file (str): the .json file to which we should write video-level results
         options (FrameToVideoOptions, optional): parameters for converting frame-level results
             to video-level results, see FrameToVideoOptions for details
+        video_filename_to_frame_rate (dict): maps (relative) video path names to frame rates,
+            used only to populate the output file
     """
 
     if options is None:
@@ -878,6 +885,10 @@ def frame_results_to_video_results(input_file,output_file,options=None):
         im_out['file'] = video_name
         im_out['detections'] = canonical_detections
 
+        if (video_filename_to_frame_rate is not None) and \
+           (video_name in video_filename_to_frame_rate):
+            im_out['frame_rate'] = video_filename_to_frame_rate[video_name]
+
         # 'max_detection_conf' is no longer included in output files by default
         if False:
             im_out['max_detection_conf'] = 0
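With a frame-rate mapping supplied, each video-level entry can now carry its own 'frame_rate' field. A hypothetical consumer of the resulting .json file (the path is an example; the top-level 'images' key is standard in the MD output format):

import json

with open('video_results.json') as f:
    results = json.load(f)

for im in results['images']:
    # 'frame_rate' is present only when a frame-rate mapping was supplied
    if 'frame_rate' in im:
        print('{}: {} fps'.format(im['file'], im['frame_rate']))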
megadetector/postprocessing/postprocess_batch_results.py
CHANGED

@@ -770,7 +770,7 @@ def _render_image_no_gt(file_info,detection_categories_to_results_name,
         if det['conf'] > max_conf:
             max_conf = det['conf']
 
-        if ('classifications' in det):
+        if ('classifications' in det) and (len(det['classifications']) > 0):
 
             # This is a list of [class,confidence] pairs, sorted by confidence
             classifications = det['classifications']
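The added length check matters because the code below indexes det['classifications'][0]; an empty list passes the old membership test but fails on indexing. A minimal illustration with invented data:

det = {'category': '1', 'conf': 0.9, 'classifications': []}

# Old guard: 'classifications' in det -> True, then det['classifications'][0] raises IndexError
if ('classifications' in det) and (len(det['classifications']) > 0):
    top_class = det['classifications'][0]  # never reached for an empty list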
@@ -1203,13 +1203,13 @@ def process_batch_results(options):
         # Rows / first index is ground truth, columns / second index is predicted category
         classifier_cm = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
 
-        #
+        # i_detection = 0; fn = detector_files[i_detection]; print(fn)
         assert len(detector_files) == len(detections_df)
-        for
+        for i_detection, fn in enumerate(detector_files):
 
             image_id = ground_truth_indexed_db.filename_to_id[fn]
             image = ground_truth_indexed_db.image_id_to_image[image_id]
-            detections = detections_df['detections'].iloc[
+            detections = detections_df['detections'].iloc[i_detection]
             pred_class_ids = [det['classifications'][0][0] \
                 for det in detections if 'classifications' in det.keys()]
             pred_classnames = [classification_categories[pd] for pd in pred_class_ids]
megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py
CHANGED

@@ -210,9 +210,12 @@ class RepeatDetectionOptions:
         #: a/b/c/RECONYX100 and a/b/c/RECONYX101 may really be the same camera).
         #:
         #: See ct_utils for a common replacement function that handles most common
-        #: manufacturer folder names
+        #: manufacturer folder names:
+        #:
+        #: from megadetector.utils import ct_utils
+        #: self.customDirNameFunction = ct_utils.image_file_to_camera_folder
         self.customDirNameFunction = None
-
+
         #: Include only specific folders, mutually exclusive with [excludeFolders]
         self.includeFolders = None
 
megadetector/utils/ct_utils.py
CHANGED
@@ -16,6 +16,8 @@ import os
 import jsonpickle
 import numpy as np
 
+from operator import itemgetter
+
 # List of file extensions we'll consider images; comparisons will be case-insensitive
 # (i.e., no need to include both .jpg and .JPG on this list).
 image_extensions = ['.jpg', '.jpeg', '.gif', '.png']
@@ -294,6 +296,29 @@ def get_max_conf(im):
     return max_conf
 
 
+def sort_results_for_image(im):
+    """
+    Sort classification and detection results in descending order by confidence (in place).
+
+    Args:
+        im (dict): image dictionary in the MD output format (with a 'detections' field)
+    """
+    if 'detections' not in im or im['detections'] is None:
+        return
+
+    # Sort detections in descending order by confidence
+    im['detections'] = sort_list_of_dicts_by_key(im['detections'],k='conf',reverse=True)
+
+    for det in im['detections']:
+
+        # Sort classifications (which are (class,conf) tuples) in descending order by confidence
+        if 'classifications' in det and \
+           (det['classifications'] is not None) and \
+           (len(det['classifications']) > 0):
+            L = det['classifications']
+            det['classifications'] = sorted(L,key=itemgetter(1),reverse=True)
+
+
 def point_dist(p1,p2):
     """
     Computes the distance between two points, represented as length-two tuples.
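A small illustration of what sort_results_for_image() does to an image dict (data invented for the example): detections end up in descending confidence order, and each non-empty classification list is re-sorted the same way, while empty lists are skipped by the guard.

im = {'file': 'img0001.jpg',
      'detections': [
          {'category': '1', 'conf': 0.20,
           'classifications': [['3', 0.10], ['2', 0.85]]},
          {'category': '2', 'conf': 0.95, 'classifications': []}
      ]}
sort_results_for_image(im)
assert im['detections'][0]['conf'] == 0.95
assert im['detections'][1]['classifications'][0] == ['2', 0.85]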
@@ -406,6 +431,21 @@ def split_list_into_n_chunks(L, n, chunk_strategy='greedy'):
         raise ValueError('Invalid chunk strategy: {}'.format(chunk_strategy))
 
 
+def sort_list_of_dicts_by_key(L,k,reverse=False):
+    """
+    Sorts the list of dictionaries [L] by the key [k].
+
+    Args:
+        L (list): list of dictionaries to sort
+        k (object, typically str): the sort key
+        reverse (bool, optional): whether to sort in reverse (descending) order
+
+    Returns:
+        dict: sorted copy of [d]
+    """
+    return sorted(L, key=lambda d: d[k], reverse=reverse)
+
+
 def sort_dictionary_by_key(d,reverse=False):
     """
     Sorts the dictionary [d] by key.
@@ -611,3 +651,11 @@ if False:
     r1 = [0.4,0.8,10,22]; r2 = [100, 101, 200, 210.4]; assert abs(rect_distance(r1,r2)-119.753) < 0.001
     r1 = [0.4,0.8,10,22]; r2 = [101, 101, 200, 210.4]; assert abs(rect_distance(r1,r2)-120.507) < 0.001
     r1 = [0.4,0.8,10,22]; r2 = [120, 120, 200, 210.4]; assert abs(rect_distance(r1,r2)-147.323) < 0.001
+
+
+    #%% Test dictionary sorting
+
+    L = [{'a':5},{'a':0},{'a':10}]
+    k = 'a'
+    sort_list_of_dicts_by_key(L, k, reverse=True)
+
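For reference, the interactive test above should evaluate to the list in descending order of 'a':

sort_list_of_dicts_by_key(L, k, reverse=True)
# -> [{'a': 10}, {'a': 5}, {'a': 0}]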
megadetector/utils/md_tests.py
CHANGED
@@ -528,12 +528,12 @@ def compare_results(inference_output_file,expected_results_file,options):
     if not options.warning_mode:
 
         assert max_conf_error <= options.max_conf_error, \
-            'Confidence error {} is greater than allowable ({})'.format(
-            max_conf_error,options.max_conf_error)
+            'Confidence error {} is greater than allowable ({}), on file:\n{}'.format(
+            max_conf_error,options.max_conf_error,max_conf_error_file)
 
         assert max_coord_error <= options.max_coord_error, \
-            'Coord error {} is greater than allowable ({})'.format(
-            max_coord_error,options.max_coord_error)
+            'Coord error {} is greater than allowable ({}), on file:\n{}'.format(
+            max_coord_error,options.max_coord_error,max_coord_error_file)
 
     print('Max conf error: {} (file {})'.format(
         max_conf_error,max_conf_error_file))

@@ -653,7 +653,7 @@ def run_python_tests(options):
 
     ## Run inference on an image
 
-    print('\n** Running MD on a single image **\n')
+    print('\n** Running MD on a single image (module) **\n')
 
     from megadetector.detection import run_detector
     from megadetector.visualization import visualization_utils as vis_utils

@@ -665,7 +665,7 @@ def run_python_tests(options):
 
     ## Run inference on a folder
 
-    print('\n** Running MD on a folder of images **\n')
+    print('\n** Running MD on a folder of images (module) **\n')
 
     from megadetector.detection.run_detector_batch import load_and_run_detector_batch,write_results_to_file
     from megadetector.utils import path_utils

@@ -693,7 +693,7 @@ def run_python_tests(options):
 
     ## Run and verify again with augmentation enabled
 
-    print('\n** Running MD on images with augmentation **\n')
+    print('\n** Running MD on images with augmentation (module) **\n')
 
     from megadetector.utils.path_utils import insert_before_extension
 

@@ -712,6 +712,8 @@ def run_python_tests(options):
 
     ## Postprocess results
 
+    print('\n** Post-processing results (module) **\n')
+
     from megadetector.postprocessing.postprocess_batch_results import \
         PostProcessingOptions,process_batch_results
     postprocessing_options = PostProcessingOptions()

@@ -727,6 +729,8 @@ def run_python_tests(options):
 
     ## Partial RDE test
 
+    print('\n** Testing RDE (module) **\n')
+
     from megadetector.postprocessing.repeat_detection_elimination.repeat_detections_core import \
         RepeatDetectionOptions, find_repeat_detections
 

@@ -749,7 +753,9 @@ def run_python_tests(options):
         print('Skipping YOLO val inference tests, no YOLO folder supplied')
 
     else:
-
+
+        print('\n** Running YOLO val inference test (module) **\n')
+
         from megadetector.detection.run_inference_with_yolov5_val import \
             YoloInferenceOptions, run_inference_with_yolo_val
         from megadetector.utils.path_utils import insert_before_extension

@@ -775,9 +781,7 @@ def run_python_tests(options):
         compare_results(inference_output_file=inference_output_file_yolo_val,
                         expected_results_file=inference_output_file_standard_inference,
                         options=options)
-
-
-
+
         # Run again, without symlinks this time
 
         inference_output_file_yolo_val_no_links = insert_before_extension(inference_output_file_yolo_val,

@@ -822,7 +826,7 @@ def run_python_tests(options):
 
     ## Video test (single video)
 
-    print('\n** Running MD on a single video **\n')
+    print('\n** Running MD on a single video (module) **\n')
 
     from megadetector.detection.process_video import ProcessVideoOptions, process_video
     from megadetector.utils.path_utils import insert_before_extension

@@ -861,7 +865,7 @@ def run_python_tests(options):
 
     ## Video test (folder)
 
-    print('\n** Running MD on a folder of videos **\n')
+    print('\n** Running MD on a folder of videos (module) **\n')
 
     from megadetector.detection.process_video import ProcessVideoOptions, process_video_folder
     from megadetector.utils.path_utils import insert_before_extension

@@ -920,7 +924,7 @@ def run_python_tests(options):
 
     # They won't be quite the same, because the on-disk path goes through a jpeg intermediate
 
-    print('\n** Running MD on a folder of videos (in memory) **\n')
+    print('\n** Running MD on a folder of videos (in memory) (module) **\n')
 
     video_options.output_json_file = insert_before_extension(video_options.output_json_file,'in-memory')
     video_options.force_on_disk_frame_extraction = False

@@ -977,6 +981,8 @@ def run_cli_tests(options):
 
     ## Run inference on an image
 
+    print('\n** Running MD on a single image (CLI) **\n')
+
     image_fn = os.path.join(options.scratch_dir,options.test_images[0])
     output_dir = os.path.join(options.scratch_dir,'single_image_test')
     if options.cli_working_dir is None:

@@ -999,6 +1005,7 @@ def run_cli_tests(options):
 
     ## Run inference on a folder
 
+    print('\n** Running MD on a folder (CLI) **\n')
 
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     assert os.path.isdir(image_folder), 'Test image folder {} is not available'.format(image_folder)

@@ -1018,6 +1025,8 @@ def run_cli_tests(options):
 
     ## Run again with checkpointing enabled, make sure the results are the same
 
+    print('\n** Running MD on a folder (with checkpoints) (CLI) **\n')
+
     from megadetector.utils.path_utils import insert_before_extension
 
     checkpoint_string = ' --checkpoint_frequency 5'

@@ -1033,6 +1042,8 @@ def run_cli_tests(options):
 
     ## Run again with the image queue enabled, make sure the results are the same
 
+    print('\n** Running MD on a folder (with image queue) (CLI) **\n')
+
     cmd = base_cmd + ' --use_image_queue'
     from megadetector.utils.path_utils import insert_before_extension
     inference_output_file_queue = insert_before_extension(inference_output_file,'_queue')

@@ -1060,11 +1071,16 @@ def run_cli_tests(options):
     if not gpu_available:
         inference_output_file_cpu = inference_output_file
     else:
+
+        print('\n** Running MD on a folder (single CPU) (CLI) **\n')
+
         inference_output_file_cpu = insert_before_extension(inference_output_file,'cpu')
         cmd = base_cmd
         cmd = cmd.replace(inference_output_file,inference_output_file_cpu)
         cmd_results = execute_and_print(cmd)
 
+    print('\n** Running MD on a folder (multiple CPUs) (CLI) **\n')
+
     cpu_string = ' --ncores 4'
     cmd = base_cmd + cpu_string
     from megadetector.utils.path_utils import insert_before_extension

@@ -1085,6 +1101,8 @@ def run_cli_tests(options):
 
     ## Postprocessing
 
+    print('\n** Testing post-processing (CLI) **\n')
+
     postprocessing_output_dir = os.path.join(options.scratch_dir,'postprocessing_output_cli')
 
     if options.cli_working_dir is None:

@@ -1099,6 +1117,8 @@ def run_cli_tests(options):
 
     ## RDE
 
+    print('\n** Running RDE (CLI) **\n')
+
     rde_output_dir = os.path.join(options.scratch_dir,'rde_output_cli')
 
     if options.cli_working_dir is None:

@@ -1135,6 +1155,8 @@ def run_cli_tests(options):
 
     ## Run inference on a folder (tiled)
 
+    print('\n** Running tiled inference (CLI) **\n')
+
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     tiling_folder = os.path.join(options.scratch_dir,'tiling-folder')
     inference_output_file_tiled = os.path.join(options.scratch_dir,'folder_inference_output_tiled.json')

@@ -1159,6 +1181,8 @@ def run_cli_tests(options):
 
     else:
 
+        print('\n** Running YOLOv5 val tests (CLI) **\n')
+
         image_folder = os.path.join(options.scratch_dir,'md-test-images')
         yolo_results_folder = os.path.join(options.scratch_dir,'yolo-output-folder')
         yolo_symlink_folder = os.path.join(options.scratch_dir,'yolo-symlink_folder')

@@ -1193,6 +1217,8 @@ def run_cli_tests(options):
 
     ## Video test
 
+    print('\n** Testing video rendering (CLI) **\n')
+
     video_inference_output_file = os.path.join(options.scratch_dir,'video_inference_output.json')
     output_video_file = os.path.join(options.scratch_dir,'video_scratch/cli_rendered_video.mp4')
     frame_folder = os.path.join(options.scratch_dir,'video_scratch/frame_folder_cli')

@@ -1219,6 +1245,8 @@ def run_cli_tests(options):
 
     ## Run inference on a folder (with MDV5B, so we can do a comparison)
 
+    print('\n** Running MDv5b (CLI) **\n')
+
     image_folder = os.path.join(options.scratch_dir,'md-test-images')
     inference_output_file_alt = os.path.join(options.scratch_dir,'folder_inference_output_alt.json')
     if options.cli_working_dir is None:

@@ -1307,7 +1335,7 @@ if False:
     options.cpu_execution_is_error = False
     options.skip_video_tests = False
    options.skip_python_tests = False
-    options.skip_cli_tests =
+    options.skip_cli_tests = False
     options.scratch_dir = None
     options.test_data_url = 'https://lila.science/public/md-test-package.zip'
     options.force_data_download = False
megadetector/utils/torch_test.py
ADDED

@@ -0,0 +1,32 @@
+"""
+
+torch_test.py
+
+Simple script to verify CUDA availability, used to verify a CUDA/PyTorch
+environment.
+
+"""
+
+def torch_test():
+    """
+    Print diagnostic information about Torch/CUDA status, including Torch/CUDA versions
+    and all available CUDA device names.
+    """
+
+    import torch
+
+    print('Torch version: {}'.format(str(torch.__version__)))
+    print('CUDA available: {}'.format(torch.cuda.is_available()))
+
+    device_ids = list(range(torch.cuda.device_count()))
+    print('Found {} CUDA devices:'.format(len(device_ids)))
+    for device_id in device_ids:
+        device_name = 'unknown'
+        try:
+            device_name = torch.cuda.get_device_name(device=device_id)
+        except Exception as e:
+            pass
+        print('{}: {}'.format(device_id,device_name))
+
+if __name__ == '__main__':
+    torch_test()
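Because the new module guards its entry point with if __name__ == '__main__':, it can be invoked as a script or imported; plausible invocations (module path per the RECORD section below):

# From a shell:
#   python -m megadetector.utils.torch_test
#
# Or from Python:
from megadetector.utils.torch_test import torch_test
torch_test()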
megadetector/utils/url_utils.py
CHANGED
@@ -141,6 +141,8 @@ def download_url(url,
 
     return destination_filename
 
+# ...def download_url(...)
+
 
 def download_relative_filename(url, output_base, verbose=False):
     """

@@ -168,6 +170,8 @@ def download_relative_filename(url, output_base, verbose=False):
     destination_filename = os.path.join(output_base,relative_filename)
     return download_url(url, destination_filename, verbose=verbose)
 
+# ...def download_relative_filename(...)
+
 
 def _do_parallelized_download(download_info,overwrite=False,verbose=False):
     """

@@ -197,6 +201,8 @@ def _do_parallelized_download(download_info,overwrite=False,verbose=False):
         result['status'] = 'success'
     return result
 
+# ...def _do_parallelized_download(...)
+
 
 def parallel_download_urls(url_to_target_file,verbose=False,overwrite=False,
                            n_workers=20,pool_type='thread'):

@@ -257,8 +263,10 @@ def parallel_download_urls(url_to_target_file,verbose=False,overwrite=False,
 
     return results
 
-
-def test_url(url, error_on_failure=True, timeout=None):
+# ...def parallel_download_urls(...)
+
+
+def test_url(url,error_on_failure=True,timeout=None):
     """
     Tests the availability of [url], returning an http status code.
 

@@ -281,7 +289,7 @@ def test_url(url, error_on_failure=True, timeout=None):
     return r.status_code
 
 
-def test_urls(urls,
+def test_urls(urls,error_on_failure=True,n_workers=1,pool_type='thread',timeout=None):
     """
     Verify that URLs are available (i.e., returns status 200). By default,
     errors if any URL is unavailable.

@@ -327,3 +335,86 @@ def test_urls(urls, error_on_failure=True, n_workers=1, pool_type='thread', time
             urls), total=len(urls)))
 
     return status_codes
+
+# ...def test_urls(...)
+
+
+def get_url_size(url,verbose=False,timeout=None):
+    """
+    Get the size of the file pointed to by a URL, based on the Content-Length property. If the
+    URL is not available, or the Content-Length property is not available, or the content-Length
+    property is not an integer, returns None.
+
+    Args:
+        url (str): the url to test
+        verbose (bool, optional): enable additional debug output
+        timeout (int, optional): timeout in seconds to wait before considering this
+            access attempt to be a failure; see requests.head() for precise documentation
+
+    Returns:
+        int: the file size in bytes, or None if it can't be retrieved
+    """
+
+    try:
+        r = urllib.request.Request(url,method='HEAD')
+        f = urllib.request.urlopen(r, timeout=timeout)
+        if f.status != 200:
+            if verbose:
+                print('Status {} retrieving file size for {}'.format(f.status,url))
+            return None
+        size_bytes = int(f.headers['Content-Length'])
+        return size_bytes
+    except Exception as e:
+        if verbose:
+            print('Error retrieving file size for {}:\n{}'.format(url,str(e)))
+        return None
+
+# ...def get_url_size(...)
+
+
+def get_url_sizes(urls,n_workers=1,pool_type='thread',timeout=None,verbose=False):
+    """
+    Retrieve file sizes for the URLs specified by [urls]. Returns None for any URLs
+    that we can't access, or URLs for which the Content-Length property is not set.
+
+    Args:
+        urls (list): list of URLs for which we should retrieve sizes
+        n_workers (int, optional): number of concurrent workers, set to <=1 to disable
+            parallelization
+        pool_type (str, optional): worker type to use; should be 'thread' or 'process'
+        timeout (int, optional): timeout in seconds to wait before considering this
+            access attempt to be a failure; see requests.head() for precise documentation
+        verbose (bool, optional): print additional debug information
+
+    Returns:
+        dict: maps urls to file sizes, which will be None for URLs for which we were unable
+            to retrieve a valid size.
+    """
+
+    url_to_size = {}
+
+    if n_workers <= 1:
+
+        for url in tqdm(urls):
+            url_to_size[url] = get_url_size(url,verbose=verbose,timeout=timeout)
+
+    else:
+
+        if pool_type == 'thread':
+            pool = ThreadPool(n_workers)
+        else:
+            assert pool_type == 'process', 'Unsupported pool type {}'.format(pool_type)
+            pool = Pool(n_workers)
+
+        print('Starting a {} pool with {} workers'.format(pool_type,n_workers))
+
+        file_sizes = list(tqdm(pool.imap(
+            partial(get_url_size,verbose=verbose,timeout=timeout),
+            urls), total=len(urls)))
+
+        for i_url,url in enumerate(urls):
+            url_to_size[url] = file_sizes[i_url]
+
+    return url_to_size
+
+# ...get_url_sizes(...)
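A hedged usage sketch for the new size helpers (the URL is the test-package URL that appears in md_tests.py; with n_workers <= 1, get_url_sizes just loops serially):

from megadetector.utils.url_utils import get_url_size, get_url_sizes

url = 'https://lila.science/public/md-test-package.zip'
print(get_url_size(url, verbose=True))  # byte count, or None if Content-Length is missing

url_to_size = get_url_sizes([url], n_workers=4, pool_type='thread')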
megadetector/visualization/visualization_utils.py
CHANGED

@@ -536,6 +536,9 @@ def render_detection_bounding_boxes(detections,
 
         x1, y1, w_box, h_box = detection['bbox']
         display_boxes.append([y1, x1, y1 + h_box, x1 + w_box])
+
+        # The class index to use for coloring this box, which may be based on the detection
+        # category or on the most confident classification category.
         clss = detection['category']
 
         # {} is the default, which means "show labels with no mapping", so don't use "if label_map" here

@@ -558,22 +561,30 @@ def render_detection_bounding_boxes(detections,
             assert len(displayed_label) == 1
             displayed_label[0] += ' ' + custom_string
 
-        if 'classifications' in detection:
+        if ('classifications' in detection) and len(detection['classifications']) > 0:
 
-            # To avoid duplicate colors with detection-only visualization, offset
-            # the classification class index by the number of detection classes
-            clss = annotation_constants.NUM_DETECTOR_CATEGORIES + int(detection['classifications'][0][0])
             classifications = detection['classifications']
+
             if len(classifications) > max_classifications:
                 classifications = classifications[0:max_classifications]
 
+            max_classification_category = 0
+            max_classification_conf = -100
+
             for classification in classifications:
 
                 classification_conf = classification[1]
-                if classification_conf is
-
+                if classification_conf is None or \
+                   classification_conf < classification_confidence_threshold:
                     continue
+
                 class_key = classification[0]
+
+                # Is this the most confident classification for this detection?
+                if classification_conf > max_classification_conf:
+                    max_classification_conf = classification_conf
+                    max_classification_category = int(class_key)
+
                 if (classification_label_map is not None) and (class_key in classification_label_map):
                     class_name = classification_label_map[class_key]
                 else:

@@ -585,6 +596,10 @@ def render_detection_bounding_boxes(detections,
 
             # ...for each classification
 
+            # To avoid duplicate colors with detection-only visualization, offset
+            # the classification class index by the number of detection classes
+            clss = annotation_constants.NUM_DETECTOR_CATEGORIES + max_classification_category
+
         # ...if we have classification results
 
         display_strs.append(displayed_label)
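The net effect of these visualization changes: the color-class offset is now computed from the most confident classification rather than from whatever entry happens to be first. A self-contained illustration with invented values (the real code uses annotation_constants.NUM_DETECTOR_CATEGORIES):

NUM_DETECTOR_CATEGORIES = 3  # stand-in for annotation_constants.NUM_DETECTOR_CATEGORIES

classifications = [['4', 0.30], ['1', 0.90]]  # (category, confidence) pairs, unsorted
max_category = max(classifications, key=lambda c: float(c[1]))[0]
clss = NUM_DETECTOR_CATEGORIES + int(max_category)
assert clss == 4  # the old first-entry behavior would have yielded 3 + 4 = 7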
{megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: megadetector
-Version: 5.0.16
+Version: 5.0.18
 Summary: MegaDetector is an AI model that helps conservation folks spend less time doing boring things with camera trap images.
 Author-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>
 Maintainer-email: Your friendly neighborhood MegaDetector team <cameratraps@lila.science>

@@ -57,22 +57,20 @@ This package is a pip-installable version of the support/inference code for [Meg
 If you aren't looking for the Python package specifically, and you just want to learn more about what MegaDetector is all about, head over to the [MegaDetector repo](https://github.com/agentmorris/MegaDetector/?tab=readme-ov-file#megadetector).
 
 
-## Reasons you
+## Reasons you might not be looking for this package
 
 ### If you are an ecologist...
 
-If you are an ecologist looking to use MegaDetector to help you get through your camera trap images, you probably don't want this package. We recommend starting with our "[Getting started with MegaDetector](https://github.com/agentmorris/MegaDetector/blob/main/getting-started.md)" page, then digging in to the [MegaDetector User Guide](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md), which will walk you through the process of using MegaDetector.
+If you are an ecologist looking to use MegaDetector to help you get through your camera trap images, you probably don't want this package, or at least you probably don't want to start at this page. We recommend starting with our "[Getting started with MegaDetector](https://github.com/agentmorris/MegaDetector/blob/main/getting-started.md)" page, then digging in to the [MegaDetector User Guide](https://github.com/agentmorris/MegaDetector/blob/main/megadetector.md), which will walk you through the process of using MegaDetector.
 
 ### If you are a computer-vision-y type...
 
-If you are a computer-vision-y person looking to run or fine-tune MegaDetector programmatically, you
+If you are a computer-vision-y person looking to run or fine-tune MegaDetector programmatically, you probably don't want this package. MegaDetector is just a fine-tuned version of [YOLOv5](https://github.com/ultralytics/yolov5), and the [ultralytics](https://github.com/ultralytics/ultralytics/) package (from the developers of YOLOv5) has a zillion bells and whistles for both inference and fine-tuning that this package doesn't.
 
 ## Reasons you might want to use this package
 
 If you want to programmatically interact with the postprocessing tools from the MegaDetector repo, or programmatically run MegaDetector in a way that produces [Timelapse](https://saul.cpsc.ucalgary.ca/timelapse)-friendly output (i.e., output in the standard [MegaDetector output format](https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#megadetector-batch-output-format)), this package might be for you.
 
-Although even if that describes you, you <i>still</i> might be better off cloning the MegaDetector repo. Pip-installability requires that some dependencies be newer than what was available at the time MDv5 was trained, so results are <i>very slightly</i> different than results produced in the "official" environment. These differences <i>probably</i> don't matter much, but they have not been formally characterized.
-
 ## If I haven't talked you out of using this package...
 
 To install:
{megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/RECORD
CHANGED

@@ -109,7 +109,7 @@ megadetector/data_management/importers/rspb_to_json.py,sha256=y03v1d1un9mI3HZRCZ
 megadetector/data_management/importers/save_the_elephants_survey_A.py,sha256=lugw8m5Nh2Fhs-FYo9L0mDL3_29nAweLxEul6GekdkI,10669
 megadetector/data_management/importers/save_the_elephants_survey_B.py,sha256=SWClXENsIePwifP8eJeRsj3kh3Bztl6Kzc_BdqNZvFw,11172
 megadetector/data_management/importers/snapshot_safari_importer.py,sha256=dQ1GmpHcrQCQF9YZ0UaLTvc_3aOZEDqWGcxzYQeq4ho,23605
-megadetector/data_management/importers/snapshot_safari_importer_reprise.py,sha256=
+megadetector/data_management/importers/snapshot_safari_importer_reprise.py,sha256=f2WXC22fzbKaQl2888bfUlzap4oDhRG3ysZOUMBrcw0,22549
 megadetector/data_management/importers/snapshot_serengeti_lila.py,sha256=-aYq_5IxhpcR6oxFYYVv98WVnGAr0mnVkbX-oJCPd8M,33865
 megadetector/data_management/importers/sulross_get_exif.py,sha256=Bt1tGYtr5CllxCe2BL8uI3SfPu3e1SSqijnOz--iRqQ,2071
 megadetector/data_management/importers/timelapse_csv_set_to_json.py,sha256=B9VbBltf3IdPBI2O1Cmg8wODhlIML4MQpjdhTFD4GP4,15916

@@ -136,14 +136,14 @@ megadetector/data_management/lila/get_lila_image_counts.py,sha256=UxXS5RDnSA_Wbx
 megadetector/data_management/lila/lila_common.py,sha256=IEnGoyRgcqbek1qJ1gFE83p1Pg_5kaMS-nQI25lRWIs,10132
 megadetector/data_management/lila/test_lila_metadata_urls.py,sha256=jqN7UID16fu78BK_2sygb4s9BBeVCpSZT3_oL2GYxxY,4438
 megadetector/detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-megadetector/detection/process_video.py,sha256=
+megadetector/detection/process_video.py,sha256=yVjkm5SxHH-R5SazMQmF-XXRhrhPdE2z4X4bGkfeN-k,50019
 megadetector/detection/pytorch_detector.py,sha256=StOnaspDBkMeePiTyq5ZEcFUDBEddq36nigHXbF-zAQ,14029
 megadetector/detection/run_detector.py,sha256=vEfq3jJTseD0sIM9MaIhbeEVqP6JoLXOC2cl8Dhehxs,30553
-megadetector/detection/run_detector_batch.py,sha256=
+megadetector/detection/run_detector_batch.py,sha256=d0fayCVXzKxa1tCiw6D8kmDqcwOAIuvrgw_Zfw0eRjE,57304
 megadetector/detection/run_inference_with_yolov5_val.py,sha256=yjNm130qntOyJ4jbetdt5xDHWnSmBXRydyxB2I56XjM,49099
 megadetector/detection/run_tiled_inference.py,sha256=vw0713eNuMiEOjHfweQl58zPHNxPOMdFWZ8bTDLhlMY,37883
 megadetector/detection/tf_detector.py,sha256=5V94a0gR6WmGPacKm59hl1eYEZI8cG04frF4EvHrmzU,8285
-megadetector/detection/video_utils.py,sha256=
+megadetector/detection/video_utils.py,sha256=MzD8aUgdyAD9xK9w2fK0lvZtDhjpeHDkftvRXk3YJVA,41399
 megadetector/detection/detector_training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 megadetector/detection/detector_training/model_main_tf2.py,sha256=YwNsZ7hkIFaEuwKU0rHG_VyqiR_0E01BbdlD0Yx4Smo,4936
 megadetector/postprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -157,7 +157,7 @@ megadetector/postprocessing/load_api_results.py,sha256=FqcaiPMuqTojZOV3Jn14pJESp
 megadetector/postprocessing/md_to_coco.py,sha256=x3sUnOLd2lVfdG2zRN7k-oUvx6rvRD7DWmWJymPc108,12359
 megadetector/postprocessing/md_to_labelme.py,sha256=hejMKVxaz_xdtsGDPTQkeWuis7gzT-VOrL2Qf8ym1x0,11703
 megadetector/postprocessing/merge_detections.py,sha256=AEMgMivhph1vph_t_Qv85d9iHynT2nvq7otN4KGrDLU,17776
-megadetector/postprocessing/postprocess_batch_results.py,sha256=
+megadetector/postprocessing/postprocess_batch_results.py,sha256=xa1FCQnzo1B6Inq8EWqS_In5xDu3qNzES_YdZ0INKr0,78978
 megadetector/postprocessing/remap_detection_categories.py,sha256=d9IYTa0i_KbbrarJc_mczABmdwypscl5-KpK8Hx_z8o,6640
 megadetector/postprocessing/render_detection_confusion_matrix.py,sha256=_wsk4W0PbNiqmFuHy-EA0Z07B1tQLMsdCTPatnHAdZw,27382
 megadetector/postprocessing/separate_detections_into_folders.py,sha256=k42gxnL8hbBiV0e2T-jmFrhxzIxnhi57Nx9cDSSL5s0,31218

@@ -165,7 +165,7 @@ megadetector/postprocessing/subset_json_detector_output.py,sha256=PDgb6cnsFm9d4E
 megadetector/postprocessing/top_folders_to_bottom.py,sha256=Dqk-KZXiRlIYlmLZmk6aUapmaaLJUKOf8wK1kxt9W6A,6283
 megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py,sha256=e4Y9CyMyd-bLN3il8tu76vI0nVYHZlhZr6vcL0J4zQ0,9832
 megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py,sha256=tARPxuY0OyQgpKU2XqiQPko3f-hHnWuISB8ZlZgXwxI,2819
-megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py,sha256=
+megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py,sha256=vEmWLSSv0_rxDwhjz_S9YaKZ_LM2tADTz2JYb_zUCnc,67923
 megadetector/taxonomy_mapping/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py,sha256=6D_YHTeWTs6O8S9ABog2t9-wfQSh9dW2k9XTqXUZKfo,17927
 megadetector/taxonomy_mapping/map_new_lila_datasets.py,sha256=FSJ6ygpADtlYLf5Bhp9kMb5km2-MH0mmM_ccyStxo34,4054

@@ -179,24 +179,25 @@ megadetector/taxonomy_mapping/taxonomy_graph.py,sha256=ayrTFseVaIMbtMXhnjWCkZdxI
 megadetector/taxonomy_mapping/validate_lila_category_mappings.py,sha256=1qyZr23bvZSVUYLQnO1XAtIZ4jdpARA5dxt8euKVyOA,2527
 megadetector/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 megadetector/utils/azure_utils.py,sha256=0BdnkG2hW-X0yFpsJqmBhOd2wysz_LvhuyImPJMVPJs,6271
-megadetector/utils/ct_utils.py,sha256=
+megadetector/utils/ct_utils.py,sha256=1LXAjnzeeFeQqp59cWn3Nxt5OQk3t2DfO5wQ30flA5E,19441
 megadetector/utils/directory_listing.py,sha256=r4rg2xA4O9ZVxVtzPZzXIXa0DOEukAJMTTNcNSiQcuM,9668
-megadetector/utils/md_tests.py,sha256=
+megadetector/utils/md_tests.py,sha256=n_5PkeUComn8pbvN-sLS4XdNsNPnvz8jk6DhrCcm9PU,58225
 megadetector/utils/path_utils.py,sha256=o68jfPDaLj3NizipVCQEnmB5GfPHpMOLUmQWamYM4w0,37165
 megadetector/utils/process_utils.py,sha256=2SdFVxqob-YUW2BTjUEavNuRH3jA4V05fbKMtrVSd3c,5635
 megadetector/utils/sas_blob_utils.py,sha256=k76EcMmJc_otrEHcfV2fxAC6fNhxU88FxM3ddSYrsKU,16917
 megadetector/utils/split_locations_into_train_val.py,sha256=jvaDu1xKB51L3Xq2nXQo0XtXRjNRf8RglBApl1g6gHo,10101
 megadetector/utils/string_utils.py,sha256=ZQapJodzvTDyQhjZgMoMl3-9bqnKAUlORpws8Db9AkA,2050
-megadetector/utils/
+megadetector/utils/torch_test.py,sha256=aEYE-1vGt5PujD0bHAVRTJiLrKFlGWpS8zeYhqEYZLY,853
+megadetector/utils/url_utils.py,sha256=yybWwJ-vl2A6Fci66i-xt_dl3Uqh72Ylnb8XOT2Grog,14835
 megadetector/utils/write_html_image_list.py,sha256=apzoWkgZWG-ybCT4k92PlS4-guN_sNBSMMMbj7Cfm1k,8638
 megadetector/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 megadetector/visualization/plot_utils.py,sha256=lOfU3uPrcuHZagV_1SN8erT8PujIepocgw6KZ17Ej6c,10671
 megadetector/visualization/render_images_with_thumbnails.py,sha256=kgJYW8BsqRO4C7T3sqItdBuSkZ64I1vOtIWAsVG4XBI,10589
-megadetector/visualization/visualization_utils.py,sha256=
+megadetector/visualization/visualization_utils.py,sha256=J53VsI8aQmzzBBeu-msm8c-qC6pm_HCMkMKYvnylqjo,63083
 megadetector/visualization/visualize_db.py,sha256=x9jScwG-3V-mZGy5cB1s85KWbiAIfvgVUcLqUplHxGA,22110
 megadetector/visualization/visualize_detector_output.py,sha256=LY8QgDWpWlXVLZJUskvT29CdkNvIlEsFTk4DC_lS6pk,17052
-megadetector-5.0.
-megadetector-5.0.
-megadetector-5.0.
-megadetector-5.0.
-megadetector-5.0.
+megadetector-5.0.18.dist-info/LICENSE,sha256=RMa3qq-7Cyk7DdtqRj_bP1oInGFgjyHn9-PZ3PcrqIs,1100
+megadetector-5.0.18.dist-info/METADATA,sha256=utglc5y0-WOnPflCpNPA36mrZMtTN6KTLboBU3LvTaw,7464
+megadetector-5.0.18.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+megadetector-5.0.18.dist-info/top_level.txt,sha256=wf9DXa8EwiOSZ4G5IPjakSxBPxTDjhYYnqWRfR-zS4M,13
+megadetector-5.0.18.dist-info/RECORD,,
{megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/LICENSE
File without changes

{megadetector-5.0.16.dist-info → megadetector-5.0.18.dist-info}/top_level.txt
File without changes