megadetector-5.0.28-py3-none-any.whl → megadetector-5.0.29-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of megadetector might be problematic.
- megadetector/api/batch_processing/api_core/batch_service/score.py +4 -5
- megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +1 -1
- megadetector/api/batch_processing/api_support/summarize_daily_activity.py +1 -1
- megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +2 -2
- megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +1 -1
- megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +1 -1
- megadetector/api/synchronous/api_core/tests/load_test.py +2 -3
- megadetector/classification/aggregate_classifier_probs.py +3 -3
- megadetector/classification/analyze_failed_images.py +5 -5
- megadetector/classification/cache_batchapi_outputs.py +5 -5
- megadetector/classification/create_classification_dataset.py +11 -12
- megadetector/classification/crop_detections.py +10 -10
- megadetector/classification/csv_to_json.py +8 -8
- megadetector/classification/detect_and_crop.py +13 -15
- megadetector/classification/evaluate_model.py +7 -7
- megadetector/classification/identify_mislabeled_candidates.py +6 -6
- megadetector/classification/json_to_azcopy_list.py +1 -1
- megadetector/classification/json_validator.py +29 -32
- megadetector/classification/map_classification_categories.py +9 -9
- megadetector/classification/merge_classification_detection_output.py +12 -9
- megadetector/classification/prepare_classification_script.py +19 -19
- megadetector/classification/prepare_classification_script_mc.py +23 -23
- megadetector/classification/run_classifier.py +4 -4
- megadetector/classification/save_mislabeled.py +6 -6
- megadetector/classification/train_classifier.py +1 -1
- megadetector/classification/train_classifier_tf.py +9 -9
- megadetector/classification/train_utils.py +10 -10
- megadetector/data_management/annotations/annotation_constants.py +1 -1
- megadetector/data_management/camtrap_dp_to_coco.py +45 -45
- megadetector/data_management/cct_json_utils.py +101 -101
- megadetector/data_management/cct_to_md.py +49 -49
- megadetector/data_management/cct_to_wi.py +33 -33
- megadetector/data_management/coco_to_labelme.py +75 -75
- megadetector/data_management/coco_to_yolo.py +189 -189
- megadetector/data_management/databases/add_width_and_height_to_db.py +3 -2
- megadetector/data_management/databases/combine_coco_camera_traps_files.py +38 -38
- megadetector/data_management/databases/integrity_check_json_db.py +202 -188
- megadetector/data_management/databases/subset_json_db.py +33 -33
- megadetector/data_management/generate_crops_from_cct.py +38 -38
- megadetector/data_management/get_image_sizes.py +54 -49
- megadetector/data_management/labelme_to_coco.py +130 -124
- megadetector/data_management/labelme_to_yolo.py +78 -72
- megadetector/data_management/lila/create_lila_blank_set.py +81 -83
- megadetector/data_management/lila/create_lila_test_set.py +32 -31
- megadetector/data_management/lila/create_links_to_md_results_files.py +18 -18
- megadetector/data_management/lila/download_lila_subset.py +21 -24
- megadetector/data_management/lila/generate_lila_per_image_labels.py +91 -91
- megadetector/data_management/lila/get_lila_annotation_counts.py +30 -30
- megadetector/data_management/lila/get_lila_image_counts.py +22 -22
- megadetector/data_management/lila/lila_common.py +70 -70
- megadetector/data_management/lila/test_lila_metadata_urls.py +13 -14
- megadetector/data_management/mewc_to_md.py +339 -340
- megadetector/data_management/ocr_tools.py +258 -252
- megadetector/data_management/read_exif.py +231 -224
- megadetector/data_management/remap_coco_categories.py +26 -26
- megadetector/data_management/remove_exif.py +31 -20
- megadetector/data_management/rename_images.py +187 -187
- megadetector/data_management/resize_coco_dataset.py +41 -41
- megadetector/data_management/speciesnet_to_md.py +41 -41
- megadetector/data_management/wi_download_csv_to_coco.py +55 -55
- megadetector/data_management/yolo_output_to_md_output.py +117 -120
- megadetector/data_management/yolo_to_coco.py +195 -188
- megadetector/detection/change_detection.py +831 -0
- megadetector/detection/process_video.py +340 -337
- megadetector/detection/pytorch_detector.py +304 -262
- megadetector/detection/run_detector.py +177 -164
- megadetector/detection/run_detector_batch.py +364 -363
- megadetector/detection/run_inference_with_yolov5_val.py +328 -325
- megadetector/detection/run_tiled_inference.py +256 -249
- megadetector/detection/tf_detector.py +24 -24
- megadetector/detection/video_utils.py +290 -282
- megadetector/postprocessing/add_max_conf.py +15 -11
- megadetector/postprocessing/categorize_detections_by_size.py +44 -44
- megadetector/postprocessing/classification_postprocessing.py +415 -415
- megadetector/postprocessing/combine_batch_outputs.py +20 -21
- megadetector/postprocessing/compare_batch_results.py +528 -517
- megadetector/postprocessing/convert_output_format.py +97 -97
- megadetector/postprocessing/create_crop_folder.py +219 -146
- megadetector/postprocessing/detector_calibration.py +173 -168
- megadetector/postprocessing/generate_csv_report.py +508 -499
- megadetector/postprocessing/load_api_results.py +23 -20
- megadetector/postprocessing/md_to_coco.py +129 -98
- megadetector/postprocessing/md_to_labelme.py +89 -83
- megadetector/postprocessing/md_to_wi.py +40 -40
- megadetector/postprocessing/merge_detections.py +87 -114
- megadetector/postprocessing/postprocess_batch_results.py +313 -298
- megadetector/postprocessing/remap_detection_categories.py +36 -36
- megadetector/postprocessing/render_detection_confusion_matrix.py +205 -199
- megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +57 -57
- megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +27 -28
- megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +702 -677
- megadetector/postprocessing/separate_detections_into_folders.py +226 -211
- megadetector/postprocessing/subset_json_detector_output.py +265 -262
- megadetector/postprocessing/top_folders_to_bottom.py +45 -45
- megadetector/postprocessing/validate_batch_results.py +70 -70
- megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +52 -52
- megadetector/taxonomy_mapping/map_new_lila_datasets.py +15 -15
- megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +14 -14
- megadetector/taxonomy_mapping/preview_lila_taxonomy.py +66 -66
- megadetector/taxonomy_mapping/retrieve_sample_image.py +16 -16
- megadetector/taxonomy_mapping/simple_image_download.py +8 -8
- megadetector/taxonomy_mapping/species_lookup.py +33 -33
- megadetector/taxonomy_mapping/taxonomy_csv_checker.py +14 -14
- megadetector/taxonomy_mapping/taxonomy_graph.py +10 -10
- megadetector/taxonomy_mapping/validate_lila_category_mappings.py +13 -13
- megadetector/utils/azure_utils.py +22 -22
- megadetector/utils/ct_utils.py +1018 -200
- megadetector/utils/directory_listing.py +21 -77
- megadetector/utils/gpu_test.py +22 -22
- megadetector/utils/md_tests.py +541 -518
- megadetector/utils/path_utils.py +1457 -398
- megadetector/utils/process_utils.py +41 -41
- megadetector/utils/sas_blob_utils.py +53 -49
- megadetector/utils/split_locations_into_train_val.py +61 -61
- megadetector/utils/string_utils.py +147 -26
- megadetector/utils/url_utils.py +463 -173
- megadetector/utils/wi_utils.py +2629 -2526
- megadetector/utils/write_html_image_list.py +137 -137
- megadetector/visualization/plot_utils.py +21 -21
- megadetector/visualization/render_images_with_thumbnails.py +37 -73
- megadetector/visualization/visualization_utils.py +401 -397
- megadetector/visualization/visualize_db.py +197 -190
- megadetector/visualization/visualize_detector_output.py +79 -73
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/METADATA +135 -132
- megadetector-5.0.29.dist-info/RECORD +163 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/WHEEL +1 -1
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/licenses/LICENSE +0 -0
- {megadetector-5.0.28.dist-info → megadetector-5.0.29.dist-info}/top_level.txt +0 -0
- megadetector/data_management/importers/add_nacti_sizes.py +0 -52
- megadetector/data_management/importers/add_timestamps_to_icct.py +0 -79
- megadetector/data_management/importers/animl_results_to_md_results.py +0 -158
- megadetector/data_management/importers/auckland_doc_test_to_json.py +0 -373
- megadetector/data_management/importers/auckland_doc_to_json.py +0 -201
- megadetector/data_management/importers/awc_to_json.py +0 -191
- megadetector/data_management/importers/bellevue_to_json.py +0 -272
- megadetector/data_management/importers/cacophony-thermal-importer.py +0 -793
- megadetector/data_management/importers/carrizo_shrubfree_2018.py +0 -269
- megadetector/data_management/importers/carrizo_trail_cam_2017.py +0 -289
- megadetector/data_management/importers/cct_field_adjustments.py +0 -58
- megadetector/data_management/importers/channel_islands_to_cct.py +0 -913
- megadetector/data_management/importers/eMammal/copy_and_unzip_emammal.py +0 -180
- megadetector/data_management/importers/eMammal/eMammal_helpers.py +0 -249
- megadetector/data_management/importers/eMammal/make_eMammal_json.py +0 -223
- megadetector/data_management/importers/ena24_to_json.py +0 -276
- megadetector/data_management/importers/filenames_to_json.py +0 -386
- megadetector/data_management/importers/helena_to_cct.py +0 -283
- megadetector/data_management/importers/idaho-camera-traps.py +0 -1407
- megadetector/data_management/importers/idfg_iwildcam_lila_prep.py +0 -294
- megadetector/data_management/importers/import_desert_lion_conservation_camera_traps.py +0 -387
- megadetector/data_management/importers/jb_csv_to_json.py +0 -150
- megadetector/data_management/importers/mcgill_to_json.py +0 -250
- megadetector/data_management/importers/missouri_to_json.py +0 -490
- megadetector/data_management/importers/nacti_fieldname_adjustments.py +0 -79
- megadetector/data_management/importers/noaa_seals_2019.py +0 -181
- megadetector/data_management/importers/osu-small-animals-to-json.py +0 -364
- megadetector/data_management/importers/pc_to_json.py +0 -365
- megadetector/data_management/importers/plot_wni_giraffes.py +0 -123
- megadetector/data_management/importers/prepare_zsl_imerit.py +0 -131
- megadetector/data_management/importers/raic_csv_to_md_results.py +0 -416
- megadetector/data_management/importers/rspb_to_json.py +0 -356
- megadetector/data_management/importers/save_the_elephants_survey_A.py +0 -320
- megadetector/data_management/importers/save_the_elephants_survey_B.py +0 -329
- megadetector/data_management/importers/snapshot_safari_importer.py +0 -758
- megadetector/data_management/importers/snapshot_serengeti_lila.py +0 -1067
- megadetector/data_management/importers/snapshotserengeti/make_full_SS_json.py +0 -150
- megadetector/data_management/importers/snapshotserengeti/make_per_season_SS_json.py +0 -153
- megadetector/data_management/importers/sulross_get_exif.py +0 -65
- megadetector/data_management/importers/timelapse_csv_set_to_json.py +0 -490
- megadetector/data_management/importers/ubc_to_json.py +0 -399
- megadetector/data_management/importers/umn_to_json.py +0 -507
- megadetector/data_management/importers/wellington_to_json.py +0 -263
- megadetector/data_management/importers/wi_to_json.py +0 -442
- megadetector/data_management/importers/zamba_results_to_md_results.py +0 -180
- megadetector/data_management/lila/add_locations_to_island_camera_traps.py +0 -101
- megadetector/data_management/lila/add_locations_to_nacti.py +0 -151
- megadetector-5.0.28.dist-info/RECORD +0 -209
Diff body for megadetector/detection/video_utils.py follows. Indentation and trailing whitespace were lost in this rendering, so many paired -/+ lines below appear identical and differ only in whitespace.

@@ -21,7 +21,7 @@ from tqdm import tqdm
 from functools import partial
 from inspect import signature

-from megadetector.utils import path_utils
+from megadetector.utils import path_utils
 from megadetector.utils.ct_utils import sort_list_of_dicts_by_key
 from megadetector.visualization import visualization_utils as vis_utils

@@ -37,15 +37,15 @@ def is_video_file(s,video_extensions=VIDEO_EXTENSIONS):
 Checks a file's extension against a set of known video file
 extensions to determine whether it's a video file. Performs a
 case-insensitive comparison.
-
+
 Args:
 s (str): filename to check for probable video-ness
 video_extensions (list, optional): list of video file extensions
-
+
 Returns:
 bool: True if this looks like a video file, else False
 """
-
+
 ext = os.path.splitext(s)[1]
 return ext.lower() in video_extensions

@@ -54,49 +54,49 @@ def find_video_strings(strings):
 """
 Given a list of strings that are potentially video file names, looks for
 strings that actually look like video file names (based on extension).
-
+
 Args:
 strings (list): list of strings to check for video-ness
-
+
 Returns:
 list: a subset of [strings] that looks like they are video filenames
 """
-
+
 return [s for s in strings if is_video_file(s.lower())]


-def find_videos(dirname,
+def find_videos(dirname,
 recursive=False,
 convert_slashes=True,
 return_relative_paths=False):
 """
 Finds all files in a directory that look like video file names.
-
+
 Args:
 dirname (str): folder to search for video files
 recursive (bool, optional): whether to search [dirname] recursively
 convert_slashes (bool, optional): forces forward slashes in the returned files,
 otherwise uses the native path separator
-return_relative_paths (bool, optional): forces the returned filenames to be
+return_relative_paths (bool, optional): forces the returned filenames to be
 relative to [dirname], otherwise returns absolute paths
-
+
 Returns:
 A list of filenames within [dirname] that appear to be videos
 """
-
+
 if recursive:
 files = glob.glob(os.path.join(dirname, '**', '*.*'), recursive=True)
 else:
 files = glob.glob(os.path.join(dirname, '*.*'))
-
+
 files = [fn for fn in files if os.path.isfile(fn)]
-
+
 if return_relative_paths:
 files = [os.path.relpath(fn,dirname) for fn in files]

 if convert_slashes:
 files = [fn.replace('\\', '/') for fn in files]
-
+
 return find_video_strings(files)


@@ -104,30 +104,30 @@ def find_videos(dirname,

 # http://tsaith.github.io/combine-images-into-a-video-with-python-3-and-opencv-3.html

-def frames_to_video(images,
+def frames_to_video(images, fs, output_file_name, codec_spec=default_fourcc):
 """
 Given a list of image files and a sample rate, concatenates those images into
 a video and writes to a new video file.
-
+
 Args:
 images (list): a list of frame file names to concatenate into a video
-
+fs (float): the frame rate in fps
 output_file_name (str): the output video file, no checking is performed to make
 sure the extension is compatible with the codec
-codec_spec (str, optional): codec to use for encoding; h264 is a sensible default
-and generally works on Windows, but when this fails (which is around 50% of the time
+codec_spec (str, optional): codec to use for encoding; h264 is a sensible default
+and generally works on Windows, but when this fails (which is around 50% of the time
 on Linux), mp4v is a good second choice
 """
-
+
 if codec_spec is None:
 codec_spec = 'h264'
-
+
 if len(images) == 0:
 print('Warning: no frames to render')
 return

 os.makedirs(os.path.dirname(output_file_name),exist_ok=True)
-
+
 # Determine the width and height from the first image
 frame = cv2.imread(images[0])
 cv2.imshow('video',frame)
@@ -135,7 +135,7 @@ def frames_to_video(images, Fs, output_file_name, codec_spec=default_fourcc):

 # Define the codec and create VideoWriter object
 fourcc = cv2.VideoWriter_fourcc(*codec_spec)
-out = cv2.VideoWriter(output_file_name, fourcc,
+out = cv2.VideoWriter(output_file_name, fourcc, fs, (width, height))

 for image in images:
 frame = cv2.imread(image)
@@ -148,40 +148,41 @@ def frames_to_video(images, Fs, output_file_name, codec_spec=default_fourcc):
 def get_video_fs(input_video_file):
 """
 Retrieves the frame rate of [input_video_file].
-
+
 Args:
 input_video_file (str): video file for which we want the frame rate
-
+
 Returns:
 float: the frame rate of [input_video_file]
 """
-
-assert os.path.isfile(input_video_file), 'File {} not found'.format(input_video_file)
+
+assert os.path.isfile(input_video_file), 'File {} not found'.format(input_video_file)
 vidcap = cv2.VideoCapture(input_video_file)
-
+fs = vidcap.get(cv2.CAP_PROP_FPS)
 vidcap.release()
-return
+return fs


 def _frame_number_to_filename(frame_number):
 """
 Ensures that frame images are given consistent filenames.
 """
-
+
 return 'frame{:06d}.jpg'.format(frame_number)


 def _filename_to_frame_number(filename):
 """
-Extract the frame number from a filename that was created using
+Extract the frame number from a filename that was created using
 _frame_number_to_filename.
-
+
 Args:
 filename (str): a filename created with _frame_number_to_filename.
+
 Returns:
 int: the frame number extracted from [filename]
 """
-
+
 filename = os.path.basename(filename)
 match = re.search(r'frame(\d+)\.jpg', filename)
 if match is None:
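With the fix above, get_video_fs() now reads and returns the frame rate instead of returning nothing. A minimal usage sketch; the clip path is a placeholder, not from the package:

```python
from megadetector.detection.video_utils import get_video_fs

# Hypothetical input file; any video readable by OpenCV should work
fs = get_video_fs('camera01/clip_0001.mp4')
print('Frame rate: {:.2f} fps'.format(fs))
```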
@@ -189,9 +190,9 @@ def _filename_to_frame_number(filename):
 frame_number = match.group(1)
 try:
 frame_number = int(frame_number)
-except:
+except Exception:
 raise ValueError('Filename {} does contain a valid frame number'.format(filename))
-
+
 return frame_number


@@ -199,38 +200,38 @@ def _add_frame_numbers_to_results(results):
 """
 Given the 'images' list from a set of MD results that was generated on video frames,
 add a 'frame_number' field to each image, and return the list, sorted by frame number.
-
+
 Args:
-results (list): list of image dicts
+results (list): list of image dicts
 """
-
+
 # Add video-specific fields to the results
 for im in results:
 fn = im['file']
 frame_number = _filename_to_frame_number(fn)
 im['frame_number'] = frame_number
-
+
 results = sort_list_of_dicts_by_key(results,'frame_number')
 return results
-

-
+
+def run_callback_on_frames(input_video_file,
 frame_callback,
-every_n_frames=None,
-verbose=False,
+every_n_frames=None,
+verbose=False,
 frames_to_process=None,
 allow_empty_videos=False):
 """
 Calls the function frame_callback(np.array,image_id) on all (or selected) frames in
 [input_video_file].
-
+
 Args:
 input_video_file (str): video file to process
-frame_callback (function): callback to run on frames, should take an np.array and a string and
+frame_callback (function): callback to run on frames, should take an np.array and a string and
 return a single value. callback should expect PIL-formatted (RGB) images.
 every_n_frames (float, optional): sample every Nth frame starting from the first frame;
 if this is None or 1, every frame is processed. If this is a negative value, it's
-interpreted as a sampling rate in seconds, which is rounded to the nearest frame sampling
+interpreted as a sampling rate in seconds, which is rounded to the nearest frame sampling
 rate. Mutually exclusive with frames_to_process.
 verbose (bool, optional): enable additional debug console output
 frames_to_process (list of int, optional): process this specific set of frames;
@@ -239,43 +240,43 @@ def run_callback_on_frames(input_video_file,
 a single frame number.
 allow_empty_videos (bool, optional): Just print a warning if a video appears to have no
 frames (by default, this is an error).
-
+
 Returns:
 dict: dict with keys 'frame_filenames' (list), 'frame_rate' (float), 'results' (list).
 'frame_filenames' are synthetic filenames (e.g. frame000000.jpg); 'results' are
 in the same format used in the 'images' array in the MD results format.
 """
-
+
 assert os.path.isfile(input_video_file), 'File {} not found'.format(input_video_file)
-
+
 if isinstance(frames_to_process,int):
 frames_to_process = [frames_to_process]
-
+
 if (frames_to_process is not None) and (every_n_frames is not None):
 raise ValueError('frames_to_process and every_n_frames are mutually exclusive')
-
+
 vidcap = cv2.VideoCapture(input_video_file)
 n_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
 frame_rate = vidcap.get(cv2.CAP_PROP_FPS)
-
+
 if verbose:
 print('Video {} contains {} frames at {} Hz'.format(input_video_file,n_frames,frame_rate))

 frame_filenames = []
 results = []
-
+
 if (every_n_frames is not None) and (every_n_frames < 0):
 every_n_seconds = abs(every_n_frames)
 every_n_frames = int(every_n_seconds * frame_rate)
 if verbose:
 print('Interpreting a time sampling rate of {} hz as a frame interval of {}'.format(
 every_n_seconds,every_n_frames))
-
+
 # frame_number = 0
 for frame_number in range(0,n_frames):

 success,image = vidcap.read()
-
+
 if not success:
 assert image is None
 if verbose:
@@ -291,88 +292,88 @@ def run_callback_on_frames(input_video_file,
 break
 if frame_number not in frames_to_process:
 continue
-
-frame_filename_relative = _frame_number_to_filename(frame_number)
+
+frame_filename_relative = _frame_number_to_filename(frame_number)
 frame_filenames.append(frame_filename_relative)
-
-image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
 frame_results = frame_callback(image_np,frame_filename_relative)
 results.append(frame_results)
-
-# ...for each frame
-
+
+# ...for each frame
+
 if len(frame_filenames) == 0:
 if allow_empty_videos:
 print('Warning: found no frames in file {}'.format(input_video_file))
 else:
 raise Exception('Error: found no frames in file {}'.format(input_video_file))
-
+
 if verbose:
 print('\nProcessed {} of {} frames for {}'.format(
 len(frame_filenames),n_frames,input_video_file))

-vidcap.release()
+vidcap.release()
 to_return = {}
 to_return['frame_filenames'] = frame_filenames
 to_return['frame_rate'] = frame_rate
 to_return['results'] = results
-
+
 return to_return

 # ...def run_callback_on_frames(...)


-def run_callback_on_frames_for_folder(input_video_folder,
+def run_callback_on_frames_for_folder(input_video_folder,
 frame_callback,
-every_n_frames=None,
-verbose=False,
+every_n_frames=None,
+verbose=False,
 allow_empty_videos=False,
 recursive=True):
 """
-Calls the function frame_callback(np.array,image_id) on all (or selected) frames in
+Calls the function frame_callback(np.array,image_id) on all (or selected) frames in
 all videos in [input_video_folder].
-
+
 Args:
 input_video_folder (str): video folder to process
-frame_callback (function): callback to run on frames, should take an np.array and a string and
+frame_callback (function): callback to run on frames, should take an np.array and a string and
 return a single value. callback should expect PIL-formatted (RGB) images.
 every_n_frames (int, optional): sample every Nth frame starting from the first frame;
-if this is None or 1, every frame is processed. If this is a negative value, it's
-interpreted as a sampling rate in seconds, which is rounded to the nearest frame
+if this is None or 1, every frame is processed. If this is a negative value, it's
+interpreted as a sampling rate in seconds, which is rounded to the nearest frame
 sampling rate.
 verbose (bool, optional): enable additional debug console output
 allow_empty_videos (bool, optional): Just print a warning if a video appears to have no
 frames (by default, this is an error).
 recursive (bool, optional): recurse into [input_video_folder]
-
+
 Returns:
 dict: dict with keys 'video_filenames' (list of str), 'frame_rates' (list of floats),
 'results' (list of list of dicts). 'video_filenames' will contain *relative* filenames.
 """
-
+
 to_return = {'video_filenames':[],'frame_rates':[],'results':[]}
-
+
 # Recursively enumerate video files
 input_files_full_paths = find_videos(input_video_folder,
 recursive=recursive,
 convert_slashes=True,
 return_relative_paths=False)
 print('Found {} videos in folder {}'.format(len(input_files_full_paths),input_video_folder))
-
+
 if len(input_files_full_paths) == 0:
 return to_return
-
+
 # Process each video
-
+
 # video_fn_abs = input_files_full_paths[0]
 for video_fn_abs in tqdm(input_files_full_paths):
 video_results = run_callback_on_frames(input_video_file=video_fn_abs,
 frame_callback=frame_callback,
-every_n_frames=every_n_frames,
-verbose=verbose,
+every_n_frames=every_n_frames,
+verbose=verbose,
 frames_to_process=None,
 allow_empty_videos=allow_empty_videos)
-
+
 """
 dict: dict with keys 'frame_filenames' (list), 'frame_rate' (float), 'results' (list).
 'frame_filenames' are synthetic filenames (e.g. frame000000.jpg); 'results' are
@@ -386,42 +387,42 @@ def run_callback_on_frames_for_folder(input_video_folder,
 assert r['file'].startswith('frame')
 r['file'] = video_filename_relative + '/' + r['file']
 to_return['results'].append(video_results['results'])
-
+
 # ...for each video
-
+
 n_videos = len(input_files_full_paths)
 assert len(to_return['video_filenames']) == n_videos
 assert len(to_return['frame_rates']) == n_videos
 assert len(to_return['results']) == n_videos
-
+
 return to_return

 # ...def run_callback_on_frames_for_folder(...)

-
-def video_to_frames(input_video_file,
-output_folder,
-overwrite=True,
-every_n_frames=None,
-verbose=False,
+
+def video_to_frames(input_video_file,
+output_folder,
+overwrite=True,
+every_n_frames=None,
+verbose=False,
 quality=None,
-max_width=None,
+max_width=None,
 frames_to_extract=None,
 allow_empty_videos=False):
 """
 Renders frames from [input_video_file] to .jpg files in [output_folder].
-
+
 With help from:
-
+
 https://stackoverflow.com/questions/33311153/python-extracting-and-saving-video-frames
-
+
 Args:
 input_video_file (str): video file to split into frames
 output_folder (str): folder to put frame images in
 overwrite (bool, optional): whether to overwrite existing frame images
 every_n_frames (int, optional): sample every Nth frame starting from the first frame;
 if this is None or 1, every frame is extracted. If this is a negative value, it's
-interpreted as a sampling rate in seconds, which is rounded to the nearest frame sampling
+interpreted as a sampling rate in seconds, which is rounded to the nearest frame sampling
 rate. Mutually exclusive with frames_to_extract.
 verbose (bool, optional): enable additional debug console output
 quality (int, optional): JPEG quality for frame output, from 0-100. Defaults
@@ -433,57 +434,57 @@ def video_to_frames(input_video_file,
 a single frame number.
 allow_empty_videos (bool, optional): Just print a warning if a video appears to have no
 frames (by default, this is an error).
-
+
 Returns:
 tuple: length-2 tuple containing (list of frame filenames,frame rate)
 """
-
+
 assert os.path.isfile(input_video_file), 'File {} not found'.format(input_video_file)
-
+
 if quality is not None and quality < 0:
 quality = None
-
+
 if isinstance(frames_to_extract,int):
 frames_to_extract = [frames_to_extract]
-
+
 if (frames_to_extract is not None) and (every_n_frames is not None):
 raise ValueError('frames_to_extract and every_n_frames are mutually exclusive')
-
+
 os.makedirs(output_folder,exist_ok=True)
-
+
 vidcap = cv2.VideoCapture(input_video_file)
 n_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
-
-
+fs = vidcap.get(cv2.CAP_PROP_FPS)
+
 if (every_n_frames is not None) and (every_n_frames < 0):
 every_n_seconds = abs(every_n_frames)
-every_n_frames = int(every_n_seconds *
+every_n_frames = int(every_n_seconds * fs)
 if verbose:
 print('Interpreting a time sampling rate of {} hz as a frame interval of {}'.format(
 every_n_seconds,every_n_frames))
-
+
 # If we're not over-writing, check whether all frame images already exist
-if overwrite
-
+if not overwrite:
+
 missing_frame_number = None
 missing_frame_filename = None
 frame_filenames = []
 found_existing_frame = False
-
+
 for frame_number in range(0,n_frames):
-
+
 if every_n_frames is not None:
 assert frames_to_extract is None, \
 'Internal error: frames_to_extract and every_n_frames are exclusive'
 if (frame_number % every_n_frames) != 0:
 continue
-
+
 if frames_to_extract is not None:
 assert every_n_frames is None, \
 'Internal error: frames_to_extract and every_n_frames are exclusive'
 if frame_number not in frames_to_extract:
 continue
-
+
 frame_filename = _frame_number_to_filename(frame_number)
 frame_filename = os.path.join(output_folder,frame_filename)
 frame_filenames.append(frame_filename)
@@ -494,39 +495,39 @@ def video_to_frames(input_video_file,
 missing_frame_number = frame_number
 missing_frame_filename = frame_filename
 break
-
+
 if verbose and missing_frame_number is not None:
 print('Missing frame {} ({}) for video {}'.format(
 missing_frame_number,
 missing_frame_filename,
 input_video_file))
-
+
 # OpenCV seems to over-report the number of frames by 1 in some cases, or fails
 # to read the last frame; either way, I'm allowing one missing frame.
 allow_last_frame_missing = True
-
+
 # This doesn't have to mean literally the last frame number, it just means that if
 # we find this frame or later, we consider the video done
 last_expected_frame_number = n_frames-1
 if every_n_frames is not None:
 last_expected_frame_number -= (every_n_frames*2)
-
+
 # When specific frames are requested, if anything is missing, reprocess the video
 if (frames_to_extract is not None) and (missing_frame_number is not None):
-
+
 pass
-
+
 # If no frames are missing, or only frames very close to the end of the video are "missing",
 # skip this video
 elif (missing_frame_number is None) or \
 (allow_last_frame_missing and (missing_frame_number >= last_expected_frame_number)):
-
+
 if verbose:
 print('Skipping video {}, all output frames exist'.format(input_video_file))
-return frame_filenames,
-
+return frame_filenames,fs
+
 else:
-
+
 # If we found some frames, but not all, print a message
 if verbose and found_existing_frame:
 print("Rendering video {}, couldn't find frame {} ({}) of {}".format(
@@ -534,17 +535,17 @@ def video_to_frames(input_video_file,
 missing_frame_number,
 missing_frame_filename,
 last_expected_frame_number))
-
+
 # ...if we need to check whether to skip this video entirely
-
+
 if verbose:
-print('Video {} contains {} frames at {} Hz'.format(input_video_file,n_frames,
+print('Video {} contains {} frames at {} Hz'.format(input_video_file,n_frames,fs))

 frame_filenames = []

-# YOLOv5 does some totally bananas monkey-patching of opencv, which causes
-# problems if we try to supply a third parameter to imwrite (to specify JPEG
-# quality). Detect this case, and ignore the quality parameter if it looks
+# YOLOv5 does some totally bananas monkey-patching of opencv, which causes
+# problems if we try to supply a third parameter to imwrite (to specify JPEG
+# quality). Detect this case, and ignore the quality parameter if it looks
 # like imwrite has been messed with.
 #
 # See:
@@ -552,7 +553,7 @@ def video_to_frames(input_video_file,
 # https://github.com/ultralytics/yolov5/issues/7285
 imwrite_patched = False
 n_imwrite_parameters = None
-
+
 try:
 # calling signature() on the native cv2.imwrite function will
 # fail, so an exception here is a good thing. In fact I don't think
@@ -562,12 +563,12 @@ def video_to_frames(input_video_file,
 n_imwrite_parameters = len(sig.parameters)
 except Exception:
 pass
-
+
 if (n_imwrite_parameters is not None) and (n_imwrite_parameters < 3):
 imwrite_patched = True
 if verbose and (quality is not None):
 print('Warning: quality value supplied, but YOLOv5 has mucked with cv2.imwrite, ignoring quality')
-
+
 # for frame_number in tqdm(range(0,n_frames)):
 for frame_number in range(0,n_frames):

@@ -587,40 +588,40 @@ def video_to_frames(input_video_file,
 break
 if frame_number not in frames_to_extract:
 continue
-
+
 # Has resizing been requested?
 if max_width is not None:
-
-# image.shape is h/w/dims
+
+# image.shape is h/w/dims
 input_shape = image.shape
 assert input_shape[2] == 3
 input_width = input_shape[1]
-
+
 # Is resizing necessary?
 if input_width > max_width:
-
+
 scale = max_width / input_width
 assert scale <= 1.0
-
+
 # INTER_AREA is recommended for size reduction
 image = cv2.resize(image, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
-
+
 # ...if we need to deal with resizing
-
-frame_filename_relative = _frame_number_to_filename(frame_number)
+
+frame_filename_relative = _frame_number_to_filename(frame_number)
 frame_filename = os.path.join(output_folder,frame_filename_relative)
 frame_filenames.append(frame_filename)
-
-if overwrite
+
+if (not overwrite) and (os.path.isfile(frame_filename)):
 # print('Skipping frame {}'.format(frame_filename))
-pass
+pass
 else:
 try:
 if frame_filename.isascii():
-
+
 if quality is None or imwrite_patched:
 cv2.imwrite(os.path.normpath(frame_filename),image)
-else:
+else:
 cv2.imwrite(os.path.normpath(frame_filename),image,
 [int(cv2.IMWRITE_JPEG_QUALITY), quality])
 else:
@@ -639,19 +640,19 @@ def video_to_frames(input_video_file,
 print('Error on frame {} of {}: {}'.format(frame_number,n_frames,str(e)))

 # ...for each frame
-
+
 if len(frame_filenames) == 0:
 if allow_empty_videos:
 print('Warning: found no frames in file {}'.format(input_video_file))
 else:
 raise Exception('Error: found no frames in file {}'.format(input_video_file))
-
+
 if verbose:
 print('\nExtracted {} of {} frames for {}'.format(
 len(frame_filenames),n_frames,input_video_file))

-vidcap.release()
-return frame_filenames,
+vidcap.release()
+return frame_filenames,fs

 # ...def video_to_frames(...)

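The hunk above restores the frame rate in the return value, so video_to_frames() again yields (frame filenames, fps). A short usage sketch based on the signature shown earlier in this diff; paths are placeholders:

```python
from megadetector.detection.video_utils import video_to_frames

# A negative every_n_frames value is interpreted as a sampling interval in seconds,
# so -1 extracts roughly one frame per second (per the docstring above)
frame_filenames, fs = video_to_frames('clips/clip_0001.mp4',
                                      'frames/clip_0001',
                                      every_n_frames=-1,
                                      overwrite=False)
print('Wrote {} frames at {:.2f} fps'.format(len(frame_filenames), fs))
```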
@@ -660,11 +661,11 @@ def _video_to_frames_for_folder(relative_fn,input_folder,output_folder_base,
 every_n_frames,overwrite,verbose,quality,max_width,
 frames_to_extract,allow_empty_videos):
 """
-Internal function to call video_to_frames for a single video in the context of
-video_folder_to_frames; makes sure the right output folder exists, then calls
+Internal function to call video_to_frames for a single video in the context of
+video_folder_to_frames; makes sure the right output folder exists, then calls
 video_to_frames.
-"""
-
+"""
+
 input_fn_absolute = os.path.join(input_folder,relative_fn)
 assert os.path.isfile(input_fn_absolute),\
 'Could not find file {}'.format(input_fn_absolute)
@@ -684,26 +685,26 @@ def _video_to_frames_for_folder(relative_fn,input_folder,output_folder_base,
 max_width=max_width,
 frames_to_extract=frames_to_extract,
 allow_empty_videos=allow_empty_videos)
-
+
 return frame_filenames,fs


 def video_folder_to_frames(input_folder,
-output_folder_base,
-recursive=True,
+output_folder_base,
+recursive=True,
 overwrite=True,
 n_threads=1,
 every_n_frames=None,
 verbose=False,
 parallelization_uses_threads=True,
 quality=None,
-max_width=None,
+max_width=None,
 frames_to_extract=None,
 allow_empty_videos=False):
 """
-For every video file in input_folder, creates a folder within output_folder_base, and
+For every video file in input_folder, creates a folder within output_folder_base, and
 renders frame of that video to images in that folder.
-
+
 Args:
 input_folder (str): folder to process
 output_folder_base (str): root folder for output images; subfolders will be
@@ -714,7 +715,7 @@ def video_folder_to_frames(input_folder,
 parallelism
 every_n_frames (int, optional): sample every Nth frame starting from the first frame;
 if this is None or 1, every frame is extracted. If this is a negative value, it's
-interpreted as a sampling rate in seconds, which is rounded to the nearest frame
+interpreted as a sampling rate in seconds, which is rounded to the nearest frame
 sampling rate. Mutually exclusive with frames_to_extract.
 verbose (bool, optional): enable additional debug console output
 parallelization_uses_threads (bool, optional): whether to use threads (True) or
@@ -723,20 +724,20 @@ def video_folder_to_frames(input_folder,
 to the opencv default (typically 95).
 max_width (int, optional): resize frames to be no wider than [max_width]
 frames_to_extract (list of int, optional): extract this specific set of frames from
-each video; mutually exclusive with every_n_frames. If all values are beyond
-the length of a video, no frames are extracted. Can also be a single int,
+each video; mutually exclusive with every_n_frames. If all values are beyond
+the length of a video, no frames are extracted. Can also be a single int,
 specifying a single frame number.
 allow_empty_videos (bool, optional): Just print a warning if a video appears to have no
 frames (by default, this is an error).
-
+
 Returns:
 tuple: a length-3 tuple containing:
-- list of lists of frame filenames; the Nth list of frame filenames corresponds to
+- list of lists of frame filenames; the Nth list of frame filenames corresponds to
 the Nth video
 - list of video frame rates; the Nth value corresponds to the Nth video
-- list of video filenames
+- list of video filenames
 """
-
+
 # Recursively enumerate video files
 if verbose:
 print('Enumerating videos in {}'.format(input_folder))
@@ -745,21 +746,21 @@ def video_folder_to_frames(input_folder,
 print('Found {} videos in folder {}'.format(len(input_files_full_paths),input_folder))
 if len(input_files_full_paths) == 0:
 return [],[],[]
-
+
 input_files_relative_paths = [os.path.relpath(s,input_folder) for s in input_files_full_paths]
 input_files_relative_paths = [s.replace('\\','/') for s in input_files_relative_paths]
-
-os.makedirs(output_folder_base,exist_ok=True)
-
+
+os.makedirs(output_folder_base,exist_ok=True)
+
 frame_filenames_by_video = []
 fs_by_video = []
-
+
 if n_threads == 1:
 # For each video
 #
 # input_fn_relative = input_files_relative_paths[0]
 for input_fn_relative in tqdm(input_files_relative_paths):
-
+
 frame_filenames,fs = \
 _video_to_frames_for_folder(input_fn_relative,input_folder,output_folder_base,
 every_n_frames,overwrite,verbose,quality,max_width,
@@ -767,97 +768,104 @@ def video_folder_to_frames(input_folder,
 frame_filenames_by_video.append(frame_filenames)
 fs_by_video.append(fs)
 else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+pool = None
+results = None
+try:
+if parallelization_uses_threads:
+print('Starting a worker pool with {} threads'.format(n_threads))
+pool = ThreadPool(n_threads)
+else:
+print('Starting a worker pool with {} processes'.format(n_threads))
+pool = Pool(n_threads)
+process_video_with_options = partial(_video_to_frames_for_folder,
+input_folder=input_folder,
+output_folder_base=output_folder_base,
+every_n_frames=every_n_frames,
+overwrite=overwrite,
+verbose=verbose,
+quality=quality,
+max_width=max_width,
+frames_to_extract=frames_to_extract,
+allow_empty_videos=allow_empty_videos)
+results = list(tqdm(pool.imap(
+partial(process_video_with_options),input_files_relative_paths),
+total=len(input_files_relative_paths)))
+finally:
+pool.close()
+pool.join()
+print("Pool closed and joined for video processing")
 frame_filenames_by_video = [x[0] for x in results]
 fs_by_video = [x[1] for x in results]
-
+
 return frame_filenames_by_video,fs_by_video,input_files_full_paths
-
+
 # ...def video_folder_to_frames(...)


 class FrameToVideoOptions:
 """
 Options controlling the conversion of frame-level results to video-level results via
-frame_results_to_video_results()
+frame_results_to_video_results()
 """
-
+
 def __init__(self):
-
+
 #: One-indexed indicator of which frame-level confidence value to use to determine detection confidence
 #: for the whole video, i.e. "1" means "use the confidence value from the highest-confidence frame"
 self.nth_highest_confidence = 1
-
+
 #: Should we include just a single representative frame result for each video (default), or
 #: every frame that was processed?
 self.include_all_processed_frames = False
-
-#: What to do if a file referred to in a .json results file appears not to be a
+
+#: What to do if a file referred to in a .json results file appears not to be a
 #: video; can be 'error' or 'skip_with_warning'
 self.non_video_behavior = 'error'
-
+

 def frame_results_to_video_results(input_file,
 output_file,
 options=None,
 video_filename_to_frame_rate=None):
 """
-Given an MD results file produced at the *frame* level, corresponding to a directory
-created with video_folder_to_frames, maps those frame-level results back to the
+Given an MD results file produced at the *frame* level, corresponding to a directory
+created with video_folder_to_frames, maps those frame-level results back to the
 video level for use in Timelapse.
-
+
 Preserves everything in the input .json file other than the images.
-
+
 Args:
 input_file (str): the frame-level MD results file to convert to video-level results
 output_file (str): the .json file to which we should write video-level results
 options (FrameToVideoOptions, optional): parameters for converting frame-level results
-to video-level results, see FrameToVideoOptions for details
+to video-level results, see FrameToVideoOptions for details
 video_filename_to_frame_rate (dict): maps (relative) video path names to frame rates,
 used only to populate the output file
 """

 if options is None:
 options = FrameToVideoOptions()
-
+
 # Load results
 with open(input_file,'r') as f:
 input_data = json.load(f)

 images = input_data['images']
 detection_categories = input_data['detection_categories']
-
-
+
+
 ## Break into videos
-
-video_to_frame_info = defaultdict(list)
-
+
+video_to_frame_info = defaultdict(list)
+
 # im = images[0]
 for im in tqdm(images):
-
+
 fn = im['file']
 video_name = os.path.dirname(fn)
-
+
 if not is_video_file(video_name):
-
+
 if options.non_video_behavior == 'error':
 raise ValueError('{} is not a video file'.format(video_name))
 elif options.non_video_behavior == 'skip_with_warning':
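The rewritten block above wraps pool creation in try/finally so the workers are always closed and joined, even if a video raises partway through the batch. A minimal sketch of the same pattern in isolation; the helper name is hypothetical, and the None guard is my addition rather than something present in the diff:

```python
from multiprocessing.pool import Pool, ThreadPool

def run_in_pool(func, items, n_workers, use_threads=True):
    pool = None
    try:
        pool = ThreadPool(n_workers) if use_threads else Pool(n_workers)
        # imap preserves input order and lets progress wrappers consume results lazily
        return list(pool.imap(func, items))
    finally:
        # Guard against the case where pool creation itself failed
        if pool is not None:
            pool.close()
            pool.join()
```

Closing and joining in finally is the design point: without it, a failing worker item could leave threads or child processes running after the caller sees the exception.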
@@ -866,74 +874,74 @@ def frame_results_to_video_results(input_file,
 else:
 raise ValueError('Unrecognized non-video handling behavior: {}'.format(
 options.non_video_behavior))
-
+
 # Attach video-specific fields to the output, specifically attach the frame
-# number to both the video and each detection. Only the frame number for the
+# number to both the video and each detection. Only the frame number for the
 # canonical detection will end up in the video-level output file.
 frame_number = _filename_to_frame_number(fn)
 im['frame_number'] = frame_number
-for detection in im['detections']:
+for detection in im['detections']:
 detection['frame_number'] = frame_number
-
+
 video_to_frame_info[video_name].append(im)
-
+
 # ...for each frame referred to in the results file
-
+
 print('Found {} unique videos in {} frame-level results'.format(
 len(video_to_frame_info),len(images)))
-
+
 output_images = []
-
-
+
+
 ## For each video...
-
+
 # video_name = list(video_to_frame_info.keys())[0]
 for video_name in tqdm(video_to_frame_info):
-
+
 # Prepare the output representation for this video
 im_out = {}
 im_out['file'] = video_name
-
+
 if (video_filename_to_frame_rate is not None) and \
 (video_name in video_filename_to_frame_rate):
 im_out['frame_rate'] = video_filename_to_frame_rate[video_name]
-
+
 # Find all detections for this video
 all_detections_this_video = []
-
+
 frames = video_to_frame_info[video_name]
-
+
 # frame = frames[0]
 for frame in frames:
-if ('detections' in frame) and (frame['detections'] is not None):
+if ('detections' in frame) and (frame['detections'] is not None):
 all_detections_this_video.extend(frame['detections'])
-
+
 # Should we keep detections for all frames?
 if (options.include_all_processed_frames):
-
+
 im_out['detections'] = all_detections_this_video
-
+
 # ...or should we keep just a canonical detection for each category?
 else:
-
+
 canonical_detections = []
-
+
 # category_id = list(detection_categories.keys())[0]
 for category_id in detection_categories:
-
+
 category_detections = [det for det in all_detections_this_video if \
 det['category'] == category_id]
-
+
 # Find the nth-highest-confidence video to choose a confidence value
 if len(category_detections) >= options.nth_highest_confidence:
-
-category_detections_by_confidence = sorted(category_detections,
+
+category_detections_by_confidence = sorted(category_detections,
 key = lambda i: i['conf'],reverse=True)
 canonical_detection = category_detections_by_confidence[options.nth_highest_confidence-1]
 canonical_detections.append(canonical_detection)
-
+
 im_out['detections'] = canonical_detections
-
+
 # 'max_detection_conf' is no longer included in output files by default
 if False:
 im_out['max_detection_conf'] = 0
@@ -942,19 +950,19 @@ def frame_results_to_video_results(input_file,
 im_out['max_detection_conf'] = max(confidences)

 # ...if we're keeping output for all frames / canonical frames
-
+
 output_images.append(im_out)
-
+
 # ...for each video
-
+
 output_data = input_data
 output_data['images'] = output_images
 s = json.dumps(output_data,indent=1)
-
+
 # Write the output file
 with open(output_file,'w') as f:
 f.write(s)
-
+
 # ...def frame_results_to_video_results(...)

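For context, a hedged end-to-end sketch of how the functions in this diff fit together. Folder names and the intermediate results file are placeholders, and mapping the absolute video paths returned by video_folder_to_frames() to the relative keys expected by frame_results_to_video_results() is an assumption on my part:

```python
import os
from megadetector.detection import video_utils

# 1) Split every video under 'videos_in' into frames (negative value = one frame per second)
frame_lists, frame_rates, video_paths = video_utils.video_folder_to_frames(
    'videos_in', 'frames_out', every_n_frames=-1)

# 2) Build the relative-path -> frame-rate map used to populate the video-level output
video_filename_to_frame_rate = {
    os.path.relpath(p, 'videos_in').replace('\\', '/'): fs
    for p, fs in zip(video_paths, frame_rates)
}

# 3) Run MegaDetector on 'frames_out' to produce 'frame_results.json' (not shown here)

# 4) Collapse frame-level results back to one entry per video
video_utils.frame_results_to_video_results(
    'frame_results.json', 'video_results.json',
    video_filename_to_frame_rate=video_filename_to_frame_rate)
```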
@@ -965,40 +973,40 @@ if False:
 pass

 #%% Constants
-
+
 input_folder = r'G:\temp\usu-long\data'
 frame_folder_base = r'g:\temp\usu-long-single-frames'
 assert os.path.isdir(input_folder)
-
-
+
+
 #%% Split videos into frames
-
+
 frame_filenames_by_video,fs_by_video,video_filenames = \
 video_folder_to_frames(input_folder,
 frame_folder_base,
 recursive=True,
 overwrite=True,
-n_threads=10,
+n_threads=10,
 every_n_frames=None,
-verbose=True,
+verbose=True,
 parallelization_uses_threads=True,
-quality=None,
-max_width=None,
+quality=None,
+max_width=None,
 frames_to_extract=150)
-
-
+
+
 #%% Constants for detection tests
-
+
 detected_frame_folder_base = r'e:\video_test\detected_frames'
 rendered_videos_folder_base = r'e:\video_test\rendered_videos'
 os.makedirs(detected_frame_folder_base,exist_ok=True)
 os.makedirs(rendered_videos_folder_base,exist_ok=True)
 results_file = r'results.json'
 confidence_threshold = 0.75
-
-
-#%% Load detector output
-
+
+
+#%% Load detector output
+
 with open(results_file,'r') as f:
 detection_results = json.load(f)
 detections = detection_results['images']
@@ -1008,11 +1016,11 @@ if False:


 #%% List image files, break into folders
-
+
 frame_files = path_utils.find_images(frame_folder_base,True)
 frame_files = [s.replace('\\','/') for s in frame_files]
 print('Enumerated {} total frames'.format(len(frame_files)))
-
+
 # Find unique folders
 folders = set()
 # fn = frame_files[0]
@@ -1020,57 +1028,57 @@ if False:
 folders.add(os.path.dirname(fn))
 folders = [s.replace('\\','/') for s in folders]
 print('Found {} folders for {} files'.format(len(folders),len(frame_files)))
-
-
+
+
 #%% Render detector frames
-
+
 # folder = list(folders)[0]
 for folder in folders:
-
+
 frame_files_this_folder = [fn for fn in frame_files if folder in fn]
 folder_relative = folder.replace((frame_folder_base + '/').replace('\\','/'),'')
 detection_results_this_folder = [d for d in detections if folder_relative in d['file']]
 print('Found {} detections in folder {}'.format(len(detection_results_this_folder),folder))
 assert len(frame_files_this_folder) == len(detection_results_this_folder)
-
+
 rendered_frame_output_folder = os.path.join(detected_frame_folder_base,folder_relative)
 os.makedirs(rendered_frame_output_folder,exist_ok=True)
-
+
 # d = detection_results_this_folder[0]
 for d in tqdm(detection_results_this_folder):
-
+
 input_file = os.path.join(frame_folder_base,d['file'])
 output_file = os.path.join(detected_frame_folder_base,d['file'])
 os.makedirs(os.path.dirname(output_file),exist_ok=True)
 vis_utils.draw_bounding_boxes_on_file(input_file,output_file,d['detections'],
 confidence_threshold)
-
+
 # ...for each file in this folder
-
+
 # ...for each folder


 #%% Render output videos
-
+
 # folder = list(folders)[0]
 for folder in tqdm(folders):
-
+
 folder_relative = folder.replace((frame_folder_base + '/').replace('\\','/'),'')
 rendered_detector_output_folder = os.path.join(detected_frame_folder_base,folder_relative)
 assert os.path.isdir(rendered_detector_output_folder)
-
+
 frame_files_relative = os.listdir(rendered_detector_output_folder)
 frame_files_absolute = [os.path.join(rendered_detector_output_folder,s) \
 for s in frame_files_relative]
-
+
 output_video_filename = os.path.join(rendered_videos_folder_base,folder_relative)
 os.makedirs(os.path.dirname(output_video_filename),exist_ok=True)
-
+
 original_video_filename = output_video_filename.replace(
 rendered_videos_folder_base,input_folder)
 assert os.path.isfile(original_video_filename)
-
-
-frames_to_video(frame_files_absolute,
+fs = get_video_fs(original_video_filename)
+
+frames_to_video(frame_files_absolute, fs, output_video_filename)

 # ...for each video