simba-uw-tf-dev 4.5.8__py3-none-any.whl → 4.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of simba-uw-tf-dev might be problematic. Click here for more details.
- simba/SimBA.py +2 -2
- simba/assets/.recent_projects.txt +1 -0
- simba/assets/icons/frames_2.png +0 -0
- simba/assets/lookups/tooptips.json +15 -1
- simba/data_processors/agg_clf_counter_mp.py +52 -53
- simba/data_processors/blob_location_computer.py +1 -1
- simba/data_processors/circling_detector.py +30 -13
- simba/data_processors/cuda/geometry.py +45 -27
- simba/data_processors/cuda/image.py +1648 -1598
- simba/data_processors/cuda/statistics.py +72 -26
- simba/data_processors/cuda/timeseries.py +1 -1
- simba/data_processors/cue_light_analyzer.py +5 -9
- simba/data_processors/egocentric_aligner.py +25 -7
- simba/data_processors/freezing_detector.py +55 -47
- simba/data_processors/kleinberg_calculator.py +61 -29
- simba/feature_extractors/feature_subsets.py +14 -7
- simba/feature_extractors/mitra_feature_extractor.py +2 -2
- simba/feature_extractors/straub_tail_analyzer.py +4 -6
- simba/labelling/standard_labeller.py +1 -1
- simba/mixins/config_reader.py +5 -2
- simba/mixins/geometry_mixin.py +22 -36
- simba/mixins/image_mixin.py +24 -28
- simba/mixins/plotting_mixin.py +28 -10
- simba/mixins/statistics_mixin.py +48 -11
- simba/mixins/timeseries_features_mixin.py +1 -1
- simba/mixins/train_model_mixin.py +67 -29
- simba/model/inference_batch.py +1 -1
- simba/model/yolo_seg_inference.py +3 -3
- simba/outlier_tools/skip_outlier_correction.py +1 -1
- simba/plotting/ROI_feature_visualizer_mp.py +3 -5
- simba/plotting/clf_validator_mp.py +4 -5
- simba/plotting/cue_light_visualizer.py +6 -7
- simba/plotting/directing_animals_visualizer_mp.py +2 -3
- simba/plotting/distance_plotter_mp.py +378 -378
- simba/plotting/frame_mergerer_ffmpeg.py +137 -196
- simba/plotting/gantt_creator.py +29 -10
- simba/plotting/gantt_creator_mp.py +96 -33
- simba/plotting/geometry_plotter.py +270 -272
- simba/plotting/heat_mapper_clf_mp.py +4 -6
- simba/plotting/heat_mapper_location_mp.py +2 -2
- simba/plotting/light_dark_box_plotter.py +2 -2
- simba/plotting/path_plotter_mp.py +26 -29
- simba/plotting/plot_clf_results_mp.py +455 -454
- simba/plotting/pose_plotter_mp.py +28 -29
- simba/plotting/probability_plot_creator_mp.py +288 -288
- simba/plotting/roi_plotter_mp.py +31 -31
- simba/plotting/single_run_model_validation_video_mp.py +427 -427
- simba/plotting/spontaneous_alternation_plotter.py +2 -3
- simba/plotting/yolo_pose_track_visualizer.py +32 -27
- simba/plotting/yolo_pose_visualizer.py +35 -36
- simba/plotting/yolo_seg_visualizer.py +2 -3
- simba/pose_importers/simba_blob_importer.py +3 -3
- simba/roi_tools/roi_aggregate_stats_mp.py +5 -4
- simba/roi_tools/roi_clf_calculator_mp.py +4 -4
- simba/sandbox/analyze_runtimes.py +30 -0
- simba/sandbox/cuda/egocentric_rotator.py +374 -0
- simba/sandbox/get_cpu_pool.py +5 -0
- simba/sandbox/proboscis_to_tip.py +28 -0
- simba/sandbox/test_directionality.py +47 -0
- simba/sandbox/test_nonstatic_directionality.py +27 -0
- simba/sandbox/test_pycharm_cuda.py +51 -0
- simba/sandbox/test_simba_install.py +41 -0
- simba/sandbox/test_static_directionality.py +26 -0
- simba/sandbox/test_static_directionality_2d.py +26 -0
- simba/sandbox/verify_env.py +42 -0
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo.py +3 -3
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo_bbox.py +2 -2
- simba/ui/pop_ups/clf_add_remove_print_pop_up.py +37 -30
- simba/ui/pop_ups/clf_plot_pop_up.py +2 -2
- simba/ui/pop_ups/egocentric_alignment_pop_up.py +20 -21
- simba/ui/pop_ups/fsttc_pop_up.py +27 -25
- simba/ui/pop_ups/gantt_pop_up.py +31 -6
- simba/ui/pop_ups/interpolate_pop_up.py +2 -4
- simba/ui/pop_ups/kleinberg_pop_up.py +39 -40
- simba/ui/pop_ups/multiple_videos_to_frames_popup.py +10 -11
- simba/ui/pop_ups/single_video_to_frames_popup.py +10 -10
- simba/ui/pop_ups/video_processing_pop_up.py +186 -174
- simba/ui/tkinter_functions.py +10 -1
- simba/utils/custom_feature_extractor.py +1 -1
- simba/utils/data.py +90 -14
- simba/utils/enums.py +1 -0
- simba/utils/errors.py +441 -440
- simba/utils/lookups.py +1203 -1203
- simba/utils/printing.py +124 -124
- simba/utils/read_write.py +3769 -3721
- simba/utils/yolo.py +10 -1
- simba/video_processors/blob_tracking_executor.py +2 -2
- simba/video_processors/clahe_ui.py +66 -23
- simba/video_processors/egocentric_video_rotator.py +46 -44
- simba/video_processors/multi_cropper.py +1 -1
- simba/video_processors/video_processing.py +5264 -5300
- simba/video_processors/videos_to_frames.py +43 -32
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/METADATA +4 -3
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/RECORD +98 -86
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/top_level.txt +0 -0
|
@@ -1,1598 +1,1648 @@
|
|
|
1
|
-
__author__ = "Simon Nilsson; sronilsson@gmail.com"
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
import math
|
|
5
|
-
import multiprocessing as mp
|
|
6
|
-
import os
|
|
7
|
-
import time
|
|
8
|
-
from typing import Optional, Tuple, Union
|
|
9
|
-
|
|
10
|
-
try:
|
|
11
|
-
from typing import Literal
|
|
12
|
-
except:
|
|
13
|
-
from typing_extensions import Literal
|
|
14
|
-
try:
|
|
15
|
-
import cupy as cp
|
|
16
|
-
from cupyx.scipy.ndimage import rotate
|
|
17
|
-
except:
|
|
18
|
-
import numpy as cp
|
|
19
|
-
from scipy.ndimage import rotate
|
|
20
|
-
|
|
21
|
-
import platform
|
|
22
|
-
import warnings
|
|
23
|
-
from copy import deepcopy
|
|
24
|
-
|
|
25
|
-
import cv2
|
|
26
|
-
import numpy as np
|
|
27
|
-
from numba import cuda
|
|
28
|
-
from numba.core.errors import NumbaPerformanceWarning
|
|
29
|
-
|
|
30
|
-
from simba.data_processors.cuda.utils import (_cuda_luminance_pixel_to_grey,
|
|
31
|
-
_cuda_mse, _is_cuda_available)
|
|
32
|
-
from simba.mixins.image_mixin import ImageMixin
|
|
33
|
-
from simba.mixins.plotting_mixin import PlottingMixin
|
|
34
|
-
from simba.utils.checks import (check_file_exist_and_readable, check_float,
|
|
35
|
-
check_if_dir_exists,
|
|
36
|
-
check_if_string_value_is_valid_video_timestamp,
|
|
37
|
-
check_if_valid_img, check_if_valid_rgb_tuple,
|
|
38
|
-
check_instance, check_int,
|
|
39
|
-
check_nvidea_gpu_available,
|
|
40
|
-
check_that_hhmmss_start_is_before_end,
|
|
41
|
-
check_valid_array, check_valid_boolean,
|
|
42
|
-
is_video_color)
|
|
43
|
-
from simba.utils.data import (create_color_palette,
|
|
44
|
-
find_frame_numbers_from_time_stamp)
|
|
45
|
-
from simba.utils.enums import OS, Formats
|
|
46
|
-
from simba.utils.errors import (FFMPEGCodecGPUError, FrameRangeError,
|
|
47
|
-
InvalidInputError, SimBAGPUError)
|
|
48
|
-
from simba.utils.lookups import get_current_time
|
|
49
|
-
from simba.utils.printing import SimbaTimer, stdout_success
|
|
50
|
-
from simba.utils.read_write import (
|
|
51
|
-
check_if_hhmmss_timestamp_is_valid_part_of_video,
|
|
52
|
-
concatenate_videos_in_folder, create_directory, get_fn_ext,
|
|
53
|
-
get_memory_usage_array, get_video_meta_data, read_df, read_img,
|
|
54
|
-
read_img_batch_from_video, read_img_batch_from_video_gpu)
|
|
55
|
-
from simba.video_processors.async_frame_reader import (AsyncVideoFrameReader,
|
|
56
|
-
get_async_frame_batch)
|
|
57
|
-
|
|
58
|
-
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
PHOTOMETRIC = 'photometric'
|
|
62
|
-
DIGITAL = 'digital'
|
|
63
|
-
THREADS_PER_BLOCK = 2024
|
|
64
|
-
if platform.system() != OS.WINDOWS.value: mp.set_start_method("spawn", force=True)
|
|
65
|
-
|
|
66
|
-
def create_average_frm_cupy(video_path: Union[str, os.PathLike],
|
|
67
|
-
start_frm: Optional[int] = None,
|
|
68
|
-
end_frm: Optional[int] = None,
|
|
69
|
-
start_time: Optional[str] = None,
|
|
70
|
-
end_time: Optional[str] = None,
|
|
71
|
-
save_path: Optional[Union[str, os.PathLike]] = None,
|
|
72
|
-
batch_size: Optional[int] = 3000,
|
|
73
|
-
verbose: Optional[bool] = False,
|
|
74
|
-
async_frame_read: bool = False) -> Union[None, np.ndarray]:
|
|
75
|
-
|
|
76
|
-
"""
|
|
77
|
-
Computes the average frame using GPU acceleration from a specified range of frames or time interval in a video file.
|
|
78
|
-
This average frame is typically used for background subtraction.
|
|
79
|
-
|
|
80
|
-
The function reads frames from the video, calculates their average, and optionally saves the result
|
|
81
|
-
to a specified file. If `save_path` is provided, the average frame is saved as an image file;
|
|
82
|
-
otherwise, the average frame is returned as a NumPy array.
|
|
83
|
-
|
|
84
|
-
.. seealso::
|
|
85
|
-
For CPU function see :func:`~simba.video_processors.video_processing.create_average_frm`.
|
|
86
|
-
For CUDA function see :func:`~simba.data_processors.cuda.image.create_average_frm_cuda`
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
.. csv-table::
|
|
90
|
-
:header: EXPECTED RUNTIMES
|
|
91
|
-
:file: ../../../docs/tables/create_average_frm_cupy.csv
|
|
92
|
-
:widths: 10, 45, 45
|
|
93
|
-
:align: center
|
|
94
|
-
:class: simba-table
|
|
95
|
-
:header-rows: 1
|
|
96
|
-
|
|
97
|
-
:param Union[str, os.PathLike] video_path: The path to the video file from which to extract frames.
|
|
98
|
-
:param Optional[int] start_frm: The starting frame number (inclusive). Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both. If both `start_frm` and `end_frm` are `None`, processes all frames in the video.
|
|
99
|
-
:param Optional[int] end_frm: The ending frame number (exclusive). Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
|
|
100
|
-
:param Optional[str] start_time: The start time in the format 'HH:MM:SS' from which to begin extracting frames. Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
|
|
101
|
-
:param Optional[str] end_time: The end time in the format 'HH:MM:SS' up to which frames should be extracted. Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
|
|
102
|
-
:param Optional[Union[str, os.PathLike]] save_path: The path where the average frame image will be saved. If `None`, the average frame is returned as a NumPy array.
|
|
103
|
-
:param Optional[int] batch_size: The number of frames to process in each batch. Default is 3000. Increase if your RAM allows it.
|
|
104
|
-
:param Optional[bool] verbose: If `True`, prints progress and informational messages during execution. Default: False.
|
|
105
|
-
:param bool async_frame_read: If `True`, uses asynchronous frame reading for improved performance. Default: False.
|
|
106
|
-
:return: Returns `None` if the result is saved to `save_path`. Otherwise, returns the average frame as a NumPy array.
|
|
107
|
-
|
|
108
|
-
:example:
|
|
109
|
-
>>> create_average_frm_cupy(video_path=r"C:/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_DOT_4_downsampled.mp4", verbose=True, start_frm=0, end_frm=9000)
|
|
110
|
-
>>> create_average_frm_cupy(video_path=r"C:/videos/my_video.mp4", start_time="00:00:00", end_time="00:01:00", async_frame_read=True, save_path=r"C:/output/avg_frame.png")
|
|
111
|
-
|
|
112
|
-
"""
|
|
113
|
-
|
|
114
|
-
def average_3d_stack(image_stack: np.ndarray) -> np.ndarray:
|
|
115
|
-
num_frames, height, width, _ = image_stack.shape
|
|
116
|
-
image_stack = cp.array(image_stack).astype(cp.float32)
|
|
117
|
-
img = cp.clip(cp.sum(image_stack, axis=0) / num_frames, 0, 255).astype(cp.uint8)
|
|
118
|
-
return img.get()
|
|
119
|
-
|
|
120
|
-
if not check_nvidea_gpu_available():
|
|
121
|
-
raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidea-smi returning None)", source=create_average_frm_cupy.__name__)
|
|
122
|
-
|
|
123
|
-
timer = SimbaTimer(start=True)
|
|
124
|
-
if ((start_frm is not None) or (end_frm is not None)) and ((start_time is not None) or (end_time is not None)):
|
|
125
|
-
raise InvalidInputError(msg=f'Pass start_frm and end_frm OR start_time and end_time', source=create_average_frm_cupy.__name__)
|
|
126
|
-
elif type(start_frm) != type(end_frm):
|
|
127
|
-
raise InvalidInputError(msg=f'Pass start frame and end frame', source=create_average_frm_cupy.__name__)
|
|
128
|
-
elif type(start_time) != type(end_time):
|
|
129
|
-
raise InvalidInputError(msg=f'Pass start time and end time', source=create_average_frm_cupy.__name__)
|
|
130
|
-
if save_path is not None:
|
|
131
|
-
check_if_dir_exists(in_dir=os.path.dirname(save_path), source=create_average_frm_cupy.__name__)
|
|
132
|
-
check_file_exist_and_readable(file_path=video_path)
|
|
133
|
-
video_meta_data = get_video_meta_data(video_path=video_path)
|
|
134
|
-
video_name = get_fn_ext(filepath=video_path)[1]
|
|
135
|
-
if verbose:
|
|
136
|
-
print(f'Getting average frame from {video_name}...')
|
|
137
|
-
if (start_frm is not None) and (end_frm is not None):
|
|
138
|
-
check_int(name='start_frm', value=start_frm, min_value=0, max_value=video_meta_data['frame_count'])
|
|
139
|
-
check_int(name='end_frm', value=end_frm, min_value=0, max_value=video_meta_data['frame_count'])
|
|
140
|
-
if start_frm > end_frm:
|
|
141
|
-
raise InvalidInputError(msg=f'Start frame ({start_frm}) has to be before end frame ({end_frm}).', source=create_average_frm_cupy.__name__)
|
|
142
|
-
frame_ids_lst = list(range(start_frm, end_frm))
|
|
143
|
-
elif (start_time is not None) and (end_time is not None):
|
|
144
|
-
check_if_string_value_is_valid_video_timestamp(value=start_time, name=create_average_frm_cupy.__name__)
|
|
145
|
-
check_if_string_value_is_valid_video_timestamp(value=end_time, name=create_average_frm_cupy.__name__)
|
|
146
|
-
check_that_hhmmss_start_is_before_end(start_time=start_time, end_time=end_time, name=create_average_frm_cupy.__name__)
|
|
147
|
-
check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start_time, video_path=video_path)
|
|
148
|
-
frame_ids_lst = find_frame_numbers_from_time_stamp(start_time=start_time, end_time=end_time, fps=video_meta_data['fps'])
|
|
149
|
-
else:
|
|
150
|
-
frame_ids_lst = list(range(0, video_meta_data['frame_count']))
|
|
151
|
-
frame_ids = [frame_ids_lst[i:i+batch_size] for i in range(0,len(frame_ids_lst),batch_size)]
|
|
152
|
-
avg_imgs = []
|
|
153
|
-
if async_frame_read:
|
|
154
|
-
async_frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, max_que_size=5, start_idx=int(min(frame_ids_lst)), end_idx=int(max(frame_ids_lst))+1, verbose=True, gpu=True)
|
|
155
|
-
async_frm_reader.start()
|
|
156
|
-
else:
|
|
157
|
-
async_frm_reader = None
|
|
158
|
-
for batch_cnt in range(len(frame_ids)):
|
|
159
|
-
start_idx, end_idx = frame_ids[batch_cnt][0], frame_ids[batch_cnt][-1]
|
|
160
|
-
if start_idx == end_idx:
|
|
161
|
-
continue
|
|
162
|
-
if not async_frm_reader:
|
|
163
|
-
imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=start_idx, end_frm=end_idx, verbose=verbose)
|
|
164
|
-
imgs = np.stack(list(imgs.values()), axis=0)
|
|
165
|
-
else:
|
|
166
|
-
imgs = get_async_frame_batch(batch_reader=async_frm_reader, timeout=15)[2]
|
|
167
|
-
avg_imgs.append(average_3d_stack(image_stack=imgs))
|
|
168
|
-
avg_img = average_3d_stack(image_stack=np.stack(avg_imgs, axis=0))
|
|
169
|
-
timer.stop_timer()
|
|
170
|
-
if async_frm_reader is not None: async_frm_reader.kill()
|
|
171
|
-
if save_path is not None:
|
|
172
|
-
cv2.imwrite(save_path, avg_img)
|
|
173
|
-
if verbose:
|
|
174
|
-
stdout_success(msg=f'Saved average frame at {save_path}', source=create_average_frm_cupy.__name__, elapsed_time=timer.elapsed_time_str)
|
|
175
|
-
else:
|
|
176
|
-
if verbose: stdout_success(msg=f'Average frame compute complete', source=create_average_frm_cupy.__name__, elapsed_time=timer.elapsed_time_str)
|
|
177
|
-
return avg_img
|
|
178
|
-
|
|
179
|
-
def average_3d_stack_cupy(image_stack: np.ndarray) -> np.ndarray:
|
|
180
|
-
num_frames, height, width, _ = image_stack.shape
|
|
181
|
-
image_stack = cp.array(image_stack).astype(cp.float32)
|
|
182
|
-
img = cp.clip(cp.sum(image_stack, axis=0) / num_frames, 0, 255).astype(cp.uint8)
|
|
183
|
-
return img.get()
|
|
184
|
-
|
|
185
|
-
@cuda.jit()
|
|
186
|
-
def _average_3d_stack_cuda_kernel(data, results):
|
|
187
|
-
x, y, i = cuda.grid(3)
|
|
188
|
-
if i < 0 or x < 0 or y < 0:
|
|
189
|
-
return
|
|
190
|
-
if i > data.shape[0] - 1 or y > data.shape[1] - 1 or x > data.shape[2] - 1:
|
|
191
|
-
return
|
|
192
|
-
else:
|
|
193
|
-
sum_value = 0.0
|
|
194
|
-
for n in range(data.shape[0]):
|
|
195
|
-
sum_value += data[n, y, x, i]
|
|
196
|
-
results[y, x, i] = sum_value / data.shape[0]
|
|
197
|
-
|
|
198
|
-
def _average_3d_stack_cuda(image_stack: np.ndarray) -> np.ndarray:
|
|
199
|
-
check_instance(source=_average_3d_stack_cuda.__name__, instance=image_stack, accepted_types=(np.ndarray,))
|
|
200
|
-
check_if_valid_img(data=image_stack[0], source=_average_3d_stack_cuda.__name__)
|
|
201
|
-
if image_stack.ndim != 4:
|
|
202
|
-
return image_stack
|
|
203
|
-
x = np.ascontiguousarray(image_stack)
|
|
204
|
-
x_dev = cuda.to_device(x)
|
|
205
|
-
results = cuda.device_array((x.shape[1], x.shape[2], x.shape[3]), dtype=np.float32)
|
|
206
|
-
grid_x = (x.shape[1] + 16 - 1) // 16
|
|
207
|
-
grid_y = (x.shape[2] + 16 - 1) // 16
|
|
208
|
-
grid_z = 3
|
|
209
|
-
threads_per_block = (16, 16, 1)
|
|
210
|
-
blocks_per_grid = (grid_y, grid_x, grid_z)
|
|
211
|
-
_average_3d_stack_cuda_kernel[blocks_per_grid, threads_per_block](x_dev, results)
|
|
212
|
-
results = results.copy_to_host()
|
|
213
|
-
return results
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
For
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
:param
|
|
240
|
-
:param Optional[int]
|
|
241
|
-
:param Optional[
|
|
242
|
-
:param Optional[str]
|
|
243
|
-
:param Optional[str]
|
|
244
|
-
:param Optional[
|
|
245
|
-
:param Optional[
|
|
246
|
-
:
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
check_int(name='
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
check_if_string_value_is_valid_video_timestamp(value=
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
async_frm_reader
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
start_idx
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
..
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
""
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
"""
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
:
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
return
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
@cuda.jit(device=True)
|
|
766
|
-
def
|
|
767
|
-
"""
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
:param int x: Pixel x location.
|
|
771
|
-
:param int y: Pixel y location.
|
|
772
|
-
:param
|
|
773
|
-
:
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
n,
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
"""
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
:
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
if not is_color:
|
|
980
|
-
|
|
981
|
-
else:
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
:
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
:
|
|
1113
|
-
:
|
|
1114
|
-
|
|
1115
|
-
:
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1119
|
-
>>>
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
check_int(name=f'{rotate_img_stack_cupy.__name__} rotation', value=rotation_degrees, min_value=1, max_value=359)
|
|
1125
|
-
check_valid_boolean(source=f'{rotate_img_stack_cupy.__name__} verbose',
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
timer.
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
:
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
|
|
1398
|
-
|
|
1399
|
-
|
|
1400
|
-
|
|
1401
|
-
|
|
1402
|
-
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
|
|
1419
|
-
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
|
|
1506
|
-
for
|
|
1507
|
-
|
|
1508
|
-
|
|
1509
|
-
|
|
1510
|
-
|
|
1511
|
-
|
|
1512
|
-
|
|
1513
|
-
|
|
1514
|
-
|
|
1515
|
-
|
|
1516
|
-
|
|
1517
|
-
|
|
1518
|
-
|
|
1519
|
-
|
|
1520
|
-
|
|
1521
|
-
|
|
1522
|
-
|
|
1523
|
-
|
|
1524
|
-
|
|
1525
|
-
|
|
1526
|
-
|
|
1527
|
-
|
|
1528
|
-
#
|
|
1529
|
-
#
|
|
1530
|
-
#
|
|
1531
|
-
#
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
|
|
1535
|
-
#
|
|
1536
|
-
#
|
|
1537
|
-
#
|
|
1538
|
-
#
|
|
1539
|
-
#
|
|
1540
|
-
#
|
|
1541
|
-
#
|
|
1542
|
-
#
|
|
1543
|
-
#
|
|
1544
|
-
#
|
|
1545
|
-
#
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
#
|
|
1554
|
-
#
|
|
1555
|
-
#
|
|
1556
|
-
#
|
|
1557
|
-
#
|
|
1558
|
-
#
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
#
|
|
1563
|
-
|
|
1564
|
-
#
|
|
1565
|
-
#
|
|
1566
|
-
#
|
|
1567
|
-
#
|
|
1568
|
-
#
|
|
1569
|
-
|
|
1570
|
-
#
|
|
1571
|
-
|
|
1572
|
-
#
|
|
1573
|
-
|
|
1574
|
-
#
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
#
|
|
1579
|
-
#
|
|
1580
|
-
# SAVE_PATH =
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
#
|
|
1585
|
-
#
|
|
1586
|
-
#
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
#
|
|
1591
|
-
# #
|
|
1592
|
-
#
|
|
1593
|
-
#
|
|
1594
|
-
#
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1
|
+
__author__ = "Simon Nilsson; sronilsson@gmail.com"
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
import math
|
|
5
|
+
import multiprocessing as mp
|
|
6
|
+
import os
|
|
7
|
+
import time
|
|
8
|
+
from typing import Optional, Tuple, Union
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
from typing import Literal
|
|
12
|
+
except:
|
|
13
|
+
from typing_extensions import Literal
|
|
14
|
+
try:
|
|
15
|
+
import cupy as cp
|
|
16
|
+
from cupyx.scipy.ndimage import rotate
|
|
17
|
+
except:
|
|
18
|
+
import numpy as cp
|
|
19
|
+
from scipy.ndimage import rotate
|
|
20
|
+
|
|
21
|
+
import platform
|
|
22
|
+
import warnings
|
|
23
|
+
from copy import deepcopy
|
|
24
|
+
|
|
25
|
+
import cv2
|
|
26
|
+
import numpy as np
|
|
27
|
+
from numba import cuda
|
|
28
|
+
from numba.core.errors import NumbaPerformanceWarning
|
|
29
|
+
|
|
30
|
+
from simba.data_processors.cuda.utils import (_cuda_luminance_pixel_to_grey,
|
|
31
|
+
_cuda_mse, _is_cuda_available)
|
|
32
|
+
from simba.mixins.image_mixin import ImageMixin
|
|
33
|
+
from simba.mixins.plotting_mixin import PlottingMixin
|
|
34
|
+
from simba.utils.checks import (check_file_exist_and_readable, check_float,
|
|
35
|
+
check_if_dir_exists,
|
|
36
|
+
check_if_string_value_is_valid_video_timestamp,
|
|
37
|
+
check_if_valid_img, check_if_valid_rgb_tuple,
|
|
38
|
+
check_instance, check_int,
|
|
39
|
+
check_nvidea_gpu_available,
|
|
40
|
+
check_that_hhmmss_start_is_before_end,
|
|
41
|
+
check_valid_array, check_valid_boolean,
|
|
42
|
+
is_video_color)
|
|
43
|
+
from simba.utils.data import (create_color_palette,
|
|
44
|
+
find_frame_numbers_from_time_stamp)
|
|
45
|
+
from simba.utils.enums import OS, Formats
|
|
46
|
+
from simba.utils.errors import (FFMPEGCodecGPUError, FrameRangeError,
|
|
47
|
+
InvalidInputError, SimBAGPUError)
|
|
48
|
+
from simba.utils.lookups import get_current_time
|
|
49
|
+
from simba.utils.printing import SimbaTimer, stdout_success
|
|
50
|
+
from simba.utils.read_write import (
|
|
51
|
+
check_if_hhmmss_timestamp_is_valid_part_of_video,
|
|
52
|
+
concatenate_videos_in_folder, create_directory, get_fn_ext,
|
|
53
|
+
get_memory_usage_array, get_video_meta_data, read_df, read_img,
|
|
54
|
+
read_img_batch_from_video, read_img_batch_from_video_gpu)
|
|
55
|
+
from simba.video_processors.async_frame_reader import (AsyncVideoFrameReader,
|
|
56
|
+
get_async_frame_batch)
|
|
57
|
+
|
|
58
|
+
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
# Identifiers for the two grey-scale conversion formulas implemented by the
# _photometric / _digital CUDA kernels below.
PHOTOMETRIC = 'photometric'
DIGITAL = 'digital'
# NOTE(review): CUDA devices cap threads-per-block at 1024 on current hardware;
# 2024 looks like a typo for 1024 — verify before using this constant in a
# kernel launch configuration.
THREADS_PER_BLOCK = 2024
# Force the 'spawn' start method on non-Windows platforms (Windows already
# defaults to spawn) — presumably to avoid fork-related issues with CUDA/GPU
# state in child processes; confirm intent with maintainers.
if platform.system() != OS.WINDOWS.value: mp.set_start_method("spawn", force=True)
|
|
65
|
+
|
|
66
|
+
def create_average_frm_cupy(video_path: Union[str, os.PathLike],
                            start_frm: Optional[int] = None,
                            end_frm: Optional[int] = None,
                            start_time: Optional[str] = None,
                            end_time: Optional[str] = None,
                            save_path: Optional[Union[str, os.PathLike]] = None,
                            batch_size: Optional[int] = 3000,
                            verbose: Optional[bool] = False,
                            async_frame_read: bool = False) -> Union[None, np.ndarray]:

    """
    Computes the average frame using GPU acceleration (CuPy) from a specified range of frames or time interval in a video file.
    This average frame is typically used for background subtraction.

    The function reads frames from the video in batches, averages each batch on the GPU, then averages
    the per-batch averages. If `save_path` is provided, the average frame is saved as an image file;
    otherwise, the average frame is returned as a NumPy array.

    .. seealso::
       For CPU function see :func:`~simba.video_processors.video_processing.create_average_frm`.
       For CUDA function see :func:`~simba.data_processors.cuda.image.create_average_frm_cuda`

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/create_average_frm_cupy.csv
       :widths: 10, 45, 45
       :align: center
       :class: simba-table
       :header-rows: 1

    :param Union[str, os.PathLike] video_path: The path to the video file from which to extract frames.
    :param Optional[int] start_frm: The starting frame number (inclusive). Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both. If all four are `None`, processes all frames in the video.
    :param Optional[int] end_frm: The ending frame number (exclusive). Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
    :param Optional[str] start_time: The start time in the format 'HH:MM:SS' from which to begin extracting frames.
    :param Optional[str] end_time: The end time in the format 'HH:MM:SS' up to which frames should be extracted.
    :param Optional[Union[str, os.PathLike]] save_path: The path where the average frame image will be saved. If `None`, the average frame is returned as a NumPy array.
    :param Optional[int] batch_size: The number of frames to process in each batch. Default is 3000. Increase if your RAM allows it.
    :param Optional[bool] verbose: If `True`, prints progress and informational messages during execution. Default: False.
    :param bool async_frame_read: If `True`, uses asynchronous (background) frame reading for improved performance. Default: False.
    :return: Returns `None` if the result is saved to `save_path`. Otherwise, returns the average frame as a NumPy array.

    :example:
    >>> create_average_frm_cupy(video_path=r"C:/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_DOT_4_downsampled.mp4", verbose=True, start_frm=0, end_frm=9000)
    >>> create_average_frm_cupy(video_path=r"C:/videos/my_video.mp4", start_time="00:00:00", end_time="00:01:00", async_frame_read=True, save_path=r"C:/output/avg_frame.png")
    """

    def average_3d_stack(image_stack: np.ndarray) -> np.ndarray:
        # Mean of an (N, H, W, C) stack along the frame axis, accumulated in
        # float32 on the GPU, clipped to valid pixel range, returned as host uint8.
        num_frames, height, width, _ = image_stack.shape
        image_stack = cp.array(image_stack).astype(cp.float32)
        img = cp.clip(cp.sum(image_stack, axis=0) / num_frames, 0, 255).astype(cp.uint8)
        return img.get()

    if not check_nvidea_gpu_available():
        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidea-smi returning None)", source=create_average_frm_cupy.__name__)

    timer = SimbaTimer(start=True)
    # Frame range and time range are mutually exclusive; each pair must be given together.
    if ((start_frm is not None) or (end_frm is not None)) and ((start_time is not None) or (end_time is not None)):
        raise InvalidInputError(msg=f'Pass start_frm and end_frm OR start_time and end_time', source=create_average_frm_cupy.__name__)
    elif type(start_frm) != type(end_frm):
        raise InvalidInputError(msg=f'Pass start frame and end frame', source=create_average_frm_cupy.__name__)
    elif type(start_time) != type(end_time):
        raise InvalidInputError(msg=f'Pass start time and end time', source=create_average_frm_cupy.__name__)
    if save_path is not None:
        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=create_average_frm_cupy.__name__)
    check_file_exist_and_readable(file_path=video_path)
    video_meta_data = get_video_meta_data(video_path=video_path)
    video_name = get_fn_ext(filepath=video_path)[1]
    if verbose:
        print(f'Getting average frame from {video_name}...')
    if (start_frm is not None) and (end_frm is not None):
        check_int(name='start_frm', value=start_frm, min_value=0, max_value=video_meta_data['frame_count'])
        check_int(name='end_frm', value=end_frm, min_value=0, max_value=video_meta_data['frame_count'])
        if start_frm > end_frm:
            raise InvalidInputError(msg=f'Start frame ({start_frm}) has to be before end frame ({end_frm}).', source=create_average_frm_cupy.__name__)
        frame_ids_lst = list(range(start_frm, end_frm))
    elif (start_time is not None) and (end_time is not None):
        check_if_string_value_is_valid_video_timestamp(value=start_time, name=create_average_frm_cupy.__name__)
        check_if_string_value_is_valid_video_timestamp(value=end_time, name=create_average_frm_cupy.__name__)
        check_that_hhmmss_start_is_before_end(start_time=start_time, end_time=end_time, name=create_average_frm_cupy.__name__)
        check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start_time, video_path=video_path)
        frame_ids_lst = find_frame_numbers_from_time_stamp(start_time=start_time, end_time=end_time, fps=video_meta_data['fps'])
    else:
        frame_ids_lst = list(range(0, video_meta_data['frame_count']))
    # Split the requested frame ids into RAM-friendly batches.
    frame_ids = [frame_ids_lst[i:i+batch_size] for i in range(0, len(frame_ids_lst), batch_size)]
    avg_imgs = []
    if async_frame_read:
        # BUGFIX: the reader was previously created with hard-coded verbose=True,
        # ignoring the caller's ``verbose`` setting.
        async_frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, max_que_size=5, start_idx=int(min(frame_ids_lst)), end_idx=int(max(frame_ids_lst))+1, verbose=verbose, gpu=True)
        async_frm_reader.start()
    else:
        async_frm_reader = None
    try:
        for batch_cnt in range(len(frame_ids)):
            start_idx, end_idx = frame_ids[batch_cnt][0], frame_ids[batch_cnt][-1]
            if start_idx == end_idx:
                # Single-frame batch cannot be read as a range; skip it.
                # NOTE(review): in async mode this leaves one queued reader batch
                # unconsumed — confirm alignment with AsyncVideoFrameReader.
                continue
            if not async_frm_reader:
                imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=start_idx, end_frm=end_idx, verbose=verbose)
                imgs = np.stack(list(imgs.values()), axis=0)
            else:
                imgs = get_async_frame_batch(batch_reader=async_frm_reader, timeout=15)[2]
            avg_imgs.append(average_3d_stack(image_stack=imgs))
        # NOTE(review): batch averages are weighted equally even when the final
        # batch is smaller, slightly over-weighting its frames — acceptable for
        # background estimation; confirm if exact averaging is required.
        avg_img = average_3d_stack(image_stack=np.stack(avg_imgs, axis=0))
    finally:
        # BUGFIX (robustness): stop the background reader thread even if a batch
        # read or GPU computation raises, so the process can exit cleanly.
        if async_frm_reader is not None:
            async_frm_reader.kill()
    timer.stop_timer()
    if save_path is not None:
        cv2.imwrite(save_path, avg_img)
        if verbose:
            stdout_success(msg=f'Saved average frame at {save_path}', source=create_average_frm_cupy.__name__, elapsed_time=timer.elapsed_time_str)
    else:
        if verbose: stdout_success(msg=f'Average frame compute complete', source=create_average_frm_cupy.__name__, elapsed_time=timer.elapsed_time_str)
        return avg_img
|
|
178
|
+
|
|
179
|
+
def average_3d_stack_cupy(image_stack: np.ndarray) -> np.ndarray:
    """
    Average a 4D image stack (N, H, W, C) across the frame axis.

    Sums in float32 (on the GPU when CuPy is available), clips to the valid
    pixel range [0, 255], and returns a uint8 (H, W, C) image on the host.

    :param np.ndarray image_stack: 4D array of images with shape (N, H, W, C).
    :return: The (H, W, C) uint8 average image as a host NumPy array.
    :rtype: np.ndarray
    """
    num_frames, height, width, _ = image_stack.shape
    image_stack = cp.asarray(image_stack).astype(cp.float32)
    img = cp.clip(cp.sum(image_stack, axis=0) / num_frames, 0, 255).astype(cp.uint8)
    # BUGFIX: under the module's NumPy fallback (``import numpy as cp`` when CuPy
    # is unavailable) arrays have no ``.get()``; return the host array directly
    # instead of raising AttributeError.
    return img.get() if hasattr(img, 'get') else img
|
|
184
|
+
|
|
185
|
+
@cuda.jit()
def _average_3d_stack_cuda_kernel(data, results):
    # CUDA kernel: for every output pixel/channel, average the value across all
    # frames of the 4D stack.
    #
    # data:    (N, H, W, C) device array of frames.
    # results: (H, W, C) device array receiving the per-pixel mean.
    #
    # Launched by _average_3d_stack_cuda with blocks (ceil(W/16), ceil(H/16), 3)
    # and threads (16, 16, 1), so ``i`` indexes the colour channel (0..2).
    x, y, i = cuda.grid(3)
    if i < 0 or x < 0 or y < 0:
        return
    # NOTE(review): the bound for the channel index ``i`` is taken from
    # data.shape[0] (frame count) rather than data.shape[3] (channel count).
    # Harmless when N >= 3 since the launch grid caps i at 2 — confirm.
    if i > data.shape[0] - 1 or y > data.shape[1] - 1 or x > data.shape[2] - 1:
        return
    else:
        # Accumulate this pixel/channel over all frames, then divide once.
        sum_value = 0.0
        for n in range(data.shape[0]):
            sum_value += data[n, y, x, i]
        results[y, x, i] = sum_value / data.shape[0]
|
|
197
|
+
|
|
198
|
+
def _average_3d_stack_cuda(image_stack: np.ndarray) -> np.ndarray:
    """
    Average a 4D image stack (N, H, W, C) across the frame axis on the GPU.

    Launches ``_average_3d_stack_cuda_kernel`` over a 16x16 spatial tile grid
    with one grid layer per colour channel. Non-4D input is returned unchanged.

    :param np.ndarray image_stack: 4D array of images with shape (N, H, W, C).
    :return: (H, W, C) float32 host array holding the per-pixel mean, or the
        input unchanged when it is not 4-dimensional.
    :rtype: np.ndarray
    """
    check_instance(source=_average_3d_stack_cuda.__name__, instance=image_stack, accepted_types=(np.ndarray,))
    check_if_valid_img(data=image_stack[0], source=_average_3d_stack_cuda.__name__)
    if image_stack.ndim != 4:
        return image_stack
    stack = np.ascontiguousarray(image_stack)
    _, height, width, _ = stack.shape
    stack_dev = cuda.to_device(stack)
    out_dev = cuda.device_array((height, width, stack.shape[3]), dtype=np.float32)
    tile = 16
    # Grid: one 16x16 tile of threads per spatial patch, one z-layer per channel.
    launch_blocks = ((width + tile - 1) // tile, (height + tile - 1) // tile, 3)
    _average_3d_stack_cuda_kernel[launch_blocks, (tile, tile, 1)](stack_dev, out_dev)
    return out_dev.copy_to_host()
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def create_average_frm_cuda(video_path: Union[str, os.PathLike],
                            start_frm: Optional[int] = None,
                            end_frm: Optional[int] = None,
                            start_time: Optional[str] = None,
                            end_time: Optional[str] = None,
                            save_path: Optional[Union[str, os.PathLike]] = None,
                            batch_size: Optional[int] = 6000,
                            verbose: Optional[bool] = False,
                            async_frame_read: bool = False) -> Union[None, np.ndarray]:
    """
    Computes the average frame using GPU acceleration (numba CUDA kernel) from a specified range of
    frames or time interval in a video file. This average frame is typically used for background subtraction.

    The function reads frames from the video in batches, averages each batch on the GPU, then averages
    the per-batch averages. If `save_path` is provided, the average frame is saved as an image file;
    otherwise, the average frame is returned as a NumPy array.

    .. seealso::
       For CuPy function see :func:`~simba.data_processors.cuda.image.create_average_frm_cupy`.
       For CPU function see :func:`~simba.video_processors.video_processing.create_average_frm`.

    :param Union[str, os.PathLike] video_path: The path to the video file from which to extract frames.
    :param Optional[int] start_frm: The starting frame number (inclusive). Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
    :param Optional[int] end_frm: The ending frame number (exclusive).
    :param Optional[str] start_time: The start time in the format 'HH:MM:SS' from which to begin extracting frames.
    :param Optional[str] end_time: The end time in the format 'HH:MM:SS' up to which frames should be extracted.
    :param Optional[Union[str, os.PathLike]] save_path: The path where the average frame image will be saved. If `None`, the average frame is returned as a NumPy array.
    :param Optional[int] batch_size: The number of frames to process in each batch. Default is 6000. Increase if your RAM allows it.
    :param Optional[bool] verbose: If `True`, prints progress and informational messages during execution.
    :param bool async_frame_read: If `True`, uses asynchronous (background) frame reading for improved performance. Default: False.
    :return: Returns `None` if the result is saved to `save_path`. Otherwise, returns the average frame as a NumPy array.

    :example:
    >>> create_average_frm_cuda(video_path=r"C:/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_DOT_4_downsampled.mp4", verbose=True, start_frm=0, end_frm=9000)
    """

    if not check_nvidea_gpu_available():
        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidea-smi returning None)", source=create_average_frm_cuda.__name__)

    # Frame range and time range are mutually exclusive; each pair must be given together.
    if ((start_frm is not None) or (end_frm is not None)) and ((start_time is not None) or (end_time is not None)):
        raise InvalidInputError(msg=f'Pass start_frm and end_frm OR start_time and end_time', source=create_average_frm_cuda.__name__)
    elif type(start_frm) != type(end_frm):
        raise InvalidInputError(msg=f'Pass start frame and end frame', source=create_average_frm_cuda.__name__)
    elif type(start_time) != type(end_time):
        raise InvalidInputError(msg=f'Pass start time and end time', source=create_average_frm_cuda.__name__)
    if save_path is not None:
        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=create_average_frm_cuda.__name__)
    check_file_exist_and_readable(file_path=video_path)
    video_meta_data = get_video_meta_data(video_path=video_path)
    video_name = get_fn_ext(filepath=video_path)[1]
    if verbose:
        print(f'Getting average frame from {video_name}...')
    if (start_frm is not None) and (end_frm is not None):
        check_int(name='start_frm', value=start_frm, min_value=0, max_value=video_meta_data['frame_count'])
        check_int(name='end_frm', value=end_frm, min_value=0, max_value=video_meta_data['frame_count'])
        if start_frm > end_frm:
            raise InvalidInputError(msg=f'Start frame ({start_frm}) has to be before end frame ({end_frm}).', source=create_average_frm_cuda.__name__)
        frame_ids_lst = list(range(start_frm, end_frm))
    elif (start_time is not None) and (end_time is not None):
        check_if_string_value_is_valid_video_timestamp(value=start_time, name=create_average_frm_cuda.__name__)
        check_if_string_value_is_valid_video_timestamp(value=end_time, name=create_average_frm_cuda.__name__)
        check_that_hhmmss_start_is_before_end(start_time=start_time, end_time=end_time, name=create_average_frm_cuda.__name__)
        check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start_time, video_path=video_path)
        frame_ids_lst = find_frame_numbers_from_time_stamp(start_time=start_time, end_time=end_time, fps=video_meta_data['fps'])
    else:
        frame_ids_lst = list(range(0, video_meta_data['frame_count']))
    # Split the requested frame ids into RAM-friendly batches.
    frame_ids = [frame_ids_lst[i:i + batch_size] for i in range(0, len(frame_ids_lst), batch_size)]
    avg_imgs = []
    if async_frame_read:
        # BUGFIX: the reader was previously created with hard-coded verbose=True,
        # ignoring the caller's ``verbose`` setting.
        async_frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, max_que_size=5, start_idx=int(min(frame_ids_lst)), end_idx=int(max(frame_ids_lst))+1, verbose=verbose, gpu=True)
        async_frm_reader.start()
    else:
        async_frm_reader = None
    try:
        for batch_cnt in range(len(frame_ids)):
            start_idx, end_idx = frame_ids[batch_cnt][0], frame_ids[batch_cnt][-1]
            if start_idx == end_idx:
                # Single-frame batch cannot be read as a range; skip it.
                continue
            if not async_frm_reader:
                imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=start_idx, end_frm=end_idx, verbose=verbose)
                avg_imgs.append(_average_3d_stack_cuda(image_stack=np.stack(list(imgs.values()), axis=0)))
            else:
                imgs = get_async_frame_batch(batch_reader=async_frm_reader, timeout=15)[2]
                avg_imgs.append(_average_3d_stack_cuda(image_stack=imgs))
        avg_img = average_3d_stack_cupy(image_stack=np.stack(avg_imgs, axis=0))
    finally:
        # BUGFIX (resource leak): the background reader thread was never stopped
        # in this function (unlike create_average_frm_cupy); kill it even on error.
        if async_frm_reader is not None:
            async_frm_reader.kill()
    if save_path is not None:
        cv2.imwrite(save_path, avg_img)
        if verbose:
            stdout_success(msg=f'Saved average frame at {save_path}', source=create_average_frm_cuda.__name__)
    else:
        return avg_img
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
@cuda.jit()
def _photometric(data, results):
    # CUDA kernel: grey-scale each pixel of an image stack using the
    # 'photometric' luminance weights (0.2126, 0.7152, 0.0722).
    #
    # data:    4D device array of colour images; last axis holds the three
    #          colour channels (assumed R, G, B order here — frames decoded
    #          with OpenCV are typically BGR; TODO confirm with callers).
    # results: 3D device array receiving the grey values; its shape also
    #          defines the per-thread bounds below.
    y, x, i = cuda.grid(3)
    if i < 0 or x < 0 or y < 0:
        return
    if i > results.shape[0] - 1 or x > results.shape[1] - 1 or y > results.shape[2] - 1:
        return
    else:
        r, g, b = data[i][x][y][0], data[i][x][y][1], data[i][x][y][2]
        results[i][x][y] = (0.2126 * r) + (0.7152 * g) + (0.0722 * b)
|
|
320
|
+
|
|
321
|
+
@cuda.jit()
def _digital(data, results):
    # CUDA kernel: grey-scale each pixel of an image stack using the
    # 'digital' luminance weights (0.299, 0.587, 0.114).
    #
    # data:    4D device array of colour images; last axis holds the three
    #          colour channels (assumed R, G, B order here — frames decoded
    #          with OpenCV are typically BGR; TODO confirm with callers).
    # results: 3D device array receiving the grey values; its shape also
    #          defines the per-thread bounds below.
    y, x, i = cuda.grid(3)
    if i < 0 or x < 0 or y < 0:
        return
    if i > results.shape[0] - 1 or x > results.shape[1] - 1 or y > results.shape[2] - 1:
        return
    else:
        r, g, b = data[i][x][y][0], data[i][x][y][1], data[i][x][y][2]
        results[i][x][y] = (0.299 * r) + (0.587 * g) + (0.114 * b)
|
|
331
|
+
|
|
332
|
+
def img_stack_brightness(x: np.ndarray,
                         method: Optional[Literal['photometric', 'digital']] = 'digital',
                         ignore_black: bool = True,
                         verbose: bool = False,
                         batch_size: int = 2500) -> np.ndarray:
    """
    Calculate the average brightness of a stack of images using a specified method.

    Useful for analyzing light cues or brightness changes over time. For example, compute brightness
    in images containing a light cue ROI, then perform clustering (e.g., k-means) on brightness
    values to identify frames when the light cue is on vs off.

    - **Photometric Method**: brightness = 0.2126*R + 0.7152*G + 0.0722*B
    - **Digital Method**: brightness = 0.299*R + 0.587*G + 0.114*B

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/img_stack_brightness_gpu.csv
       :widths: 10, 45, 45
       :align: center
       :class: simba-table
       :header-rows: 1

    .. seealso::
       For CPU function see :func:`~simba.mixins.image_mixin.ImageMixin.brightness_intensity`.

    :param np.ndarray x: A 4D array of images with dimensions (N, H, W, C), or a 3D array of already-grey images (N, H, W).
    :param Optional[Literal['photometric', 'digital']] method: The method to use for calculating brightness. Default is 'digital'.
    :param Optional[bool] ignore_black: If True, black pixels (brightness value 0) are ignored when averaging. Default is True.
    :param bool verbose: If True, prints elapsed time when done. Default is False.
    :param int batch_size: Number of images transferred to the GPU per kernel launch. Default 2500.
    :return np.ndarray: A 1D array of average brightness values, one per image in the stack.

    :example:
    >>> imgs = read_img_batch_from_video_gpu(video_path=r"/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_DOT_4_downsampled.mp4", start_frm=0, end_frm=5000)
    >>> imgs = np.stack(list(imgs.values()), axis=0)
    >>> x = img_stack_brightness(x=imgs)
    """

    check_instance(source=img_stack_brightness.__name__, instance=x, accepted_types=(np.ndarray,))
    check_if_valid_img(data=x[0], source=img_stack_brightness.__name__)
    check_int(name=f'{img_stack_brightness.__name__} batch_size', value=batch_size, allow_zero=False, allow_negative=False, raise_error=True)
    x, timer = np.ascontiguousarray(x).astype(np.uint8), SimbaTimer(start=True)
    results = []
    if x.ndim == 4:
        # One reusable device buffer sized for a full batch; partial final
        # batches are handled by slicing the host copy below.
        batch_results_dev = cuda.device_array((batch_size, x.shape[1], x.shape[2]), dtype=np.uint8)
        for l in range(0, x.shape[0], batch_size):
            batch_x = x[l:l + batch_size]
            grid_x = (batch_x.shape[1] + 16 - 1) // 16
            grid_y = (batch_x.shape[2] + 16 - 1) // 16
            threads_per_block = (16, 16, 1)
            blocks_per_grid = (grid_y, grid_x, batch_x.shape[0])
            x_dev = cuda.to_device(batch_x)
            if method == PHOTOMETRIC:
                _photometric[blocks_per_grid, threads_per_block](x_dev, batch_results_dev)
            else:
                _digital[blocks_per_grid, threads_per_block](x_dev, batch_results_dev)
            # Slice off stale rows left from a previous (larger) batch.
            batch_grey = cp.asarray(batch_results_dev.copy_to_host()[:batch_x.shape[0]])
            if ignore_black:
                # NaN-out black pixels so they do not drag the mean down;
                # all-black images yield NaN mean, mapped back to 0.
                batch_grey = cp.where(batch_grey != 0, batch_grey, cp.nan)
                batch_means = cp.nanmean(batch_grey, axis=(1, 2))
                batch_means = cp.where(cp.isnan(batch_means), 0, batch_means)
            else:
                batch_means = cp.mean(batch_grey, axis=(1, 2))
            # BUGFIX: ``.get()`` crashes under the NumPy fallback (no CuPy).
            results.append(batch_means.get() if hasattr(batch_means, 'get') else np.asarray(batch_means))
    else:
        # BUGFIX (restructure): already-grey (N, H, W) input previously referenced
        # the whole array from inside the batching loop; compute its per-image
        # mean once here instead. No GPU pass is needed for grey images.
        results.append(np.mean(x, axis=(1, 2)))
    timer.stop_timer()
    results = np.concatenate(results) if len(results) > 0 else np.array([])
    if verbose: print(f'Brightness computed in {results.shape[0]} images (elapsed time {timer.elapsed_time_str}s)')
    return results
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
@cuda.jit()
def _grey_mse(data, ref_img, stride, batch_cnt, mse_arr):
    # CUDA kernel: per-pixel squared difference between each greyscale frame
    # and the frame ``stride`` steps earlier in the stack.
    #
    # data:      (N, H, W) device array of greyscale frames for this batch.
    # ref_img:   (H, W) reference frame used as the predecessor of frame 0 —
    #            presumably carried over from the previous batch; confirm caller.
    # stride:    1-element device array holding the scalar frame offset.
    # batch_cnt: 1-element device array holding the scalar batch index.
    # mse_arr:   (N, H, W) device output. Entries skipped below are left
    #            unwritten — presumably zero-initialised by the caller; TODO confirm.
    y, x, i = cuda.grid(3)
    stride = stride[0]        # unpack scalars passed as 1-element arrays
    batch_cnt = batch_cnt[0]
    if batch_cnt == 0:
        # First batch: frames without an in-batch predecessor have no valid MSE.
        if (i - stride) < 0 or x < 0 or y < 0:
            return
    else:
        if i < 0 or x < 0 or y < 0:
            return
    if i > mse_arr.shape[0] - 1 or x > mse_arr.shape[1] - 1 or y > mse_arr.shape[2] - 1:
        return
    else:
        img_val = data[i][x][y]
        if i == 0:
            # Only reachable for batch_cnt > 0 (batch 0, i == 0 returned above):
            # compare the batch's first frame against the carried-over reference.
            prev_val = ref_img[x][y]
        else:
            img_val = data[i][x][y]  # redundant re-read; kept byte-identical
            prev_val = data[i - stride][x][y]
        mse_arr[i][x][y] = (img_val - prev_val) ** 2
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
@cuda.jit()
def _rgb_mse(data, ref_img, stride, batch_cnt, mse_arr):
    # CUDA kernel: per-pixel squared error between each RGB frame and the frame
    # ``stride`` steps earlier, summed over the three colour channels.
    #
    # data:      (N, H, W, 3) device array of colour frames for this batch.
    # ref_img:   (H, W, 3) reference frame used as the predecessor of frame 0 —
    #            presumably carried over from the previous batch; confirm caller.
    # stride:    1-element device array holding the scalar frame offset.
    # batch_cnt: 1-element device array holding the scalar batch index.
    # mse_arr:   (N, H, W) device output. Entries skipped below are left
    #            unwritten — presumably zero-initialised by the caller; TODO confirm.
    y, x, i = cuda.grid(3)
    stride = stride[0]        # unpack scalars passed as 1-element arrays
    batch_cnt = batch_cnt[0]
    if batch_cnt == 0:
        # First batch: frames without an in-batch predecessor have no valid MSE.
        if (i - stride) < 0 or x < 0 or y < 0:
            return
    else:
        if i < 0 or x < 0 or y < 0:
            return
    if i > mse_arr.shape[0] - 1 or x > mse_arr.shape[1] - 1 or y > mse_arr.shape[2] - 1:
        return
    else:
        img_val = data[i][x][y]
        if i != 0:
            prev_val = data[i - stride][x][y]
        else:
            # Only reachable for batch_cnt > 0 (batch 0, i == 0 returned above):
            # compare the batch's first frame against the carried-over reference.
            prev_val = ref_img[x][y]
        # Channel-wise squared differences, summed into a single error value.
        r_diff = (img_val[0] - prev_val[0]) ** 2
        g_diff = (img_val[1] - prev_val[1]) ** 2
        b_diff = (img_val[2] - prev_val[2]) ** 2
        mse_arr[i][x][y] = r_diff + g_diff + b_diff
|
|
464
|
+
|
|
465
|
+
def stack_sliding_mse(x: np.ndarray,
                      stride: Optional[int] = 1,
                      batch_size: Optional[int] = 1000) -> np.ndarray:
    r"""
    Computes the Mean Squared Error (MSE) between each image in a stack and a reference image,
    where the reference image is determined by a sliding window approach with a specified stride.
    The function is optimized for large image stacks by processing them in batches.

    .. seealso::
       For CPU function see :func:`~simba.mixins.image_mixin.ImageMixin.img_stack_mse` and
       :func:`~simba.mixins.image_mixin.ImageMixin.img_sliding_mse`.

    .. math::

       \text{MSE} = \frac{1}{n} \sum_{i=1}^{n} (y_i - \hat{y}_i)^2

    :param np.ndarray x: Input array of images, where the first dimension corresponds to the stack of images. The array should be either 3D (height, width, channels) or 4D (batch, height, width, channels).
    :param Optional[int] stride: The stride or step size for the sliding window that determines the reference image. Defaults to 1, meaning the previous image in the stack is used as the reference.
    :param Optional[int] batch_size: The number of images to process in a single batch. Larger batch sizes may improve performance but require more GPU memory. Defaults to 1000.
    :return: A 1D NumPy array containing the MSE for each image in the stack compared to its corresponding reference image. The length of the array is equal to the number of images in the input stack.
    :rtype: np.ndarray
    """

    check_instance(source=stack_sliding_mse.__name__, instance=x, accepted_types=(np.ndarray,))
    check_if_valid_img(data=x[0], source=stack_sliding_mse.__name__)
    check_valid_array(data=x, source=stack_sliding_mse.__name__, accepted_ndims=[3, 4])
    # Stride is shipped to the device as a 1-element array (kernels unpack [0]).
    stride = np.array([stride], dtype=np.int32)
    stride_dev = cuda.to_device(stride)
    out = np.full((x.shape[0]), fill_value=0.0, dtype=np.float32)
    for batch_cnt, l in enumerate(range(0, x.shape[0], batch_size)):
        r = l + batch_size
        batch_x = x[l:r]
        if batch_cnt != 0:
            # Carry the image `stride` frames before the batch start as the
            # reference for the batch's first frame (kernel uses it at i == 0).
            if x.ndim == 3:
                ref_img = x[l-stride].astype(np.uint8).reshape(x.shape[1], x.shape[2])
            else:
                ref_img = x[l-stride].astype(np.uint8).reshape(x.shape[1], x.shape[2], 3)
        else:
            # First batch: dummy all-zero reference (kernel skips i < stride).
            ref_img = np.full_like(x[l], dtype=np.uint8, fill_value=0)
        ref_img = ref_img.astype(np.uint8)
        # 2D 16x16 thread blocks over (height, width); one grid layer per image.
        grid_x = (batch_x.shape[1] + 16 - 1) // 16
        grid_y = (batch_x.shape[2] + 16 - 1) // 16
        grid_z = batch_x.shape[0]
        threads_per_block = (16, 16, 1)
        blocks_per_grid = (grid_y, grid_x, grid_z)
        ref_img_dev = cuda.to_device(ref_img)
        x_dev = cuda.to_device(batch_x)
        # NOTE(review): uint8 output buffer — squared pixel differences > 255
        # wrap; confirm float32 was not intended here. Also device_array is
        # uninitialized, and the kernels skip the first `stride` frames of
        # batch 0, so those rows hold arbitrary memory.
        results = cuda.device_array((batch_x.shape[0], batch_x.shape[1], batch_x.shape[2]), dtype=np.uint8)
        batch_cnt_dev = np.array([batch_cnt], dtype=np.int32)
        if x.ndim == 3:
            _grey_mse[blocks_per_grid, threads_per_block](x_dev, ref_img_dev, stride_dev, batch_cnt_dev, results)
        else:
            _rgb_mse[blocks_per_grid, threads_per_block](x_dev, ref_img_dev, stride_dev, batch_cnt_dev, results)
        results = results.copy_to_host()
        # Average the per-pixel squared errors into one MSE value per image.
        results = np.mean(results, axis=(1, 2))
        out[l:r] = results
    return out
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def img_stack_to_grayscale_cupy(imgs: Union[np.ndarray, cp.ndarray],
                                batch_size: Optional[int] = 250) -> np.ndarray:
    """
    Converts a stack of color images to grayscale using GPU acceleration with CuPy.

    The result type mirrors the input type: a NumPy input returns a NumPy array,
    a CuPy input returns a CuPy array.

    .. seealso::
       For CPU function single images :func:`~simba.mixins.image_mixin.ImageMixin.img_to_greyscale` and
       :func:`~simba.mixins.image_mixin.ImageMixin.img_stack_to_greyscale` for stack. For CUDA JIT, see
       :func:`~simba.data_processors.cuda.image.img_stack_to_grayscale_cuda`.

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/img_stack_to_grayscale_cupy.csv
       :widths: 10, 90
       :align: center
       :class: simba-table
       :header-rows: 1

    :param np.ndarray imgs: A 4D NumPy or CuPy array representing a stack of images with shape (num_images, height, width, channels). The images are expected to have 3 channels (RGB).
    :param Optional[int] batch_size: The number of images to process in each batch. Defaults to 250. Adjust this parameter to fit your GPU's memory capacity.
    :return np.ndarray: A 3D NumPy or CuPy array of shape (num_images, height, width) containing the grayscale images. If the input array is not 4D, the function returns the input as is.

    :example:
    >>> imgs = read_img_batch_from_video_gpu(video_path=r"/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_IOT_1_cropped.mp4", verbose=False, start_frm=0, end_frm=i)
    >>> imgs = np.stack(list(imgs.values()), axis=0).astype(np.uint8)
    >>> gray_imgs = img_stack_to_grayscale_cupy(imgs=imgs)
    """

    check_instance(source=img_stack_to_grayscale_cupy.__name__, instance=imgs, accepted_types=(np.ndarray, cp.ndarray))
    check_if_valid_img(data=imgs[0], source=img_stack_to_grayscale_cupy.__name__)
    if imgs.ndim != 4:
        return imgs
    # BUGFIX: record the input type BEFORE `imgs` is rebound to a list by
    # np.array_split below; the original isinstance check at the end always
    # saw a list, so NumPy callers received a CuPy array.
    return_numpy = isinstance(imgs, np.ndarray)
    results = cp.zeros((imgs.shape[0], imgs.shape[1], imgs.shape[2]), dtype=np.uint8)
    n = int(np.ceil((imgs.shape[0] / batch_size)))
    imgs = np.array_split(imgs, n)
    start = 0
    for i in range(len(imgs)):
        img_batch = cp.array(imgs[i])
        batch_cnt = img_batch.shape[0]
        end = start + batch_cnt
        # Luminance weights applied per channel (BGR channel order assumed by
        # the weight placement — matches the CUDA kernel in this module).
        vals = (0.07 * img_batch[:, :, :, 2] + 0.72 * img_batch[:, :, :, 1] + 0.21 * img_batch[:, :, :, 0])
        results[start:end] = vals.astype(cp.uint8)
        start = end
    if return_numpy:
        return results.get()
    else:
        return results
|
|
573
|
+
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
@cuda.jit()
def _img_stack_to_grayscale(data, results):
    """
    CUDA kernel: convert one pixel of one RGB image in a stack to greyscale.

    Each thread handles a single (image, row, column) coordinate and writes the
    luminance-weighted channel sum into ``results``.

    :param data: 4D device array (img, x, y, channel) of color images.
    :param results: 3D device output array (img, x, y) of greyscale values.
    """
    dim_a, dim_b, img_idx = cuda.grid(3)
    # Reject threads outside the valid index space (grid is rounded up).
    in_bounds = (
        img_idx >= 0 and dim_b >= 0 and dim_a >= 0
        and img_idx <= results.shape[0] - 1
        and dim_b <= results.shape[1] - 1
        and dim_a <= results.shape[2] - 1
    )
    if in_bounds:
        px = data[img_idx][dim_b][dim_a]
        # Luminance weights: 0.21 R + 0.72 G + 0.07 B (channel order as stored).
        results[img_idx][dim_b][dim_a] = 0.07 * px[2] + 0.72 * px[1] + 0.21 * px[0]
|
|
589
|
+
|
|
590
|
+
def img_stack_to_grayscale_cuda(x: np.ndarray) -> np.ndarray:
    """
    Convert image stack to grayscale using CUDA.

    .. seealso::
       For CPU function single images :func:`~simba.mixins.image_mixin.ImageMixin.img_to_greyscale` and
       :func:`~simba.mixins.image_mixin.ImageMixin.img_stack_to_greyscale` for stack. For CuPy, see
       :func:`~simba.data_processors.cuda.image.img_stack_to_grayscale_cupy`.

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/img_stack_to_grayscale_cuda.csv
       :widths: 10, 45, 45
       :align: center
       :class: simba-table
       :header-rows: 1

    :param np.ndarray x: 4d array of color images in numpy format.
    :return np.ndarray: 3D array of greyscaled images.

    :example:
    >>> imgs = read_img_batch_from_video_gpu(video_path=r"/mnt/c/troubleshooting/mitra/project_folder/videos/temp_2/592_MA147_Gq_Saline_0516_downsampled.mp4", verbose=False, start_frm=0, end_frm=i)
    >>> imgs = np.stack(list(imgs.values()), axis=0).astype(np.uint8)
    >>> grey_images = img_stack_to_grayscale_cuda(x=imgs)
    """
    check_instance(source=img_stack_to_grayscale_cuda.__name__, instance=x, accepted_types=(np.ndarray,))
    check_if_valid_img(data=x[0], source=img_stack_to_grayscale_cuda.__name__)
    # Non-4D input is passed through untouched (already greyscale or unexpected).
    if x.ndim != 4:
        return x
    stack = np.ascontiguousarray(x).astype(np.uint8)
    n_imgs, dim_1, dim_2 = stack.shape[0], stack.shape[1], stack.shape[2]
    stack_dev = cuda.to_device(stack)
    out_dev = cuda.device_array((n_imgs, dim_1, dim_2), dtype=np.uint8)
    # 16x16 thread tiles over the two spatial dimensions, one grid layer per image.
    tpb = (16, 16, 1)
    bpg = ((dim_2 + 15) // 16, (dim_1 + 15) // 16, n_imgs)
    _img_stack_to_grayscale[bpg, tpb](stack_dev, out_dev)
    return out_dev.copy_to_host()
|
|
630
|
+
|
|
631
|
+
|
|
632
|
+
def img_stack_to_bw(imgs: np.ndarray,
                    lower_thresh: Optional[int] = 100,
                    upper_thresh: Optional[int] = 100,
                    invert: Optional[bool] = True,
                    batch_size: Optional[int] = 1000) -> np.ndarray:
    """
    Converts a stack of RGB images to binary (black and white) images based on given threshold values using GPU acceleration.

    This function processes a 4D stack of images, converting each RGB image to a binary image using
    specified lower and upper threshold values. The conversion can be inverted if desired, and the
    processing is done in batches for efficiency.

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/img_stack_to_bw.csv
       :widths: 10, 90
       :align: center
       :header-rows: 1

    .. seealso::
       :func:`simba.mixins.image_mixin.ImageMixin.img_to_bw`
       :func:`simba.mixins.image_mixin.ImageMixin.img_stack_to_bw`

    :param np.ndarray imgs: A 4D NumPy array representing a stack of RGB images, with shape (N, H, W, C).
    :param Optional[int] lower_thresh: The lower threshold value. Pixel values below this threshold are set to 0 (or 1 if `invert` is True). Default is 100.
    :param Optional[int] upper_thresh: The upper threshold value. Pixel values above this threshold are set to 1 (or 0 if `invert` is True). Default is 100.
    :param Optional[bool] invert: If True, the binary conversion is inverted, meaning that values below `lower_thresh` become 1, and values above `upper_thresh` become 0. Default is True.
    :param Optional[int] batch_size: The number of images to process in a single batch. This helps manage memory usage for large stacks of images. Default is 1000.
    :return: A 3D NumPy array of shape (N, H, W), where each image has been converted to a binary format with pixel values of either 0 or 1.
    :rtype: np.ndarray
    """

    check_valid_array(data=imgs, source=img_stack_to_bw.__name__, accepted_ndims=(4,))
    check_int(name='lower_thresh', value=lower_thresh, max_value=255, min_value=0)
    check_int(name='upper_thresh', value=upper_thresh, max_value=255, min_value=0)
    check_int(name='batch_size', value=batch_size, min_value=1)
    # BUGFIX: the original filled a uint8 array with cp.nan — NaN cannot be
    # represented in an integer dtype. Allocate explicit zeros instead; every
    # slot is overwritten by the batch loop below anyway.
    results = cp.zeros((imgs.shape[0], imgs.shape[1], imgs.shape[2]), dtype=cp.uint8)

    for l in range(0, imgs.shape[0], batch_size):
        r = l + batch_size
        batch_imgs = cp.array(imgs[l:r]).astype(cp.uint8)
        # Channel-mean greyscale value per pixel.
        img_mean = cp.sum(batch_imgs, axis=3) / 3
        # NOTE(review): pixels with lower_thresh <= mean <= upper_thresh keep
        # their (truncated) mean value rather than 0/1 — with the default
        # lower == upper == 100 only exact-100 means are affected; confirm
        # whether this mid-band passthrough is intended.
        if not invert:
            batch_imgs = cp.where(img_mean < lower_thresh, 0, img_mean)
            batch_imgs = cp.where(batch_imgs > upper_thresh, 1, batch_imgs).astype(cp.uint8)
        else:
            batch_imgs = cp.where(img_mean < lower_thresh, 1, img_mean)
            batch_imgs = cp.where(batch_imgs > upper_thresh, 0, batch_imgs).astype(cp.uint8)

        results[l:r] = batch_imgs

    return results.get()
|
|
685
|
+
|
|
686
|
+
def segment_img_stack_vertical(imgs: np.ndarray,
                               pct: float,
                               left: bool,
                               right: bool) -> np.ndarray:
    """
    Segment a stack of images vertically based on a given percentage using GPU acceleration. For example, return the left half, right half, or center half of each image in the stack.

    .. note::
       If both left and right are true, the center portion is returned.

    .. seealso::
       :func:`simba.mixins.image_mixin.ImageMixin.segment_img_vertical`

    :param np.ndarray imgs: A 3D or 4D NumPy array representing a stack of images. The array should have shape (N, H, W) for grayscale images or (N, H, W, C) for color images.
    :param float pct: The percentage of the image width to be used for segmentation. This value should be between a small positive value (e.g., 10e-6) and 0.99.
    :param bool left: If True, the left side of the image stack will be segmented.
    :param bool right: If True, the right side of the image stack will be segmented.
    :return: A NumPy array containing the segmented images, with the same number of dimensions as the input.
    :rtype: np.ndarray
    :raises InvalidInputError: If both ``left`` and ``right`` are False.
    """

    check_valid_boolean(value=[left, right], source=segment_img_stack_vertical.__name__)
    check_float(name=f'{segment_img_stack_vertical.__name__} pct', value=pct, min_value=10e-6, max_value=0.99)
    check_valid_array(data=imgs, source=f'{segment_img_stack_vertical.__name__} imgs', accepted_ndims=(3, 4,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
    if not left and not right:
        # BUGFIX: corrected garbled error message ("left are right argument...").
        raise InvalidInputError(msg='left and right arguments are both False. Set one or both to True.', source=segment_img_stack_vertical.__name__)
    imgs = cp.array(imgs).astype(cp.uint8)
    # Width is axis 2 of the (N, H, W[, C]) stack.
    px_crop = int(imgs.shape[2] * pct)
    if left and not right:
        imgs = imgs[:, :, :px_crop]
    elif right and not left:
        imgs = imgs[:, :, imgs.shape[2] - px_crop:]
    else:
        # Both flags set: keep the center band, trimming px_crop/2 per side.
        imgs = imgs[:, :, int(px_crop / 2):int(imgs.shape[2] - (px_crop / 2))]
    return imgs.get()
|
|
722
|
+
|
|
723
|
+
|
|
724
|
+
def segment_img_stack_horizontal(imgs: np.ndarray,
                                 pct: float,
                                 upper: Optional[bool] = False,
                                 lower: Optional[bool] = False) -> np.ndarray:
    """
    Segment a stack of images horizontally based on a given percentage using GPU acceleration. For example, return the top half, bottom half, or center half of each image in the stack.

    .. note::
       If both top and bottom are true, the center portion is returned.

    .. seealso::
       :func:`simba.mixins.image_mixin.ImageMixin.segment_img_stack_horizontal`

    :param np.ndarray imgs: A 3D or 4D NumPy array representing a stack of images. The array should have shape (N, H, W) for grayscale images or (N, H, W, C) for color images.
    :param float pct: The percentage of the image height to be used for segmentation. This value should be between a small positive value (e.g., 10e-6) and 0.99.
    :param bool upper: If True, the top part of the image stack will be segmented.
    :param bool lower: If True, the bottom part of the image stack will be segmented.
    :return: A NumPy array containing the segmented images, with the same number of dimensions as the input.
    :rtype: np.ndarray
    :raises InvalidInputError: If both ``upper`` and ``lower`` are False.
    """

    check_valid_boolean(value=[upper, lower], source=segment_img_stack_horizontal.__name__)
    check_float(name=f'{segment_img_stack_horizontal.__name__} pct', value=pct, min_value=10e-6, max_value=0.99)
    # BUGFIX: error source previously reported segment_img_stack_vertical.
    check_valid_array(data=imgs, source=f'{segment_img_stack_horizontal.__name__} imgs', accepted_ndims=(3, 4,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
    if not upper and not lower:
        raise InvalidInputError(msg='upper and lower argument are both False. Set one or both to True.', source=segment_img_stack_horizontal.__name__)
    imgs = cp.array(imgs).astype(cp.uint8)
    # Height is axis 1 of the (N, H, W[, C]) stack.
    px_crop = int(imgs.shape[1] * pct)
    if upper and not lower:
        imgs = imgs[:, :px_crop, :]
    elif not upper and lower:
        # BUGFIX: height is axis 1; the original sliced with imgs.shape[0]
        # (the frame count), returning a wrong band for most stacks.
        imgs = imgs[:, imgs.shape[1] - px_crop:, :]
    else:
        # BUGFIX: mirror the vertical variant — keep the center band by
        # trimming px_crop/2 from top and bottom. The original upper bound
        # (imgs.shape[0] - px_crop) / 2 mixed axes and halved the wrong term.
        imgs = imgs[:, int(px_crop / 2):int(imgs.shape[1] - (px_crop / 2)), :]

    return imgs.get()
|
|
762
|
+
|
|
763
|
+
|
|
764
|
+
|
|
765
|
+
@cuda.jit(device=True)
def _cuda_is_inside_polygon(x, y, polygon_vertices):
    """
    Checks if the pixel location is inside the polygon.

    Even-odd ray-casting test: a horizontal ray from (x, y) toggles the
    inside/outside state at every polygon-edge crossing.

    :param int x: Pixel x location.
    :param int y: Pixel y location.
    :param np.ndarray polygon_vertices: 2-dimensional array representing the x and y coordinates of the polygon vertices.
    :return: Boolean representing if the x and y are located in the polygon.
    """

    n = len(polygon_vertices)
    p2x, p2y, xints, inside = 0.0, 0.0, 0.0, False
    p1x, p1y = polygon_vertices[0]
    # Walk every edge (p1 -> p2); j % n wraps around to close the polygon.
    for j in range(n + 1):
        p2x, p2y = polygon_vertices[j % n]
        # Candidate crossing: y lies within the edge's y-span and x is not
        # strictly to the right of both endpoints.
        if ((y > min(p1y, p2y)) and (y <= max(p1y, p2y)) and (x <= max(p1x, p2x))):
            if p1y != p2y:
                # x-coordinate where the horizontal ray meets this edge.
                xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
            if p1x == p2x or x <= xints:
                # Genuine crossing: toggle parity.
                inside = not inside
        p1x, p1y = p2x, p2y
    return inside
|
|
788
|
+
|
|
789
|
+
|
|
790
|
+
|
|
791
|
+
@cuda.jit(device=True)
def _cuda_is_inside_circle(x, y, circle_x, circle_y, circle_r):
    """
    Device func to check if the pixel location is inside a circle.

    :param int x: Pixel x location.
    :param int y: Pixel y location.
    :param int circle_x: Center of circle x coordinate.
    :param int circle_y: Center of circle y coordinate.
    :param int circle_r: Circle radius.
    :return: Boolean representing if the x and y are located in the circle.
    """
    dx = x - circle_x
    dy = y - circle_y
    # Inside (or on the boundary) when the Euclidean distance to the
    # center does not exceed the radius.
    dist = math.sqrt(dx ** 2 + dy ** 2)
    return dist <= circle_r
|
|
809
|
+
@cuda.jit()
def _cuda_create_rectangle_masks(shapes, imgs, results, bboxes):
    """
    CUDA kernel to apply rectangular masks to a batch of images.

    NOTE(review): despite the name, membership is tested with
    :func:`_cuda_is_inside_polygon`, so any per-frame polygon works — the
    bounding boxes only localize the crop window. Pixels outside the polygon
    keep the value ``results`` was initialized with by the host.
    """
    # Thread maps to (frame index, output row, output column).
    n, y, x = cuda.grid(3)
    if n >= imgs.shape[0]:
        return

    # Per-frame crop window computed on the host.
    x_min = bboxes[n, 0]
    y_min = bboxes[n, 1]
    x_max = bboxes[n, 2]
    y_max = bboxes[n, 3]

    max_w = x_max - x_min
    max_h = y_max - y_min

    # Skip threads outside this frame's (possibly smaller) crop window.
    if x >= max_w or y >= max_h:
        return

    # Translate crop-local coordinates back into full-frame coordinates.
    x_input = x + x_min
    y_input = y + y_min

    polygon = shapes[n]

    if _cuda_is_inside_polygon(x_input, y_input, polygon):
        if imgs.ndim == 4:
            # Color image: copy every channel.
            for c in range(imgs.shape[3]):
                results[n, y, x, c] = imgs[n, y_input, x_input, c]
        else:
            results[n, y, x] = imgs[n, y_input, x_input]
|
|
840
|
+
|
|
841
|
+
@cuda.jit()
def _cuda_create_circle_masks(shapes, imgs, results, bboxes):
    """
    CUDA kernel to apply circular masks to a batch of images.

    Each frame ``n`` has a circle ``shapes[n] = [cx, cy, r]`` and a host-computed
    bounding box; pixels inside the circle are copied into the crop-local output,
    all others keep the value ``results`` was initialized with by the host.
    """
    # Thread maps to (frame index, output row, output column).
    n, y, x = cuda.grid(3)
    if n >= imgs.shape[0]:
        return

    # Per-frame crop window computed on the host.
    x_min = bboxes[n, 0]
    y_min = bboxes[n, 1]
    x_max = bboxes[n, 2]
    y_max = bboxes[n, 3]

    max_w = x_max - x_min
    max_h = y_max - y_min

    # Skip threads outside this frame's crop window.
    if x >= max_w or y >= max_h:
        return

    # Translate crop-local coordinates back into full-frame coordinates.
    x_input = x + x_min
    y_input = y + y_min

    circle_x = shapes[n, 0]
    circle_y = shapes[n, 1]
    circle_r = shapes[n, 2]


    if _cuda_is_inside_circle(x_input, y_input, circle_x, circle_y, circle_r):
        if imgs.ndim == 4:
            # Color image: copy every channel.
            for c in range(imgs.shape[3]):
                results[n, y, x, c] = imgs[n, y_input, x_input, c]
        else:
            results[n, y, x] = imgs[n, y_input, x_input]
|
|
875
|
+
|
|
876
|
+
|
|
877
|
+
def _get_bboxes(shapes):
|
|
878
|
+
"""
|
|
879
|
+
Helper to get geometries in :func:`simba.data_processors.cuda.image.slice_imgs`.
|
|
880
|
+
"""
|
|
881
|
+
bboxes = []
|
|
882
|
+
for shape in shapes:
|
|
883
|
+
if shape.shape[0] == 3: # circle: [cx, cy, r]
|
|
884
|
+
cx, cy, r = shape
|
|
885
|
+
x_min = int(np.floor(cx - r))
|
|
886
|
+
y_min = int(np.floor(cy - r))
|
|
887
|
+
x_max = int(np.ceil(cx + r))
|
|
888
|
+
y_max = int(np.ceil(cy + r))
|
|
889
|
+
else:
|
|
890
|
+
xs = shape[:, 0]
|
|
891
|
+
ys = shape[:, 1]
|
|
892
|
+
x_min = int(np.floor(xs.min()))
|
|
893
|
+
y_min = int(np.floor(ys.min()))
|
|
894
|
+
x_max = int(np.ceil(xs.max()))
|
|
895
|
+
y_max = int(np.ceil(ys.max()))
|
|
896
|
+
bboxes.append([x_min, y_min, x_max, y_max])
|
|
897
|
+
return np.array(bboxes, dtype=np.int32)
|
|
898
|
+
|
|
899
|
+
def slice_imgs(video_path: Union[str, os.PathLike],
               shapes: np.ndarray,
               batch_size: int = 1000,
               verbose: bool = True,
               save_dir: Optional[Union[str, os.PathLike]] = None):
    """
    Slice frames from a video based on given polygon or circle coordinates, and return or save masked/cropped frame regions using GPU acceleration.

    This function supports two types of shapes:
    - Polygon: array of shape (N, M, 2), where N = number of frames, M = number of polygon vertices.
    - Circle: array of shape (N, 3), where each row represents [center_x, center_y, radius].

    :param Union[str, os.PathLike] video_path: Path to the input video file.
    :param np.ndarray shapes: Array of polygon coordinates or circle parameters for each frame. - Polygon: shape = (n_frames, n_vertices, 2) - Circle: shape = (n_frames, 3)
    :param int batch_size: Number of frames to process per batch during GPU processing. Default 1000.
    :param bool verbose: Whether to print progress and status messages. Default True.
    :param Optional[Union[str, os.PathLike]] save_dir: If provided, the masked/cropped video will be saved in this directory. Otherwise, the cropped image stack will be returned.

    .. video:: _static/img/simba.sandbox.cuda_slice_w_crop.slice_imgs.webm
       :width: 900
       :loop:

    .. video:: _static/img/slice_imgs_gpu.webm
       :width: 800
       :autoplay:
       :loop:

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/slice_imgs.csv
       :widths: 10, 90
       :align: center
       :class: simba-table
       :header-rows: 1

    .. note::
       For CPU multicore implementation, see :func:`simba.mixins.image_mixin.ImageMixin.slice_shapes_in_imgs`.
       For single core process, see :func:`simba.mixins.image_mixin.ImageMixin.slice_shapes_in_img`

    :example I:
    Example 1: Mask video using circular regions derived from body part center positions
    >>> video_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/03152021_NOB_IOT_8.mp4"
    >>> data_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/csv/outlier_corrected_movement_location/03152021_NOB_IOT_8.csv"
    >>> save_dir = '/mnt/d/netholabs/yolo_videos/input/mp4_20250606083508'
    >>> nose_arr = read_df(file_path=data_path, file_type='csv', usecols=['Nose_x', 'Nose_y']).values.reshape(-1, 2).astype(np.int32)
    >>> polygons = GeometryMixin().multiframe_bodyparts_to_circle(data=nose_arr, parallel_offset=60)
    >>> polygon_lst = []
    >>> center = GeometryMixin.get_center(polygons)
    >>> polygons = np.hstack([center, np.full(shape=(len(center), 1), fill_value=60)])
    >>> slice_imgs(video_path=video_path, shapes=polygons, batch_size=500, save_dir=save_dir)

    :example II:
    Example 2: Mask video using minimum rotated rectangles from polygon hulls
    >>> video_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/03152021_NOB_IOT_8.mp4"
    >>> data_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/csv/outlier_corrected_movement_location/03152021_NOB_IOT_8.csv"
    >>> save_dir = '/mnt/d/netholabs/yolo_videos/input/mp4_20250606083508'
    >>> nose_arr = read_df(file_path=data_path, file_type='csv', usecols=['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y', 'Lat_left_x', 'Lat_left_y', 'Lat_right_x', 'Lat_right_y']).values.reshape(-1, 4, 2).astype(np.int32) ## READ THE BODY-PART THAT DEFINES THE HULL AND CONVERT TO ARRAY
    >>> polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=nose_arr, parallel_offset=60)
    >>> polygons = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=polygons)
    >>> polygon_lst = []
    >>> for i in polygons:
    >>>     polygon_lst.append(np.array(i.exterior.coords).astype(np.int32))
    >>> polygons = np.stack(polygon_lst, axis=0)
    >>> sliced_imgs = slice_imgs(video_path=video_path, shapes=polygons, batch_size=500, save_dir=save_dir)
    """

    THREADS_PER_BLOCK = (16, 8, 8)
    video_meta_data = get_video_meta_data(video_path=video_path, fps_as_int=False)
    # Process only as many frames as shapes were supplied for.
    video_meta_data['frame_count'] = shapes.shape[0]
    n, w, h = video_meta_data['frame_count'], video_meta_data['width'], video_meta_data['height']
    is_color = ImageMixin.is_video_color(video=video_path)
    timer, save_temp_dir, results, video_out_path = SimbaTimer(start=True), None, None, None
    # Per-frame crop windows; all outputs are padded to the largest window so
    # every cropped frame has identical dimensions.
    bboxes = _get_bboxes(shapes)
    crop_heights = bboxes[:, 3] - bboxes[:, 1]
    crop_widths = bboxes[:, 2] - bboxes[:, 0]

    max_h = int(np.max(crop_heights))
    max_w = int(np.max(crop_widths))

    if save_dir is None:
        # In-memory mode: preallocate the full output stack.
        if not is_color:
            results = np.zeros((n, max_h, max_w), dtype=np.uint8)
        else:
            results = np.zeros((n, max_h, max_w, 3), dtype=np.uint8)
    else:
        # Save mode: per-batch clips are written to a temp dir, concatenated at the end.
        save_temp_dir = os.path.join(save_dir, f'temp_{video_meta_data["video_name"]}')
        create_directory(paths=save_temp_dir, overwrite=True)
        video_out_path = os.path.join(save_dir, f'{video_meta_data["video_name"]}.mp4')

    # Frames are decoded on a background thread while the GPU masks the previous batch.
    frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, verbose=True, max_que_size=2)
    frm_reader.start()

    for batch_cnt in range(frm_reader.batch_cnt):
        start_img_idx, end_img_idx, batch_imgs = get_async_frame_batch(batch_reader=frm_reader, timeout=10)
        if verbose:
            print(f'Processing images {start_img_idx} - {end_img_idx} (of {n}; batch count: {batch_cnt+1}/{frm_reader.batch_cnt})...')

        batch_save_path = os.path.join(save_temp_dir, f'{batch_cnt}.mp4') if save_dir is not None else None

        batch_shapes = shapes[start_img_idx:end_img_idx].astype(np.int32)
        batch_bboxes = bboxes[start_img_idx:end_img_idx]

        x_dev = cuda.to_device(batch_shapes)
        bboxes_dev = cuda.to_device(batch_bboxes)
        batch_img_dev = cuda.to_device(batch_imgs)

        # Zero-filled output: pixels outside the shape stay black.
        if not is_color:
            batch_results = np.zeros((batch_imgs.shape[0], max_h, max_w), dtype=np.uint8)
        else:
            batch_results = np.zeros((batch_imgs.shape[0], max_h, max_w, 3), dtype=np.uint8)
        batch_results_dev = cuda.to_device(batch_results)
        grid_n = math.ceil(batch_imgs.shape[0] / THREADS_PER_BLOCK[0])
        grid_y = math.ceil(max_h / THREADS_PER_BLOCK[1])
        grid_x = math.ceil(max_w / THREADS_PER_BLOCK[2])
        bpg = (grid_n, grid_y, grid_x)
        # Circles are rows of 3 scalars; polygons are (M, 2) vertex arrays.
        # NOTE(review): shape[1] == 3 would also match 3-vertex polygons
        # (shape (N, 3, 2)) — confirm callers never pass triangles.
        if batch_shapes.shape[1] == 3:
            _cuda_create_circle_masks[bpg, THREADS_PER_BLOCK](x_dev, batch_img_dev, batch_results_dev, bboxes_dev)
        else:
            _cuda_create_rectangle_masks[bpg, THREADS_PER_BLOCK](x_dev, batch_img_dev, batch_results_dev, bboxes_dev)
        if save_dir is None:
            results[start_img_idx:end_img_idx] = batch_results_dev.copy_to_host()
        else:
            # Write this batch straight to a temp clip; `results` is reused as
            # the per-batch frame dict here (the in-memory array is only
            # allocated when save_dir is None).
            frame_results = batch_results_dev.copy_to_host()
            results = {k: v for k, v in enumerate(frame_results)}
            ImageMixin().img_stack_to_video(imgs=results, fps=video_meta_data['fps'], save_path=batch_save_path, verbose=False)

    frm_reader.kill()
    timer.stop_timer()

    if save_dir:
        concatenate_videos_in_folder(in_folder=save_temp_dir, save_path=video_out_path, remove_splits=True, gpu=True)
        if verbose:
            stdout_success(msg=f'Shapes sliced in video saved at {video_out_path}.', elapsed_time=timer.elapsed_time_str)
        return None
    else:
        if verbose:
            stdout_success(msg='Shapes sliced in video.', elapsed_time=timer.elapsed_time_str)
        return results
|
|
1037
|
+
|
|
1038
|
+
|
|
1039
|
+
@cuda.jit()
def _sliding_psnr(data, stride, results):
    """
    CUDA kernel: PSNR between image ``r`` and the image ``stride`` positions
    earlier in the stack; one thread per image index.

    :param data: Device array stack of images.
    :param stride: 1-element int array holding the comparison offset.
    :param results: 1D output array; results[r] receives the PSNR in dB.
        Indices with no in-stack predecessor are left untouched.
    """
    r = cuda.grid(1)
    # Index of the comparison image, `stride` frames earlier.
    l = int(r - stride[0])
    if (r < 0) or (r > data.shape[0] -1):
        return
    if l < 0:
        # No predecessor within the stack; results[r] keeps its host-set value.
        return
    else:
        img_1, img_2 = data[r], data[l]
        # Device helper defined elsewhere in this module.
        mse = _cuda_mse(img_1, img_2)
        if mse == 0:
            # Identical images: PSNR is mathematically infinite; this
            # implementation stores 0.0 as the sentinel instead.
            results[r] = 0.0
        else:
            # PSNR = 20 * log10(MAX / sqrt(MSE)) with MAX = 255 (8-bit images).
            results[r] = 20 * math.log10(255 / math.sqrt(mse))
|
|
1054
|
+
|
|
1055
|
+
def sliding_psnr(data: np.ndarray,
                 stride_s: int,
                 sample_rate: float) -> np.ndarray:
    r"""
    Compute Peak Signal-to-Noise Ratio (PSNR) between pairs of images in a stack
    using a sliding-window comparison on the GPU.

    Each image at index ``r`` is compared against the image at index
    ``r - stride``, where ``stride = stride_s * sample_rate`` (clamped to at
    least 1). Indices with no valid partner keep the fill value of 255.0.

    .. math::

       \text{PSNR} = 20 \log_{10} \left( \frac{\text{MAX}}{\sqrt{\text{MSE}}} \right)

    where :math:`\text{MAX}` is 255 (8-bit images) and :math:`\text{MSE}` is the
    mean squared error between the two images. Higher values (dB) indicate
    smaller differences between the compared frames.

    :param data: Stack of images with the frame index on axis 0.
    :param stride_s: Base stride (number of frames) between compared images.
    :param sample_rate: Scale factor applied to ``stride_s`` to obtain the
        effective frame gap.
    :return: 1D float32 array of per-frame PSNR values.
    :rtype: np.ndarray

    :example:
    >>> data = ImageMixin().read_img_batch_from_video(video_path =r"/mnt/c/troubleshooting/mitra/project_folder/videos/clipped/501_MA142_Gi_CNO_0514_clipped.mp4", start_frm=0, end_frm=299)
    >>> data = np.stack(list(data.values()), axis=0).astype(np.uint8)
    >>> data = ImageMixin.img_stack_to_greyscale(imgs=data)
    >>> p = sliding_psnr(data=data, stride_s=1, sample_rate=1)
    """

    n_frames = data.shape[0]
    out_host = np.full(n_frames, fill_value=255.0, dtype=np.float32)
    # Truncate toward zero (matches an int32 cast) and clamp the gap to >= 1.
    frame_gap = int(stride_s * sample_rate)
    if frame_gap < 1:
        frame_gap = 1
    imgs_dev = cuda.to_device(data)
    gap_dev = cuda.to_device(np.array([frame_gap], dtype=np.int32))
    out_dev = cuda.to_device(out_host)
    blocks = (n_frames + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
    _sliding_psnr[blocks, THREADS_PER_BLOCK](imgs_dev, gap_dev, out_dev)
    return out_dev.copy_to_host()
|
|
1099
|
+
|
|
1100
|
+
def rotate_img_stack_cupy(imgs: np.ndarray,
                          rotation_degrees: Optional[float] = 180,
                          batch_size: Optional[int] = 500,
                          verbose: bool = True) -> np.ndarray:
    """
    Rotates a stack of images by a specified number of degrees using GPU acceleration with CuPy.

    Accepts a 3D (single-channel images) or 4D (multichannel images) NumPy array, rotates each image in the stack by the specified degree around the center, and returns the result as a NumPy array.

    :param np.ndarray imgs: The input stack of images to be rotated. Expected to be a NumPy array with 3 or 4 dimensions. 3D shape: (num_images, height, width) - 4D shape: (num_images, height, width, channels)
    :param Optional[float] rotation_degrees: The angle by which the images should be rotated, in degrees. Must be between 1 and 359 degrees. Defaults to 180 degrees.
    :param Optional[int] batch_size: Number of images to process on GPU in each batch. Decrease if data can't fit on GPU RAM.
    :param bool verbose: If True, prints per-batch progress and total runtime.
    :returns: A NumPy array containing the rotated images. With ``reshape=True`` the output height/width may differ from the input.
    :rtype: np.ndarray

    :example:
    >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/F0_gq_Saline_0626_clipped.mp4"
    >>> imgs = read_img_batch_from_video_gpu(video_path=video_path)
    >>> imgs = np.stack(np.array(list(imgs.values())), axis=0)
    >>> imgs = rotate_img_stack_cupy(imgs=imgs, rotation_degrees=50)
    """

    timer = SimbaTimer(start=True)
    check_valid_array(data=imgs, source=f'{rotate_img_stack_cupy.__name__} imgs', accepted_ndims=(3, 4))
    check_int(name=f'{rotate_img_stack_cupy.__name__} rotation', value=rotation_degrees, min_value=1, max_value=359)
    check_valid_boolean(value=verbose, source=f'{rotate_img_stack_cupy.__name__} verbose', raise_error=True)

    # Rotate a single probe frame first: with reshape=True the output resolution
    # can differ from the input, and we need it to pre-allocate the result stack.
    first_img = cp.array(imgs[0:1])
    rotated_first = rotate(input=first_img, angle=rotation_degrees, axes=(2, 1), reshape=True)
    output_shape = (imgs.shape[0],) + rotated_first.shape[1:]

    results = cp.zeros(output_shape, dtype=np.uint8)
    # Free the probe arrays before batching so they don't occupy GPU RAM for the
    # duration of the loop.
    del first_img, rotated_first

    for l in range(0, imgs.shape[0], batch_size):
        r = min(l + batch_size, imgs.shape[0])
        if verbose:
            print(f'Rotating image {l}-{r}...')
        batch_imgs = cp.array(imgs[l:r])
        rotated_batch = rotate(input=batch_imgs, angle=rotation_degrees, axes=(2, 1), reshape=True)
        results[l:r] = rotated_batch

    # Move results back to host memory; ``get`` is absent if a NumPy fallback
    # produced a host array instead of a CuPy one.
    if hasattr(results, 'get'):
        final_results = results.get()
    else:
        final_results = results

    timer.stop_timer()
    if verbose: print(f'[{get_current_time()}] Image rotation complete (elapsed time: {timer.elapsed_time_str}s)')
    return final_results
|
|
1149
|
+
|
|
1150
|
+
def rotate_video_cupy(video_path: Union[str, os.PathLike],
                      save_path: Optional[Union[str, os.PathLike]] = None,
                      rotation_degrees: Optional[float] = 180,
                      batch_size: Optional[int] = None,
                      verbose: Optional[bool] = True) -> None:
    """
    Rotates a video by a specified angle using GPU acceleration and CuPy for image processing.

    :param Union[str, os.PathLike] video_path: Path to the input video file.
    :param Optional[Union[str, os.PathLike]] save_path: Path to save the rotated video. If None, saves the video in the same directory as the input with '_rotated_<rotation_degrees>' appended to the filename.
    :param Optional[float] rotation_degrees: Degrees to rotate the video. Must be between 1 and 359 degrees. Default is 180.
    :param Optional[int] batch_size: The number of frames to process in each batch. Defaults to None meaning all images will be processed in a single batch.
    :param Optional[bool] verbose: If True, prints progress and a success message.
    :returns: None.

    :example:
    >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/F0_gq_Saline_0626_clipped.mp4"
    >>> rotate_video_cupy(video_path=video_path, rotation_degrees=45)
    """

    timer = SimbaTimer(start=True)
    check_int(name=f'{rotate_img_stack_cupy.__name__} rotation', value=rotation_degrees, min_value=1, max_value=359)
    check_valid_boolean(source=f'{rotate_img_stack_cupy.__name__} verbose', value=verbose)
    if save_path is None:
        video_dir, video_name, _ = get_fn_ext(filepath=video_path)
        save_path = os.path.join(video_dir, f'{video_name}_rotated_{rotation_degrees}.mp4')
    video_meta_data = get_video_meta_data(video_path=video_path)
    if batch_size is not None:
        check_int(name=f'{rotate_img_stack_cupy.__name__} batch_size', value=batch_size, min_value=1)
    else:
        # No batch size given: process the entire video in one batch.
        batch_size = video_meta_data['frame_count']
    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
    is_clr = ImageMixin.is_video_color(video=video_path)
    frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, max_que_size=3, verbose=False)
    frm_reader.start()
    # The writer is created lazily: the output resolution is only known after the
    # first batch has been rotated (reshape=True can change frame dimensions).
    writer = None
    for batch_cnt in range(frm_reader.batch_cnt):
        start_idx, end_idx, imgs = get_async_frame_batch(batch_reader=frm_reader, timeout=10)
        if verbose:
            print(f'Rotating frames {start_idx}-{end_idx}... (of {video_meta_data["frame_count"]}, video: {video_meta_data["video_name"]})')
        imgs = rotate_img_stack_cupy(imgs=imgs, rotation_degrees=rotation_degrees, batch_size=batch_size)
        if writer is None:
            writer = cv2.VideoWriter(save_path, fourcc, video_meta_data['fps'], (imgs.shape[2], imgs.shape[1]), isColor=is_clr)
        for img in imgs: writer.write(img)
    # Guard against a zero-batch reader leaving ``writer`` unassigned.
    if writer is not None:
        writer.release()
    timer.stop_timer()
    frm_reader.kill()
    if verbose:
        stdout_success(f'Rotated video saved at {save_path}', source=rotate_video_cupy.__name__)
|
|
1197
|
+
|
|
1198
|
+
|
|
1199
|
+
@cuda.jit()
def _bg_subtraction_cuda_kernel(imgs, avg_img, results, is_clr, fg_clr, threshold):
    """
    CUDA kernel backing :func:`bg_subtraction_cuda`.

    One thread per (x, y, frame) pixel. Pixels whose luminance difference from
    the average frame exceeds ``threshold[0]`` are treated as foreground;
    everything else keeps the value already present in ``results`` (the host
    pre-fills ``results`` with the background color for the color path).

    :param imgs: Device array of frames, shape (n, h, w, 3) or (n, h, w).
    :param avg_img: Device array average frame matching a single frame's shape.
    :param results: Output device array, pre-filled by the host.
    :param is_clr: 1-element array; 1 for color input, 0 for grayscale.
    :param fg_clr: Foreground color array; fg_clr[0] == -1 means "keep original pixel".
    :param threshold: 1-element int array; difference threshold in [0, 255].
    """
    x, y, n = cuda.grid(3)
    # Bounds guards: threads can be launched beyond the frame/row/column extents.
    if n < 0 or n > (imgs.shape[0] -1):
        return
    if y < 0 or y > (imgs.shape[1] -1):
        return
    if x < 0 or x > (imgs.shape[2] -1):
        return
    if is_clr[0] == 1:
        r1, g1, b1 = imgs[n][y][x][0],imgs[n][y][x][1], imgs[n][y][x][2]
        r2, g2, b2 = avg_img[y][x][0], avg_img[y][x][1], avg_img[y][x][2]
        r_diff, g_diff, b_diff = abs(r1-r2), abs(g1-g2), abs(b1-b2)
        # Collapse the per-channel differences to a single luminance difference.
        grey_diff = _cuda_luminance_pixel_to_grey(r_diff, g_diff, b_diff)
        if grey_diff > threshold[0]:
            # Foreground pixel: paint with fg_clr if provided, else keep original color.
            if fg_clr[0] != -1:
                r_out, g_out, b_out = fg_clr[0], fg_clr[1], fg_clr[2]
            else:
                r_out, g_out, b_out = r1, g1, b1
        else:
            # Background pixel: keep host-side pre-fill (the background color).
            r_out, g_out, b_out = results[n][y][x][0], results[n][y][x][1], results[n][y][x][2]
        results[n][y][x][0], results[n][y][x][1], results[n][y][x][2] = r_out, g_out, b_out

    else:
        val_1, val_2 = imgs[n][y][x][0], avg_img[y][x][0]
        grey_diff = abs(val_1-val_2)
        if grey_diff > threshold[0]:
            # NOTE(review): this looks inverted relative to the color branch —
            # here fg_clr provided keeps the ORIGINAL pixel and fg_clr absent
            # writes 255, while the color branch does the opposite. Confirm intent.
            if fg_clr[0] != -1:
                val_out = val_1
            else:
                val_out = 255
        else:
            val_out = 0
        results[n][y][x] = val_out
|
|
1233
|
+
|
|
1234
|
+
|
|
1235
|
+
def bg_subtraction_cuda(video_path: Union[str, os.PathLike],
                        avg_frm: np.ndarray,
                        save_path: Optional[Union[str, os.PathLike]] = None,
                        bg_clr: Optional[Tuple[int, int, int]] = (0, 0, 0),
                        fg_clr: Optional[Tuple[int, int, int]] = None,
                        batch_size: Optional[int] = 500,
                        threshold: Optional[int] = 50):
    """
    Remove background from videos using GPU acceleration.

    .. video:: _static/img/video_bg_subtraction.webm
       :width: 800
       :autoplay:
       :loop:

    .. note::
       To create an `avg_frm`, use :func:`simba.video_processors.video_processing.create_average_frm`, :func:`simba.data_processors.cuda.image.create_average_frm_cupy`, or :func:`~simba.data_processors.cuda.image.create_average_frm_cuda`

    .. seealso::
       For CPU-based alternative, see :func:`simba.video_processors.video_processing.video_bg_subtraction` or :func:`~simba.video_processors.video_processing.video_bg_subtraction_mp`
       For GPU-based alternative, see :func:`~simba.data_processors.cuda.image.bg_subtraction_cupy`. Needs work, CPU/multicore appears faster.

    .. seealso::
       To create average frame on the CPU, see :func:`simba.video_processors.video_processing.create_average_frm`. CPU/multicore appears faster.

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/bg_subtraction_cuda.csv
       :widths: 10, 45, 45
       :align: center
       :class: simba-table
       :header-rows: 1

    :param Union[str, os.PathLike] video_path: The path to the video to remove the background from.
    :param np.ndarray avg_frm: Average frame of the video. Can be created with e.g., :func:`simba.video_processors.video_processing.create_average_frm`.
    :param Optional[Union[str, os.PathLike]] save_path: Optional location to store the background removed video. If None, then saved in the same directory as the input video with the `_bg_removed` suffix.
    :param Optional[Tuple[int, int, int]] bg_clr: Tuple representing the background color of the video.
    :param Optional[Tuple[int, int, int]] fg_clr: Tuple representing the foreground color of the video (e.g., the animal). If None, then the original pixel colors will be used. Default: 50.
    :param Optional[int] batch_size: Number of frames to process concurrently. Use higher values of RAM memory allows. Default: 500.
    :param Optional[int] threshold: Value between 0-255 representing the difference threshold between the average frame subtracted from each frame. Higher values and more pixels will be considered background. Default: 50.

    :example:
    >>> video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/clipped/592_MA147_Gq_CNO_0515.mp4"
    >>> avg_frm = create_average_frm(video_path=video_path)
    >>> bg_subtraction_cuda(video_path=video_path, avg_frm=avg_frm, fg_clr=(255, 255, 255))
    """

    check_if_valid_img(data=avg_frm, source=f'{bg_subtraction_cuda}')
    check_if_valid_rgb_tuple(data=bg_clr)
    check_int(name=f'{bg_subtraction_cuda.__name__} batch_size', value=batch_size, min_value=1)
    check_int(name=f'{bg_subtraction_cuda.__name__} threshold', value=threshold, min_value=0, max_value=255)
    THREADS_PER_BLOCK = (32, 32, 1)
    timer = SimbaTimer(start=True)
    video_meta = get_video_meta_data(video_path=video_path)
    # Split the frame range into roughly batch_size-sized chunks.
    batch_cnt = int(max(1, np.ceil(video_meta['frame_count'] / batch_size)))
    frm_batches = np.array_split(np.arange(0, video_meta['frame_count']), batch_cnt)
    n, w, h = video_meta['frame_count'], video_meta['width'], video_meta['height']
    # Force the average frame to the video resolution so per-pixel diffs line up.
    avg_frm = cv2.resize(avg_frm, (w, h))
    if is_video_color(video_path): is_color = np.array([1])
    else: is_color = np.array([0])
    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
    if save_path is None:
        in_dir, video_name, _ = get_fn_ext(filepath=video_path)
        save_path = os.path.join(in_dir, f'{video_name}_bg_removed.mp4')
    # fg_clr[0] == -1 signals "keep original pixel colors" to the kernel.
    if fg_clr is not None:
        check_if_valid_rgb_tuple(data=fg_clr)
        fg_clr = np.array(fg_clr)
    else:
        fg_clr = np.array([-1])
    threshold = np.array([threshold]).astype(np.int32)
    # NOTE(review): writer is created without isColor although grayscale input is
    # detected above; the grayscale path also assigns the 3-tuple bg_clr into
    # results below, which would not broadcast for a (n, h, w) stack — confirm
    # the grayscale path is exercised/intended.
    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (w, h))
    y_dev = cuda.to_device(avg_frm.astype(np.float32))
    fg_clr_dev = cuda.to_device(fg_clr)
    is_color_dev = cuda.to_device(is_color)
    for frm_batch_cnt, frm_batch in enumerate(frm_batches):
        print(f'Processing frame batch {frm_batch_cnt+1} / {len(frm_batches)} (complete: {round((frm_batch_cnt / len(frm_batches)) * 100, 2)}%)')
        batch_imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_batch[0], end_frm=frm_batch[-1])
        batch_imgs = np.stack(list(batch_imgs.values()), axis=0).astype(np.float32)
        batch_n = batch_imgs.shape[0]
        # Pre-fill the output with the background color: the kernel keeps this
        # value for pixels it classifies as background.
        results = np.zeros_like(batch_imgs).astype(np.uint8)
        results[:] = bg_clr
        results = cuda.to_device(results)
        grid_x = math.ceil(w / THREADS_PER_BLOCK[0])
        grid_y = math.ceil(h / THREADS_PER_BLOCK[1])
        grid_z = math.ceil(batch_n / THREADS_PER_BLOCK[2])
        bpg = (grid_x, grid_y, grid_z)
        x_dev = cuda.to_device(batch_imgs)
        _bg_subtraction_cuda_kernel[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results, is_color_dev, fg_clr_dev, threshold)
        results = results.copy_to_host()
        for img_cnt, img in enumerate(results):
            writer.write(img)
    writer.release()
    timer.stop_timer()
    stdout_success(msg=f'Video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
|
|
1329
|
+
|
|
1330
|
+
|
|
1331
|
+
def bg_subtraction_cupy(video_path: Union[str, os.PathLike],
                        avg_frm: Union[np.ndarray, str, os.PathLike],
                        save_path: Optional[Union[str, os.PathLike]] = None,
                        bg_clr: Optional[Tuple[int, int, int]] = (0, 0, 0),
                        fg_clr: Optional[Tuple[int, int, int]] = None,
                        batch_size: Optional[int] = 500,
                        threshold: Optional[int] = 50,
                        verbose: bool = True,
                        async_frame_read: bool = True):
    """
    Remove background from videos using GPU acceleration through CuPY.

    .. video:: _static/img/bg_remover_example_1.webm
       :width: 800
       :autoplay:
       :loop:

    .. seealso::
       For CPU-based alternative, see :func:`simba.video_processors.video_processing.video_bg_subtraction` or :func:`~simba.video_processors.video_processing.video_bg_subtraction_mp`
       For GPU-based alternative, see :func:`~simba.data_processors.cuda.image.bg_subtraction_cuda`.
       Needs work, CPU/multicore appears faster.

    :param Union[str, os.PathLike] video_path: The path to the video to remove the background from.
    :param np.ndarray avg_frm: Average frame of the video (array, or path to an image file). Can be created with e.g., :func:`simba.video_processors.video_processing.create_average_frm`.
    :param Optional[Union[str, os.PathLike]] save_path: Optional location to store the background removed video. If None, then saved in the same directory as the input video with the `_bg_removed` suffix.
    :param Optional[Tuple[int, int, int]] bg_clr: Tuple representing the background color of the video.
    :param Optional[Tuple[int, int, int]] fg_clr: Tuple representing the foreground color of the video (e.g., the animal). If None, then the original pixel colors will be used. Default: 50.
    :param Optional[int] batch_size: Number of frames to process concurrently. Use higher values of RAM memory allows. Default: 500.
    :param Optional[int] threshold: Value between 0-255 representing the difference threshold between the average frame subtracted from each frame. Higher values and more pixels will be considered background. Default: 50.
    :param bool verbose: If True, prints per-batch progress.
    :param bool async_frame_read: If True, reads frame batches on a background thread via AsyncVideoFrameReader.

    :example:
    >>> avg_frm = create_average_frm(video_path="/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/original/844_MA131_gq_CNO_0624.mp4")
    >>> video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/844_MA131_gq_CNO_0624_7.mp4"
    >>> bg_subtraction_cupy(video_path=video_path, avg_frm=avg_frm, batch_size=500)
    """

    if not _is_cuda_available()[0]:
        raise SimBAGPUError('No GPU detected using numba.cuda', source=bg_subtraction_cupy.__name__)
    if isinstance(avg_frm, (str, os.PathLike)):
        check_file_exist_and_readable(file_path=avg_frm, raise_error=True)
        avg_frm = read_img(img_path=avg_frm, greyscale=False, clahe=False)
    check_if_valid_img(data=avg_frm, source=f'{bg_subtraction_cupy}')
    check_if_valid_rgb_tuple(data=bg_clr)
    check_int(name=f'{bg_subtraction_cupy.__name__} batch_size', value=batch_size, min_value=1)
    check_int(name=f'{bg_subtraction_cupy.__name__} threshold', value=threshold, min_value=0, max_value=255)
    timer = SimbaTimer(start=True)
    video_meta = get_video_meta_data(video_path=video_path)
    n, w, h = video_meta['frame_count'], video_meta['width'], video_meta['height']
    is_video_color_bool = is_video_color(video_path)
    is_avg_frm_color = avg_frm.ndim == 3 and avg_frm.shape[2] == 3
    if avg_frm.shape[0] != h or avg_frm.shape[1] != w:
        raise InvalidInputError(msg=f'The avg_frm and video must have the same resolution: avg_frm is {avg_frm.shape[1]}x{avg_frm.shape[0]}, video is {w}x{h}', source=bg_subtraction_cupy.__name__)
    if is_video_color_bool != is_avg_frm_color:
        video_type = 'color' if is_video_color_bool else 'grayscale'
        avg_frm_type = 'color' if is_avg_frm_color else 'grayscale'
        raise InvalidInputError(msg=f'Color/grayscale mismatch: video is {video_type} but avg_frm is {avg_frm_type}', source=bg_subtraction_cupy.__name__)

    avg_frm = cp.array(avg_frm)
    is_color = is_video_color_bool
    batch_cnt = int(max(1, np.ceil(video_meta['frame_count'] / batch_size)))
    frm_batches = np.array_split(np.arange(0, video_meta['frame_count']), batch_cnt)
    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
    if save_path is None:
        in_dir, video_name, _ = get_fn_ext(filepath=video_path)
        # Suffix matches the documented default (`_bg_removed`).
        save_path = os.path.join(in_dir, f'{video_name}_bg_removed.mp4')
    # fg_clr[0] == -1 signals "keep original pixel colors".
    if fg_clr is not None:
        check_if_valid_rgb_tuple(data=fg_clr)
        fg_clr = np.array(fg_clr)
    else:
        fg_clr = np.array([-1])
    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (w, h), isColor=is_color)
    if async_frame_read:
        async_frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, max_que_size=3, verbose=True, gpu=True)
        async_frm_reader.start()
    else:
        async_frm_reader = None
    # Hoisted out of the loop: the threshold never changes between batches.
    threshold_cp = cp.array([threshold], dtype=cp.float32)
    for frm_batch_cnt, frm_batch in enumerate(frm_batches):
        if verbose: print(f'Processing frame batch {frm_batch_cnt + 1} / {len(frm_batches)} (complete: {round((frm_batch_cnt / len(frm_batches)) * 100, 2)}%, {get_current_time()})')
        if not async_frame_read:
            batch_imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_batch[0], end_frm=frm_batch[-1], verbose=verbose)
            batch_imgs = cp.array(np.stack(list(batch_imgs.values()), axis=0).astype(np.float32))
        else:
            # Cast to float32 to mirror the synchronous path: uint8 arithmetic in
            # ``batch_imgs - avg_frm`` would wrap around and corrupt the diff mask.
            batch_imgs = cp.array(get_async_frame_batch(batch_reader=async_frm_reader, timeout=15)[2]).astype(cp.float32)
        img_diff = cp.abs(batch_imgs - avg_frm)
        if is_color:
            # Collapse the per-channel differences to one luminance difference per pixel.
            img_diff = img_stack_to_grayscale_cupy(imgs=img_diff, batch_size=img_diff.shape[0])
        mask = cp.where(img_diff > threshold_cp, 1, 0).astype(cp.uint8)
        if is_color:
            batch_imgs[mask == 0] = bg_clr
            if fg_clr[0] != -1:
                batch_imgs[mask == 1] = fg_clr
        else:
            # Convert the RGB tuples to single luminance values for grayscale output.
            bg_clr_gray = int(0.07 * bg_clr[2] + 0.72 * bg_clr[1] + 0.21 * bg_clr[0])
            batch_imgs[mask == 0] = bg_clr_gray
            if fg_clr[0] != -1:
                fg_clr_gray = int(0.07 * fg_clr[2] + 0.72 * fg_clr[1] + 0.21 * fg_clr[0])
                batch_imgs[mask == 1] = fg_clr_gray
        batch_imgs = batch_imgs.astype(cp.uint8).get()
        for img_cnt, img in enumerate(batch_imgs):
            writer.write(img)
    if async_frm_reader is not None:
        async_frm_reader.kill()

    writer.release()
    timer.stop_timer()
    stdout_success(msg=f'Video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
|
|
1439
|
+
|
|
1440
|
+
|
|
1441
|
+
@cuda.jit(max_registers=None)
def _pose_plot_kernel(imgs, data, circle_size, resolution, colors):
    """
    CUDA kernel backing :func:`pose_plotter`.

    One thread per (body-part, frame) pair: paints a filled circle of the
    body part's color at its pose-estimated location, in place, into ``imgs``.

    :param imgs: Device array of frames (modified in place).
    :param data: Device array of body-part locations, shape (n_frames, n_bodyparts, 2) as (x, y).
    :param circle_size: 1-element int array with the circle radius in pixels.
    :param resolution: 2-element array (width, height) used for bounds clipping.
    :param colors: Per-body-part RGB color array.
    """
    bp_n, img_n = cuda.grid(2)
    # Bounds guards for threads launched beyond frame/body-part counts.
    if img_n < 0 or img_n > (imgs.shape[0] -1):
        return
    if bp_n < 0 or bp_n > (data[0].shape[0] -1):
        return

    # NOTE: ``img`` is bound but unused; pixels are written via imgs[img_n] below.
    img, bp_loc, color = imgs[img_n], data[img_n][bp_n], colors[bp_n]
    # Scan the bounding square around the body-part location ...
    for x1 in range(bp_loc[0]-circle_size[0], bp_loc[0]+circle_size[0]):
        for y1 in range(bp_loc[1]-circle_size[0], bp_loc[1]+circle_size[0]):
            if (x1 > 0) and (x1 < resolution[0]):
                if (y1 > 0) and (y1 < resolution[1]):
                    b = (x1 - bp_loc[0]) ** 2
                    c = (y1 - bp_loc[1]) ** 2
                    # ... and color only pixels inside the circle (squared-distance test).
                    if (b + c) < (circle_size[0] ** 2):
                        imgs[img_n][y1][x1][0] = int(color[0])
                        imgs[img_n][y1][x1][1] = int(color[1])
                        imgs[img_n][y1][x1][2] = int(color[2])
|
|
1460
|
+
|
|
1461
|
+
|
|
1462
|
+
def pose_plotter(data: Union[str, os.PathLike, np.ndarray],
                 video_path: Union[str, os.PathLike],
                 save_path: Union[str, os.PathLike],
                 circle_size: Optional[int] = None,
                 colors: Optional[str] = 'Set1',
                 batch_size: int = 750,
                 verbose: bool = True) -> None:

    """
    Creates a video overlaying pose-estimation data on frames from a given video using GPU acceleration.

    .. video:: _static/img/pose_plotter_cuda.mp4
       :width: 800
       :autoplay:
       :loop:

    .. seealso::
       For CPU based methods, see :func:`~simba.plotting.path_plotter.PathPlotterSingleCore` and :func:`~simba.plotting.path_plotter_mp.PathPlotterMulticore`.

    .. csv-table::
       :header: EXPECTED RUNTIMES
       :file: ../../../docs/tables/pose_plotter.csv
       :widths: 10, 90
       :align: center
       :class: simba-table
       :header-rows: 1

    .. note::
       NOTE(review): the frame-rendering loop of this function is currently
       commented out (see below). As written, the function validates inputs,
       prepares the data and a VideoWriter, then returns without writing any
       frames or releasing the writer — confirm whether this is intentional WIP.

    :param Union[str, os.PathLike, np.ndarray] data: Path to a CSV file with pose-estimation data or a 3d numpy array (n_images, n_bodyparts, 2) with pose-estimated locations.
    :param Union[str, os.PathLike] video_path: Path to a video file where the ``data`` has been pose-estimated.
    :param Union[str, os.PathLike] save_path: Location where to store the output visualization.
    :param Optional[int] circle_size: The size of the circles representing the location of the pose-estimated locations. If None, the optimal size will be inferred as a 100th of the max(resultion_w, h).
    :param int batch_size: The number of frames to process concurrently on the GPU. Default: 750. Increase of host and device RAM allows it to improve runtime. Decrease if you hit memory errors.

    :example:
    >>> DATA_PATH = "/mnt/c/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/501_MA142_Gi_CNO_0521.csv"
    >>> VIDEO_PATH = "/mnt/c/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_CNO_0521.mp4"
    >>> SAVE_PATH = "/mnt/c/troubleshooting/mitra/project_folder/frames/output/pose_ex/test.mp4"
    >>> pose_plotter(data=DATA_PATH, video_path=VIDEO_PATH, save_path=SAVE_PATH, circle_size=10, batch_size=1000)
    """

    THREADS_PER_BLOCK = (32, 32, 1)
    if isinstance(data, str):
        # CSV input: drop probability columns ('*_p') and reshape the remaining
        # x/y column pairs to (n_frames, n_bodyparts, 2).
        check_file_exist_and_readable(file_path=data)
        df = read_df(file_path=data, file_type='csv')
        cols = [x for x in df.columns if not x.lower().endswith('_p')]
        data = df[cols].values
        data = np.ascontiguousarray(data.reshape(data.shape[0], int(data.shape[1] / 2), 2).astype(np.int32))
    elif isinstance(data, np.ndarray):
        check_valid_array(data=data, source=pose_plotter.__name__, accepted_ndims=(3,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)

    check_int(name=f'{pose_plotter.__name__} batch_size', value=batch_size, min_value=1)
    check_valid_boolean(value=[verbose], source=f'{pose_plotter.__name__} verbose')
    video_meta_data = get_video_meta_data(video_path=video_path)
    n, w, h = video_meta_data['frame_count'], video_meta_data['width'], video_meta_data['height']
    check_if_dir_exists(in_dir=os.path.dirname(save_path))
    if data.shape[0] != video_meta_data['frame_count']:
        raise FrameRangeError(msg=f'The data contains {data.shape[0]} frames while the video contains {video_meta_data["frame_count"]} frames')
    if circle_size is None:
        circle_size = np.array([PlottingMixin().get_optimal_circle_size(frame_size=(w, h))]).astype(np.int32)
    else:
        check_int(name=f'{pose_plotter.__name__} circle_size', value=circle_size, min_value=1)
        circle_size = np.array([circle_size]).astype(np.int32)
    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
    video_writer = cv2.VideoWriter(save_path, fourcc, video_meta_data['fps'], (w, h))
    colors = np.array(create_color_palette(pallete_name=colors, increments=data[0].shape[0])).astype(np.int32)
    circle_size_dev = cuda.to_device(circle_size)
|
|
1528
|
+
# colors_dev = cuda.to_device(colors)
|
|
1529
|
+
# resolution_dev = cuda.to_device(np.array([video_meta_data['width'], video_meta_data['height']]))
|
|
1530
|
+
# data = np.ascontiguousarray(data, dtype=np.int32)
|
|
1531
|
+
# img_dev = cuda.device_array((batch_size, h, w, 3), dtype=np.int32)
|
|
1532
|
+
# data_dev = cuda.device_array((batch_size, data.shape[1], 2), dtype=np.int32)
|
|
1533
|
+
# total_timer, video_start_time = SimbaTimer(start=True), time.time()
|
|
1534
|
+
# frm_reader = AsyncVideoFrameReader(video_path=video_path, batch_size=batch_size, max_que_size=3, verbose=False)
|
|
1535
|
+
# frm_reader.start()
|
|
1536
|
+
# for batch_cnt in range(frm_reader.batch_cnt):
|
|
1537
|
+
# start_img_idx, end_img_idx, batch_frms = get_async_frame_batch(batch_reader=frm_reader, timeout=10)
|
|
1538
|
+
# video_elapsed_time = str(round(time.time() - video_start_time, 4)) + 's'
|
|
1539
|
+
# if verbose: print(f'Processing images {start_img_idx} - {end_img_idx} (of {n}; batch count: {batch_cnt+1}/{frm_reader.batch_cnt}, video: {video_meta_data["video_name"]}, elapsed video processing time: {video_elapsed_time})...')
|
|
1540
|
+
# batch_data = data[start_img_idx:end_img_idx + 1]
|
|
1541
|
+
# batch_n = batch_frms.shape[0]
|
|
1542
|
+
# if verbose: print(f'Moving frames {start_img_idx}-{end_img_idx} to device...')
|
|
1543
|
+
# img_dev[:batch_n].copy_to_device(batch_frms[:batch_n].astype(np.int32))
|
|
1544
|
+
# data_dev[:batch_n] = cuda.to_device(batch_data[:batch_n])
|
|
1545
|
+
# del batch_frms; del batch_data
|
|
1546
|
+
# bpg = (math.ceil(batch_n / THREADS_PER_BLOCK[0]), math.ceil(batch_n / THREADS_PER_BLOCK[2]))
|
|
1547
|
+
# if verbose: print(f'Creating frames {start_img_idx}-{end_img_idx} ...')
|
|
1548
|
+
# _pose_plot_kernel[bpg, THREADS_PER_BLOCK](img_dev, data_dev, circle_size_dev, resolution_dev, colors_dev)
|
|
1549
|
+
# if verbose: print(f'Moving frames to host {start_img_idx}-{end_img_idx} ...')
|
|
1550
|
+
# batch_frms = img_dev.copy_to_host()
|
|
1551
|
+
# if verbose: print(f'Writing frames to host {start_img_idx}-{end_img_idx} ...')
|
|
1552
|
+
# for img_idx in range(0, batch_n):
|
|
1553
|
+
# video_writer.write(batch_frms[img_idx].astype(np.uint8))
|
|
1554
|
+
# video_writer.release()
|
|
1555
|
+
# total_timer.stop_timer()
|
|
1556
|
+
# frm_reader.kill()
|
|
1557
|
+
# if verbose:
|
|
1558
|
+
# stdout_success(msg=f'Pose-estimation video saved at {save_path}.', elapsed_time=total_timer.elapsed_time_str)
|
|
1559
|
+
|
|
1560
|
+
|
|
1561
|
+
|
|
1562
|
+
#x = create_average_frm_cuda(video_path=r"D:\troubleshooting\mitra\project_folder\videos\average_cpu_test\20min.mp4", verbose=True, batch_size=500, async_frame_read=False)
|
|
1563
|
+
|
|
1564
|
+
# VIDEO_PATH = "/mnt/d/troubleshooting/maplight_ri/project_folder/blob/videos/Trial_1_C24_D1_1.mp4"
|
|
1565
|
+
# #
|
|
1566
|
+
#
|
|
1567
|
+
#
|
|
1568
|
+
#
|
|
1569
|
+
# avg_frm = create_average_frm_cuda(video_path=VIDEO_PATH, verbose=True, batch_size=100, start_frm=0, end_frm=100, async_frame_read=True, save_path=SAVE_PATH)
|
|
1570
|
+
# if _
|
|
1571
|
+
# VIDEO_PATH = r"D:\troubleshooting\maplight_ri\project_folder\blob\videos\111.mp4"
|
|
1572
|
+
# AVG_FRM = r"D:\troubleshooting\maplight_ri\project_folder\blob\Trial_1_C24_D1_1_bg_removed.png"
|
|
1573
|
+
# SAVE_PATH = r"D:\troubleshooting\maplight_ri\project_folder\blob\Trial_1_C24_D1_1_bg_removed.mp4"
|
|
1574
|
+
#
|
|
1575
|
+
|
|
1576
|
+
|
|
1577
|
+
# VIDEO_PATH = "/mnt/d/troubleshooting/maplight_ri/project_folder/blob/videos/111.mp4"
|
|
1578
|
+
# AVG_FRM = "/mnt/d/troubleshooting/maplight_ri/project_folder/blob/Trial_1_C24_D1_1_bg_removed.png"
|
|
1579
|
+
# SAVE_PATH = "/mnt/d/troubleshooting/maplight_ri/project_folder/blob/Trial_1_C24_D1_1_bg_removed.mp4"
|
|
1580
|
+
# bg_subtraction_cupy(video_path=VIDEO_PATH, avg_frm=AVG_FRM, save_path=SAVE_PATH, batch_size=100, verbose=True, async_frame_read=True, threshold=240, fg_clr=(255, 0,0), bg_clr=(0, 0, 255))
|
|
1581
|
+
|
|
1582
|
+
|
|
1583
|
+
|
|
1584
|
+
# DATA_PATH = "/mnt/c/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/501_MA142_Gi_CNO_0521.csv"
|
|
1585
|
+
# VIDEO_PATH = "/mnt/c/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_CNO_0521.mp4"
|
|
1586
|
+
# SAVE_PATH = "/mnt/c/troubleshooting/mitra/project_folder/frames/output/pose_ex/test.mp4"
|
|
1587
|
+
# pose_plotter(data=DATA_PATH, video_path=VIDEO_PATH, save_path=SAVE_PATH, circle_size=10, batch_size=1000)
|
|
1588
|
+
# # VIDEO_PATH = "/mnt/c/troubleshooting/mitra/project_folder/frames/output/pose_ex/test.mp4"
|
|
1589
|
+
# # SAVE_PATH = "/mnt/c/troubleshooting/mitra/project_folder/frames/output/pose_ex/test_ROTATED.mp4"
|
|
1590
|
+
# #
|
|
1591
|
+
# # rotate_video_cupy(video_path=VIDEO_PATH, save_path=SAVE_PATH, batch_size=1000)
|
|
1592
|
+
#
|
|
1593
|
+
# #"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\501_MA142_Gi_CNO_0521.csv"
|
|
1594
|
+
# pose_plotter(data=DATA_PATH, video_path=VIDEO_PATH, save_path=SAVE_PATH, circle_size=10, batch_size=1000)
|
|
1595
|
+
|
|
1596
|
+
|
|
1597
|
+
|
|
1598
|
+
|
|
1599
|
+
|
|
1600
|
+
|
|
1601
|
+
|
|
1602
|
+
# from simba.mixins.geometry_mixin import GeometryMixin
|
|
1603
|
+
#
|
|
1604
|
+
# video_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/03152021_NOB_IOT_8.mp4"
|
|
1605
|
+
# data_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/csv/outlier_corrected_movement_location/03152021_NOB_IOT_8.csv"
|
|
1606
|
+
# save_dir = '/mnt/d/netholabs/yolo_videos/input/mp4_20250606083508'
|
|
1607
|
+
#
|
|
1608
|
+
# get_video_meta_data(video_path)
|
|
1609
|
+
#
|
|
1610
|
+
# nose_arr = read_df(file_path=data_path, file_type='csv', usecols=['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y', 'Lat_left_x', 'Lat_left_y', 'Lat_right_x', 'Lat_right_y']).values.reshape(-1, 4, 2).astype(np.int32) ## READ THE BODY-PART THAT DEFINES THE HULL AND CONVERT TO ARRAY
|
|
1611
|
+
#
|
|
1612
|
+
# polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=nose_arr, parallel_offset=60) ## CONVERT THE BODY-PART TO POLYGONS WITH A LITTLE BUFFER
|
|
1613
|
+
# polygons = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=polygons) # CONVERT THE POLYGONS TO RECTANGLES (I.E., WITH 4 UNIQUE POINTS).
|
|
1614
|
+
# polygon_lst = [] # GET THE POINTS OF THE RECTANGLES
|
|
1615
|
+
# for i in polygons: polygon_lst.append(np.array(i.exterior.coords))
|
|
1616
|
+
# polygons = np.stack(polygon_lst, axis=0)
|
|
1617
|
+
# sliced_imgs = slice_imgs(video_path=video_path, shapes=polygons, batch_size=500, save_dir=save_dir) #SLICE THE RECTANGLES IN THE VIDEO.
|
|
1618
|
+
|
|
1619
|
+
#sliced_imgs = {k: v for k, v in enumerate(sliced_imgs)}
|
|
1620
|
+
|
|
1621
|
+
#ImageMixin().img_stack_to_video(imgs=sliced_imgs, fps=29.97, save_path=r'/mnt/d/netholabs/yolo_videos/input/mp4_20250606083508/stacked.mp4')
|
|
1622
|
+
|
|
1623
|
+
#get_video_meta_data("/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/03152021_NOB_IOT_8.mp4")
|
|
1624
|
+
# cv2.imshow('asdasdas', sliced_imgs[500])
|
|
1625
|
+
# cv2.waitKey(0)
|
|
1626
|
+
|
|
1627
|
+
# DATA_PATH = "/mnt/c/troubleshooting/RAT_NOR/project_folder/csv/outlier_corrected_movement_location/03152021_NOB_IOT_8.csv"
|
|
1628
|
+
# VIDEO_PATH = "/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/03152021_NOB_IOT_8.mp4"
|
|
1629
|
+
# SAVE_PATH = "/mnt/c/troubleshooting/mitra/project_folder/frames/output/pose_ex/test.mp4"
|
|
1630
|
+
#
|
|
1631
|
+
#
|
|
1632
|
+
if __name__ == "__main__":
    # Ad-hoc smoke test: render a pose-estimation overlay video on the GPU
    # for a single example recording (developer-local paths).
    DATA_PATH = "/mnt/d/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/592_MA147_CNO1_0515.csv"
    VIDEO_PATH = "/mnt/d/troubleshooting/mitra/project_folder/videos/592_MA147_CNO1_0515.mp4"
    SAVE_PATH = "/mnt/d/troubleshooting/mitra/project_folder/videos/test_cuda.mp4"
    pose_plotter(data=DATA_PATH,
                 video_path=VIDEO_PATH,
                 save_path=SAVE_PATH,
                 circle_size=10,
                 batch_size=100)
|
|
1637
|
+
|
|
1638
|
+
|
|
1639
|
+
|
|
1640
|
+
#
|
|
1641
|
+
# #from simba.data_processors.cuda.image import create_average_frm_cupy
|
|
1642
|
+
# SAVE_PATH = "/mnt/c/Users/sroni/Downloads/bg_remove_nb/bg_removed_ex_7.mp4"
|
|
1643
|
+
# VIDEO_PATH = "/mnt/c/Users/sroni/Downloads/bg_remove_nb/open_field.mp4"
|
|
1644
|
+
# avg_frm = create_average_frm_cuda(video_path=VIDEO_PATH)
|
|
1645
|
+
# #
|
|
1646
|
+
# get_video_meta_data(VIDEO_PATH)
|
|
1647
|
+
# #
|
|
1648
|
+
# bg_subtraction_cuda(video_path=VIDEO_PATH, avg_frm=avg_frm, save_path=SAVE_PATH, threshold=70)
|