simba-uw-tf-dev 4.5.8-py3-none-any.whl → 4.6.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- simba/SimBA.py +2 -2
- simba/assets/icons/frames_2.png +0 -0
- simba/assets/lookups/tooptips.json +10 -1
- simba/plotting/frame_mergerer_ffmpeg.py +137 -196
- simba/sandbox/cuda/egocentric_rotator.py +374 -0
- simba/ui/pop_ups/clf_add_remove_print_pop_up.py +37 -30
- simba/ui/pop_ups/egocentric_alignment_pop_up.py +20 -21
- simba/ui/pop_ups/interpolate_pop_up.py +2 -4
- simba/ui/pop_ups/multiple_videos_to_frames_popup.py +10 -11
- simba/ui/pop_ups/single_video_to_frames_popup.py +10 -10
- simba/ui/pop_ups/video_processing_pop_up.py +149 -145
- simba/ui/tkinter_functions.py +7 -1
- simba/video_processors/clahe_ui.py +65 -22
- simba/video_processors/egocentric_video_rotator.py +6 -7
- simba/video_processors/video_processing.py +73 -151
- simba/video_processors/videos_to_frames.py +3 -2
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/METADATA +1 -1
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/RECORD +22 -20
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/top_level.txt +0 -0
simba/ui/tkinter_functions.py
CHANGED

@@ -156,6 +156,7 @@ class SimBAScaleBar(Frame):
     def __init__(self,
                  parent: Union[Frame, Canvas, LabelFrame, Toplevel, Tk],
                  label: Optional[str] = None,
+                 label_width: Optional[int] = None,
                  orient: Literal['horizontal', 'vertical'] = HORIZONTAL,
                  length: int = 200,
                  value: Optional[int] = 95,
@@ -194,7 +195,7 @@ class SimBAScaleBar(Frame):
                                 showvalue=showvalue)

         if label is not None:
-            self.lbl = SimBALabel(parent=self, txt=label, font=lbl_font, txt_clr=label_clr)
+            self.lbl = SimBALabel(parent=self, txt=label, font=lbl_font, txt_clr=label_clr, width=label_width)
             self.lbl.grid(row=0, column=1, sticky=SW)

         self.scale.grid(row=0, column=2, sticky=NW)
@@ -207,6 +208,11 @@ class SimBAScaleBar(Frame):
     def get_value(self) -> Union[int, float]:
         return self.scale.get()

+    def get(self) -> Union[int, float]:
+        ## Alternative for ``get_value`` for legacy reasons.
+        return self.scale.get()
+
+


 class Entry_Box(Frame):
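
A hypothetical usage sketch of the widened widget. The import path and the ``label_width``/``get()`` additions come from the hunks above; the surrounding Tk setup and the remaining constructor defaults are assumptions.

from tkinter import Tk

from simba.ui.tkinter_functions import SimBAScaleBar

root = Tk()
# label_width pads the label column so stacked scale bars line up.
scale_bar = SimBAScaleBar(parent=root, label='CLAHE clip limit', label_width=25, length=200, value=95)
scale_bar.grid(row=0, column=0)
print(scale_bar.get())        # new alias added in 4.6.x
print(scale_bar.get_value())  # original accessor, same value
root.mainloop()
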

simba/video_processors/clahe_ui.py
CHANGED

@@ -1,4 +1,6 @@
 import os
+import threading
+import time
 from typing import Tuple, Union

 import cv2
@@ -32,29 +34,66 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
     """
     global original_img, font_size, x_spacer, y_spacer, txt

+    callback_lock = threading.Lock()
+    last_update_time = [0]
+    update_delay = 0.05
+
     def _get_trackbar_values(v):
         global original_img, font_size, x_spacer, y_spacer, txt
-        … (removed lines not captured in this extract)
+        nonlocal callback_lock, last_update_time, update_delay
+        current_time = time.time()
+        if current_time - last_update_time[0] < update_delay:
+            return
+
+        if not callback_lock.acquire(blocking=False):
+            return
+
+        try:
+            if cv2.getWindowProperty(WIN_NAME, cv2.WND_PROP_VISIBLE) < 1:
+                return
+            try:
+                clip_limit = cv2.getTrackbarPos(CLIP_LIMIT, WIN_NAME) / 10.0
+                tile_size = cv2.getTrackbarPos(TILE_SIZE, WIN_NAME)
+                if tile_size % 2 == 0: tile_size += 1
+                clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
+                img_clahe = clahe.apply(original_img)
+                cv2.putText(img_clahe, txt,(TextOptions.BORDER_BUFFER_X.value, TextOptions.BORDER_BUFFER_Y.value + y_spacer), TextOptions.FONT.value, font_size, (255, 255, 255), 3)
+                cv2.imshow(WIN_NAME, img_clahe)
+                cv2.waitKey(1)
+                last_update_time[0] = current_time
+            except cv2.error:
+                pass
+        finally:
+            callback_lock.release()

     def _change_img(v):
         global original_img, font_size, x_spacer, y_spacer, txt
-        … (removed lines not captured in this extract)
+        current_time = time.time()
+        if current_time - last_update_time[0] < update_delay:
+            return
+
+        if not callback_lock.acquire(blocking=False):
+            return
+
+        try:
+            if cv2.getWindowProperty(WIN_NAME, cv2.WND_PROP_VISIBLE) < 1:
+                return
+            try:
+                new_frm_id = cv2.getTrackbarPos(SELECT_VIDEO_FRAME, WIN_NAME)
+                original_img = read_frm_of_video(video_path=data, frame_index=new_frm_id, greyscale=True)
+                clip_limit = cv2.getTrackbarPos(CLIP_LIMIT, WIN_NAME) / 10.0
+                tile_size = cv2.getTrackbarPos(TILE_SIZE, WIN_NAME)
+                if tile_size % 2 == 0: tile_size += 1
+                clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
+                img_clahe = clahe.apply(original_img)
+                cv2.putText(img_clahe, txt,(TextOptions.BORDER_BUFFER_X.value, TextOptions.BORDER_BUFFER_Y.value + y_spacer), TextOptions.FONT.value, font_size, (255, 255, 255), 3)
+                cv2.imshow(WIN_NAME, img_clahe)
+                cv2.waitKey(1)
+                last_update_time[0] = current_time
+            except cv2.error:
+                pass
+        finally:
+            callback_lock.release()

     check_instance(source=interactive_clahe_ui.__name__, instance=data, accepted_types=(np.ndarray, str))
     if isinstance(data, str):
@@ -80,14 +119,18 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
             break
         k = cv2.waitKey(1) & 0xFF
         if k == 27:
-            … (removed lines not captured in this extract)
+            try:
+                clip_limit = cv2.getTrackbarPos(CLIP_LIMIT, WIN_NAME) / 10.0
+                tile_size = cv2.getTrackbarPos(TILE_SIZE, WIN_NAME)
+                if tile_size % 2 == 0: tile_size += 1
+            except cv2.error:
+                clip_limit = 1.0
+                tile_size = 8
             cv2.destroyAllWindows()
             return clip_limit, tile_size


-#interactive_clahe_ui(data=r"
+#interactive_clahe_ui(data=r"C:\troubleshooting\cue_light\t1\project_folder\videos\2025-05-21 16-10-06_cropped.mp4")

 # # Function to update CLAHE
 # def update_clahe(x):
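
The rewritten callbacks throttle redraws to one every 50 ms and skip events that arrive while a redraw is still in flight. A standalone sketch of that debounce-plus-non-blocking-lock pattern, reduced to a generic OpenCV trackbar demo (window name, trackbar name, and the synthetic test image are illustrative, not from the package):

import threading
import time

import cv2
import numpy as np

WIN_NAME = 'clahe demo'
DELAY_S = 0.05                     # mirrors the 50 ms update_delay in the diff
lock = threading.Lock()
last_update = [0.0]
img = np.tile(np.arange(256, dtype=np.uint8), (256, 1))   # synthetic greyscale gradient

def on_clip_limit(value):
    now = time.time()
    if now - last_update[0] < DELAY_S:      # debounce rapid trackbar events
        return
    if not lock.acquire(blocking=False):    # drop the event if a redraw is in flight
        return
    try:
        clahe = cv2.createCLAHE(clipLimit=max(value, 1) / 10.0, tileGridSize=(8, 8))
        cv2.imshow(WIN_NAME, clahe.apply(img))
        cv2.waitKey(1)
        last_update[0] = now
    finally:
        lock.release()

cv2.namedWindow(WIN_NAME)
cv2.createTrackbar('clip limit x10', WIN_NAME, 10, 100, on_clip_limit)
cv2.imshow(WIN_NAME, img)
while cv2.waitKey(30) != 27:                # Esc to exit
    if cv2.getWindowProperty(WIN_NAME, cv2.WND_PROP_VISIBLE) < 1:
        break
cv2.destroyAllWindows()
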

simba/video_processors/egocentric_video_rotator.py
CHANGED

@@ -35,7 +35,6 @@ def egocentric_video_aligner(frm_range: np.ndarray,
                              gpu: bool = True):

     video_meta = get_video_meta_data(video_path=video_path)
-
     batch, frm_range = frm_range[0], frm_range[1]
     save_path = os.path.join(temp_dir, f'{batch}.mp4')
     fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
@@ -57,7 +56,7 @@ def egocentric_video_aligner(frm_range: np.ndarray,
             final_frame = cv2.warpAffine(rotated_frame, m_translations[img_counter],(video_meta['width'], video_meta['height']), borderValue=fill_clr)
             writer.write(final_frame)
             if verbose:
-                print(f'Creating frame {frame_id} ({video_name}, CPU core: {batch + 1}).')
+                print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
             img_counter+=1
     else:
         cap = cv2.VideoCapture(video_path)
@@ -67,7 +66,7 @@ def egocentric_video_aligner(frm_range: np.ndarray,
             final_frame = cv2.warpAffine(rotated_frame, m_translations[frm_idx], (video_meta['width'], video_meta['height']), borderValue=fill_clr)
             writer.write(final_frame)
             if verbose:
-                print(f'Creating frame {frm_id} ({video_name}, CPU core: {batch + 1}).')
+                print(f'Creating frame {frm_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
     writer.release()
     return batch + 1
@@ -176,9 +175,9 @@ class EgocentricVideoRotator():
         stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)

 if __name__ == "__main__":
-    DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
-    VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
-    SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+    DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+    VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+    SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
     ANCHOR_LOC = np.array([250, 250])

     df = read_df(file_path=DATA_PATH, file_type='csv')
@@ -186,5 +185,5 @@ if __name__ == "__main__":
     data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)

     _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
-    rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True)
+    rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
     rotater.run()
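
For orientation, each output frame above is produced by two chained ``cv2.warpAffine`` calls: a rotation about the animal's anchor point followed by a translation to a fixed location. A minimal standalone sketch with made-up matrices (not the SimBA API):

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)            # stand-in video frame
cv2.circle(frame, (500, 120), 30, (0, 255, 0), -1)         # a feature to track visually

# Rotation about an arbitrary anchor point, then a translation to a fixed spot.
m_rotation = cv2.getRotationMatrix2D(center=(500, 120), angle=45, scale=1.0)
m_translation = np.float32([[1, 0, -180], [0, 1, 130]])    # shift by (dx, dy)

rotated = cv2.warpAffine(frame, m_rotation, (640, 480), borderValue=(0, 0, 0))
aligned = cv2.warpAffine(rotated, m_translation, (640, 480), borderValue=(0, 0, 0))
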

simba/video_processors/video_processing.py
CHANGED

@@ -166,7 +166,7 @@ def convert_to_jpeg(path: Union[str, os.PathLike, List[Union[str, os.PathLike]]]
         dir, file_name, _ = get_fn_ext(filepath=file_path)
         save_path = os.path.join(save_dir, f'{file_name}.jpeg')
         if verbose:
-            print(f"Converting file {file_cnt+1}/{len(file_paths)} ...")
+            print(f"Converting file {file_cnt+1}/{len(file_paths)} ({file_name}) ...")
         img = Image.open(file_path)
         if img.mode in ('RGBA', 'LA'): img = img.convert('RGB')
         img.save(save_path, format='JPEG', quality=quality)
@@ -218,7 +218,7 @@ def convert_to_bmp(path: Union[str, os.PathLike, List[Union[str, os.PathLike]]],
         dir, file_name, _ = get_fn_ext(filepath=file_path)
         save_path = os.path.join(save_dir, f'{file_name}.bmp')
         if verbose:
-            print(f"Converting file {file_cnt + 1}/{len(file_paths)} ...")
+            print(f"Converting file {file_cnt + 1}/{len(file_paths)} ({file_name}) ...")
         img = Image.open(file_path)
         if img.mode in ('RGBA', 'LA'): img = img.convert('RGB')
         img.save(save_path, format='BMP')
@@ -2578,14 +2578,13 @@ def create_blank_video(path: Union[str, os.PathLike],
     )


-def horizontal_video_concatenator(
-    … (removed signature lines not captured in this extract)
-) -> None:
+def horizontal_video_concatenator(video_paths: List[Union[str, os.PathLike]],
+                                  save_path: Union[str, os.PathLike],
+                                  height_px: Optional[Union[int, str]] = None,
+                                  height_idx: Optional[Union[int, str]] = None,
+                                  gpu: bool = False,
+                                  quality: int = 23,
+                                  verbose: Optional[bool] = True) -> None:
     """
     Concatenates multiple videos horizontally.

@@ -2596,10 +2595,14 @@ def horizontal_video_concatenator(
     .. seealso::
        :func:`simba.video_processors.video_processing.vertical_video_concatenator`

+    .. note::
+       Set the height of each video in the moseic by passing **EITHER** ``height_px`` or ``height_idx``.
+
     :param List[Union[str, os.PathLike]] video_paths: List of input video file paths.
     :param Union[str, os.PathLike] save_path: File path to save the concatenated video.
     :param Optional[int] height_px: Height of the output video in pixels.
     :param Optional[int] height_idx: Index of the video to use for determining Height.
+    :param int quality: Video quality (CRF value). Lower values = higher quality. Range 0-52. Default 23.
     :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
     :param Optional[bool] verbose:Whether to print progress messages (default: True).

@@ -2609,69 +2612,41 @@ def horizontal_video_concatenator(
     """
     check_ffmpeg_available()
     if gpu and not check_nvidea_gpu_available():
-        raise FFMPEGCodecGPUError(
-            msg="NVIDEA GPU not available (as evaluated by nvidea-smi returning None)",
-            source=horizontal_video_concatenator.__name__,
-        )
+        raise FFMPEGCodecGPUError(msg="NVIDEA GPU not available (as evaluated by nvidea-smi returning None)", source=horizontal_video_concatenator.__name__)
     timer = SimbaTimer(start=True)
-    check_valid_lst(
-        …
-    )
-    …
-    )
-    video_meta_data = [
-        get_video_meta_data(video_path=video_path) for video_path in video_paths
-    ]
-    if ((height_px is None) and (height_idx is None)) or (
-        (height_px is not None) and (height_idx is not None)
-    ):
-        raise InvalidInputError(
-            msg="Provide a height_px OR height_idx",
-            source=horizontal_video_concatenator.__name__,
-        )
+    check_valid_lst(data=video_paths, source=horizontal_video_concatenator.__name__, min_len=2)
+    check_if_dir_exists(in_dir=os.path.dirname(save_path), source=horizontal_video_concatenator.__name__)
+    video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
+    if ((height_px is None) and (height_idx is None)) or ((height_px is not None) and (height_idx is not None)):
+        raise InvalidInputError(msg="Provide a height_px OR height_idx",source=horizontal_video_concatenator.__name__)
     if height_idx is not None:
-        check_int(
-            name=f"{horizontal_video_concatenator.__name__} height",
-            value=height_idx,
-            min_value=0,
-            max_value=len(video_paths) - 1,
-        )
+        check_int(name=f"{horizontal_video_concatenator.__name__} height", value=height_idx, min_value=0, max_value=len(video_paths) - 1)
         height = int(video_meta_data[height_idx]["height"])
     else:
-        check_int(
-            name=f"{horizontal_video_concatenator.__name__} height",
-            value=height_px,
-            min_value=1,
-        )
+        check_int(name=f"{horizontal_video_concatenator.__name__} height", value=height_px,min_value=1)
         height = int(height_px)
     video_path_str = " ".join([f'-i "{path}"' for path in video_paths])
+    quality = 23 if not check_int(name='quality', value=quality, min_value=0, max_value=52, raise_error=False)[0] else int(quality)
+    quality_cmd = f'-crf {quality}' if not gpu else f'-rc vbr -cq {quality}'
     codec = "h264_nvenc" if gpu else "libvpx-vp9"
-    filter_complex = ";".join(
-        [f"[{idx}:v]scale=-1:{height}[v{idx}]" for idx in range(len(video_paths))]
-    )
+    filter_complex = ";".join([f"[{idx}:v]scale=-1:{height}[v{idx}]" for idx in range(len(video_paths))])
     filter_complex += f";{''.join([f'[v{idx}]' for idx in range(len(video_paths))])}hstack=inputs={len(video_paths)}[v]"
     if verbose:
-        print(
-            …
-        )
-    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} -loglevel error -stats "{save_path}" -y'
+        print(f"Concatenating {len(video_paths)} videos horizontally with a {height} pixel height... ")
+    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} {quality_cmd} -loglevel error -stats "{save_path}" -y'
     subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
     timer.stop_timer()
     if verbose:
-        print(
-            f"Horizontal concatenation complete, saved at {save_path} (elapsed time: {timer.elapsed_time_str}s.)"
-        )
+        print(f"Horizontal concatenation complete, saved at {save_path} (elapsed time: {timer.elapsed_time_str}s.)")


-def vertical_video_concatenator(
-    … (removed signature lines not captured in this extract)
-) -> None:
+def vertical_video_concatenator(video_paths: List[Union[str, os.PathLike]],
+                                save_path: Union[str, os.PathLike],
+                                width_px: Optional[int] = None,
+                                width_idx: Optional[int] = None,
+                                gpu: bool = False,
+                                quality: int = 23,
+                                verbose: bool = True) -> None:
     """
     Concatenates multiple videos vertically.

@@ -2738,22 +2713,18 @@ def vertical_video_concatenator(
         width = int(width_px)
     video_path_str = " ".join([f'-i "{path}"' for path in video_paths])
     codec = "h264_nvenc" if gpu else "libvpx-vp9"
-    …
-    )
+    quality = 23 if not check_int(name='quality', value=quality, min_value=0, max_value=52, raise_error=False)[0] else int(quality)
+    quality_cmd = f'-crf {quality}' if not gpu else f'-rc vbr -cq {quality}'
+    filter_complex = ";".join([f"[{idx}:v]scale={width}:-1[v{idx}]" for idx in range(len(video_paths))])
     filter_complex += f";{''.join([f'[v{idx}]' for idx in range(len(video_paths))])}"
     filter_complex += f"vstack=inputs={len(video_paths)}[v]"
     if verbose:
-        print(
-            …
-        )
-    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} -loglevel error -stats "{save_path}" -y'
+        print(f"Concatenating {len(video_paths)} videos vertically with a {width} pixel width...")
+    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} {quality_cmd} "{save_path}" -loglevel error -stats -y'
     subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
     timer.stop_timer()
     if verbose:
-        print(
-            f"Vertical concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)"
-        )
+        print(f"Vertical concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)")


 def mosaic_concatenator(
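
The new ``quality`` argument is validated with ``check_int`` and mapped onto a constant-quality ffmpeg flag: ``-crf`` on the CPU libvpx-vp9 path and ``-rc vbr -cq`` on the h264_nvenc GPU path. A hedged usage sketch of the updated concatenators; the function names and parameters are taken from the hunks above, while the file paths are placeholders:

from simba.video_processors.video_processing import (horizontal_video_concatenator,
                                                     vertical_video_concatenator)

# Lower CRF/CQ values give higher quality (and larger files); 0-52 is accepted, 23 is the default.
horizontal_video_concatenator(video_paths=['/videos/a.mp4', '/videos/b.mp4'],
                              save_path='/videos/ab_horizontal.mp4',
                              height_idx=0,          # scale every panel to the height of the first video
                              quality=18,
                              gpu=False)

vertical_video_concatenator(video_paths=['/videos/a.mp4', '/videos/b.mp4'],
                            save_path='/videos/ab_vertical.mp4',
                            width_px=640,
                            quality=30,              # smaller output, lower quality
                            gpu=False)
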
@@ -2763,8 +2734,9 @@ def mosaic_concatenator(
     width_px: Optional[Union[int, str]] = None,
     height_idx: Optional[Union[int, str]] = None,
     height_px: Optional[Union[int, str]] = None,
-    gpu:
-    …
+    gpu: bool = False,
+    quality: int = 23,
+    verbose: bool = True,
     uneven_fill_color: Optional[str] = "black",
 ) -> None:
     """
@@ -2789,6 +2761,7 @@ def mosaic_concatenator(
     :param Optional[int] height_px: Height of the output video panels in pixels.
     :param Optional[int] height_idx: Height of the video to use for determining width.
     :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
+    :param int quality: Video quality (CRF value). Lower values = higher quality. Range 0-52. Default 23.
     :param Optional[bool] verbose: Whether to print progress messages (default: True).

     :example:
@@ -2799,49 +2772,24 @@ def mosaic_concatenator(

     check_ffmpeg_available()
     if gpu and not check_nvidea_gpu_available():
-        raise FFMPEGCodecGPUError(
-            msg="NVIDIA GPU not available", source=mosaic_concatenator.__name__
-        )
+        raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available", source=mosaic_concatenator.__name__)
     timer = SimbaTimer(start=True)
     dt = datetime.now().strftime("%Y%m%d%H%M%S")
-    check_valid_lst(
-        …
-        source=f"{mosaic_concatenator.__name__} video_paths",
-        min_len=3,
-    )
-    video_meta_data = [
-        get_video_meta_data(video_path=video_path) for video_path in video_paths
-    ]
+    check_valid_lst(data=video_paths, source=f"{mosaic_concatenator.__name__} video_paths", min_len=3)
+    video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
     max_video_length = max([x["video_length_s"] for x in video_meta_data])
-    …
-    )
-    if ((height_px is None) and (height_idx is None)) or (
-        (height_px is not None) and (height_idx is not None)
-    ):
-        raise InvalidInputError(
-            msg="Provide a height_px OR height_idx", source=mosaic_concatenator.__name__
-        )
+    quality = 23 if not check_int(name='quality', value=quality, min_value=0, max_value=52, raise_error=False)[0] else int(quality)
+    if ((width_px is None) and (width_idx is None)) or ((width_px is not None) and (width_idx is not None)):
+        raise InvalidInputError(msg="Provide a width_px OR width_idx", source=mosaic_concatenator.__name__)
+    if ((height_px is None) and (height_idx is None)) or ((height_px is not None) and (height_idx is not None)):
+        raise InvalidInputError(msg="Provide a height_px OR height_idx", source=mosaic_concatenator.__name__)
     if width_idx is not None:
-        check_int(
-            name=f"{vertical_video_concatenator.__name__} width index",
-            value=width_idx,
-            min_value=1,
-            max_value=len(video_paths) - 1,
-        )
+        check_int(name=f"{vertical_video_concatenator.__name__} width index", value=width_idx, min_value=1, max_value=len(video_paths) - 1)
         width = int(video_meta_data[width_idx]["width"])
     else:
         width = width_px
     if height_idx is not None:
-        check_int(
-            name=f"{vertical_video_concatenator.__name__} height index",
-            value=width_idx,
-            min_value=1,
-            max_value=len(video_paths) - 1,
-        )
+        check_int(name=f"{vertical_video_concatenator.__name__} height index", value=width_idx, min_value=1, max_value=len(video_paths) - 1)
         height = int(video_meta_data[width_idx]["height"])
     else:
         height = height_px
@@ -2851,73 +2799,40 @@ def mosaic_concatenator(
     os.makedirs(temp_dir)
     if not (len(video_paths) % 2) == 0:
         blank_path = os.path.join(temp_dir, f"{dt}.mp4")
-        create_blank_video(
-            path=blank_path,
-            length=max_video_length,
-            width=width,
-            height=height,
-            gpu=gpu,
-            verbose=verbose,
-            color=uneven_fill_color,
-        )
+        create_blank_video(path=blank_path, length=max_video_length, width=width, height=height, gpu=gpu, verbose=verbose, color=uneven_fill_color)
         video_paths.append(blank_path)
-    upper_videos, lower_videos = (
-        video_paths[: len(video_paths) // 2],
-        video_paths[len(video_paths) // 2 :],
-    )
+    upper_videos, lower_videos = (video_paths[: len(video_paths) // 2], video_paths[len(video_paths) // 2 :])
     if verbose:
         print("Creating upper mosaic... (Step 1/3)")
     if len(upper_videos) > 1:
         upper_path = os.path.join(temp_dir, "upper.mp4")
-        horizontal_video_concatenator(
-            video_paths=upper_videos,
-            save_path=upper_path,
-            gpu=gpu,
-            height_px=height,
-            verbose=verbose,
-        )
+        horizontal_video_concatenator(video_paths=upper_videos, save_path=upper_path, gpu=gpu, quality=quality, height_px=height, verbose=verbose)
     else:
         upper_path = upper_videos[0]
     if verbose:
         print("Creating lower mosaic... (Step 2/3)")
     if len(lower_videos) > 1:
         lower_path = os.path.join(temp_dir, "lower.mp4")
-        horizontal_video_concatenator(
-            video_paths=lower_videos,
-            save_path=lower_path,
-            gpu=gpu,
-            height_px=height,
-            verbose=verbose,
-        )
+        horizontal_video_concatenator(video_paths=lower_videos, save_path=lower_path, gpu=gpu, quality=quality, height_px=height, verbose=verbose)
     else:
         lower_path = lower_videos[0]
-    panels_meta = [
-        get_video_meta_data(video_path=video_path)
-        for video_path in [lower_path, upper_path]
-    ]
+    panels_meta = [get_video_meta_data(video_path=video_path) for video_path in [lower_path, upper_path]]
     if verbose:
-        print("Joining upper and lower mosaic... (Step
-    vertical_video_concatenator(
-        video_paths=[upper_path, lower_path],
-        save_path=save_path,
-        verbose=verbose,
-        gpu=gpu,
-        width_px=max([x["width"] for x in panels_meta]),
-    )
+        print("Joining upper and lower mosaic... (Step 3/3)")
+    vertical_video_concatenator(video_paths=[upper_path, lower_path], save_path=save_path, verbose=verbose, gpu=gpu, quality=quality, width_px=max([x["width"] for x in panels_meta]))
     timer.stop_timer()
     shutil.rmtree(temp_dir)
     if verbose:
-        print(
-            f"Mosaic concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)"
-        )
+        print(f"Mosaic concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)")


 def mixed_mosaic_concatenator(
     video_paths: List[Union[str, os.PathLike]],
     save_path: Union[str, os.PathLike],
-    gpu:
-    …
+    gpu: bool = False,
+    quality: int = 23,
+    verbose: bool = True,
+    uneven_fill_color: str = "black",
 ) -> None:
     """
     Create a mixed mosaic video by concatenating multiple input videos in a mosaic layout of various sizes.
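
The mosaic builder forwards the same ``quality`` value to the horizontal and vertical passes it is composed of. A usage sketch with placeholder paths (at least three inputs are required by the ``check_valid_lst`` call above; panel width and height are taken here from the video at index 1):

from simba.video_processors.video_processing import mosaic_concatenator

mosaic_concatenator(video_paths=['/videos/a.mp4', '/videos/b.mp4', '/videos/c.mp4'],
                    save_path='/videos/mosaic.mp4',
                    width_idx=1,
                    height_idx=1,
                    gpu=False,
                    quality=23,
                    verbose=True)
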
@@ -2939,6 +2854,7 @@ def mixed_mosaic_concatenator(
     :param List[Union[str, os.PathLike]] video_paths: List of input video file paths.
     :param Union[str, os.PathLike] save_path: File path to save the concatenated video.
     :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
+    :param int quality: Video quality (CRF value). Lower values = higher quality. Range 0-52. Default 23.
     :param Optional[bool] verbose: Whether to print progress messages (default: True).

     :example:
@@ -2962,6 +2878,8 @@ def mixed_mosaic_concatenator(
     print("Creating mixed mosaic video... ")
     temp_dir = os.path.join(os.path.dirname(large_mosaic_path), f"temp_{dt}")
     os.makedirs(temp_dir)
+    quality = 23 if not check_int(name='quality', value=quality, min_value=0, max_value=52, raise_error=False)[0] else int(quality)
+
     if not (len(video_paths) % 2) == 0:
         blank_path = os.path.join(temp_dir, f"{dt}.mp4")
         create_blank_video(
@@ -2986,6 +2904,7 @@ def mixed_mosaic_concatenator(
             video_paths=upper_videos,
             save_path=upper_path,
             gpu=gpu,
+            quality=quality,
             height_px=mosaic_height,
             verbose=verbose,
         )
@@ -2999,6 +2918,7 @@ def mixed_mosaic_concatenator(
             video_paths=lower_videos,
             save_path=lower_path,
             gpu=gpu,
+            quality=quality,
             verbose=verbose,
             height_px=mosaic_height,
         )
@@ -3016,6 +2936,7 @@ def mixed_mosaic_concatenator(
         width_px=max([x["width"] for x in panels_meta]),
         save_path=mosaic_path,
         gpu=gpu,
+        quality=quality,
         verbose=verbose,
     )
     if verbose:
@@ -3025,6 +2946,7 @@ def mixed_mosaic_concatenator(
         height_idx=0,
         save_path=save_path,
         gpu=gpu,
+        quality=quality,
     )
     timer.stop_timer()
     shutil.rmtree(temp_dir)

simba/video_processors/videos_to_frames.py
CHANGED

@@ -16,6 +16,7 @@ from simba.utils.checks import (check_if_dir_exists, check_int, check_str,
 from simba.utils.printing import SimbaTimer, stdout_success
 from simba.utils.read_write import (find_core_cnt, get_video_meta_data,
                                     read_frm_of_video)
+from simba.utils.data import terminate_cpu_pool

 JPEG, PNG, WEBP = 'jpeg', 'png', 'webp'

@@ -117,8 +118,8 @@ def video_to_frames(video_path: Union[str, os.PathLike],
         for cnt, batch_id in enumerate(pool.imap(constants, frm_ids, chunksize=1)):
             if verbose:
                 print(f'Video frame batch {batch_id} (of {core_cnt}) complete...')
-        …
-        pool
+
+        terminate_cpu_pool(pool=pool, force=True)
     timer.stop_timer()
     if verbose:
         stdout_success(msg=f'All frames for video {video_path} saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
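
The batch loop now hands the worker pool to ``terminate_cpu_pool`` (imported from ``simba.utils.data``) instead of shutting it down inline. A generic sketch of what a forced pool teardown typically involves, using only the standard library (illustrative; not the SimBA helper's implementation):

import multiprocessing

def _square(x: int) -> int:
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=4)
    try:
        results = pool.map(_square, range(10))
        print(results)
    finally:
        # Forced teardown: stop workers immediately rather than waiting for queued tasks,
        # then block until the worker processes have actually exited.
        pool.terminate()
        pool.join()
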

{simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: simba-uw-tf-dev
-Version: 4.5.8
+Version: 4.6.2
 Summary: Toolkit for computer classification and analysis of behaviors in experimental animals
 Home-page: https://github.com/sgoldenlab/simba
 Author: Simon Nilsson, Jia Jie Choong, Sophia Hwang