simba-uw-tf-dev 4.7.3__py3-none-any.whl → 4.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Binary file
Binary file
Binary file
Binary file
@@ -18,6 +18,7 @@ from collections import ChainMap
18
18
  import cv2
19
19
  import pandas as pd
20
20
  from numba import float64, int64, jit, njit, prange, uint8
21
+ from PIL import Image, ImageDraw, ImageFont
21
22
  from shapely.geometry import Polygon
22
23
  from skimage.metrics import structural_similarity
23
24
 
@@ -30,11 +31,12 @@ from simba.utils.checks import (check_file_exist_and_readable, check_float,
30
31
  from simba.utils.data import terminate_cpu_pool
31
32
  from simba.utils.enums import Defaults, Formats, GeometryEnum, Options
32
33
  from simba.utils.errors import ArrayError, FrameRangeError, InvalidInputError
34
+ from simba.utils.lookups import get_fonts
33
35
  from simba.utils.printing import SimbaTimer, stdout_success
34
36
  from simba.utils.read_write import (find_core_cnt,
35
37
  find_files_of_filetypes_in_directory,
36
38
  get_fn_ext, get_video_meta_data,
37
- read_frm_of_video)
39
+ read_frm_of_video, seconds_to_timestamp)
38
40
 
39
41
 
40
42
  class ImageMixin(object):
@@ -2052,18 +2054,141 @@ class ImageMixin(object):
2052
2054
 
2053
2055
  return denoised_img
2054
2056
 
2057
+ @staticmethod
2058
+ def get_timelapse_img(video_path: Union[str, os.PathLike],
2059
+ frame_cnt: int = 25,
2060
+ size: Optional[int] = None,
2061
+ crop_ratio: int = 50) -> np.ndarray:
2055
2062
 
2063
+ """
2064
+ Creates timelapse image from video.
2056
2065
 
2066
+ .. image:: _static/img/get_timelapse_img.png
2067
+ :width: 600
2068
+ :align: center
2057
2069
 
2070
+ :param Union[str, os.PathLike] video_path: Path to the video to cerate the timelapse image from.
2071
+ :param int frame_cnt: Number of frames to grab from the video. There will be an even interval between each frame.
2072
+ :param Optional[int] size: The total width in pixels of the final timelapse image. If None, uses the video width (adjusted for crop_ratio).
2073
+ :param int crop_ratio: The percent of each original video (from the left) to show.
2074
+ :return np.ndarray: The timelapse image as a numpy array
2058
2075
 
2076
+ :example:
2077
+ >>> img = ImageMixin.get_timelapse_img(video_path=r"E:\troubleshooting\mitra_emergence\project_folder\clip_test\Box1_180mISOcontrol_Females_clipped_progress_bar.mp4", size=100)
2078
+ """
2059
2079
 
2080
+ video_meta = get_video_meta_data(video_path=video_path, raise_error=True)
2081
+ frm_ids = [int(i * video_meta['frame_count'] / frame_cnt) for i in range(frame_cnt)]
2082
+ cap = cv2.VideoCapture(video_path)
2083
+ frms = [read_frm_of_video(video_path=cap, frame_index=x, use_ffmpeg=False) for x in frm_ids]
2084
+
2085
+ effective_video_width = int(video_meta['width'] * (crop_ratio / 100))
2086
+ if size is None:
2087
+ size = effective_video_width
2088
+ per_frame_width_after_crop = size / frame_cnt
2089
+ per_frame_width_before_crop = per_frame_width_after_crop / (crop_ratio / 100)
2090
+ scale_factor = per_frame_width_before_crop / frms[0].shape[1]
2091
+ scaled_frms = [cv2.resize(x, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR) for x in frms]
2092
+ if crop_ratio is not None:
2093
+ scaled_frms = [ImageMixin.segment_img_vertical(img=x, pct=crop_ratio, left=True) for x in scaled_frms]
2094
+
2095
+ return cv2.hconcat(scaled_frms)
2060
2096
 
2097
+ @staticmethod
2098
+ def create_time_ruler(width: int,
2099
+ video_path: Union[str, os.PathLike],
2100
+ height: int = 60,
2101
+ num_divisions: int = 6,
2102
+ font: str = 'Arial',
2103
+ bg_color: Tuple[int, int, int] = (255, 255, 255),
2104
+ line_color: Tuple[int, int, int] = (128, 128, 128),
2105
+ text_color: Tuple[int, int, int] = (0, 0, 0),
2106
+ padding: int = 60,
2107
+ show_time: bool = True) -> np.ndarray:
2108
+ """
2109
+ Create a horizontal ruler/scale bar with tick marks and labels.
2110
+
2111
+ .. image:: _static/img/create_time_ruler.png
2112
+ :width: 600
2113
+ :align: center
2061
2114
 
2115
+ :param int width: Width of the ruler in pixels (should match timelapse image width if one is used)
2116
+ :param Union[str, os.PathLike] video_path: Path to video file to get metadata from
2117
+ :param int height: Height of the ruler in pixels. Default 60.
2118
+ :param int num_divisions: Number of major divisions on the ruler. Default 6.
2119
+ :param str font: Font name to use for labels. Default 'Algerian'.
2120
+ :param Tuple[int, int, int] bg_color: Background color (R, G, B). Default white.
2121
+ :param Tuple[int, int, int] line_color: Color for tick marks and lines (R, G, B). Default grey.
2122
+ :param Tuple[int, int, int] text_color: Color for text labels (R, G, B). Default black.
2123
+ :param bool show_time: If True, show time labels, else show frame numbers. Default True.
2124
+ :return: Ruler image as numpy array (BGR format for OpenCV compatibility)
2125
+ :rtype: np.ndarray
2062
2126
 
2127
+ :example:
2128
+ >>> ruler = ImageMixin.create_time_ruler(width=1920, video_path='path/to/video.mp4', height=60, num_divisions=6)
2129
+ """
2063
2130
 
2064
-
2065
-
2066
-
2131
+ check_file_exist_and_readable(file_path=video_path)
2132
+ check_int(name='width', value=width, min_value=1, raise_error=True)
2133
+ check_int(name='height', value=height, min_value=1, raise_error=True)
2134
+ check_int(name='num_divisions', value=num_divisions, min_value=1, raise_error=True)
2135
+ check_int(name='padding', value=padding, min_value=0, raise_error=True)
2136
+ check_str(name='font', value=font, allow_blank=False, raise_error=True)
2137
+ check_if_valid_rgb_tuple(data=bg_color, raise_error=True, source=ImageMixin.create_time_ruler.__name__)
2138
+ check_if_valid_rgb_tuple(data=line_color, raise_error=True, source=ImageMixin.create_time_ruler.__name__)
2139
+ check_if_valid_rgb_tuple(data=text_color, raise_error=True, source=ImageMixin.create_time_ruler.__name__)
2140
+
2141
+ video_meta = get_video_meta_data(video_path=video_path, raise_error=True)
2142
+ total_width = width + (2 * padding)
2143
+
2144
+ img = Image.new('RGB', (total_width, height), color=bg_color)
2145
+ draw = ImageDraw.Draw(img)
2146
+ font_dict = get_fonts()
2147
+ try:
2148
+ font_path = font_dict[font]
2149
+ pil_font = ImageFont.truetype(font_path, size=12)
2150
+ except (KeyError, OSError):
2151
+ pil_font = ImageFont.load_default()
2152
+ major_tick_height, half_tick_height = height * 0.6, height * 0.4
2153
+ quarter_tick_height, eighth_tick_height = height * 0.25, height * 0.15
2154
+
2155
+ for i in range(num_divisions + 1):
2156
+ x = padding + int(i * width / num_divisions)
2157
+ draw.line([(x, 0), (x, major_tick_height)], fill=line_color, width=2)
2158
+ if show_time and video_meta['video_length_s'] is not None:
2159
+ seconds_at_division = i * video_meta['video_length_s'] / num_divisions
2160
+ label = seconds_to_timestamp(seconds=seconds_at_division)
2161
+ elif video_meta['frame_count'] is not None:
2162
+ label = str(int(i * video_meta['frame_count'] / num_divisions))
2163
+ else:
2164
+ label = str(i)
2165
+ bbox = draw.textbbox((0, 0), label, font=pil_font)
2166
+ text_width = bbox[2] - bbox[0]
2167
+ if i == 0:
2168
+ draw.text((x, major_tick_height + 5), label, fill=text_color, font=pil_font)
2169
+ elif i == num_divisions:
2170
+ draw.text((x - text_width, major_tick_height + 5), label, fill=text_color, font=pil_font)
2171
+ else:
2172
+ draw.text((x - text_width // 2, major_tick_height + 5), label, fill=text_color, font=pil_font)
2173
+ if i < num_divisions:
2174
+ x_half = padding + int((i + 0.5) * width / num_divisions)
2175
+ draw.line([(x_half, 0), (x_half, half_tick_height)], fill=line_color, width=1)
2176
+ for q in [0.25, 0.75]:
2177
+ x_quarter = padding + int((i + q) * width / num_divisions)
2178
+ draw.line([(x_quarter, 0), (x_quarter, quarter_tick_height)], fill=line_color, width=1)
2179
+ for e in [0.125, 0.375, 0.625, 0.875]:
2180
+ x_eighth = padding + int((i + e) * width / num_divisions)
2181
+ draw.line([(x_eighth, 0), (x_eighth, eighth_tick_height)], fill=line_color, width=1)
2182
+
2183
+ draw.line([(0, height - 1), (total_width, height - 1)], fill=line_color, width=1)
2184
+ img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
2185
+ return img_bgr
2186
+
2187
+
2188
+ # img = ImageMixin.create_time_ruler(width=1920, video_path=r"E:\troubleshooting\mitra_emergence\project_folder\clip_test\Box1_180mISOcontrol_Females_clipped_progress_bar.mp4", height=60, num_divisions=6)
2189
+ #
2190
+ # cv2.imshow('sadasdas', img)
2191
+ # cv2.waitKey(40000)
2067
2192
 
2068
2193
  #x = ImageMixin.get_blob_locations(video_path=r"C:\troubleshooting\RAT_NOR\project_folder\videos\2022-06-20_NOB_DOT_4_downsampled_bg_subtracted.mp4", gpu=True)
2069
2194
  # imgs = ImageMixin().read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples')
@@ -17,8 +17,8 @@ from PIL import ImageTk
17
17
  from scipy.spatial.distance import cdist
18
18
  from shapely.geometry import Polygon
19
19
 
20
- from simba.utils.checks import (check_file_exist_and_readable, check_instance,
21
- check_int, check_str, check_valid_array,
20
+ from simba.utils.checks import (check_file_exist_and_readable, check_int,
21
+ check_str, check_valid_array,
22
22
  check_valid_dataframe, check_valid_tuple,
23
23
  check_video_and_data_frm_count_align)
24
24
  from simba.utils.enums import (ROI_SETTINGS, ConfigKey, Formats, Keys, Options,
@@ -0,0 +1,80 @@
1
+ """
2
+ Function to remove N seconds from the end of a video file.
3
+
4
+ Similar to simba.video_processors.video_processing.remove_beginning_of_video
5
+ but removes from the end instead of the beginning.
6
+ """
7
+
8
+ import os
9
+ import subprocess
10
+ from typing import Union, Optional
11
+
12
+ from simba.utils.checks import (
13
+ check_ffmpeg_available,
14
+ check_file_exist_and_readable,
15
+ check_int,
16
+ check_if_dir_exists,
17
+ check_nvidea_gpu_available
18
+ )
19
+ from simba.utils.errors import FFMPEGCodecGPUError, InvalidInputError
20
+ from simba.utils.printing import SimbaTimer, stdout_success
21
+ from simba.utils.read_write import get_fn_ext, get_video_meta_data
22
+ from simba.utils.lookups import quality_pct_to_crf
23
+
24
+
25
def remove_end_of_video(file_path: Union[str, os.PathLike],
                        time: int,
                        quality: int = 60,
                        save_path: Optional[Union[str, os.PathLike]] = None,
                        gpu: Optional[bool] = False) -> None:
    """
    Remove N seconds from the end of a video file.

    :param Union[str, os.PathLike] file_path: Path to video file
    :param int time: Number of seconds to remove from the end of the video.
    :param int quality: Video quality percentage (1-100). Higher values = higher quality. Default 60.
    :param Optional[Union[str, os.PathLike]] save_path: Optional save location for the shortened video. If None, then the new video is saved in the same directory as the input video with the ``_shorten.mp4`` suffix.
    :param Optional[bool] gpu: If True, use NVIDEA GPU codecs. Default False.
    :returns: None. If save_path is not passed, the result is stored in the same directory as the input file with the ``_shorten.mp4`` suffix.

    .. note::
       Codec is automatically selected: libx264 for CPU encoding, h264_nvenc when gpu=True.

    :example:
    >>> _ = remove_end_of_video(file_path='project_folder/videos/Video_1.avi', time=10)
    """

    check_ffmpeg_available(raise_error=True)
    if gpu and not check_nvidea_gpu_available():
        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidea-smi returning None)", source=remove_end_of_video.__name__)
    timer = SimbaTimer(start=True)
    check_file_exist_and_readable(file_path=file_path)
    video_meta_data = get_video_meta_data(video_path=file_path)
    check_int(name="Cut time", value=time, min_value=1, raise_error=True)
    check_int(name=f'{remove_end_of_video.__name__} quality', value=quality, min_value=1, max_value=100, raise_error=True)
    quality_crf = quality_pct_to_crf(pct=int(quality))
    time = int(time)
    dir, file_name, ext = get_fn_ext(filepath=file_path)
    # The cut must leave at least some video: reject cuts >= the full video length.
    if video_meta_data['video_length_s'] <= time:
        raise InvalidInputError(msg=f"The cut time {time}s is invalid for video {file_name} with length {video_meta_data['video_length_s']}s", source=remove_end_of_video.__name__)
    if save_path is None:
        save_name = os.path.join(dir, f"{file_name}_shorten.mp4")
    else:
        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=f'{remove_end_of_video.__name__} save_path', create_if_not_exist=True)
        save_name = save_path
    # Keep the first (length - time) seconds; ffmpeg's -t flag caps the output duration.
    duration = video_meta_data['video_length_s'] - time
    if gpu:
        cmd = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -t {duration} -rc vbr -cq {quality_crf} -c:v h264_nvenc -c:a aac "{save_name}" -loglevel error -stats -hide_banner -y'
    else:
        cmd = f'ffmpeg -i "{file_path}" -t {duration} -c:v libx264 -crf {quality_crf} -c:a aac "{save_name}" -loglevel error -stats -hide_banner -y'
    print(f"Removing final {time}s from {file_name}... ")
    # NOTE(review): shell=True with interpolated paths; paths containing double quotes would
    # break the command string. Dropped the unread stdout=subprocess.PIPE (an undrained pipe
    # can deadlock the child process if it fills the OS pipe buffer).
    subprocess.call(cmd, shell=True)
    timer.stop_timer()
    stdout_success(msg=f"SIMBA COMPLETE: Video converted! {save_name} generated!", elapsed_time=timer.elapsed_time_str, source=remove_end_of_video.__name__)
75
+
76
+
77
+ if __name__ == "__main__":
78
+ # Example usage
79
+ # remove_end_of_video(file_path='path/to/video.mp4', time=10)
80
+ pass
@@ -0,0 +1,291 @@
1
+ from typing import Union, Optional, Tuple
2
+ import os
3
+ from simba.mixins.image_mixin import ImageMixin
4
+ import cv2
5
+ import numpy as np
6
+ from simba.utils.checks import check_file_exist_and_readable, check_int, check_valid_boolean
7
+ from simba.utils.read_write import get_video_meta_data, seconds_to_timestamp, read_frm_of_video
8
+ from simba.utils.lookups import get_fonts
9
+ from simba.utils.enums import Formats
10
+ from simba.ui.tkinter_functions import SimBAScaleBar, SimBALabel
11
+ from tkinter import *
12
+ from PIL import Image, ImageDraw, ImageFont, ImageTk
13
+
14
class TimelapseSlider():
    """
    Interactive timelapse viewer with segment selection sliders.

    Creates a Tkinter GUI window displaying a timelapse composite image generated from evenly-spaced frames
    across a video. Includes interactive sliders to select start and end times for video segments, with
    visual highlighting of the selected segment and frame previews.

    :param Union[str, os.PathLike] video_path: Path to video file to create timelapse from.
    :param int frame_cnt: Number of frames to include in timelapse composite. Default 25.
    :param Optional[int] size: Width per frame in pixels. If None, calculated to match video width. Default None.
    :param Optional[int] crop_ratio: Percentage of frame width to keep (0-100). Default 50.
    :param int padding: Padding in pixels added to timelapse when ruler is shown. Default 60.
    :param int ruler_divisions: Number of major divisions on time ruler. Default 6.
    :param bool show_ruler: If True, display time ruler below timelapse. Default True.
    :param int ruler_height: Height of ruler in pixels. Default 60.

    :example:
    >>> slider = TimelapseSlider(video_path='path/to/video.mp4', frame_cnt=25, crop_ratio=75)
    >>> slider.run()
    >>> # Use sliders to select segment, then access selected times:
    >>> start_time = slider.img_window.get_selected_start_time()
    >>> end_time = slider.img_window.get_selected_end_time()
    >>> slider.close()
    """

    def __init__(self,
                 video_path: Union[str, os.PathLike],
                 frame_cnt: int = 25,
                 size: Optional[int] = None,
                 crop_ratio: Optional[int] = 50,
                 padding: int = 60,
                 ruler_divisions: int = 6,
                 show_ruler: bool = True,
                 ruler_height: int = 60):

        # Validate inputs before touching the video; get_video_meta_data also raises on bad files.
        check_file_exist_and_readable(file_path=video_path)
        check_int(name='frame_cnt', value=frame_cnt, min_value=1, raise_error=True)
        if size is not None: check_int(name='size', value=size, min_value=1, raise_error=True)
        check_int(name='padding', value=padding, min_value=1, raise_error=True)
        check_int(name='ruler_height', value=ruler_height, min_value=1, raise_error=True)
        check_valid_boolean(value=show_ruler, source=f'{self.__class__.__name__} show_ruler', raise_error=True)
        self.video_meta = get_video_meta_data(video_path=video_path, raise_error=True)
        if show_ruler: check_int(name='ruler_divisions', value=ruler_divisions, min_value=1, raise_error=True)
        self.size, self.padding, self.crop_ratio, self.frame_cnt = size, padding, crop_ratio, frame_cnt
        self.ruler_height, self.video_path, self.show_ruler, self.ruler_divisions = ruler_height, video_path, show_ruler, ruler_divisions
        self.frm_name = f'{self.video_meta["video_name"]} - TIMELAPSE VIEWER'
        self.video_capture = None  # Will be initialized in run()
        self._pending_frame_update = None  # For debouncing frame preview updates (Tk `after` id)
        self._frame_debounce_ms = 300  # Wait 300ms after slider stops before updating frame preview

    def _draw_img(self, img: np.ndarray, lbl: SimBALabel):
        """Convert a BGR numpy image to a Tk PhotoImage and display it on the given label.

        Keeps a reference on both self and the label to stop Tk from garbage-collecting the image.
        """
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.pil_image = Image.fromarray(img_rgb)
        self.tk_image = ImageTk.PhotoImage(self.pil_image)
        lbl.configure(image=self.tk_image)
        lbl.image = self.tk_image

    def _update_selection(self, slider_type: str):
        """Slider callback: enforce start < end, refresh labels/highlight, schedule frame preview.

        :param str slider_type: 'start' or 'end' -- which slider the user moved.
        """
        start_sec = int(self.start_scale.get_value())
        end_sec = int(self.end_scale.get_value())
        max_sec = int(self.video_meta['video_length_s'])
        # Keep the two sliders ordered: moving one past the other nudges the other by 1s.
        if slider_type == 'start':
            if start_sec >= end_sec:
                end_sec = min(start_sec + 1, max_sec)
                self.end_scale.set_value(end_sec)
        else:
            if end_sec <= start_sec:
                start_sec = max(end_sec - 1, 0)
                self.start_scale.set_value(start_sec)

        self.selected_start[0] = start_sec
        self.selected_end[0] = end_sec

        # Update time labels immediately (fast, no I/O)
        self.start_time_label.config(text=seconds_to_timestamp(start_sec), fg='blue')
        self.end_time_label.config(text=seconds_to_timestamp(end_sec), fg='red')

        if self.video_meta['video_length_s'] > 0:
            # Update highlight immediately (moderate cost, but needed for visual feedback)
            self._highlight_segment(start_sec, end_sec)
            # Debounce frame preview update (expensive I/O operation)
            self._schedule_frame_update(slider_type=slider_type)

    def _schedule_frame_update(self, slider_type: str):
        """Schedule frame preview update with debouncing.

        Cancels any pending frame update and schedules a new one. If the slider
        moves again before the delay expires, the update is cancelled and rescheduled.
        This prevents expensive frame reads during fast slider dragging.
        """
        # Check if window exists (might be called during cleanup)
        if not hasattr(self, 'img_window') or not self.img_window.winfo_exists():
            return

        # Cancel any pending frame update
        if self._pending_frame_update is not None:
            self.img_window.after_cancel(self._pending_frame_update)

        # Schedule new update after debounce delay
        self._pending_frame_update = self.img_window.after(
            self._frame_debounce_ms,
            lambda: self._update_frame_display(slider_type=slider_type)
        )

    def _update_frame_display(self, slider_type: str):
        """Update the frame preview display using the persistent VideoCapture object."""
        if slider_type == 'start':
            seconds = self.selected_start[0]
            self.frame_label.config(text=f"Start Frame Preview ({seconds_to_timestamp(seconds)})", font=Formats.FONT_LARGE_BOLD.value, fg='blue')
        else:
            seconds = self.selected_end[0]
            self.frame_label.config(text=f"End Frame Preview ({seconds_to_timestamp(seconds)})", font=Formats.FONT_LARGE_BOLD.value, fg='red')

        # Clamp the computed frame index into the valid [0, frame_count) range.
        frame_index = int(seconds * self.video_meta['fps'])
        if frame_index >= self.video_meta['frame_count']: frame_index = self.video_meta['frame_count'] - 1
        if frame_index < 0: frame_index = 0

        # Use persistent VideoCapture object instead of read_frm_of_video
        if self.video_capture is not None and self.video_capture.isOpened():
            self.video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
            ret, frame = self.video_capture.read()
            if ret and frame is not None:
                self._draw_img(img=frame, lbl=self.frame_display_lbl)

    def _create_time_ruler(self, width: int, height: int, num_divisions: int) -> np.ndarray:
        """Render a horizontal time ruler (major/minor ticks plus labels) as a BGR numpy array.

        Labels show timestamps when video length is known, frame numbers otherwise.
        The ruler is widened by 2 * self.padding so ticks line up with the padded timelapse.
        """
        total_width = width + (2 * self.padding)
        bg_color, line_color, text_color = (255, 255, 255), (128, 128, 128), (0, 0, 0)
        img = Image.new('RGB', (total_width, height), color=bg_color)
        draw = ImageDraw.Draw(img)
        font_dict = get_fonts()
        try:
            font_path = font_dict['Algerian']
            pil_font = ImageFont.truetype(font_path, size=12)
        except (KeyError, OSError):
            # 'Algerian' not installed on this system: use PIL's built-in default font.
            pil_font = ImageFont.load_default()
        major_tick_height, half_tick_height = height * 0.6, height * 0.4
        quarter_tick_height, eighth_tick_height = height * 0.25, height * 0.15

        for i in range(num_divisions + 1):
            x = self.padding + int(i * width / num_divisions)
            draw.line([(x, 0), (x, major_tick_height)], fill=line_color, width=2)
            if self.video_meta['video_length_s'] is not None:
                seconds_at_division = i * self.video_meta['video_length_s'] / num_divisions
                label = seconds_to_timestamp(seconds=seconds_at_division)
            elif self.video_meta['frame_count'] is not None:
                label = str(int(i * self.video_meta['frame_count'] / num_divisions))
            else:
                label = str(i)
            bbox = draw.textbbox((0, 0), label, font=pil_font)
            text_width = bbox[2] - bbox[0]
            # Anchor first label left, last label right, interior labels centered on the tick.
            if i == 0:
                draw.text((x, major_tick_height + 5), label, fill=text_color, font=pil_font)
            elif i == num_divisions:
                draw.text((x - text_width, major_tick_height + 5), label, fill=text_color, font=pil_font)
            else:
                draw.text((x - text_width // 2, major_tick_height + 5), label, fill=text_color, font=pil_font)
            if i < num_divisions:
                # Minor ticks: half, quarter and eighth subdivisions between major ticks.
                x_half = self.padding + int((i + 0.5) * width / num_divisions)
                draw.line([(x_half, 0), (x_half, half_tick_height)], fill=line_color, width=1)
                for q in [0.25, 0.75]:
                    x_quarter = self.padding + int((i + q) * width / num_divisions)
                    draw.line([(x_quarter, 0), (x_quarter, quarter_tick_height)], fill=line_color, width=1)
                for e in [0.125, 0.375, 0.625, 0.875]:
                    x_eighth = self.padding + int((i + e) * width / num_divisions)
                    draw.line([(x_eighth, 0), (x_eighth, eighth_tick_height)], fill=line_color, width=1)

        draw.line([(0, height - 1), (total_width, height - 1)], fill=line_color, width=1)
        img_array = np.array(img)
        img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
        return img_bgr

    def _highlight_segment(self, start_sec: int, end_sec: int):
        """Re-draw the timelapse with the selected [start_sec, end_sec] span at full brightness.

        Pixels outside the selection are dimmed (multiplied by 128/255); green vertical lines
        mark the segment boundaries. Always works off self.original_timelapse, so highlights
        never compound across calls.
        """
        timelapse_width = self.original_timelapse.shape[1]
        start_x = int((start_sec / self.video_meta['video_length_s']) * timelapse_width)
        end_x = int((end_sec / self.video_meta['video_length_s']) * timelapse_width)
        highlighted = self.original_timelapse.copy()
        mask = np.ones(highlighted.shape[:2], dtype=np.uint8) * 128
        mask[:, start_x:end_x] = 255
        mask = cv2.merge([mask, mask, mask])
        highlighted = cv2.multiply(highlighted, mask.astype(np.uint8), scale=1/255.0)
        cv2.line(highlighted, (start_x, 0), (start_x, highlighted.shape[0]), (0, 255, 0), 2)
        cv2.line(highlighted, (end_x, 0), (end_x, highlighted.shape[0]), (0, 255, 0), 2)
        self._draw_img(img=highlighted, lbl=self.img_lbl)

    def run(self):
        """Build the Tk window, timelapse image, sliders and preview, then show the initial frame.

        NOTE(review): assumes a Tk root window already exists (Toplevel is used without
        creating/owning a Tk() instance) -- confirm against the caller.
        """
        # Create persistent VideoCapture object for efficient frame reading
        self.video_capture = cv2.VideoCapture(self.video_path)
        if not self.video_capture.isOpened():
            raise ValueError(f"Failed to open video file: {self.video_path}")

        self.timelapse_img = ImageMixin.get_timelapse_img(video_path=self.video_path, frame_cnt=self.frame_cnt, size=self.size, crop_ratio=self.crop_ratio)
        if self.show_ruler:
            # Pad the timelapse horizontally so its edges align with the ruler's padded ticks.
            timelapse_height, timelapse_width = self.timelapse_img.shape[0], self.timelapse_img.shape[1]
            padded_timelapse = np.zeros((timelapse_height, timelapse_width + (2 * self.padding), 3), dtype=np.uint8)
            padded_timelapse[:, self.padding:self.padding + timelapse_width] = self.timelapse_img
            ruler = self._create_time_ruler(width=timelapse_width, height=self.ruler_height, num_divisions=self.ruler_divisions)
            self.timelapse_img = cv2.vconcat([padded_timelapse, ruler])

        # Keep a pristine copy: _highlight_segment re-derives every highlight from it.
        self.original_timelapse = self.timelapse_img.copy()
        self.img_window = Toplevel()
        self.img_window.resizable(False, False)
        self.img_window.title(self.frm_name)
        self.img_window.protocol("WM_DELETE_WINDOW", self.close)


        self.img_lbl = SimBALabel(parent=self.img_window, txt='')
        self.img_lbl.pack()
        self._draw_img(img=self.timelapse_img, lbl=self.img_lbl)
        self.frame_display_frame = Frame(self.img_window)
        self.frame_display_frame.pack(pady=10, padx=10, fill=BOTH, expand=True)
        self.frame_label = SimBALabel(parent=self.frame_display_frame, txt="Frame Preview", font=Formats.FONT_REGULAR_BOLD.value)
        self.frame_label.pack()
        self.frame_display_lbl = SimBALabel(parent=self.frame_display_frame, txt='', bg_clr='black')
        self.frame_display_lbl.pack(pady=5)
        self.slider_frame = Frame(self.img_window)
        self.slider_frame.pack(pady=10, padx=10, fill=X)
        # Outer columns (0, 3) absorb extra width so the sliders stay centered.
        self.slider_frame.columnconfigure(index=0, weight=1)
        self.slider_frame.columnconfigure(index=1, weight=0)
        self.slider_frame.columnconfigure(index=2, weight=0)
        self.slider_frame.columnconfigure(index=3, weight=1)

        self.start_scale = SimBAScaleBar(parent=self.slider_frame, label="START TIME:", from_=0, to=self.video_meta['video_length_s'], orient=HORIZONTAL, length=400, resolution=1, value=0, showvalue=False, label_width=15, sliderrelief='raised', troughcolor='white', activebackground='blue', lbl_font=Formats.FONT_LARGE_BOLD.value)
        self.start_scale.grid(row=0, column=1, padx=5)
        self.start_scale.scale.config(command=lambda x: self._update_selection(slider_type='start'))

        self.start_time_label = SimBALabel(parent=self.slider_frame, txt="00:00:00", font=Formats.FONT_LARGE_BOLD.value, width=10, txt_clr='blue')
        self.start_time_label.grid(row=0, column=2, padx=5)

        self.end_scale = SimBAScaleBar(parent=self.slider_frame, label="END TIME:", from_=0, to=int(self.video_meta['video_length_s']), orient=HORIZONTAL, length=400, resolution=1, value=int(self.video_meta['video_length_s']), showvalue=False, label_width=15, sliderrelief='raised', troughcolor='white', activebackground='red', lbl_font=Formats.FONT_LARGE_BOLD.value)
        self.end_scale.grid(row=1, column=1, padx=5)
        self.end_scale.scale.config(command=lambda x: self._update_selection(slider_type='end'))

        self.end_time_label = SimBALabel(parent=self.slider_frame, txt=seconds_to_timestamp(int(self.video_meta['video_length_s'])), font=Formats.FONT_LARGE_BOLD.value, width=10, txt_clr='red')
        self.end_time_label.grid(row=1, column=2, padx=5)

        # Single-element lists so the slider callbacks can mutate the selection in place.
        self.selected_start = [0]
        self.selected_end = [int(self.video_meta['video_length_s'])]

        # Accessors attached to the window so callers holding only img_window can read the selection.
        self.img_window.get_selected_start = lambda: self.selected_start[0]
        self.img_window.get_selected_end = lambda: self.selected_end[0]
        self.img_window.get_selected_start_time = lambda: seconds_to_timestamp(self.selected_start[0])
        self.img_window.get_selected_end_time = lambda: seconds_to_timestamp(self.selected_end[0])

        self.img_window.update_idletasks()
        self.img_window.update()
        req_width, req_height = self.img_window.winfo_reqwidth(), self.img_window.winfo_reqheight()
        min_width = max(self.timelapse_img.shape[1] + 60, req_width + 20)
        min_height = max(self.timelapse_img.shape[0] + 800, req_height + 100)
        self.img_window.minsize(min_width, min_height)
        self.img_window.geometry(f"{min_width}x{min_height}")
        self._update_frame_display(slider_type='start')

    def close(self):
        """Tear down: cancel pending callbacks, release the video handle, destroy the window."""
        # Cancel any pending frame updates
        if self._pending_frame_update is not None:
            if hasattr(self, 'img_window') and self.img_window.winfo_exists():
                self.img_window.after_cancel(self._pending_frame_update)
            self._pending_frame_update = None

        # Release VideoCapture object
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None

        # Destroy window
        if hasattr(self, 'img_window') and self.img_window.winfo_exists():
            self.img_window.destroy()
282
+
283
+
284
+
285
+
286
if __name__ == "__main__":
    # Developer smoke test: guard it so importing this module no longer launches a GUI
    # with a hard-coded local path as a side effect.
    x = TimelapseSlider(video_path=r"E:\troubleshooting\mitra_emergence\project_folder\clip_test\Box1_180mISOcontrol_Females_clipped_progress_bar.mp4",
                        frame_cnt=25,
                        crop_ratio=75,
                        size=100)

    x.run()