simba-uw-tf-dev 4.7.2__py3-none-any.whl → 4.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. simba/SimBA.py +1178 -1171
  2. simba/assets/icons/left_arrow_green.png +0 -0
  3. simba/assets/icons/left_arrow_red.png +0 -0
  4. simba/assets/icons/right_arrow_green.png +0 -0
  5. simba/assets/icons/right_arrow_red.png +0 -0
  6. simba/assets/lookups/yolo_schematics/yolo_mitra.csv +1 -1
  7. simba/mixins/image_mixin.py +129 -4
  8. simba/model/yolo_fit.py +22 -15
  9. simba/model/yolo_pose_inference.py +7 -2
  10. simba/roi_tools/roi_utils.py +2 -2
  11. simba/sandbox/convert_h264_to_mp4_lossless.py +129 -0
  12. simba/sandbox/extract_and_convert_videos.py +257 -0
  13. simba/sandbox/remove_end_of_video.py +80 -0
  14. simba/sandbox/video_timelaps.py +291 -0
  15. simba/ui/pop_ups/run_machine_models_popup.py +2 -2
  16. simba/ui/pop_ups/video_processing_pop_up.py +3637 -3469
  17. simba/ui/tkinter_functions.py +3 -1
  18. simba/ui/video_timelaps.py +332 -0
  19. simba/utils/lookups.py +67 -1
  20. simba/utils/read_write.py +10 -3
  21. simba/video_processors/batch_process_create_ffmpeg_commands.py +0 -1
  22. simba/video_processors/video_processing.py +5385 -5264
  23. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.4.dist-info}/METADATA +1 -1
  24. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.4.dist-info}/RECORD +28 -19
  25. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.4.dist-info}/LICENSE +0 -0
  26. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.4.dist-info}/WHEEL +0 -0
  27. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.4.dist-info}/entry_points.txt +0 -0
  28. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.4.dist-info}/top_level.txt +0 -0
simba/sandbox/remove_end_of_video.py (new file)
@@ -0,0 +1,80 @@
+ """
+ Function to remove N seconds from the end of a video file.
+
+ Similar to simba.video_processors.video_processing.remove_beginning_of_video
+ but removes from the end instead of the beginning.
+ """
+
+ import os
+ import subprocess
+ from typing import Union, Optional
+
+ from simba.utils.checks import (
+     check_ffmpeg_available,
+     check_file_exist_and_readable,
+     check_int,
+     check_if_dir_exists,
+     check_nvidea_gpu_available
+ )
+ from simba.utils.errors import FFMPEGCodecGPUError, InvalidInputError
+ from simba.utils.printing import SimbaTimer, stdout_success
+ from simba.utils.read_write import get_fn_ext, get_video_meta_data
+ from simba.utils.lookups import quality_pct_to_crf
+
+
+ def remove_end_of_video(file_path: Union[str, os.PathLike],
+                         time: int,
+                         quality: int = 60,
+                         save_path: Optional[Union[str, os.PathLike]] = None,
+                         gpu: Optional[bool] = False) -> None:
+     """
+     Remove N seconds from the end of a video file.
+
+     :param Union[str, os.PathLike] file_path: Path to video file
+     :param int time: Number of seconds to remove from the end of the video.
+     :param int quality: Video quality percentage (1-100). Higher values = higher quality. Default 60.
+     :param Optional[Union[str, os.PathLike]] save_path: Optional save location for the shortened video. If None, then the new video is saved in the same directory as the input video with the ``_shortened`` suffix.
+     :param Optional[bool] gpu: If True, use NVIDEA GPU codecs. Default False.
+     :returns: None. If save_path is not passed, the result is stored in the same directory as the input file with the ``_shorten.mp4`` suffix.
+
+     .. note::
+        Codec is automatically selected: libx264 for CPU encoding (ignored if gpu=True).
+
+     :example:
+     >>> _ = remove_end_of_video(file_path='project_folder/videos/Video_1.avi', time=10)
+     >>> remove_end_of_video(file_path=f'/Users/simon/Desktop/imgs_4/test/blahhhh.mp4', save_path='/Users/simon/Desktop/imgs_4/test/CUT.mp4', time=3)
+     """
+
+     check_ffmpeg_available(raise_error=True)
+     if gpu and not check_nvidea_gpu_available():
+         raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidea-smi returning None)", source=remove_end_of_video.__name__)
+     timer = SimbaTimer(start=True)
+     check_file_exist_and_readable(file_path=file_path)
+     video_meta_data = get_video_meta_data(video_path=file_path)
+     check_int(name="Cut time", value=time, min_value=1)
+     check_int(name=f'{remove_end_of_video.__name__} quality', value=quality, min_value=1, max_value=100, raise_error=True)
+     quality_crf = quality_pct_to_crf(pct=int(quality))
+     time = int(time)
+     dir, file_name, ext = get_fn_ext(filepath=file_path)
+     if video_meta_data['video_length_s'] <= time:
+         raise InvalidInputError(msg=f"The cut time {time}s is invalid for video {file_name} with length {video_meta_data['video_length_s']}s", source=remove_end_of_video.__name__)
+     if save_path is None:
+         save_name = os.path.join(dir, f"{file_name}_shorten.mp4")
+     else:
+         check_if_dir_exists(in_dir=os.path.dirname(save_path), source=f'{remove_end_of_video.__name__} save_path', create_if_not_exist=True)
+         save_name = save_path
+     duration = video_meta_data['video_length_s'] - time
+     if gpu:
+         cmd = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -t {duration} -rc vbr -cq {quality_crf} -c:v h264_nvenc -c:a aac "{save_name}" -loglevel error -stats -hide_banner -y'
+     else:
+         cmd = f'ffmpeg -i "{file_path}" -t {duration} -c:v libx264 -crf {quality_crf} -c:a aac "{save_name}" -loglevel error -stats -hide_banner -y'
+     print(f"Removing final {time}s from {file_name}... ")
+     subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+     timer.stop_timer()
+     stdout_success(msg=f"SIMBA COMPLETE: Video converted! {save_name} generated!", elapsed_time=timer.elapsed_time_str, source=remove_end_of_video.__name__)
+
+
+ if __name__ == "__main__":
+     # Example usage
+     # remove_end_of_video(file_path='path/to/video.mp4', time=10)
+     pass
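For orientation: the new sandbox helper keeps only the first `video_length_s - time` seconds by passing that duration to ffmpeg's `-t` flag and, on the CPU path, maps the quality percentage to a CRF value via `quality_pct_to_crf`. The sketch below shows how it might be called; the video path is hypothetical and the before/after length check is illustrative only, not part of the packaged module.

```python
from simba.sandbox.remove_end_of_video import remove_end_of_video
from simba.utils.read_write import get_video_meta_data

VIDEO = 'project_folder/videos/Video_1.mp4'  # hypothetical input path

# Length before trimming (seconds), read from the video metadata.
before_s = get_video_meta_data(video_path=VIDEO)['video_length_s']

# Drop the final 5 seconds; with save_path=None the output lands next to the
# input as Video_1_shorten.mp4 (CPU/libx264 branch since gpu=False).
remove_end_of_video(file_path=VIDEO, time=5, quality=60, gpu=False)

after_s = get_video_meta_data(video_path='project_folder/videos/Video_1_shorten.mp4')['video_length_s']
print(f'{before_s}s -> {after_s}s (expected roughly {before_s - 5}s)')
```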
simba/sandbox/video_timelaps.py (new file)
@@ -0,0 +1,291 @@
+ from typing import Union, Optional, Tuple
+ import os
+ from simba.mixins.image_mixin import ImageMixin
+ import cv2
+ import numpy as np
+ from simba.utils.checks import check_file_exist_and_readable, check_int, check_valid_boolean
+ from simba.utils.read_write import get_video_meta_data, seconds_to_timestamp, read_frm_of_video
+ from simba.utils.lookups import get_fonts
+ from simba.utils.enums import Formats
+ from simba.ui.tkinter_functions import SimBAScaleBar, SimBALabel
+ from tkinter import *
+ from PIL import Image, ImageDraw, ImageFont, ImageTk
+
+ class TimelapseSlider():
+     """
+     Interactive timelapse viewer with segment selection sliders.
+
+     Creates a Tkinter GUI window displaying a timelapse composite image generated from evenly-spaced frames
+     across a video. Includes interactive sliders to select start and end times for video segments, with
+     visual highlighting of the selected segment and frame previews.
+
+     :param Union[str, os.PathLike] video_path: Path to video file to create timelapse from.
+     :param int frame_cnt: Number of frames to include in timelapse composite. Default 25.
+     :param Optional[int] size: Width per frame in pixels. If None, calculated to match video width. Default None.
+     :param Optional[int] crop_ratio: Percentage of frame width to keep (0-100). Default 50.
+     :param int padding: Padding in pixels added to timelapse when ruler is shown. Default 60.
+     :param int ruler_divisions: Number of major divisions on time ruler. Default 6.
+     :param bool show_ruler: If True, display time ruler below timelapse. Default True.
+     :param int ruler_height: Height of ruler in pixels. Default 60.
+
+     :example:
+     >>> slider = TimelapseSlider(video_path='path/to/video.mp4', frame_cnt=25, crop_ratio=75)
+     >>> slider.run()
+     >>> # Use sliders to select segment, then access selected times:
+     >>> start_time = slider.img_window.get_selected_start_time()
+     >>> end_time = slider.img_window.get_selected_end_time()
+     >>> slider.close()
+     """
+
+     def __init__(self,
+                  video_path: Union[str, os.PathLike],
+                  frame_cnt: int = 25,
+                  size: Optional[int] = None,
+                  crop_ratio: Optional[int] = 50,
+                  padding: int = 60,
+                  ruler_divisions: int = 6,
+                  show_ruler: bool = True,
+                  ruler_height: int = 60):
+
+         check_file_exist_and_readable(file_path=video_path)
+         check_int(name='frame_cnt', value=frame_cnt, min_value=1, raise_error=True)
+         if size is not None: check_int(name='size', value=size, min_value=1, raise_error=True)
+         check_int(name='padding', value=padding, min_value=1, raise_error=True)
+         check_int(name='ruler_height', value=ruler_height, min_value=1, raise_error=True)
+         check_valid_boolean(value=show_ruler, source=f'{self.__class__.__name__} show_ruler', raise_error=True)
+         self.video_meta = get_video_meta_data(video_path=video_path, raise_error=True)
+         if show_ruler: check_int(name='ruler_divisions', value=ruler_divisions, min_value=1, raise_error=True)
+         self.size, self.padding, self.crop_ratio, self.frame_cnt = size, padding, crop_ratio, frame_cnt
+         self.ruler_height, self.video_path, self.show_ruler, self.ruler_divisions = ruler_height, video_path, show_ruler, ruler_divisions
+         self.frm_name = f'{self.video_meta["video_name"]} - TIMELAPSE VIEWER'
+         self.video_capture = None # Will be initialized in run()
+         self._pending_frame_update = None # For debouncing frame preview updates
+         self._frame_debounce_ms = 300 # Wait 300ms after slider stops before updating frame preview
+
+     def _draw_img(self, img: np.ndarray, lbl: SimBALabel):
+         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         self.pil_image = Image.fromarray(img_rgb)
+         self.tk_image = ImageTk.PhotoImage(self.pil_image)
+         lbl.configure(image=self.tk_image)
+         lbl.image = self.tk_image
+
+     def _update_selection(self, slider_type: str):
+         start_sec = int(self.start_scale.get_value())
+         end_sec = int(self.end_scale.get_value())
+         max_sec = int(self.video_meta['video_length_s'])
+         if slider_type == 'start':
+             if start_sec >= end_sec:
+                 end_sec = min(start_sec + 1, max_sec)
+                 self.end_scale.set_value(end_sec)
+         else:
+             if end_sec <= start_sec:
+                 start_sec = max(end_sec - 1, 0)
+                 self.start_scale.set_value(start_sec)
+
+         self.selected_start[0] = start_sec
+         self.selected_end[0] = end_sec
+
+         # Update time labels immediately (fast, no I/O)
+         self.start_time_label.config(text=seconds_to_timestamp(start_sec), fg='blue')
+         self.end_time_label.config(text=seconds_to_timestamp(end_sec), fg='red')
+
+         if self.video_meta['video_length_s'] > 0:
+             # Update highlight immediately (moderate cost, but needed for visual feedback)
+             self._highlight_segment(start_sec, end_sec)
+             # Debounce frame preview update (expensive I/O operation)
+             self._schedule_frame_update(slider_type=slider_type)
+
+     def _schedule_frame_update(self, slider_type: str):
+         """Schedule frame preview update with debouncing.
+
+         Cancels any pending frame update and schedules a new one. If the slider
+         moves again before the delay expires, the update is cancelled and rescheduled.
+         This prevents expensive frame reads during fast slider dragging.
+         """
+         # Check if window exists (might be called during cleanup)
+         if not hasattr(self, 'img_window') or not self.img_window.winfo_exists():
+             return
+
+         # Cancel any pending frame update
+         if self._pending_frame_update is not None:
+             self.img_window.after_cancel(self._pending_frame_update)
+
+         # Schedule new update after debounce delay
+         self._pending_frame_update = self.img_window.after(
+             self._frame_debounce_ms,
+             lambda: self._update_frame_display(slider_type=slider_type)
+         )
+
+     def _update_frame_display(self, slider_type: str):
+         """Update the frame preview display using the persistent VideoCapture object."""
+         if slider_type == 'start':
+             seconds = self.selected_start[0]
+             self.frame_label.config(text=f"Start Frame Preview ({seconds_to_timestamp(seconds)})", font=Formats.FONT_LARGE_BOLD.value, fg='blue')
+         else:
+             seconds = self.selected_end[0]
+             self.frame_label.config(text=f"End Frame Preview ({seconds_to_timestamp(seconds)})", font=Formats.FONT_LARGE_BOLD.value, fg='red')
+
+         frame_index = int(seconds * self.video_meta['fps'])
+         if frame_index >= self.video_meta['frame_count']: frame_index = self.video_meta['frame_count'] - 1
+         if frame_index < 0: frame_index = 0
+
+         # Use persistent VideoCapture object instead of read_frm_of_video
+         if self.video_capture is not None and self.video_capture.isOpened():
+             self.video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
+             ret, frame = self.video_capture.read()
+             if ret and frame is not None:
+                 self._draw_img(img=frame, lbl=self.frame_display_lbl)
+
+     def _create_time_ruler(self, width: int, height: int, num_divisions: int) -> np.ndarray:
+         total_width = width + (2 * self.padding)
+         bg_color, line_color, text_color = (255, 255, 255), (128, 128, 128), (0, 0, 0)
+         img = Image.new('RGB', (total_width, height), color=bg_color)
+         draw = ImageDraw.Draw(img)
+         font_dict = get_fonts()
+         try:
+             font_path = font_dict['Algerian']
+             pil_font = ImageFont.truetype(font_path, size=12)
+         except (KeyError, OSError):
+             pil_font = ImageFont.load_default()
+         major_tick_height, half_tick_height = height * 0.6, height * 0.4
+         quarter_tick_height, eighth_tick_height = height * 0.25, height * 0.15
+
+         for i in range(num_divisions + 1):
+             x = self.padding + int(i * width / num_divisions)
+             draw.line([(x, 0), (x, major_tick_height)], fill=line_color, width=2)
+             if self.video_meta['video_length_s'] is not None:
+                 seconds_at_division = i * self.video_meta['video_length_s'] / num_divisions
+                 label = seconds_to_timestamp(seconds=seconds_at_division)
+             elif self.video_meta['frame_count'] is not None:
+                 label = str(int(i * self.video_meta['frame_count'] / num_divisions))
+             else:
+                 label = str(i)
+             bbox = draw.textbbox((0, 0), label, font=pil_font)
+             text_width = bbox[2] - bbox[0]
+             if i == 0:
+                 draw.text((x, major_tick_height + 5), label, fill=text_color, font=pil_font)
+             elif i == num_divisions:
+                 draw.text((x - text_width, major_tick_height + 5), label, fill=text_color, font=pil_font)
+             else:
+                 draw.text((x - text_width // 2, major_tick_height + 5), label, fill=text_color, font=pil_font)
+             if i < num_divisions:
+                 x_half = self.padding + int((i + 0.5) * width / num_divisions)
+                 draw.line([(x_half, 0), (x_half, half_tick_height)], fill=line_color, width=1)
+                 for q in [0.25, 0.75]:
+                     x_quarter = self.padding + int((i + q) * width / num_divisions)
+                     draw.line([(x_quarter, 0), (x_quarter, quarter_tick_height)], fill=line_color, width=1)
+                 for e in [0.125, 0.375, 0.625, 0.875]:
+                     x_eighth = self.padding + int((i + e) * width / num_divisions)
+                     draw.line([(x_eighth, 0), (x_eighth, eighth_tick_height)], fill=line_color, width=1)
+
+         draw.line([(0, height - 1), (total_width, height - 1)], fill=line_color, width=1)
+         img_array = np.array(img)
+         img_bgr = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
+         return img_bgr
+
+     def _highlight_segment(self, start_sec: int, end_sec: int):
+         timelapse_width = self.original_timelapse.shape[1]
+         start_x = int((start_sec / self.video_meta['video_length_s']) * timelapse_width)
+         end_x = int((end_sec / self.video_meta['video_length_s']) * timelapse_width)
+         highlighted = self.original_timelapse.copy()
+         mask = np.ones(highlighted.shape[:2], dtype=np.uint8) * 128
+         mask[:, start_x:end_x] = 255
+         mask = cv2.merge([mask, mask, mask])
+         highlighted = cv2.multiply(highlighted, mask.astype(np.uint8), scale=1/255.0)
+         cv2.line(highlighted, (start_x, 0), (start_x, highlighted.shape[0]), (0, 255, 0), 2)
+         cv2.line(highlighted, (end_x, 0), (end_x, highlighted.shape[0]), (0, 255, 0), 2)
+         self._draw_img(img=highlighted, lbl=self.img_lbl)
+
+     def run(self):
+         # Create persistent VideoCapture object for efficient frame reading
+         self.video_capture = cv2.VideoCapture(self.video_path)
+         if not self.video_capture.isOpened():
+             raise ValueError(f"Failed to open video file: {self.video_path}")
+
+         self.timelapse_img = ImageMixin.get_timelapse_img(video_path=self.video_path, frame_cnt=self.frame_cnt, size=self.size, crop_ratio=self.crop_ratio)
+         if self.show_ruler:
+             timelapse_height, timelapse_width = self.timelapse_img.shape[0], self.timelapse_img.shape[1]
+             padded_timelapse = np.zeros((timelapse_height, timelapse_width + (2 * self.padding), 3), dtype=np.uint8)
+             padded_timelapse[:, self.padding:self.padding + timelapse_width] = self.timelapse_img
+             ruler = self._create_time_ruler(width=timelapse_width, height=self.ruler_height, num_divisions=self.ruler_divisions)
+             self.timelapse_img = cv2.vconcat([padded_timelapse, ruler])
+
+         self.original_timelapse = self.timelapse_img.copy()
+         self.img_window = Toplevel()
+         self.img_window.resizable(False, False)
+         self.img_window.title(self.frm_name)
+         self.img_window.protocol("WM_DELETE_WINDOW", self.close)
+
+
+         self.img_lbl = SimBALabel(parent=self.img_window, txt='')
+         self.img_lbl.pack()
+         self._draw_img(img=self.timelapse_img, lbl=self.img_lbl)
+         self.frame_display_frame = Frame(self.img_window)
+         self.frame_display_frame.pack(pady=10, padx=10, fill=BOTH, expand=True)
+         self.frame_label = SimBALabel(parent=self.frame_display_frame, txt="Frame Preview", font=Formats.FONT_REGULAR_BOLD.value)
+         self.frame_label.pack()
+         self.frame_display_lbl = SimBALabel(parent=self.frame_display_frame, txt='', bg_clr='black')
+         self.frame_display_lbl.pack(pady=5)
+         self.slider_frame = Frame(self.img_window)
+         self.slider_frame.pack(pady=10, padx=10, fill=X)
+         self.slider_frame.columnconfigure(index=0, weight=1)
+         self.slider_frame.columnconfigure(index=1, weight=0)
+         self.slider_frame.columnconfigure(index=2, weight=0)
+         self.slider_frame.columnconfigure(index=3, weight=1)
+
+         self.start_scale = SimBAScaleBar(parent=self.slider_frame, label="START TIME:", from_=0, to=self.video_meta['video_length_s'], orient=HORIZONTAL, length=400, resolution=1, value=0, showvalue=False, label_width=15, sliderrelief='raised', troughcolor='white', activebackground='blue', lbl_font=Formats.FONT_LARGE_BOLD.value)
+         self.start_scale.grid(row=0, column=1, padx=5)
+         self.start_scale.scale.config(command=lambda x: self._update_selection(slider_type='start'))
+
+         self.start_time_label = SimBALabel(parent=self.slider_frame, txt="00:00:00", font=Formats.FONT_LARGE_BOLD.value, width=10, txt_clr='blue')
+         self.start_time_label.grid(row=0, column=2, padx=5)
+
+         self.end_scale = SimBAScaleBar(parent=self.slider_frame, label="END TIME:", from_=0, to=int(self.video_meta['video_length_s']), orient=HORIZONTAL, length=400, resolution=1, value=int(self.video_meta['video_length_s']), showvalue=False, label_width=15, sliderrelief='raised', troughcolor='white', activebackground='red', lbl_font=Formats.FONT_LARGE_BOLD.value)
+         self.end_scale.grid(row=1, column=1, padx=5)
+         self.end_scale.scale.config(command=lambda x: self._update_selection(slider_type='end'))
+
+         self.end_time_label = SimBALabel(parent=self.slider_frame, txt=seconds_to_timestamp(int(self.video_meta['video_length_s'])), font=Formats.FONT_LARGE_BOLD.value, width=10, txt_clr='red')
+         self.end_time_label.grid(row=1, column=2, padx=5)
+
+         self.selected_start = [0]
+         self.selected_end = [int(self.video_meta['video_length_s'])]
+
+         self.img_window.get_selected_start = lambda: self.selected_start[0]
+         self.img_window.get_selected_end = lambda: self.selected_end[0]
+         self.img_window.get_selected_start_time = lambda: seconds_to_timestamp(self.selected_start[0])
+         self.img_window.get_selected_end_time = lambda: seconds_to_timestamp(self.selected_end[0])
+
+         self.img_window.update_idletasks()
+         self.img_window.update()
+         req_width, req_height = self.img_window.winfo_reqwidth(), self.img_window.winfo_reqheight()
+         min_width = max(self.timelapse_img.shape[1] + 60, req_width + 20)
+         min_height = max(self.timelapse_img.shape[0] + 800, req_height + 100)
+         self.img_window.minsize(min_width, min_height)
+         self.img_window.geometry(f"{min_width}x{min_height}")
+         self._update_frame_display(slider_type='start')
+
+     def close(self):
+         # Cancel any pending frame updates
+         if self._pending_frame_update is not None:
+             if hasattr(self, 'img_window') and self.img_window.winfo_exists():
+                 self.img_window.after_cancel(self._pending_frame_update)
+             self._pending_frame_update = None
+
+         # Release VideoCapture object
+         if self.video_capture is not None:
+             self.video_capture.release()
+             self.video_capture = None
+
+         # Destroy window
+         if hasattr(self, 'img_window') and self.img_window.winfo_exists():
+             self.img_window.destroy()
+
+
+
+
+ x = TimelapseSlider(video_path=r"E:\troubleshooting\mitra_emergence\project_folder\clip_test\Box1_180mISOcontrol_Females_clipped_progress_bar.mp4",
+                     frame_cnt=25,
+                     crop_ratio=75,
+                     size=100)
+
+ x.run()
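The slider class leaves acting on the selection to the caller: after run() builds the window, the chosen start/end values are exposed through the lambdas attached to img_window. Below is a minimal sketch of that round trip, assuming the module path simba.sandbox.video_timelaps from the file list above; the hidden Tk root, the video path, and the wait_window call are illustrative assumptions, not part of the packaged code. Note also that the module ends with an unguarded test call (the `x = TimelapseSlider(...)` block), so a plain import would execute it; the sketch assumes that block is removed or moved behind an `if __name__ == "__main__":` guard.

```python
from tkinter import Tk
from simba.sandbox.video_timelaps import TimelapseSlider

root = Tk()
root.withdraw()  # TimelapseSlider creates its own Toplevel; only a hidden root is needed

slider = TimelapseSlider(video_path='project_folder/videos/Video_1.mp4',  # hypothetical path
                         frame_cnt=25, crop_ratio=75)
slider.run()                         # builds the timelapse strip, ruler, sliders and frame previews
root.wait_window(slider.img_window)  # block until the user closes the viewer

# Selection accessors attached to img_window in run(); they remain callable after the
# window closes because they only read the selected_start / selected_end lists.
start_ts = slider.img_window.get_selected_start_time()  # e.g. '00:00:12'
end_ts = slider.img_window.get_selected_end_time()      # e.g. '00:01:05'
print(f'Selected segment: {start_ts} -> {end_ts}')
```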
simba/ui/pop_ups/run_machine_models_popup.py
@@ -8,8 +8,8 @@ from simba.mixins.config_reader import ConfigReader
  from simba.mixins.pop_up_mixin import PopUpMixin
  from simba.model.inference_batch import InferenceBatch
  from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, Entry_Box,
-                                         FileSelect, SimbaButton, SimBASeperator,
-                                         SimBALabel)
+                                         FileSelect, SimbaButton, SimBALabel,
+                                         SimBASeperator)
  from simba.utils.checks import (check_file_exist_and_readable, check_float,
                                  check_int)
  from simba.utils.enums import ConfigKey, Dtypes, Formats, Keys, Links