simba-uw-tf-dev 4.7.2__py3-none-any.whl → 4.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of simba-uw-tf-dev has been flagged as potentially problematic. See the registry's advisory page for further details.

Files changed (35):
  1. simba/SimBA.py +13 -4
  2. simba/assets/icons/left_arrow_green.png +0 -0
  3. simba/assets/icons/left_arrow_red.png +0 -0
  4. simba/assets/icons/right_arrow_green.png +0 -0
  5. simba/assets/icons/right_arrow_red.png +0 -0
  6. simba/assets/lookups/yolo_schematics/yolo_mitra.csv +1 -1
  7. simba/mixins/geometry_mixin.py +357 -302
  8. simba/mixins/image_mixin.py +129 -4
  9. simba/model/yolo_fit.py +22 -15
  10. simba/model/yolo_pose_inference.py +7 -2
  11. simba/outlier_tools/skip_outlier_correction.py +2 -2
  12. simba/plotting/heat_mapper_clf_mp.py +45 -23
  13. simba/plotting/plot_clf_results.py +2 -1
  14. simba/plotting/plot_clf_results_mp.py +456 -455
  15. simba/roi_tools/roi_utils.py +2 -2
  16. simba/sandbox/convert_h264_to_mp4_lossless.py +129 -0
  17. simba/sandbox/extract_and_convert_videos.py +257 -0
  18. simba/sandbox/remove_end_of_video.py +80 -0
  19. simba/sandbox/video_timelaps.py +291 -0
  20. simba/ui/import_pose_frame.py +13 -13
  21. simba/ui/pop_ups/clf_plot_pop_up.py +1 -1
  22. simba/ui/pop_ups/run_machine_models_popup.py +2 -2
  23. simba/ui/pop_ups/video_processing_pop_up.py +3638 -3469
  24. simba/ui/tkinter_functions.py +3 -1
  25. simba/ui/video_timelaps.py +454 -0
  26. simba/utils/lookups.py +67 -1
  27. simba/utils/read_write.py +10 -3
  28. simba/video_processors/batch_process_create_ffmpeg_commands.py +0 -1
  29. simba/video_processors/video_processing.py +5385 -5264
  30. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.5.dist-info}/METADATA +1 -1
  31. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.5.dist-info}/RECORD +35 -26
  32. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.5.dist-info}/LICENSE +0 -0
  33. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.5.dist-info}/WHEEL +0 -0
  34. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.5.dist-info}/entry_points.txt +0 -0
  35. {simba_uw_tf_dev-4.7.2.dist-info → simba_uw_tf_dev-4.7.5.dist-info}/top_level.txt +0 -0
@@ -170,6 +170,7 @@ class SimBAScaleBar(Frame):
170
170
  to: int = 100,
171
171
  tickinterval: Optional[int] = None,
172
172
  troughcolor: Optional[str] = None,
173
+ activebackground: Optional[str] = None,
173
174
  sliderrelief: Literal["raised", "sunken", "flat", "ridge", "solid", "groove"] = 'flat'):
174
175
 
175
176
  super().__init__(master=parent)
@@ -192,7 +193,8 @@ class SimBAScaleBar(Frame):
192
193
  troughcolor=troughcolor,
193
194
  tickinterval=tickinterval,
194
195
  resolution=resolution,
195
- showvalue=showvalue)
196
+ showvalue=showvalue,
197
+ activebackground=activebackground)
196
198
 
197
199
  if label is not None:
198
200
  self.lbl = SimBALabel(parent=self, txt=label, font=lbl_font, txt_clr=label_clr, width=label_width)
@@ -0,0 +1,454 @@
1
+ import os
2
+ from tkinter import *
3
+ from typing import Optional, Union
4
+
5
+ import cv2
6
+ import numpy as np
7
+ from PIL import Image, ImageTk
8
+
9
+ from simba.mixins.image_mixin import ImageMixin
10
+ from simba.ui.tkinter_functions import SimbaButton, SimBALabel, SimBAScaleBar
11
+ from simba.utils.checks import (check_file_exist_and_readable, check_int,
12
+ check_valid_boolean)
13
+ from simba.utils.enums import Formats, TkBinds
14
+ from simba.utils.lookups import get_icons_paths, get_monitor_info
15
+ from simba.utils.read_write import (get_video_meta_data, read_frm_of_video,
16
+ seconds_to_timestamp)
17
+
18
+
19
+ class TimelapseSlider():
20
+
21
+ """
22
+ Interactive timelapse viewer with segment selection sliders.
23
+
24
+ Creates a Tkinter GUI window displaying a timelapse composite image generated from evenly-spaced frames
25
+ across a video. Includes interactive sliders to select start and end times for video segments, with
26
+ visual highlighting of the selected segment and frame previews.
27
+
28
+ .. image:: _static/img/TimelapseSlider.png
29
+ :width: 600
30
+ :align: center
31
+
32
+ :param Union[str, os.PathLike] video_path: Path to video file to create timelapse from.
33
+ :param int frame_cnt: Number of frames to include in timelapse composite. Default 25.
34
+ :param Optional[int] ruler_width: Width per frame in pixels. If None, calculated to match video width. Default None.
35
+ :param Optional[int] crop_ratio: Percentage of frame width to keep (0-100). Default 50.
36
+ :param int padding: Padding in pixels added to timelapse when ruler is shown. Default 60.
37
+ :param int ruler_divisions: Number of major divisions on time ruler. Default 6.
38
+ :param bool show_ruler: If True, display time ruler below timelapse. Default True.
39
+ :param int ruler_height: Height of ruler in pixels. Default 60.
40
+ :param bool use_timestamps: If True, display timestamps (HH:MM:SS) in labels and ruler. If False, display frame numbers. Default True.
41
+
42
+ :example:
43
+ >>> slider = TimelapseSlider(video_path='path/to/video.mp4', frame_cnt=25, crop_ratio=75)
44
+ >>> slider.run()
45
+ >>> # Use sliders to select segment, then access selected times and frames:
46
+ >>> start_time = slider.get_start_time() # seconds (float)
47
+ >>> end_time = slider.get_end_time() # seconds (float)
48
+ >>> start_time_str = slider.get_start_time_str() # "HH:MM:SS" string
49
+ >>> end_time_str = slider.get_end_time_str() # "HH:MM:SS" string
50
+ >>> start_frame = slider.get_start_frame() # frame number (int)
51
+ >>> end_frame = slider.get_end_frame() # frame number (int)
52
+ >>> slider.close()
53
+ """
54
+
55
+ def __init__(self,
56
+ video_path: Union[str, os.PathLike],
57
+ frame_cnt: int = 25,
58
+ crop_ratio: Optional[int] = 50,
59
+ padding: int = 60,
60
+ ruler_divisions: int = 6,
61
+ show_ruler: bool = True,
62
+ ruler_height: Optional[int] = None,
63
+ ruler_width: Optional[int] = None,
64
+ img_width: Optional[int] = None,
65
+ img_height: Optional[int] = None,
66
+ use_timestamps: bool = True):
67
+
68
+ check_file_exist_and_readable(file_path=video_path)
69
+ check_int(name='frame_cnt', value=frame_cnt, min_value=1, raise_error=True)
70
+ _, (self.monitor_width, self.monitor_height) = get_monitor_info()
71
+ if ruler_width is not None: check_int(name='size', value=ruler_width, min_value=1, raise_error=True)
72
+ else: ruler_width = int(self.monitor_width * 0.5)
73
+ if ruler_height is not None: check_int(name='ruler_height', value=ruler_height, min_value=1, raise_error=True)
74
+ else: ruler_height = int(self.monitor_height * 0.05)
75
+ if img_width is not None: check_int(name='img_width', value=img_width, min_value=1, raise_error=True)
76
+ else: img_width = int(self.monitor_width * 0.5)
77
+ if img_height is not None: check_int(name='img_height', value=img_height, min_value=1, raise_error=True)
78
+ else: img_height = int(self.monitor_height * 0.5)
79
+
80
+
81
+ check_int(name='padding', value=padding, min_value=1, raise_error=True)
82
+ check_valid_boolean(value=show_ruler, source=f'{self.__class__.__name__} show_ruler', raise_error=True)
83
+ self.video_meta = get_video_meta_data(video_path=video_path, raise_error=True)
84
+ if show_ruler: check_int(name='ruler_divisions', value=ruler_divisions, min_value=1, raise_error=True)
85
+ check_valid_boolean(value=use_timestamps, source=f'{self.__class__.__name__} use_timestamps', raise_error=True)
86
+ self.size, self.padding, self.crop_ratio, self.frame_cnt = ruler_width, padding, crop_ratio, frame_cnt
87
+ self.ruler_height, self.video_path, self.show_ruler, self.ruler_divisions = ruler_height, video_path, show_ruler, ruler_divisions
88
+ self.img_width, self.img_height = img_width, img_height
89
+ self.use_timestamps = use_timestamps
90
+ self.frm_name = f'{self.video_meta["video_name"]} - TIMELAPSE VIEWER - hit "X" or ESC to close'
91
+ self.video_capture, self._pending_frame_update, self._frame_debounce_ms = None, None, 50
92
+
93
+ def _draw_img(self, img: np.ndarray, lbl: SimBALabel):
94
+ img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
95
+ self.pil_image = Image.fromarray(img_rgb)
96
+ self.tk_image = ImageTk.PhotoImage(self.pil_image)
97
+ lbl.configure(image=self.tk_image)
98
+ lbl.image = self.tk_image
99
+
100
+ def _update_selection(self, slider_type: str):
101
+ start_sec = self.start_scale.get_value()
102
+ end_sec = self.end_scale.get_value()
103
+ max_sec = int(self.video_meta['video_length_s'])
104
+
105
+ # Convert to int for timestamp mode, keep float for frame mode to preserve precision
106
+ if self.use_timestamps:
107
+ start_sec = int(start_sec)
108
+ end_sec = int(end_sec)
109
+ else:
110
+ # In frame mode, use float precision but ensure we're within bounds
111
+ start_sec = max(0.0, min(float(start_sec), float(max_sec)))
112
+ end_sec = max(0.0, min(float(end_sec), float(max_sec)))
113
+
114
+ if slider_type == 'start':
115
+ if start_sec >= end_sec:
116
+ end_sec = min(start_sec + (1.0 if self.use_timestamps else 1.0/self.video_meta['fps']), max_sec)
117
+ self.end_scale.set_value(end_sec)
118
+ else:
119
+ if end_sec <= start_sec:
120
+ start_sec = max(end_sec - (1.0 if self.use_timestamps else 1.0/self.video_meta['fps']), 0)
121
+ self.start_scale.set_value(start_sec)
122
+
123
+ self.selected_start[0] = start_sec
124
+ self.selected_end[0] = end_sec
125
+
126
+ start_frame = int(start_sec * self.video_meta['fps'])
127
+ end_frame = int(end_sec * self.video_meta['fps'])
128
+ if start_frame >= self.video_meta['frame_count']: start_frame = self.video_meta['frame_count'] - 1
129
+ if end_frame >= self.video_meta['frame_count']: end_frame = self.video_meta['frame_count'] - 1
130
+ if start_frame < 0: start_frame = 0
131
+ if end_frame < 0: end_frame = 0
132
+ self.selected_start_frame[0] = start_frame
133
+ self.selected_end_frame[0] = end_frame
134
+
135
+ if self.use_timestamps:
136
+ self.start_time_label.config(text=seconds_to_timestamp(start_sec), fg='green')
137
+ self.end_time_label.config(text=seconds_to_timestamp(end_sec), fg='red')
138
+ else:
139
+ start_frame = int(start_sec * self.video_meta['fps'])
140
+ end_frame = int(end_sec * self.video_meta['fps'])
141
+ self.start_time_label.config(text=str(start_frame), fg='green')
142
+ self.end_time_label.config(text=str(end_frame), fg='red')
143
+
144
+ if self.video_meta['video_length_s'] > 0:
145
+ self._highlight_segment(start_sec, end_sec)
146
+ self._schedule_frame_update(slider_type=slider_type)
147
+
148
+ def _move_start_frame(self, direction: int):
149
+ # Temporarily unbind the callback to avoid conflicts
150
+ original_cmd = self.start_scale.scale.cget('command')
151
+ self.start_scale.scale.config(command='')
152
+
153
+ try:
154
+ if self.use_timestamps:
155
+ current_seconds = self.selected_start[0]
156
+ new_seconds = current_seconds + direction
157
+ # Ensure start doesn't exceed end
158
+ end_seconds = self.selected_end[0]
159
+ new_seconds = max(0, min(new_seconds, int(self.video_meta['video_length_s']), end_seconds - 1))
160
+ self.start_scale.set_value(int(new_seconds))
161
+ self.selected_start[0] = int(new_seconds)
162
+ # Recalculate frame from seconds
163
+ new_frame = int(new_seconds * self.video_meta['fps'])
164
+ new_frame = max(0, min(new_frame, self.video_meta['frame_count'] - 1))
165
+ self.selected_start_frame[0] = new_frame
166
+ else:
167
+ # Get current frame from the selected_start_frame
168
+ current_frame = self.selected_start_frame[0]
169
+ new_frame = current_frame + direction
170
+ # Ensure start doesn't exceed end
171
+ end_frame = self.selected_end_frame[0]
172
+ new_frame = max(0, min(new_frame, self.video_meta['frame_count'] - 1, end_frame - 1))
173
+ # Convert frame to seconds for the slider
174
+ new_seconds = new_frame / self.video_meta['fps']
175
+ # Update the slider value
176
+ self.start_scale.set_value(new_seconds)
177
+ # Directly update both frame and seconds
178
+ self.selected_start_frame[0] = new_frame
179
+ self.selected_start[0] = new_seconds
180
+
181
+ # Update the display labels and highlights
182
+ if self.use_timestamps:
183
+ self.start_time_label.config(text=seconds_to_timestamp(self.selected_start[0]), fg='green')
184
+ else:
185
+ self.start_time_label.config(text=str(self.selected_start_frame[0]), fg='green')
186
+
187
+ # Update highlights and frame preview
188
+ if self.video_meta['video_length_s'] > 0:
189
+ self._highlight_segment(self.selected_start[0], self.selected_end[0])
190
+ if self._pending_frame_update is not None:
191
+ if hasattr(self, 'img_window') and self.img_window.winfo_exists():
192
+ self.img_window.after_cancel(self._pending_frame_update)
193
+ self._update_frame_display(slider_type='start')
194
+ finally:
195
+ # Rebind the callback
196
+ self.start_scale.scale.config(command=original_cmd)
197
+
198
+ def _move_end_frame(self, direction: int):
199
+ # Temporarily unbind the callback to avoid conflicts
200
+ original_cmd = self.end_scale.scale.cget('command')
201
+ self.end_scale.scale.config(command='')
202
+
203
+ try:
204
+ if self.use_timestamps:
205
+ current_seconds = self.selected_end[0]
206
+ new_seconds = current_seconds + direction
207
+ # Ensure end doesn't go below start
208
+ start_seconds = self.selected_start[0]
209
+ new_seconds = max(start_seconds + 1, min(new_seconds, int(self.video_meta['video_length_s'])))
210
+ self.end_scale.set_value(int(new_seconds))
211
+ self.selected_end[0] = int(new_seconds)
212
+ # Recalculate frame from seconds
213
+ new_frame = int(new_seconds * self.video_meta['fps'])
214
+ new_frame = max(0, min(new_frame, self.video_meta['frame_count'] - 1))
215
+ self.selected_end_frame[0] = new_frame
216
+ else:
217
+ # Get current frame from the selected_end_frame
218
+ current_frame = self.selected_end_frame[0]
219
+ new_frame = current_frame + direction
220
+ # Ensure end doesn't go below start
221
+ start_frame = self.selected_start_frame[0]
222
+ new_frame = max(start_frame + 1, min(new_frame, self.video_meta['frame_count'] - 1))
223
+ # Convert frame to seconds for the slider
224
+ new_seconds = new_frame / self.video_meta['fps']
225
+ # Update the slider value
226
+ self.end_scale.set_value(new_seconds)
227
+ # Directly update both frame and seconds
228
+ self.selected_end_frame[0] = new_frame
229
+ self.selected_end[0] = new_seconds
230
+
231
+ # Update the display labels and highlights
232
+ if self.use_timestamps:
233
+ self.end_time_label.config(text=seconds_to_timestamp(self.selected_end[0]), fg='red')
234
+ else:
235
+ self.end_time_label.config(text=str(self.selected_end_frame[0]), fg='red')
236
+
237
+ # Update highlights and frame preview
238
+ if self.video_meta['video_length_s'] > 0:
239
+ self._highlight_segment(self.selected_start[0], self.selected_end[0])
240
+ if self._pending_frame_update is not None:
241
+ if hasattr(self, 'img_window') and self.img_window.winfo_exists():
242
+ self.img_window.after_cancel(self._pending_frame_update)
243
+ self._update_frame_display(slider_type='end')
244
+ finally:
245
+ # Rebind the callback
246
+ self.end_scale.scale.config(command=original_cmd)
247
+
248
+ def _schedule_frame_update(self, slider_type: str):
249
+ """Schedule frame preview update with debouncing.
250
+
251
+ Cancels any pending frame update and schedules a new one. If the slider
252
+ moves again before the delay expires, the update is cancelled and rescheduled.
253
+ This prevents expensive frame reads during fast slider dragging.
254
+ """
255
+ if not hasattr(self, 'img_window') or not self.img_window.winfo_exists():
256
+ return
257
+
258
+ if self._pending_frame_update is not None: self.img_window.after_cancel(self._pending_frame_update)
259
+
260
+ self._pending_frame_update = self.img_window.after(self._frame_debounce_ms, lambda: self._update_frame_display(slider_type=slider_type))
261
+
262
+ def _update_frame_display(self, slider_type: str):
263
+ if slider_type == 'start':
264
+ seconds = self.selected_start[0]
265
+ if self.use_timestamps:
266
+ label_text = f"Start Frame Preview ({seconds_to_timestamp(seconds)})"
267
+ else:
268
+ frame_num = int(seconds * self.video_meta['fps'])
269
+ label_text = f"Start Frame Preview (Frame {frame_num})"
270
+ self.frame_label.config(text=label_text, font=Formats.FONT_LARGE_BOLD.value, fg='green')
271
+ else:
272
+ seconds = self.selected_end[0]
273
+ if self.use_timestamps:
274
+ label_text = f"End Frame Preview ({seconds_to_timestamp(seconds)})"
275
+ else:
276
+ frame_num = int(seconds * self.video_meta['fps'])
277
+ label_text = f"End Frame Preview (Frame {frame_num})"
278
+ self.frame_label.config(text=label_text, font=Formats.FONT_LARGE_BOLD.value, fg='red')
279
+
280
+ frame_index = int(seconds * self.video_meta['fps'])
281
+ if frame_index >= self.video_meta['frame_count']: frame_index = self.video_meta['frame_count'] - 1
282
+ if frame_index < 0: frame_index = 0
283
+
284
+ if self.video_capture is not None and self.video_capture.isOpened():
285
+ self.video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
286
+ ret, frame = self.video_capture.read()
287
+ if ret and frame is not None:
288
+ h, w = frame.shape[:2]
289
+ target_w, target_h = self.img_width, self.img_height
290
+ scale = min(target_w / w, target_h / h)
291
+ new_w, new_h = int(w * scale), int(h * scale)
292
+ frame = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
293
+ self._draw_img(img=frame, lbl=self.frame_display_lbl)
294
+
295
+ def _highlight_segment(self, start_sec: int, end_sec: int):
296
+ timelapse_width = self.original_timelapse.shape[1]
297
+ start_x = int((start_sec / self.video_meta['video_length_s']) * timelapse_width)
298
+ end_x = int((end_sec / self.video_meta['video_length_s']) * timelapse_width)
299
+ highlighted = self.original_timelapse.copy()
300
+ mask = np.ones(highlighted.shape[:2], dtype=np.uint8) * 128
301
+ mask[:, start_x:end_x] = 255
302
+ mask = cv2.merge([mask, mask, mask])
303
+ highlighted = cv2.multiply(highlighted, mask.astype(np.uint8), scale=1/255.0)
304
+ cv2.line(highlighted, (start_x, 0), (start_x, highlighted.shape[0]), (0, 255, 0), 2)
305
+ cv2.line(highlighted, (end_x, 0), (end_x, highlighted.shape[0]), (0, 255, 0), 2)
306
+ self._draw_img(img=highlighted, lbl=self.img_lbl)
307
+
308
+ def run(self):
309
+ self.video_capture = cv2.VideoCapture(self.video_path)
310
+ if not self.video_capture.isOpened():
311
+ raise ValueError(f"Failed to open video file: {self.video_path}")
312
+
313
+ self.timelapse_img = ImageMixin.get_timelapse_img(video_path=self.video_path, frame_cnt=self.frame_cnt, size=self.size, crop_ratio=self.crop_ratio)
314
+ if self.show_ruler:
315
+ timelapse_height, timelapse_width = self.timelapse_img.shape[0], self.timelapse_img.shape[1]
316
+ padded_timelapse = np.zeros((timelapse_height, timelapse_width + (2 * self.padding), 3), dtype=np.uint8)
317
+ padded_timelapse[:, self.padding:self.padding + timelapse_width] = self.timelapse_img
318
+ ruler = ImageMixin.create_time_ruler(video_path=self.video_path, width=timelapse_width, height=self.ruler_height, num_divisions=self.ruler_divisions, show_time=self.use_timestamps)
319
+ self.timelapse_img = cv2.vconcat([padded_timelapse, ruler])
320
+
321
+ self.original_timelapse = self.timelapse_img.copy()
322
+ self.img_window = Toplevel()
323
+ self.img_window.resizable(True, True)
324
+ self.img_window.title(self.frm_name)
325
+ self.img_window.protocol("WM_DELETE_WINDOW", self.close)
326
+ # Bind Escape key to close window
327
+ self.img_window.bind(TkBinds.ESCAPE.value, lambda event: self.close())
328
+
329
+
330
+ self.img_lbl = SimBALabel(parent=self.img_window, txt='')
331
+ self.img_lbl.pack()
332
+ self._draw_img(img=self.timelapse_img, lbl=self.img_lbl)
333
+ self.frame_display_frame = Frame(self.img_window)
334
+ self.frame_display_frame.pack(pady=10, padx=10, fill=BOTH, expand=True)
335
+ self.frame_label = SimBALabel(parent=self.frame_display_frame, txt="Frame Preview", font=Formats.FONT_REGULAR_BOLD.value)
336
+ self.frame_label.pack()
337
+ self.frame_display_lbl = SimBALabel(parent=self.frame_display_frame, txt='', bg_clr='black')
338
+ self.frame_display_lbl.pack(pady=5)
339
+ self.slider_frame = Frame(self.img_window)
340
+ self.slider_frame.pack(pady=10, padx=10, fill=X)
341
+ self.slider_frame.columnconfigure(index=0, weight=1)
342
+ self.slider_frame.columnconfigure(index=1, weight=0)
343
+ self.slider_frame.columnconfigure(index=2, weight=0)
344
+ self.slider_frame.columnconfigure(index=3, weight=0)
345
+ self.slider_frame.columnconfigure(index=4, weight=0)
346
+ self.slider_frame.columnconfigure(index=5, weight=1)
347
+
348
+ self.start_scale = SimBAScaleBar(parent=self.slider_frame, label="START TIME:", from_=0, to=self.video_meta['video_length_s'], orient=HORIZONTAL, length=400, resolution=1, value=0, showvalue=False, label_width=15, sliderrelief='raised', troughcolor='white', activebackground='green', lbl_font=Formats.FONT_LARGE_BOLD.value)
349
+ self.start_scale.grid(row=0, column=1, padx=5)
350
+ self.start_scale.scale.config(command=lambda x: self._update_selection(slider_type='start'))
351
+
352
+ initial_start_text = "00:00:00" if self.use_timestamps else "0"
353
+ self.start_time_label = SimBALabel(parent=self.slider_frame, txt=initial_start_text, font=Formats.FONT_LARGE_BOLD.value, width=10, txt_clr='green')
354
+ self.start_time_label.grid(row=0, column=2, padx=5)
355
+
356
+ start_btn_txt = "Previous second" if self.use_timestamps else "Previous frame"
357
+ start_btn_tooltip = "Move start time back by 1 second" if self.use_timestamps else "Move start frame back by 1 frame"
358
+ self.start_frame_left_btn = SimbaButton(parent=self.slider_frame, txt=start_btn_txt, tooltip_txt=start_btn_tooltip, cmd=self._move_start_frame, cmd_kwargs={'direction': -1}, font=Formats.FONT_REGULAR_BOLD.value, img='left_arrow_green')
359
+ self.start_frame_left_btn.grid(row=0, column=3, padx=2)
360
+ start_btn_txt_right = "Next second" if self.use_timestamps else "Next frame"
361
+ start_btn_tooltip_right = "Move start time forward by 1 second" if self.use_timestamps else "Move start frame forward by 1 frame"
362
+ self.start_frame_right_btn = SimbaButton(parent=self.slider_frame, txt=start_btn_txt_right, tooltip_txt=start_btn_tooltip_right, cmd=self._move_start_frame, cmd_kwargs={'direction': 1}, font=Formats.FONT_REGULAR_BOLD.value, img='right_arrow_green')
363
+ self.start_frame_right_btn.grid(row=0, column=4, padx=2)
364
+
365
+ self.end_scale = SimBAScaleBar(parent=self.slider_frame, label="END TIME:", from_=0, to=int(self.video_meta['video_length_s']), orient=HORIZONTAL, length=400, resolution=1, value=int(self.video_meta['video_length_s']), showvalue=False, label_width=15, sliderrelief='raised', troughcolor='white', activebackground='red', lbl_font=Formats.FONT_LARGE_BOLD.value)
366
+ self.end_scale.grid(row=1, column=1, padx=5)
367
+ self.end_scale.scale.config(command=lambda x: self._update_selection(slider_type='end'))
368
+
369
+ if self.use_timestamps:
370
+ initial_end_text = seconds_to_timestamp(int(self.video_meta['video_length_s']))
371
+ else:
372
+ initial_end_text = str(self.video_meta['frame_count'] - 1)
373
+ self.end_time_label = SimBALabel(parent=self.slider_frame, txt=initial_end_text, font=Formats.FONT_LARGE_BOLD.value, width=10, txt_clr='red')
374
+ self.end_time_label.grid(row=1, column=2, padx=5)
375
+
376
+ end_btn_txt = "Previous second" if self.use_timestamps else "Previous frame"
377
+ end_btn_tooltip = "Move end time back by 1 second" if self.use_timestamps else "Move end frame back by 1 frame"
378
+ self.end_frame_left_btn = SimbaButton(parent=self.slider_frame, txt=end_btn_txt, tooltip_txt=end_btn_tooltip, cmd=self._move_end_frame, cmd_kwargs={'direction': -1}, font=Formats.FONT_REGULAR_BOLD.value, img='left_arrow_red')
379
+ self.end_frame_left_btn.grid(row=1, column=3, padx=2)
380
+ end_btn_txt_right = "Next second" if self.use_timestamps else "Next frame"
381
+ end_btn_tooltip_right = "Move end time forward by 1 second" if self.use_timestamps else "Move end frame forward by 1 frame"
382
+ self.end_frame_right_btn = SimbaButton(parent=self.slider_frame, txt=end_btn_txt_right, tooltip_txt=end_btn_tooltip_right, cmd=self._move_end_frame, cmd_kwargs={'direction': 1}, font=Formats.FONT_REGULAR_BOLD.value, img='right_arrow_red')
383
+ self.end_frame_right_btn.grid(row=1, column=4, padx=2)
384
+
385
+ self.selected_start = [0]
386
+ self.selected_end = [int(self.video_meta['video_length_s'])]
387
+ self.selected_start_frame = [0]
388
+ end_frame = int(self.video_meta['frame_count']) - 1
389
+ if end_frame < 0: end_frame = 0
390
+ self.selected_end_frame = [end_frame]
391
+
392
+ self.img_window.update_idletasks()
393
+ self.img_window.update()
394
+
395
+ req_width, req_height = self.img_window.winfo_reqwidth(), self.img_window.winfo_reqheight()
396
+ min_width = max(self.timelapse_img.shape[1] + 60, req_width + 20)
397
+ timelapse_height = self.timelapse_img.shape[0]
398
+ frame_preview_height = self.img_height
399
+ slider_height, padding_total = 150, 50
400
+ calculated_min_height = timelapse_height + frame_preview_height + slider_height + padding_total
401
+ min_height = max(calculated_min_height, req_height + 50, timelapse_height + 400)
402
+ max_height = int(self.monitor_height * 0.95)
403
+ if min_height > max_height: min_height = max_height
404
+
405
+ self.img_window.minsize(min_width, min_height)
406
+ self.img_window.geometry(f"{min_width}x{min_height}")
407
+ self._update_frame_display(slider_type='start')
408
+
409
+ def get_start_time(self) -> float:
410
+ return self.selected_start[0]
411
+
412
+ def get_end_time(self) -> float:
413
+ return self.selected_end[0]
414
+
415
+ def get_start_time_str(self) -> str:
416
+ return seconds_to_timestamp(self.selected_start[0])
417
+
418
+ def get_end_time_str(self) -> str:
419
+ return seconds_to_timestamp(self.selected_end[0])
420
+
421
+ def get_start_frame(self) -> int:
422
+ return self.selected_start_frame[0]
423
+
424
+ def get_end_frame(self) -> int:
425
+ return self.selected_end_frame[0]
426
+
427
+ def close(self):
428
+ if self._pending_frame_update is not None:
429
+ if hasattr(self, 'img_window') and self.img_window.winfo_exists():
430
+ self.img_window.after_cancel(self._pending_frame_update)
431
+ self._pending_frame_update = None
432
+
433
+ # Unbind Escape key if window still exists
434
+ if hasattr(self, 'img_window') and self.img_window.winfo_exists():
435
+ try:
436
+ self.img_window.unbind(TkBinds.ESCAPE.value)
437
+ except:
438
+ pass
439
+
440
+ if self.video_capture is not None:
441
+ self.video_capture.release()
442
+ self.video_capture = None
443
+
444
+ if hasattr(self, 'img_window') and self.img_window.winfo_exists():
445
+ self.img_window.destroy()
446
+
447
+
448
+
449
+
450
+ # x = TimelapseSlider(video_path=r"E:\troubleshooting\mitra_emergence\project_folder\clip_test\Box1_180mISOcontrol_Females_clipped_progress_bar.mp4",
451
+ # frame_cnt=25,
452
+ # crop_ratio=75,
453
+ # use_timestamps=False)
454
+ # x.run()
simba/utils/lookups.py CHANGED
@@ -1201,4 +1201,70 @@ def check_for_updates(time_out: int = 2):
1201
1201
  else:
1202
1202
  msg = (f'NEW SimBA VERSION AVAILABLE. \nYou have SimBA version {env_simba_version}. \nThe latest version is {latest_simba_version}. '
1203
1203
  f'\nYou can update using "pip install simba-uw-tf-dev --upgrade"')
1204
- stdout_information(msg=msg, source=check_for_updates.__name__)
1204
+ stdout_information(msg=msg, source=check_for_updates.__name__)
1205
+
1206
+
1207
def get_ext_codec_map() -> Dict[str, str]:
    """
    Get a dictionary mapping video file extensions to their recommended FFmpeg codecs.

    For each container format, prefers the best-suited codec, then the stated
    alternative, then the best codec available on this system, so the returned codec
    is (whenever possible) one the local FFmpeg build can actually encode with.

    :return: Dictionary mapping file extensions (without leading dot) to codec names.
    :rtype: Dict[str, str]

    :example:
    >>> codec_map = get_ext_codec_map()
    >>> codec = codec_map.get('webm', 'libx264')  # Returns 'libvpx-vp9' or fallback
    """
    codecs_available = get_ffmpeg_encoders(raise_error=False)
    if not codecs_available: codecs_available = []

    # System-wide fallback: first commonly-available encoder found on this machine.
    common_codecs = ['libx264', 'mpeg4', 'h264', 'mjpeg', 'libx265']
    fallback_codec = next((c for c in common_codecs if c in codecs_available), None)

    # If no common codec found, use first available or default to mpeg4 (most universal).
    if fallback_codec is None:
        fallback_codec = codecs_available[0] if codecs_available else 'mpeg4'

    def get_codec(preferred: str, alternative: str = None) -> str:
        # Resolution order: preferred -> alternative -> system fallback -> preferred.
        # FIX: previously, an explicit but unavailable `alternative` caused the
        # (also unavailable) `preferred` to be returned without ever trying the
        # computed system fallback, contradicting the documented fallback behavior.
        if preferred in codecs_available:
            return preferred
        if alternative is not None and alternative in codecs_available:
            return alternative
        if fallback_codec in codecs_available:
            return fallback_codec
        # Encoder availability could not be determined at all: best-effort preferred.
        return preferred

    return {
        'webm': get_codec(preferred='libvpx-vp9', alternative='libvpx'),
        'avi': get_codec(preferred='mpeg4', alternative='libx264'),
        'mp4': get_codec(preferred='libx264', alternative='mpeg4'),
        'mov': get_codec(preferred='libx264', alternative='mpeg4'),
        'mkv': get_codec(preferred='libx264', alternative='mpeg4'),
        'flv': get_codec(preferred='libx264', alternative='mpeg4'),
        'm4v': get_codec(preferred='libx264', alternative='mpeg4'),
        'h264': get_codec(preferred='libx264', alternative='h264'),
    }
1249
+
1250
def get_ffmpeg_codec(file_name: Union[str, os.PathLike],
                     fallback: str = 'mpeg4') -> str:
    """
    Get the recommended FFmpeg codec for a video file based on its extension.

    :param Union[str, os.PathLike] file_name: Path to video file or file extension.
    :param str fallback: Codec to return if file extension is not recognized. Default: 'mpeg4'.
    :return: Recommended FFmpeg codec name for the video file.
    :rtype: str

    :example:
    >>> codec = get_ffmpeg_codec(file_name='video.mp4')
    >>> codec = get_ffmpeg_codec(file_name='video.webm', fallback='libx264')
    >>> codec = get_ffmpeg_codec(file_name=r'C:/videos/my_video.avi')
    """
    ext_to_codec = get_ext_codec_map()
    # get_fn_ext returns (dir, name, extension-with-dot); strip the dot for lookup.
    _, _, file_ext = get_fn_ext(filepath=file_name)
    return ext_to_codec.get(file_ext[1:], fallback)
simba/utils/read_write.py CHANGED
@@ -814,6 +814,7 @@ def read_frm_of_video(video_path: Union[str, os.PathLike, cv2.VideoCapture],
814
814
  frame_index: Optional[int] = 0,
815
815
  opacity: Optional[float] = None,
816
816
  size: Optional[Tuple[int, int]] = None,
817
+ keep_aspect_ratio: bool = False,
817
818
  greyscale: Optional[bool] = False,
818
819
  black_and_white: Optional[bool] = False,
819
820
  clahe: Optional[bool] = False,
@@ -831,7 +832,8 @@ def read_frm_of_video(video_path: Union[str, os.PathLike, cv2.VideoCapture],
831
832
  :param Union[str, os.PathLike, cv2.VideoCapture] video_path: Path to video file, or cv2.VideoCapture object.
832
833
  :param Optional[int] frame_index: The frame index to return (0-based). Default: 0. If -1 is passed, the last frame of the video is read.
833
834
  :param Optional[float] opacity: Value between 0 and 100 or None. If float value, returns image with opacity. 100 fully opaque. 0.0 fully transparent.
834
- :param Optional[Tuple[int, int]] size: If tuple, resizes the image to size. Else, returns original image size.
835
+ :param Optional[Tuple[int, int]] size: If tuple (width, height), resizes the image. If None, returns original image size. When used with keep_aspect_ratio=True, the image is resized to fit within the target size while maintaining aspect ratio.
836
+ :param bool keep_aspect_ratio: If True and size is provided, resizes the image to fit within the target size while maintaining aspect ratio. If False, resizes to exact size (may distort aspect ratio). Default False.
835
837
  :param Optional[bool] greyscale: If True, returns the greyscale image. Default False.
836
838
  :param Optional[bool] black_and_white: If True, returns black and white image at threshold 127. Default False.
837
839
  :param Optional[bool] clahe: If True, returns CLAHE enhanced image. Default False.
@@ -906,8 +908,14 @@ def read_frm_of_video(video_path: Union[str, os.PathLike, cv2.VideoCapture],
906
908
  h, w, clr = img.shape[:3]
907
909
  opacity_image = np.ones((h, w, clr), dtype=np.uint8) * int(255 * opacity)
908
910
  img = cv2.addWeighted( img.astype(np.uint8), 1 - opacity, opacity_image.astype(np.uint8), opacity, 0)
909
- if size:
911
+ if size is not None and not keep_aspect_ratio:
910
912
  img = cv2.resize(img, size, interpolation=cv2.INTER_LINEAR)
913
+ elif size is not None and keep_aspect_ratio:
914
+ target_w, target_h = size
915
+ h, w = img.shape[:2]
916
+ scale = min(target_w / w, target_h / h)
917
+ new_w, new_h = int(w * scale), int(h * scale)
918
+ img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
911
919
  if greyscale or black_and_white:
912
920
  if len(img.shape) > 2:
913
921
  img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
@@ -3761,7 +3769,6 @@ def find_closest_readable_frame(video_path: Union[str, os.PathLike],
3761
3769
  return None, None
3762
3770
 
3763
3771
 
3764
-
3765
3772
  #copy_multiple_videos_to_project(config_path=r"C:\troubleshooting\multi_animal_dlc_two_c57\project_folder\project_config.ini", source=r'E:\maplight_videos\video_test', file_type='mp4', recursive_search=False)
3766
3773
 
3767
3774
 
@@ -12,7 +12,6 @@ import shutil
12
12
 
13
13
  from simba.utils.checks import (check_ffmpeg_available,
14
14
  check_file_exist_and_readable, check_str)
15
- from simba.utils.enums import Formats
16
15
  from simba.utils.errors import CropError, FFMPEGNotFoundError, PermissionError
17
16
  from simba.utils.lookups import (get_current_time, get_ffmpeg_encoders,
18
17
  gpu_quality_to_cpu_quality_lk)