simba-uw-tf-dev 4.5.8-py3-none-any.whl → 4.6.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- simba/SimBA.py +2 -2
- simba/assets/icons/frames_2.png +0 -0
- simba/assets/lookups/tooptips.json +10 -1
- simba/plotting/frame_mergerer_ffmpeg.py +137 -196
- simba/sandbox/cuda/egocentric_rotator.py +374 -0
- simba/ui/pop_ups/clf_add_remove_print_pop_up.py +37 -30
- simba/ui/pop_ups/egocentric_alignment_pop_up.py +20 -21
- simba/ui/pop_ups/interpolate_pop_up.py +2 -4
- simba/ui/pop_ups/multiple_videos_to_frames_popup.py +10 -11
- simba/ui/pop_ups/single_video_to_frames_popup.py +10 -10
- simba/ui/pop_ups/video_processing_pop_up.py +149 -145
- simba/ui/tkinter_functions.py +7 -1
- simba/video_processors/clahe_ui.py +65 -22
- simba/video_processors/egocentric_video_rotator.py +6 -7
- simba/video_processors/video_processing.py +73 -151
- simba/video_processors/videos_to_frames.py +3 -2
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/METADATA +1 -1
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/RECORD +22 -20
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.6.2.dist-info}/top_level.txt +0 -0

simba/sandbox/cuda/egocentric_rotator.py
@@ -0,0 +1,374 @@
+import functools
+import multiprocessing
+import os
+from typing import Optional, Tuple, Union
+
+import cv2
+import numpy as np
+
+try:
+    import cupy as cp
+    from cupyx.scipy.ndimage import affine_transform
+    CUPY_AVAILABLE = True
+except ImportError:
+    import numpy as cp
+    from scipy.ndimage import affine_transform
+    CUPY_AVAILABLE = False
+
+from simba.utils.checks import (check_file_exist_and_readable,
+                                check_if_dir_exists, check_if_valid_rgb_tuple,
+                                check_int, check_valid_array,
+                                check_valid_boolean, check_valid_tuple)
+from simba.utils.data import (align_target_warpaffine_vectors,
+                              center_rotation_warpaffine_vectors,
+                              egocentrically_align_pose)
+from simba.utils.enums import Defaults, Formats
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import (concatenate_videos_in_folder,
+                                    create_directory, find_core_cnt,
+                                    get_fn_ext, get_video_meta_data, read_df,
+                                    read_frm_of_video,
+                                    read_img_batch_from_video_gpu,
+                                    remove_a_folder,
+                                    _read_img_batch_from_video_helper)
+
+
+def egocentric_video_aligner(frm_range: np.ndarray,
+                             video_path: Union[str, os.PathLike],
+                             temp_dir: Union[str, os.PathLike],
+                             video_name: str,
+                             centers: np.ndarray,
+                             rotation_vectors: np.ndarray,
+                             target: Tuple[int, int],
+                             fill_clr: Tuple[int, int, int] = (255, 255, 255),
+                             verbose: bool = False,
+                             gpu: bool = True):
+
+    video_meta = get_video_meta_data(video_path=video_path)
+
+    batch, frm_range = frm_range[0], frm_range[1]
+    save_path = os.path.join(temp_dir, f'{batch}.mp4')
+    fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (video_meta['width'], video_meta['height']))
+    batch_rotation_vectors = rotation_vectors[frm_range[0]: frm_range[-1]+1]
+    batch_centers = centers[frm_range[0]: frm_range[-1]+1]
+    m_rotates = center_rotation_warpaffine_vectors(rotation_vectors=batch_rotation_vectors, centers=batch_centers)
+    m_translations = align_target_warpaffine_vectors(centers=batch_centers, target=np.array(target))
+
+    if gpu:
+        # Combine rotation and translation matrices into single transform
+        # This reduces two sequential operations to one
+        batch_size = len(frm_range)
+        m_combined = np.zeros((batch_size, 2, 3), dtype=np.float32)
+
+        for i in range(batch_size):
+            # Convert rotation matrix (2x3) to 3x3 homogeneous
+            m_rot_3x3 = np.eye(3, dtype=np.float32)
+            m_rot_3x3[:2, :] = m_rotates[i].astype(np.float32)
+
+            # Convert translation matrix (2x3) to 3x3 homogeneous
+            m_trans_3x3 = np.eye(3, dtype=np.float32)
+            m_trans_3x3[:2, :] = m_translations[i].astype(np.float32)
+
+            # Combine: translation after rotation (matches sequential cv2.warpAffine order)
+            m_combined_3x3 = m_trans_3x3 @ m_rot_3x3
+
+            # Convert back to 2x3 for warpAffine compatibility
+            m_combined[i] = m_combined_3x3[:2, :]
+
+        # Process frames in batches using GPU reading
+        # Use same batch size as original (30) for optimal I/O overlap
+        # Main optimization: combined matrix (one warpAffine instead of two)
+        img_counter = 0
+        frm_batches = np.array_split(frm_range, (len(frm_range) + 30 - 1) // 30)
+        for frm_batch_cnt, frm_ids in enumerate(frm_batches):
+            frms = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_ids[0], end_frm=frm_ids[-1], verbose=False)
+            frms = np.stack(list(frms.values()), axis=0)
+            for img_cnt, img in enumerate(frms):
+                # Use combined matrix for single warpAffine (faster than two separate calls)
+                m = m_combined[img_counter].astype(np.float32)
+                final_frame = cv2.warpAffine(img, m, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+                writer.write(final_frame)
+                if verbose:
+                    frame_id = frm_ids[img_cnt]
+                    print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
+                img_counter += 1
+
+        # Legacy CuPy code (commented out - CPU is faster for this use case)
+        if False and CUPY_AVAILABLE:
+            # Pre-compute all inverse matrices upfront (much faster than per-frame)
+            # For CuPy affine_transform, we need inverse matrices
+            m_inv_matrices = []
+            m_offsets = []
+            for i in range(batch_size):
+                m = m_combined[i]
+                matrix_2x2 = m[:2, :2].astype(np.float32)
+                offset = m[:2, 2].astype(np.float32)
+                m_inv_matrices.append(cp.asarray(matrix_2x2))
+                m_offsets.append(cp.asarray(offset))
+            # Batch invert all matrices at once
+            m_inv_matrices_gpu = cp.stack(m_inv_matrices)
+            m_inv_matrices_gpu = cp.linalg.inv(m_inv_matrices_gpu)
+            m_offsets_gpu = cp.stack(m_offsets)
+
+            # Create async reader for GPU
+            async_reader = AsyncVideoFrameReader(
+                video_path=video_path,
+                batch_size=batch_size_gpu,
+                max_que_size=3,
+                start_idx=frm_range[0],
+                end_idx=frm_range[-1] + 1,
+                gpu=True, # Use GPU reading
+                verbose=False
+            )
+            async_reader.start()
+
+            # Process batches as they become available from async reader
+            # Batch process and transfer to minimize GPU->CPU overhead
+            processed_frames_batch = []
+            frame_ids_batch = []
+
+            while True:
+                batch_result = get_async_frame_batch(batch_reader=async_reader, timeout=10)
+                if batch_result is None:
+                    # Write any remaining frames
+                    if processed_frames_batch:
+                        for frame in processed_frames_batch:
+                            writer.write(frame)
+                    break
+
+                start_idx, end_idx, frms = batch_result
+                batch_len = end_idx - start_idx + 1
+                frms_gpu = cp.asarray(frms)
+
+                # Process all frames in batch on GPU first
+                batch_transformed = []
+                batch_frame_indices = []
+
+                for i in range(batch_len):
+                    # Map frame index from video to frm_range index
+                    frame_id = start_idx + i
+                    frame_idx_in_range = np.where(frm_range == frame_id)[0]
+                    if len(frame_idx_in_range) == 0:
+                        continue
+                    frame_idx_in_range = frame_idx_in_range[0]
+                    batch_frame_indices.append((i, frame_idx_in_range))
+
+                # Process all frames in this batch on GPU
+                for i, frame_idx_in_range in batch_frame_indices:
+                    img_gpu = frms_gpu[i]
+                    matrix_inv = m_inv_matrices_gpu[frame_idx_in_range]
+                    offset = m_offsets_gpu[frame_idx_in_range]
+
+                    if len(img_gpu.shape) == 3: # Multi-channel
+                        transformed_channels = []
+                        for c in range(img_gpu.shape[2]):
+                            transformed_ch = affine_transform(
+                                img_gpu[:, :, c],
+                                matrix=matrix_inv,
+                                offset=offset,
+                                output_shape=(video_meta['height'], video_meta['width']),
+                                order=1,
+                                mode='constant',
+                                cval=fill_clr[c] if c < len(fill_clr) else fill_clr[0],
+                                prefilter=False
+                            )
+                            transformed_channels.append(transformed_ch)
+                        transformed = cp.stack(transformed_channels, axis=2)
+                    else: # Single channel
+                        transformed = affine_transform(
+                            img_gpu,
+                            matrix=matrix_inv,
+                            offset=offset,
+                            output_shape=(video_meta['height'], video_meta['width']),
+                            order=1,
+                            mode='constant',
+                            cval=fill_clr[0] if len(fill_clr) > 0 else 0,
+                            prefilter=False
+                        )
+                    batch_transformed.append(transformed)
+
+                # Batch transfer all frames from GPU to CPU at once
+                if batch_transformed:
+                    # Stack all transformed frames and transfer in one go
+                    batch_transformed_stack = cp.stack(batch_transformed)
+                    batch_cpu = cp.asnumpy(batch_transformed_stack).astype(np.uint8)
+
+                    # Write all frames from this batch
+                    for frame_idx, (i, frame_idx_in_range) in enumerate(batch_frame_indices):
+                        final_frame = batch_cpu[frame_idx]
+                        writer.write(final_frame)
+
+                        if verbose:
+                            frame_id = start_idx + i
+                            print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
+
+            async_reader.kill()
+
+        else:
+            # Fallback to CPU with combined matrix and batch reading
+            # Process frames in batches
+            # Use helper function directly to avoid nested multiprocessing (we're already in a worker process)
+            # Larger batch size reduces overhead
+            batch_size_gpu = 500
+            frm_batches = np.array_split(frm_range, (len(frm_range) + batch_size_gpu - 1) // batch_size_gpu)
+
+            # Create a mapping from frame_id to index in frm_range for fast lookup
+            frm_id_to_idx = {frame_id: idx for idx, frame_id in enumerate(frm_range)}
+
+            for frm_batch_cnt, frm_ids in enumerate(frm_batches):
+                # Read batch of frames directly using helper (no multiprocessing)
+                frm_idx_array = np.array(frm_ids)
+                frms_dict = _read_img_batch_from_video_helper(
+                    frm_idx=frm_idx_array,
+                    video_path=video_path,
+                    greyscale=False,
+                    verbose=False,
+                    black_and_white=False,
+                    clahe=False
+                )
+                frms = np.stack([frms_dict[f] for f in frm_ids], axis=0)
+
+                # Process all frames in batch using optimized CPU cv2.warpAffine with combined matrices
+                for i, frame_id in enumerate(frm_ids):
+                    # Fast dictionary lookup instead of np.where
+                    frame_idx_in_range = frm_id_to_idx.get(frame_id)
+                    if frame_idx_in_range is None:
+                        continue
+
+                    img = frms[i]
+                    m = m_combined[frame_idx_in_range].astype(np.float32)
+                    final_frame = cv2.warpAffine(img, m, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+                    writer.write(final_frame)
+
+                    if verbose:
+                        print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
+    else:
+        cap = cv2.VideoCapture(video_path)
+        for frm_idx, frm_id in enumerate(frm_range):
+            img = read_frm_of_video(video_path=cap, frame_index=frm_id)
+            rotated_frame = cv2.warpAffine(img, m_rotates[frm_idx], (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+            final_frame = cv2.warpAffine(rotated_frame, m_translations[frm_idx], (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+            writer.write(final_frame)
+            if verbose:
+                print(f'Creating frame {frm_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
+    writer.release()
+    return batch + 1
+
+class EgocentricVideoRotator():
+    """
+    Perform egocentric rotation of a video using CPU multiprocessing.
+
+    .. video:: _static/img/EgocentricalAligner_2.webm
+       :width: 800
+       :autoplay:
+       :loop:
+
+    .. seealso::
+       To perform joint egocentric alignment of both pose and video, or pose only, use :func:`~simba.data_processors.egocentric_aligner.EgocentricalAligner`.
+       To produce rotation vectors, use :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
+
+    :param Union[str, os.PathLike] video_path: Path to a video file.
+    :param np.ndarray centers: A 2D array of shape `(num_frames, 2)` containing the original locations of `anchor_1_idx` in each frame before alignment. Returned by :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
+    :param np.ndarray rotation_vectors: A 3D array of shape `(num_frames, 2, 2)` containing the rotation matrices applied to each frame. Returned by :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
+    :param bool verbose: If True, prints progress. Default True.
+    :param Tuple[int, int, int] fill_clr: The color of the additional pixels. Default black (0, 0, 0).
+    :param int core_cnt: Number of CPU cores to use for video rotation; `-1` uses all available cores.
+    :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video in the same directory as the input video with the `_rotated` suffix.
+
+    :example:
+    >>> DATA_PATH = "C:\501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = "C:\501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = "C:\501_MA142_Gi_Saline_0513_rotated.mp4"
+    >>> ANCHOR_LOC = np.array([250, 250])
+
+    >>> df = read_df(file_path=DATA_PATH, file_type='csv')
+    >>> bp_cols = [x for x in df.columns if not x.endswith('_p')]
+    >>> data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+    >>> _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+    >>> rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=ANCHOR_LOC, save_path=SAVE_PATH)
+    >>> rotater.run()
+    """
+
+    def __init__(self,
+                 video_path: Union[str, os.PathLike],
+                 centers: np.ndarray,
+                 rotation_vectors: np.ndarray,
+                 anchor_location: Tuple[int, int],
+                 verbose: bool = True,
+                 fill_clr: Tuple[int, int, int] = (0, 0, 0),
+                 core_cnt: int = -1,
+                 save_path: Optional[Union[str, os.PathLike]] = None,
+                 gpu: Optional[bool] = True):
+
+        check_file_exist_and_readable(file_path=video_path)
+        self.video_meta_data = get_video_meta_data(video_path=video_path)
+        check_valid_array(data=centers, source=f'{self.__class__.__name__} centers', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_array(data=rotation_vectors, source=f'{self.__class__.__name__} rotation_vectors', accepted_ndims=(3,), accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_tuple(x=anchor_location, source=f'{self.__class__.__name__} anchor_location', accepted_lengths=(2,), valid_dtypes=(int,))
+        for i in anchor_location: check_int(name=f'{self.__class__.__name__} anchor_location', value=i, min_value=1)
+        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
+        check_if_valid_rgb_tuple(data=fill_clr)
+        check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
+        if core_cnt > find_core_cnt()[0] or core_cnt == -1:
+            self.core_cnt = find_core_cnt()[0]
+        else:
+            self.core_cnt = core_cnt
+        video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
+        if save_path is not None:
+            self.save_dir = os.path.dirname(save_path)
+            check_if_dir_exists(in_dir=self.save_dir, source=f'{self.__class__.__name__} save_path')
+        else:
+            self.save_dir = video_dir
+            save_path = os.path.join(video_dir, f'{self.video_name}_rotated.mp4')
+        self.video_path, self.save_path = video_path, save_path
+        self.centers, self.rotation_vectors, self.gpu = centers, rotation_vectors, gpu
+        self.verbose, self.fill_clr, self.anchor_loc = verbose, fill_clr, anchor_location
+
+    def run(self):
+        video_timer = SimbaTimer(start=True)
+        temp_dir = os.path.join(self.save_dir, 'temp')
+        if not os.path.isdir(temp_dir):
+            create_directory(paths=temp_dir)
+        else:
+            remove_a_folder(folder_dir=temp_dir)
+            create_directory(paths=temp_dir)
+        frm_list = np.arange(0, self.video_meta_data['frame_count'])
+        frm_list = np.array_split(frm_list, self.core_cnt)
+        frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
+        if self.verbose:
+            print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+        with multiprocessing.Pool(self.core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
+            constants = functools.partial(egocentric_video_aligner,
+                                          temp_dir=temp_dir,
+                                          video_name=self.video_name,
+                                          video_path=self.video_path,
+                                          centers=self.centers,
+                                          rotation_vectors=self.rotation_vectors,
+                                          target=self.anchor_loc,
+                                          verbose=self.verbose,
+                                          fill_clr=self.fill_clr,
+                                          gpu=self.gpu)
+            for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
+                if self.verbose:
+                    print(f"Rotate batch {result}/{self.core_cnt} complete...")
+            pool.terminate()
+            pool.join()
+
+        concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
+        video_timer.stop_timer()
+        stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)
+
+if __name__ == "__main__":
+    DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+    VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+    SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+    ANCHOR_LOC = np.array([250, 250])
+
+    df = read_df(file_path=DATA_PATH, file_type='csv')
+    bp_cols = [x for x in df.columns if not x.endswith('_p')]
+    data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+
+    _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+    rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16, gpu=True)
+    rotater.run()
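
The new `egocentric_video_aligner` fuses each frame's rotation (about the anchor body-part center) and translation (center onto the target location) into a single 2x3 matrix by lifting both to 3x3 homogeneous form and multiplying, so the combined branch issues one `cv2.warpAffine` per frame instead of two. Below is a minimal, self-contained sketch of that composition; the center, target, and angle values are illustrative only and are not taken from SimBA.

```python
# Standalone sketch (not part of SimBA): why the rotation and translation warps above
# can be fused into one 2x3 matrix. All numeric values here are illustrative.
import cv2
import numpy as np

center = (160.0, 120.0)          # one frame's anchor body-part location (stand-in)
target = (250.0, 250.0)          # desired anchor location after alignment (stand-in)
angle = 35.0                     # rotation angle in degrees (stand-in)

m_rot = cv2.getRotationMatrix2D(center, angle, 1.0)        # 2x3, analogous to m_rotates[i]
m_trans = np.float32([[1, 0, target[0] - center[0]],
                      [0, 1, target[1] - center[1]]])       # 2x3, analogous to m_translations[i]

# Lift both to 3x3 homogeneous form and compose: translation applied after rotation,
# mirroring the sequential cv2.warpAffine order in the non-fused branch.
rot3 = np.vstack([m_rot, [0.0, 0.0, 1.0]])
trans3 = np.vstack([m_trans, [0.0, 0.0, 1.0]])
m_combined = (trans3 @ rot3)[:2, :]

# A point mapped by the fused matrix lands exactly where rotate-then-translate puts it.
pt = np.array([200.0, 90.0, 1.0])
sequential = m_trans @ np.append(m_rot @ pt, 1.0)
fused = m_combined @ pt
print(np.allclose(sequential, fused))   # True
```

Composing the matrices preserves the geometry exactly; what the fused warp avoids is resampling the image twice.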

simba/ui/pop_ups/clf_add_remove_print_pop_up.py
@@ -7,21 +7,22 @@ from typing import Union
 from simba.mixins.config_reader import ConfigReader
 from simba.mixins.pop_up_mixin import PopUpMixin
 from simba.pose_processors.pose_reset import PoseResetter
-from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon,
-
+from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, Entry_Box,
+                                        FileSelect, SimbaButton, SimBADropDown,
                                         TwoOptionQuestionPopUp)
 from simba.utils.checks import check_str
 from simba.utils.enums import ConfigKey, Keys, Links
-from simba.utils.errors import DuplicationError
+from simba.utils.errors import DuplicationError, NoDataError
 from simba.utils.printing import stdout_success, stdout_trash
 from simba.utils.read_write import tabulate_clf_info


 class AddClfPopUp(PopUpMixin, ConfigReader):
-    def __init__(self,
-
+    def __init__(self,
+                 config_path: Union[str, os.PathLike]):
+        PopUpMixin.__init__(self, config_path=config_path, title="ADD CLASSIFIER", icon='plus')
         ConfigReader.__init__(self, config_path=config_path, read_video_info=False)
-        self.clf_eb = Entry_Box(self.main_frm, "CLASSIFIER NAME:",
+        self.clf_eb = Entry_Box(parent=self.main_frm, fileDescription="CLASSIFIER NAME:", labelwidth=25, entry_box_width=30, justify='center', img='decision_tree_small')
         add_btn = SimbaButton(parent=self.main_frm, txt="ADD CLASSIFIER", cmd=self.run, img='rocket')
         self.clf_eb.grid(row=0, column=0, sticky=NW)
         add_btn.grid(row=1, column=0, sticky=NW)
@@ -43,36 +44,42 @@ class AddClfPopUp(PopUpMixin, ConfigReader):

 class RemoveAClassifierPopUp(PopUpMixin, ConfigReader):
     def __init__(self, config_path: Union[str, os.PathLike]):
-
+
         ConfigReader.__init__(self, config_path=config_path, read_video_info=False)
+        if not isinstance(self.clf_names, (list, tuple)) or len(self.clf_names) < 1:
+            raise NoDataError(msg='The SimBA project has no classifiers: Cannot remove a classifier.', source=self.__class__.__name__)
+        PopUpMixin.__init__(self, title="WARNING: REMOVE CLASSIFIER", icon='trash_red')
         self.remove_clf_frm = CreateLabelFrameWithIcon( parent=self.main_frm, header="SELECT A CLASSIFIER TO REMOVE", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.REMOVE_CLF.value)
-        self.clf_dropdown =
-        self.clf_dropdown.setChoices(self.clf_names[0])
-
+        self.clf_dropdown = SimBADropDown(parent=self.remove_clf_frm, dropdown_options=self.clf_names, label_width=20, dropdown_width=40, label='CLASSIFIER:', value=self.clf_names[0])
         run_btn = SimbaButton(parent=self.main_frm, txt="REMOVE CLASSIFIER", cmd=self.run, img='trash')
-        self.remove_clf_frm.grid(row=0, sticky=W)
-        self.clf_dropdown.grid(row=0, sticky=W)
+        self.remove_clf_frm.grid(row=0, column=0, sticky=W)
+        self.clf_dropdown.grid(row=0, column=0, sticky=W)
         run_btn.grid(row=1, pady=10)

     def run(self):
-
-
-
-        self.
-
-
-
-
-
-        self.config.set("SML settings",
-
-
-
-
-
-
-
-
+        clf_to_remove = self.clf_dropdown.get_value()
+        question = TwoOptionQuestionPopUp(title="WARNING!", question=f"Do you want to remove the {clf_to_remove} \nclassifier from the SimBA project?", option_one="YES", option_two="NO")
+        if question.selected_option == "YES":
+            for i in range(len(self.clf_names)):
+                self.config.remove_option("SML settings", f"model_path_{i+1}")
+                self.config.remove_option("SML settings", f"target_name_{i+1}")
+                self.config.remove_option("threshold_settings", f"threshold_{i+1}")
+                self.config.remove_option("Minimum_bout_lengths", f"min_bout_{i+1}")
+            self.clf_names.remove(self.clf_dropdown.getChoices())
+            self.config.set("SML settings", "no_targets", str(len(self.clf_names)))
+
+            for clf_cnt, clf_name in enumerate(self.clf_names):
+                self.config.set("SML settings", f"model_path_{clf_cnt + 1}", "")
+                self.config.set("SML settings", f"target_name_{clf_cnt + 1}", clf_name)
+                self.config.set("threshold_settings", f"threshold_{clf_cnt + 1}", "None")
+                self.config.set("Minimum_bout_lengths", f"min_bout_{clf_cnt + 1}", "None")
+
+            with open(self.config_path, "w") as f:
+                self.config.write(f)
+
+            stdout_trash(msg=f"{self.clf_dropdown.getChoices()} classifier removed from SimBA project.", source=self.__class__.__name__)
+        else:
+            pass


 # _ = RemoveAClassifierPopUp(config_path='/Users/simon/Desktop/envs/troubleshooting/Two_animals_16bps/project_folder/project_config.ini')

simba/ui/pop_ups/egocentric_alignment_pop_up.py
@@ -5,13 +5,12 @@ from typing import Union
 from simba.data_processors.egocentric_aligner import EgocentricalAligner
 from simba.mixins.config_reader import ConfigReader
 from simba.mixins.pop_up_mixin import PopUpMixin
-from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon,
-                                        FolderSelect, SimbaCheckbox,
+from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, FolderSelect,
                                         SimBADropDown)
 from simba.utils.checks import check_if_dir_exists, check_nvidea_gpu_available
 from simba.utils.enums import Keys, Links
 from simba.utils.errors import InvalidInputError, NoDataError, SimBAGPUError
-from simba.utils.lookups import get_color_dict
+from simba.utils.lookups import find_closest_string, get_color_dict
 from simba.utils.read_write import (find_all_videos_in_directory,
                                     find_files_of_filetypes_in_directory,
                                     get_fn_ext, str_2_bool)
@@ -31,19 +30,19 @@ class EgocentricAlignPopUp(ConfigReader, PopUpMixin):
         ConfigReader.__init__(self, config_path=config_path, read_video_info=False, create_logger=False)
         self.clr_dict = get_color_dict()
         gpu_status = NORMAL if check_nvidea_gpu_available() else DISABLED
-
-
         settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
-        self.data_dir = FolderSelect(settings_frm, "DATA DIRECTORY:", lblwidth=45, initialdir=self.outlier_corrected_dir, lbl_icon='folder')
-        self.videos_dir = FolderSelect(settings_frm, "VIDEO DIRECTORY:", lblwidth=45, initialdir=self.video_dir, lbl_icon='folder')
-        self.save_dir = FolderSelect(settings_frm, "SAVE DIRECTORY:", lblwidth=45, initialdir=self.
+        self.data_dir = FolderSelect(settings_frm, "DATA DIRECTORY:", lblwidth=45, initialdir=self.outlier_corrected_dir, lbl_icon='folder', tooltip_key='EGOCENTRIC_DATA_DIR')
+        self.videos_dir = FolderSelect(settings_frm, "VIDEO DIRECTORY:", lblwidth=45, initialdir=self.video_dir, lbl_icon='folder', tooltip_key='EGOCENTRIC_VIDEO_DIR')
+        self.save_dir = FolderSelect(settings_frm, "SAVE DIRECTORY:", lblwidth=45, initialdir=self.project_path, lbl_icon='folder', tooltip_key='SAVE_DIR')
+        default_center = find_closest_string(target='center', string_list=self.body_parts_lst)[0]
+        default_direction = find_closest_string(target='nose', string_list=self.body_parts_lst)[0]

-        self.center_anchor_dropdown = SimBADropDown(parent=settings_frm, label="CENTER ANCHOR:", dropdown_options=self.body_parts_lst, label_width=45, dropdown_width=45, img='center', value=
-        self.direction_anchor_dropdown = SimBADropDown(parent=settings_frm, label="DIRECTION ANCHOR:", dropdown_options=self.body_parts_lst, label_width=45, dropdown_width=45, img='direction', value=
-        self.direction_dropdown = SimBADropDown(parent=settings_frm, label="DIRECTION:", dropdown_options=list(range(0, 361)), label_width=45, dropdown_width=45, img='direction_2', value=0)
-        self.fill_clr_dropdown = SimBADropDown(parent=settings_frm, label="ROTATION COLOR:", dropdown_options=list(self.clr_dict.keys()), label_width=45, dropdown_width=45, img='fill', value='Black')
-        self.core_cnt_dropdown = SimBADropDown(parent=settings_frm, label="CPU COUNT:", dropdown_options=list(range(1, self.cpu_cnt + 1)), label_width=45, dropdown_width=45, img='cpu_small', value=self.cpu_cnt)
-        self.gpu_dropdown = SimBADropDown(parent=settings_frm, label='USE GPU:', dropdown_options=['TRUE', 'FALSE'], label_width=45, dropdown_width=45, img='gpu_3', value='FALSE', state=gpu_status)
+        self.center_anchor_dropdown = SimBADropDown(parent=settings_frm, label="CENTER ANCHOR:", dropdown_options=self.body_parts_lst, label_width=45, dropdown_width=45, img='center', value=default_center, tooltip_key='EGOCENTRIC_ANCHOR')
+        self.direction_anchor_dropdown = SimBADropDown(parent=settings_frm, label="DIRECTION ANCHOR:", dropdown_options=self.body_parts_lst, label_width=45, dropdown_width=45, img='direction', value=default_direction, tooltip_key='EGOCENTRIC_DIRECTION_ANCHOR')
+        self.direction_dropdown = SimBADropDown(parent=settings_frm, label="DIRECTION:", dropdown_options=list(range(0, 361)), label_width=45, dropdown_width=45, img='direction_2', value=0, tooltip_key='EGOCENTRIC_DIRECTION')
+        self.fill_clr_dropdown = SimBADropDown(parent=settings_frm, label="ROTATION COLOR:", dropdown_options=list(self.clr_dict.keys()), label_width=45, dropdown_width=45, img='fill', value='Black', tooltip_key='ROTATE_FILL_COLOR')
+        self.core_cnt_dropdown = SimBADropDown(parent=settings_frm, label="CPU COUNT:", dropdown_options=list(range(1, self.cpu_cnt + 1)), label_width=45, dropdown_width=45, img='cpu_small', value=int(self.cpu_cnt/2), tooltip_key='CORE_COUNT')
+        self.gpu_dropdown = SimBADropDown(parent=settings_frm, label='USE GPU:', dropdown_options=['TRUE', 'FALSE'], label_width=45, dropdown_width=45, img='gpu_3', value='FALSE', state=gpu_status, tooltip_key='USE_GPU')

         settings_frm.grid(row=0, column=0, sticky=NW)
         self.data_dir.grid(row=0, column=0, sticky=NW)
@@ -62,9 +61,10 @@ class EgocentricAlignPopUp(ConfigReader, PopUpMixin):
         data_dir, video_dir = self.data_dir.folder_path, self.videos_dir.folder_path
         save_dir = self.save_dir.folder_path
         gpu = str_2_bool(self.gpu_dropdown.get_value())
-        check_if_dir_exists(in_dir=data_dir)
-        check_if_dir_exists(in_dir=video_dir)
+        check_if_dir_exists(in_dir=data_dir)
+        check_if_dir_exists(in_dir=video_dir)
         check_if_dir_exists(in_dir=save_dir)
+        core_cnt = int(self.core_cnt_dropdown.get_value())
         if (save_dir == data_dir) or (save_dir == video_dir):
             raise InvalidInputError(msg='The save directory cannot be the same as the data/video directories',source=self.__class__.__name__)
         center_anchor, direction_anchor = self.center_anchor_dropdown.getChoices(), self.direction_anchor_dropdown.getChoices()
@@ -72,12 +72,10 @@ class EgocentricAlignPopUp(ConfigReader, PopUpMixin):
         direction = int(self.direction_dropdown.getChoices())
         if gpu and not check_nvidea_gpu_available():
             raise SimBAGPUError(msg='No NVIDEA GPU detected.', source=self.__class__.__name__)
-        data_file_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=[f'.{self.file_type}'],
-                                                               raise_error=True)
+        data_file_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=[f'.{self.file_type}'], raise_error=True)
         data_file_paths = [os.path.join(data_dir, x) for x in data_file_paths]
         data_file_names = [get_fn_ext(filepath=x)[1] for x in data_file_paths]
-        video_file_paths = list(
-            find_all_videos_in_directory(directory=video_dir, as_dict=True, raise_error=True).values())
+        video_file_paths = list( find_all_videos_in_directory(directory=video_dir, as_dict=True, raise_error=True).values())
         video_file_names = [get_fn_ext(filepath=x)[1] for x in video_file_paths]
         missing_video_files = [x for x in video_file_names if x not in data_file_names]
         if len(missing_video_files) > 0: raise NoDataError(
@@ -89,6 +87,7 @@ class EgocentricAlignPopUp(ConfigReader, PopUpMixin):
                                       anchor_2=direction_anchor,
                                       direction=direction,
                                       anchor_location=None,
+                                      core_cnt=core_cnt,
                                       fill_clr=self.clr_dict[fill_clr],
                                       verbose=True,
                                       gpu=gpu,
@@ -97,4 +96,4 @@ class EgocentricAlignPopUp(ConfigReader, PopUpMixin):
         aligner.run()


-#_ = EgocentricAlignPopUp(config_path=r"
+#_ = EgocentricAlignPopUp(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini")

simba/ui/pop_ups/interpolate_pop_up.py
@@ -35,8 +35,6 @@ class InterpolatePopUp(PopUpMixin, ConfigReader):
         self.config_path = config_path

         self.settings_frm= CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name='settings')
-
-        #self.settings_frm = LabelFrame(self.main_frm, text="SETTINGS", font=Formats.FONT_HEADER.value)
         instruction_lbl_1 = SimBALabel(parent=self.settings_frm, txt=INSTRUCTIONS_LBL_1, font=Formats.FONT_REGULAR_ITALICS.value, justify='center')
         self.type_dropdown = SimBADropDown(parent=self.settings_frm, dropdown_options=['MISSING BODY-PARTS', 'MISSING ANIMALS'], label="INTERPOLATION TYPE:", label_width=35, value='MISSING BODY-PARTS', dropdown_width=35, img='file_type')
         self.method_dropdown = SimBADropDown(parent=self.settings_frm, dropdown_options=['NEAREST', 'LINEAR', 'QUADRATIC'], label="INTERPOLATION METHOD:", label_width=35, value='NEAREST', dropdown_width=35, img='equation_small')
@@ -50,7 +48,7 @@ class InterpolatePopUp(PopUpMixin, ConfigReader):

         self.single_file_frm = LabelFrame(self.main_frm, text="INTERPOLATE SINGLE DATA FILE", font=Formats.FONT_HEADER.value)
         instruction_lbl_single = SimBALabel(parent=self.single_file_frm, txt=INSTRUCTIONS_LBL_2, font=Formats.FONT_REGULAR_ITALICS.value, justify='center')
-        self.selected_file = FileSelect(self.single_file_frm, "DATA PATH:", lblwidth=35, file_types=[("VIDEO FILE", ".csv .parquet")], initialdir=self.project_path)
+        self.selected_file = FileSelect(self.single_file_frm, "DATA PATH:", lblwidth=35, file_types=[("VIDEO FILE", ".csv .parquet")], initialdir=self.project_path, lbl_icon='file')
         self.run_btn_single = SimbaButton(parent=self.single_file_frm, txt="RUN SINGLE DATA FILE INTERPOLATION", img='rocket', txt_clr="blue", font=Formats.FONT_REGULAR.value, cmd=self.run, cmd_kwargs={'multiple': False})

         self.single_file_frm.grid(row=1, column=0, sticky=NW)
@@ -60,7 +58,7 @@ class InterpolatePopUp(PopUpMixin, ConfigReader):

         self.multiple_file_frm = LabelFrame(self.main_frm, text="INTERPOLATE DIRECTORY OF DATA", font=Formats.FONT_HEADER.value)
         instruction_lbl_multiple = SimBALabel(parent=self.multiple_file_frm, txt=INSTRUCTIONS_LBL_3, font=Formats.FONT_REGULAR_ITALICS.value, justify='center')
-        self.selected_dir = FolderSelect(self.multiple_file_frm, "SELECT DIRECTORY OF DATA FILES:", lblwidth=35, initialdir=self.project_path)
+        self.selected_dir = FolderSelect(self.multiple_file_frm, "SELECT DIRECTORY OF DATA FILES:", lblwidth=35, initialdir=self.project_path, lbl_icon='folder')
         self.run_btn_multiple = SimbaButton(parent=self.multiple_file_frm, txt="RUN DATA DIRECTORY INTERPOLATION", img='rocket', txt_clr="blue", font=Formats.FONT_REGULAR.value, cmd=self.run, cmd_kwargs={'multiple': True})
         self.multiple_file_frm.grid(row=2, column=0, sticky=NW)
         instruction_lbl_multiple.grid(row=0, column=0, sticky=NW)

simba/ui/pop_ups/multiple_videos_to_frames_popup.py
@@ -19,17 +19,16 @@ class MultipleVideos2FramesPopUp(PopUpMixin):
         PopUpMixin.__init__(self, title="EXTRACT FRAMES FROM SINGLE VIDEO", icon='frames')
         core_cnt = find_core_cnt()[0]
         settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header='SETTINGS', icon_name='settings', padx=5, pady=5, relief='solid')
-
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.include_fn_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=['TRUE', 'FALSE'], label='INCLUDE VIDEO NAME IN IMAGE NAMES: ',label_width=50, value='FALSE')
+        self.video_dir = FolderSelect(parent=settings_frm, folderDescription="VIDEO DIRECTORY:", lblwidth=50, lbl_icon='folder')
+        self.save_dir = FolderSelect(parent=settings_frm, folderDescription="SAVE DIRECTORY:", lblwidth=50, lbl_icon='folder')
+        self.core_cnt_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=list(range(1, core_cnt+1)), label='CORE COUNT: ', label_width=50, value=int(core_cnt/2), img='cpu_small')
+        self.quality_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=list(range(1, 101)), label='IMAGE QUALITY: ', label_width=50, value=90, state='disabled', img='pct_2')
+        self.img_format = SimBADropDown(parent=settings_frm, dropdown_options=['jpeg', 'png', 'webp'], label='IMAGE FORMAT: ', label_width=50, value='png', command=self._inactivate_quality, img='file_type')
+        self.verbose_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=['TRUE', 'FALSE'], label='VERBOSE: ', label_width=50, value='TRUE', img='verbose')
+        self.greyscale_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=['TRUE', 'FALSE'], label='GREYSCALE: ', label_width=50, value='FALSE', img='grey')
+        self.bw_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=['TRUE', 'FALSE'], label='BLACK & WHITE: ',label_width=50, value='FALSE', img='black_and_white')
+        self.clahe_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=['TRUE', 'FALSE'], label='CLAHE: ', label_width=50, value='FALSE', img='clahe')
+        self.include_fn_dropdown = SimBADropDown(parent=settings_frm, dropdown_options=['TRUE', 'FALSE'], label='INCLUDE VIDEO NAME IN IMAGE NAMES: ',label_width=50, value='FALSE', img='id_card_2')

         settings_frm.grid(row=0, column=0, sticky=NW, padx=10, pady=10)
         self.video_dir.grid(row=0, column=0, sticky=NW)