simba-uw-tf-dev 4.5.8-py3-none-any.whl → 4.7.1-py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registries.
- simba/SimBA.py +2 -2
- simba/assets/.recent_projects.txt +1 -0
- simba/assets/icons/frames_2.png +0 -0
- simba/assets/lookups/tooptips.json +15 -1
- simba/data_processors/agg_clf_counter_mp.py +52 -53
- simba/data_processors/blob_location_computer.py +1 -1
- simba/data_processors/circling_detector.py +30 -13
- simba/data_processors/cuda/geometry.py +45 -27
- simba/data_processors/cuda/image.py +1648 -1598
- simba/data_processors/cuda/statistics.py +72 -26
- simba/data_processors/cuda/timeseries.py +1 -1
- simba/data_processors/cue_light_analyzer.py +5 -9
- simba/data_processors/egocentric_aligner.py +25 -7
- simba/data_processors/freezing_detector.py +55 -47
- simba/data_processors/kleinberg_calculator.py +61 -29
- simba/feature_extractors/feature_subsets.py +14 -7
- simba/feature_extractors/mitra_feature_extractor.py +2 -2
- simba/feature_extractors/straub_tail_analyzer.py +4 -6
- simba/labelling/standard_labeller.py +1 -1
- simba/mixins/config_reader.py +5 -2
- simba/mixins/geometry_mixin.py +22 -36
- simba/mixins/image_mixin.py +24 -28
- simba/mixins/plotting_mixin.py +28 -10
- simba/mixins/statistics_mixin.py +48 -11
- simba/mixins/timeseries_features_mixin.py +1 -1
- simba/mixins/train_model_mixin.py +67 -29
- simba/model/inference_batch.py +1 -1
- simba/model/yolo_seg_inference.py +3 -3
- simba/outlier_tools/skip_outlier_correction.py +1 -1
- simba/plotting/ROI_feature_visualizer_mp.py +3 -5
- simba/plotting/clf_validator_mp.py +4 -5
- simba/plotting/cue_light_visualizer.py +6 -7
- simba/plotting/directing_animals_visualizer_mp.py +2 -3
- simba/plotting/distance_plotter_mp.py +378 -378
- simba/plotting/frame_mergerer_ffmpeg.py +137 -196
- simba/plotting/gantt_creator.py +29 -10
- simba/plotting/gantt_creator_mp.py +96 -33
- simba/plotting/geometry_plotter.py +270 -272
- simba/plotting/heat_mapper_clf_mp.py +4 -6
- simba/plotting/heat_mapper_location_mp.py +2 -2
- simba/plotting/light_dark_box_plotter.py +2 -2
- simba/plotting/path_plotter_mp.py +26 -29
- simba/plotting/plot_clf_results_mp.py +455 -454
- simba/plotting/pose_plotter_mp.py +28 -29
- simba/plotting/probability_plot_creator_mp.py +288 -288
- simba/plotting/roi_plotter_mp.py +31 -31
- simba/plotting/single_run_model_validation_video_mp.py +427 -427
- simba/plotting/spontaneous_alternation_plotter.py +2 -3
- simba/plotting/yolo_pose_track_visualizer.py +32 -27
- simba/plotting/yolo_pose_visualizer.py +35 -36
- simba/plotting/yolo_seg_visualizer.py +2 -3
- simba/pose_importers/simba_blob_importer.py +3 -3
- simba/roi_tools/roi_aggregate_stats_mp.py +5 -4
- simba/roi_tools/roi_clf_calculator_mp.py +4 -4
- simba/sandbox/analyze_runtimes.py +30 -0
- simba/sandbox/cuda/egocentric_rotator.py +374 -0
- simba/sandbox/get_cpu_pool.py +5 -0
- simba/sandbox/proboscis_to_tip.py +28 -0
- simba/sandbox/test_directionality.py +47 -0
- simba/sandbox/test_nonstatic_directionality.py +27 -0
- simba/sandbox/test_pycharm_cuda.py +51 -0
- simba/sandbox/test_simba_install.py +41 -0
- simba/sandbox/test_static_directionality.py +26 -0
- simba/sandbox/test_static_directionality_2d.py +26 -0
- simba/sandbox/verify_env.py +42 -0
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo.py +3 -3
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo_bbox.py +2 -2
- simba/ui/pop_ups/clf_add_remove_print_pop_up.py +37 -30
- simba/ui/pop_ups/clf_plot_pop_up.py +2 -2
- simba/ui/pop_ups/egocentric_alignment_pop_up.py +20 -21
- simba/ui/pop_ups/fsttc_pop_up.py +27 -25
- simba/ui/pop_ups/gantt_pop_up.py +31 -6
- simba/ui/pop_ups/interpolate_pop_up.py +2 -4
- simba/ui/pop_ups/kleinberg_pop_up.py +39 -40
- simba/ui/pop_ups/multiple_videos_to_frames_popup.py +10 -11
- simba/ui/pop_ups/single_video_to_frames_popup.py +10 -10
- simba/ui/pop_ups/video_processing_pop_up.py +186 -174
- simba/ui/tkinter_functions.py +10 -1
- simba/utils/custom_feature_extractor.py +1 -1
- simba/utils/data.py +90 -14
- simba/utils/enums.py +1 -0
- simba/utils/errors.py +441 -440
- simba/utils/lookups.py +1203 -1203
- simba/utils/printing.py +124 -124
- simba/utils/read_write.py +3769 -3721
- simba/utils/yolo.py +10 -1
- simba/video_processors/blob_tracking_executor.py +2 -2
- simba/video_processors/clahe_ui.py +66 -23
- simba/video_processors/egocentric_video_rotator.py +46 -44
- simba/video_processors/multi_cropper.py +1 -1
- simba/video_processors/video_processing.py +5264 -5300
- simba/video_processors/videos_to_frames.py +43 -32
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/METADATA +4 -3
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/RECORD +98 -86
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/top_level.txt +0 -0

simba/sandbox/cuda/egocentric_rotator.py (new file)
@@ -0,0 +1,374 @@
import functools
import multiprocessing
import os
from typing import Optional, Tuple, Union

import cv2
import numpy as np

try:
    import cupy as cp
    from cupyx.scipy.ndimage import affine_transform
    CUPY_AVAILABLE = True
except ImportError:
    import numpy as cp
    from scipy.ndimage import affine_transform
    CUPY_AVAILABLE = False

from simba.utils.checks import (check_file_exist_and_readable,
                                check_if_dir_exists, check_if_valid_rgb_tuple,
                                check_int, check_valid_array,
                                check_valid_boolean, check_valid_tuple)
from simba.utils.data import (align_target_warpaffine_vectors,
                              center_rotation_warpaffine_vectors,
                              egocentrically_align_pose)
from simba.utils.enums import Defaults, Formats
from simba.utils.printing import SimbaTimer, stdout_success
from simba.utils.read_write import (concatenate_videos_in_folder,
                                    create_directory, find_core_cnt,
                                    get_fn_ext, get_video_meta_data, read_df,
                                    read_frm_of_video,
                                    read_img_batch_from_video_gpu,
                                    remove_a_folder,
                                    _read_img_batch_from_video_helper)


def egocentric_video_aligner(frm_range: np.ndarray,
                             video_path: Union[str, os.PathLike],
                             temp_dir: Union[str, os.PathLike],
                             video_name: str,
                             centers: np.ndarray,
                             rotation_vectors: np.ndarray,
                             target: Tuple[int, int],
                             fill_clr: Tuple[int, int, int] = (255, 255, 255),
                             verbose: bool = False,
                             gpu: bool = True):

    video_meta = get_video_meta_data(video_path=video_path)

    batch, frm_range = frm_range[0], frm_range[1]
    save_path = os.path.join(temp_dir, f'{batch}.mp4')
    fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (video_meta['width'], video_meta['height']))
    batch_rotation_vectors = rotation_vectors[frm_range[0]: frm_range[-1]+1]
    batch_centers = centers[frm_range[0]: frm_range[-1]+1]
    m_rotates = center_rotation_warpaffine_vectors(rotation_vectors=batch_rotation_vectors, centers=batch_centers)
    m_translations = align_target_warpaffine_vectors(centers=batch_centers, target=np.array(target))

    if gpu:
        # Combine rotation and translation matrices into a single transform.
        # This reduces two sequential operations to one.
        batch_size = len(frm_range)
        m_combined = np.zeros((batch_size, 2, 3), dtype=np.float32)

        for i in range(batch_size):
            # Convert rotation matrix (2x3) to 3x3 homogeneous
            m_rot_3x3 = np.eye(3, dtype=np.float32)
            m_rot_3x3[:2, :] = m_rotates[i].astype(np.float32)

            # Convert translation matrix (2x3) to 3x3 homogeneous
            m_trans_3x3 = np.eye(3, dtype=np.float32)
            m_trans_3x3[:2, :] = m_translations[i].astype(np.float32)

            # Combine: translation after rotation (matches sequential cv2.warpAffine order)
            m_combined_3x3 = m_trans_3x3 @ m_rot_3x3

            # Convert back to 2x3 for warpAffine compatibility
            m_combined[i] = m_combined_3x3[:2, :]

        # Process frames in batches using GPU reading.
        # Use the same batch size as the original (30) for optimal I/O overlap.
        # Main optimization: the combined matrix (one warpAffine instead of two).
        img_counter = 0
        frm_batches = np.array_split(frm_range, (len(frm_range) + 30 - 1) // 30)
        for frm_batch_cnt, frm_ids in enumerate(frm_batches):
            frms = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_ids[0], end_frm=frm_ids[-1], verbose=False)
            frms = np.stack(list(frms.values()), axis=0)
            for img_cnt, img in enumerate(frms):
                # Use the combined matrix for a single warpAffine (faster than two separate calls)
                m = m_combined[img_counter].astype(np.float32)
                final_frame = cv2.warpAffine(img, m, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
                writer.write(final_frame)
                if verbose:
                    frame_id = frm_ids[img_cnt]
                    print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
                img_counter += 1

        # Legacy CuPy code (disabled via `if False` - CPU is faster for this use case)
        if False and CUPY_AVAILABLE:
            # Pre-compute all inverse matrices upfront (much faster than per-frame).
            # CuPy's affine_transform requires inverse matrices.
            m_inv_matrices = []
            m_offsets = []
            for i in range(batch_size):
                m = m_combined[i]
                matrix_2x2 = m[:2, :2].astype(np.float32)
                offset = m[:2, 2].astype(np.float32)
                m_inv_matrices.append(cp.asarray(matrix_2x2))
                m_offsets.append(cp.asarray(offset))
            # Batch invert all matrices at once
            m_inv_matrices_gpu = cp.stack(m_inv_matrices)
            m_inv_matrices_gpu = cp.linalg.inv(m_inv_matrices_gpu)
            m_offsets_gpu = cp.stack(m_offsets)

            # Create async reader for GPU
            async_reader = AsyncVideoFrameReader(
                video_path=video_path,
                batch_size=batch_size_gpu,
                max_que_size=3,
                start_idx=frm_range[0],
                end_idx=frm_range[-1] + 1,
                gpu=True,  # Use GPU reading
                verbose=False
            )
            async_reader.start()

            # Process batches as they become available from the async reader.
            # Batch process and transfer to minimize GPU->CPU overhead.
            processed_frames_batch = []
            frame_ids_batch = []

            while True:
                batch_result = get_async_frame_batch(batch_reader=async_reader, timeout=10)
                if batch_result is None:
                    # Write any remaining frames
                    if processed_frames_batch:
                        for frame in processed_frames_batch:
                            writer.write(frame)
                    break

                start_idx, end_idx, frms = batch_result
                batch_len = end_idx - start_idx + 1
                frms_gpu = cp.asarray(frms)

                # Process all frames in the batch on GPU first
                batch_transformed = []
                batch_frame_indices = []

                for i in range(batch_len):
                    # Map frame index from video to frm_range index
                    frame_id = start_idx + i
                    frame_idx_in_range = np.where(frm_range == frame_id)[0]
                    if len(frame_idx_in_range) == 0:
                        continue
                    frame_idx_in_range = frame_idx_in_range[0]
                    batch_frame_indices.append((i, frame_idx_in_range))

                # Process all frames in this batch on GPU
                for i, frame_idx_in_range in batch_frame_indices:
                    img_gpu = frms_gpu[i]
                    matrix_inv = m_inv_matrices_gpu[frame_idx_in_range]
                    offset = m_offsets_gpu[frame_idx_in_range]

                    if len(img_gpu.shape) == 3:  # Multi-channel
                        transformed_channels = []
                        for c in range(img_gpu.shape[2]):
                            transformed_ch = affine_transform(
                                img_gpu[:, :, c],
                                matrix=matrix_inv,
                                offset=offset,
                                output_shape=(video_meta['height'], video_meta['width']),
                                order=1,
                                mode='constant',
                                cval=fill_clr[c] if c < len(fill_clr) else fill_clr[0],
                                prefilter=False
                            )
                            transformed_channels.append(transformed_ch)
                        transformed = cp.stack(transformed_channels, axis=2)
                    else:  # Single channel
                        transformed = affine_transform(
                            img_gpu,
                            matrix=matrix_inv,
                            offset=offset,
                            output_shape=(video_meta['height'], video_meta['width']),
                            order=1,
                            mode='constant',
                            cval=fill_clr[0] if len(fill_clr) > 0 else 0,
                            prefilter=False
                        )
                    batch_transformed.append(transformed)

                # Batch transfer all frames from GPU to CPU at once
                if batch_transformed:
                    # Stack all transformed frames and transfer in one go
                    batch_transformed_stack = cp.stack(batch_transformed)
                    batch_cpu = cp.asnumpy(batch_transformed_stack).astype(np.uint8)

                    # Write all frames from this batch
                    for frame_idx, (i, frame_idx_in_range) in enumerate(batch_frame_indices):
                        final_frame = batch_cpu[frame_idx]
                        writer.write(final_frame)

                        if verbose:
                            frame_id = start_idx + i
                            print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')

            async_reader.kill()

        else:
            # Fallback to CPU with combined matrix and batch reading.
            # Use the helper function directly to avoid nested multiprocessing (we're already in a worker process).
            # A larger batch size reduces overhead.
            batch_size_gpu = 500
            frm_batches = np.array_split(frm_range, (len(frm_range) + batch_size_gpu - 1) // batch_size_gpu)

            # Create a mapping from frame_id to index in frm_range for fast lookup
            frm_id_to_idx = {frame_id: idx for idx, frame_id in enumerate(frm_range)}

            for frm_batch_cnt, frm_ids in enumerate(frm_batches):
                # Read a batch of frames directly using the helper (no multiprocessing)
                frm_idx_array = np.array(frm_ids)
                frms_dict = _read_img_batch_from_video_helper(
                    frm_idx=frm_idx_array,
                    video_path=video_path,
                    greyscale=False,
                    verbose=False,
                    black_and_white=False,
                    clahe=False
                )
                frms = np.stack([frms_dict[f] for f in frm_ids], axis=0)

                # Process all frames in the batch with CPU cv2.warpAffine and the combined matrices
                for i, frame_id in enumerate(frm_ids):
                    # Fast dictionary lookup instead of np.where
                    frame_idx_in_range = frm_id_to_idx.get(frame_id)
                    if frame_idx_in_range is None:
                        continue

                    img = frms[i]
                    m = m_combined[frame_idx_in_range].astype(np.float32)
                    final_frame = cv2.warpAffine(img, m, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
                    writer.write(final_frame)

                    if verbose:
                        print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
    else:
        cap = cv2.VideoCapture(video_path)
        for frm_idx, frm_id in enumerate(frm_range):
            img = read_frm_of_video(video_path=cap, frame_index=frm_id)
            rotated_frame = cv2.warpAffine(img, m_rotates[frm_idx], (video_meta['width'], video_meta['height']), borderValue=fill_clr)
            final_frame = cv2.warpAffine(rotated_frame, m_translations[frm_idx], (video_meta['width'], video_meta['height']), borderValue=fill_clr)
            writer.write(final_frame)
            if verbose:
                print(f'Creating frame {frm_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
    writer.release()
    return batch + 1


class EgocentricVideoRotator():
    """
    Perform egocentric rotation of a video using CPU multiprocessing.

    .. video:: _static/img/EgocentricalAligner_2.webm
       :width: 800
       :autoplay:
       :loop:

    .. seealso::
       To perform joint egocentric alignment of both pose and video, or pose only, use :func:`~simba.data_processors.egocentric_aligner.EgocentricalAligner`.
       To produce rotation vectors, use :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.

    :param Union[str, os.PathLike] video_path: Path to a video file.
    :param np.ndarray centers: A 2D array of shape `(num_frames, 2)` containing the original locations of `anchor_1_idx` in each frame before alignment. Returned by :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
    :param np.ndarray rotation_vectors: A 3D array of shape `(num_frames, 2, 2)` containing the rotation matrices applied to each frame. Returned by :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
    :param bool verbose: If True, prints progress. Default True.
    :param Tuple[int, int, int] fill_clr: The color of the additional pixels. Default black (0, 0, 0).
    :param int core_cnt: Number of CPU cores to use for video rotation; `-1` uses all available cores.
    :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video in the same directory as the input video with the `_rotated` suffix.

    :example:
    >>> DATA_PATH = r"C:\501_MA142_Gi_Saline_0513.csv"
    >>> VIDEO_PATH = r"C:\501_MA142_Gi_Saline_0513.mp4"
    >>> SAVE_PATH = r"C:\501_MA142_Gi_Saline_0513_rotated.mp4"
    >>> ANCHOR_LOC = np.array([250, 250])

    >>> df = read_df(file_path=DATA_PATH, file_type='csv')
    >>> bp_cols = [x for x in df.columns if not x.endswith('_p')]
    >>> data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
    >>> _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
    >>> rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=ANCHOR_LOC, save_path=SAVE_PATH)
    >>> rotater.run()
    """

    def __init__(self,
                 video_path: Union[str, os.PathLike],
                 centers: np.ndarray,
                 rotation_vectors: np.ndarray,
                 anchor_location: Tuple[int, int],
                 verbose: bool = True,
                 fill_clr: Tuple[int, int, int] = (0, 0, 0),
                 core_cnt: int = -1,
                 save_path: Optional[Union[str, os.PathLike]] = None,
                 gpu: Optional[bool] = True):

        check_file_exist_and_readable(file_path=video_path)
        self.video_meta_data = get_video_meta_data(video_path=video_path)
        check_valid_array(data=centers, source=f'{self.__class__.__name__} centers', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
        check_valid_array(data=rotation_vectors, source=f'{self.__class__.__name__} rotation_vectors', accepted_ndims=(3,), accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
        check_valid_tuple(x=anchor_location, source=f'{self.__class__.__name__} anchor_location', accepted_lengths=(2,), valid_dtypes=(int,))
        for i in anchor_location: check_int(name=f'{self.__class__.__name__} anchor_location', value=i, min_value=1)
        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
        check_if_valid_rgb_tuple(data=fill_clr)
        check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
        if core_cnt > find_core_cnt()[0] or core_cnt == -1:
            self.core_cnt = find_core_cnt()[0]
        else:
            self.core_cnt = core_cnt
        video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
        if save_path is not None:
            self.save_dir = os.path.dirname(save_path)
            check_if_dir_exists(in_dir=self.save_dir, source=f'{self.__class__.__name__} save_path')
        else:
            self.save_dir = video_dir
            save_path = os.path.join(video_dir, f'{self.video_name}_rotated.mp4')
        self.video_path, self.save_path = video_path, save_path
        self.centers, self.rotation_vectors, self.gpu = centers, rotation_vectors, gpu
        self.verbose, self.fill_clr, self.anchor_loc = verbose, fill_clr, anchor_location

    def run(self):
        video_timer = SimbaTimer(start=True)
        temp_dir = os.path.join(self.save_dir, 'temp')
        if not os.path.isdir(temp_dir):
            create_directory(paths=temp_dir)
        else:
            remove_a_folder(folder_dir=temp_dir)
            create_directory(paths=temp_dir)
        frm_list = np.arange(0, self.video_meta_data['frame_count'])
        frm_list = np.array_split(frm_list, self.core_cnt)
        frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
        if self.verbose:
            print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
        with multiprocessing.Pool(self.core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
            constants = functools.partial(egocentric_video_aligner,
                                          temp_dir=temp_dir,
                                          video_name=self.video_name,
                                          video_path=self.video_path,
                                          centers=self.centers,
                                          rotation_vectors=self.rotation_vectors,
                                          target=self.anchor_loc,
                                          verbose=self.verbose,
                                          fill_clr=self.fill_clr,
                                          gpu=self.gpu)
            for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
                if self.verbose:
                    print(f"Rotate batch {result}/{self.core_cnt} complete...")
            pool.terminate()
            pool.join()

        concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
        video_timer.stop_timer()
        stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)

if __name__ == "__main__":
    DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
    VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
    SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
    ANCHOR_LOC = np.array([250, 250])

    df = read_df(file_path=DATA_PATH, file_type='csv')
    bp_cols = [x for x in df.columns if not x.endswith('_p')]
    data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)

    _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
    rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16, gpu=True)
    rotater.run()
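
The rotator's main optimization, visible in the hunk above, is algebraic: the per-frame rotation (about the body center) and translation (to the anchor point) are both 2x3 affine matrices, and chaining two cv2.warpAffine calls is equivalent to a single warp with their product in homogeneous form. A minimal self-contained sketch of that identity, using a hypothetical test image and transforms rather than code from the package:

# Illustrative sketch (not part of the package): composing two 2x3 affine warps into one.
import cv2
import numpy as np

img = np.random.randint(0, 255, (60, 60, 3), dtype=np.uint8)  # hypothetical frame
m_rot = cv2.getRotationMatrix2D((30, 30), 45, 1.0)            # rotate about a "body center"
m_trans = np.float32([[1, 0, 5], [0, 1, -3]])                 # translate toward an "anchor"

def to_h(m):
    # Lift a 2x3 affine matrix to 3x3 homogeneous form.
    return np.vstack([np.float32(m), [0.0, 0.0, 1.0]])

m_combined = (to_h(m_trans) @ to_h(m_rot))[:2, :]  # translation applied after rotation

two_step = cv2.warpAffine(cv2.warpAffine(img, m_rot, (60, 60)), m_trans, (60, 60))
one_step = cv2.warpAffine(img, m_combined, (60, 60))
# Near-zero except for border handling and the double interpolation in the two-step path.
print(np.abs(two_step.astype(np.int16) - one_step.astype(np.int16)).mean())

Besides halving the warp count, the single warp interpolates each pixel once instead of twice, so it also tends to preserve slightly more detail.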

simba/sandbox/proboscis_to_tip.py (new file)
@@ -0,0 +1,28 @@
import os
from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
from simba.mixins.config_reader import ConfigReader
from simba.utils.read_write import read_df, get_fn_ext, read_video_info, write_df

CONFIG_PATH = r"C:\troubleshooting\srami0619\project_folder\project_config.ini"
BASE = 'prob_base'
TIP = 'prob_tip'


config = ConfigReader(config_path=CONFIG_PATH, read_video_info=True, create_logger=False)
animal_cnt = config.animal_cnt
for file_cnt, file_path in enumerate(config.outlier_corrected_paths):
    file_name = get_fn_ext(file_path)[1]
    _, px_per_mm, fps = read_video_info(video_name=file_name, video_info_df=config.video_info_df)
    data_df = read_df(file_path=file_path)
    save_path = os.path.join(config.features_dir, f'{file_name}.csv')
    for animal_id in range(1, animal_cnt+1):
        print(f'Analysing file {file_name}, animal {animal_id} ({file_cnt + 1}/{len(config.outlier_corrected_paths)})')
        base_cols = [f'{BASE}_{animal_id}_x', f'{BASE}_{animal_id}_y']
        tip_cols = [f'{TIP}_{animal_id}_x', f'{TIP}_{animal_id}_y']
        prob_data, tip_data = data_df[base_cols], data_df[tip_cols]
        data_df[f'animal_{animal_id}_base_tip_distance'] = FeatureExtractionMixin().keypoint_distances(a=prob_data.values, b=tip_data.values, px_per_mm=px_per_mm, in_centimeters=False)
    write_df(df=data_df, file_type='csv', save_path=save_path, verbose=True)
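
proboscis_to_tip.py adds one feature column per animal: the framewise base-to-tip distance in millimeters. The diff does not show the internals of FeatureExtractionMixin().keypoint_distances; assuming it computes a per-frame Euclidean distance scaled by the pixels-per-millimeter calibration, a plain-NumPy equivalent (with made-up coordinates and calibration) would be:

# Illustrative sketch (assumed semantics of keypoint_distances, not the package's code).
import numpy as np

base = np.array([[10.0, 10.0], [12.0, 11.0], [15.0, 13.0]])  # hypothetical (n_frames, 2) coordinates
tip = np.array([[40.0, 50.0], [41.0, 52.0], [44.0, 55.0]])
px_per_mm = 3.2                                              # made-up calibration value

# Framewise Euclidean distance in pixels, converted to millimeters.
dist_mm = np.linalg.norm(base - tip, axis=1) / px_per_mm
print(dist_mm)  # one distance per frame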

simba/sandbox/test_directionality.py (new file)
@@ -0,0 +1,47 @@
"""Test directionality_to_nonstatic_target function"""
import sys
import numpy as np

print(f"Python: {sys.executable}")
print("="*60)

try:
    from simba.data_processors.cuda.geometry import directionality_to_nonstatic_target
    print("✓ Function imported successfully")
except Exception as e:
    print(f"✗ Error importing function: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

# Create test data
print("\nCreating test data...")
left_ear = np.random.randint(0, 500, (10, 2)).astype(np.int32)
right_ear = np.random.randint(0, 500, (10, 2)).astype(np.int32)
nose = np.random.randint(0, 500, (10, 2)).astype(np.int32)
target = np.random.randint(0, 500, (10, 2)).astype(np.int32)

print(f"left_ear shape: {left_ear.shape}, dtype: {left_ear.dtype}")
print(f"right_ear shape: {right_ear.shape}, dtype: {right_ear.dtype}")
print(f"nose shape: {nose.shape}, dtype: {nose.dtype}")
print(f"target shape: {target.shape}, dtype: {target.dtype}")

# Test the function
print("\nTesting directionality_to_nonstatic_target...")
try:
    result = directionality_to_nonstatic_target(
        left_ear=left_ear,
        right_ear=right_ear,
        nose=nose,
        target=target
    )
    print(f"✓ SUCCESS! Result shape: {result.shape}")
    print(f"Result: {result}")
except Exception as e:
    print(f"✗ Error running function: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

print("\n" + "="*60)
print("Test passed!")
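
The test above exercises the new CUDA kernel directionality_to_nonstatic_target with (N, 2) body-part arrays. The kernel itself is not shown in this diff; as a rough CPU baseline for what "directing toward a target" means geometrically, one can threshold the angle between the head direction (ear midpoint to nose) and the bearing to the target. The function below is a simplified stand-in under that assumption, not SimBA's algorithm:

# Illustrative CPU stand-in (not SimBA's kernel) for frame-wise directionality.
import numpy as np

def facing_target(left_ear, right_ear, nose, target, max_angle_deg=45.0):
    # Head direction: from the midpoint of the ears to the nose.
    mid = (left_ear + right_ear) / 2.0
    head_vec = nose - mid
    target_vec = target - mid
    cos = np.sum(head_vec * target_vec, axis=1) / (
        np.linalg.norm(head_vec, axis=1) * np.linalg.norm(target_vec, axis=1) + 1e-9)
    # True where the bearing to the target falls within the angular threshold.
    return np.degrees(np.arccos(np.clip(cos, -1.0, 1.0))) < max_angle_deg

rng = np.random.default_rng(0)
left_ear, right_ear, nose, target = (rng.integers(0, 500, (10, 2)).astype(np.float64) for _ in range(4))
print(facing_target(left_ear, right_ear, nose, target))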

simba/sandbox/test_nonstatic_directionality.py (new file)
@@ -0,0 +1,27 @@
"""Test directionality_to_nonstatic_target with int64"""
import numpy as np
from simba.data_processors.cuda.geometry import directionality_to_nonstatic_target

print("Testing directionality_to_nonstatic_target with int64...")

left_ear = np.random.randint(0, 500, (100, 2)).astype(np.int64)
right_ear = np.random.randint(0, 500, (100, 2)).astype(np.int64)
nose = np.random.randint(0, 500, (100, 2)).astype(np.int64)
target = np.random.randint(0, 500, (100, 2)).astype(np.int64)

print(f"Arrays shape: {left_ear.shape}, dtype: {left_ear.dtype}")

try:
    result = directionality_to_nonstatic_target(
        left_ear=left_ear,
        right_ear=right_ear,
        nose=nose,
        target=target,
        verbose=True
    )
    print(f"✓ SUCCESS! Result shape: {result.shape}, dtype: {result.dtype}")
    print(f"First 5 results:\n{result[:5]}")
except Exception as e:
    print(f"✗ Error: {e}")
    import traceback
    traceback.print_exc()

simba/sandbox/test_pycharm_cuda.py (new file)
@@ -0,0 +1,51 @@
"""Test script to verify PyCharm CUDA environment setup"""
import sys
print(f"Python version: {sys.version}")
print(f"Python path: {sys.executable}")

try:
    from numba import cuda
    print(f"\nNumba CUDA available: {cuda.is_available()}")
    if cuda.is_available():
        print(f"CUDA devices: {len(cuda.gpus)}")
except Exception as e:
    print(f"\nError importing numba.cuda: {e}")

try:
    import cupy as cp
    print(f"CuPy version: {cp.__version__}")
    # Test CuPy
    x = cp.array([1, 2, 3, 4, 5])
    print(f"CuPy test array: {x.get()}")
except Exception as e:
    print(f"Error importing cupy: {e}")

try:
    import numpy as np
    print(f"NumPy version: {np.__version__}")
except Exception as e:
    print(f"Error importing numpy: {e}")

try:
    import cv2
    print(f"OpenCV version: {cv2.__version__}")
except Exception as e:
    print(f"Error importing cv2: {e}")

# Test SimBA CUDA imports
try:
    from simba.data_processors.cuda.geometry import is_inside_rectangle
    print("\n✓ Successfully imported simba.data_processors.cuda.geometry")

    # Quick test
    test_points = np.array([[150, 150], [300, 300], [50, 50]], dtype=np.int32)
    test_rect = np.array([[100, 100], [400, 400]], dtype=np.int32)
    result = is_inside_rectangle(x=test_points, y=test_rect)
    print(f"✓ is_inside_rectangle test passed: {result}")
except Exception as e:
    print(f"\n✗ Error importing SimBA CUDA functions: {e}")
    import traceback
    traceback.print_exc()

print("\n" + "="*50)
print("Environment check complete!")

simba/sandbox/test_simba_install.py (new file)
@@ -0,0 +1,41 @@
"""Test SimBA installation in conda environment"""
import sys
print(f"Python: {sys.executable}")

try:
    import simba
    print("✓ SimBA imported successfully!")
except Exception as e:
    print(f"✗ Error importing SimBA: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

try:
    from simba.data_processors.cuda.geometry import is_inside_rectangle
    print("✓ CUDA geometry functions imported!")
except Exception as e:
    print(f"✗ Error importing CUDA functions: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

try:
    import cupy as cp
    import numpy as np
    print(f"✓ CuPy {cp.__version__} available")

    # Quick test
    test_points = np.array([[150, 150], [300, 300], [50, 50]], dtype=np.int32)
    test_rect = np.array([[100, 100], [400, 400]], dtype=np.int32)
    result = is_inside_rectangle(x=test_points, y=test_rect)
    print(f"✓ CUDA function test passed: {result}")
except Exception as e:
    print(f"✗ Error testing CUDA: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

print("\n" + "="*50)
print("All tests passed! SimBA is ready to use.")
print("="*50)
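
Both installation checks call is_inside_rectangle with a (3, 2) point array and a rectangle given by its two opposite corners. Assuming inclusive axis-aligned containment (the kernel is not shown in this diff), the expected result for the test data can be reproduced in NumPy:

# Illustrative sketch (assumed semantics of is_inside_rectangle, not the CUDA kernel).
import numpy as np

points = np.array([[150, 150], [300, 300], [50, 50]], dtype=np.int32)  # same data as the tests above
rect = np.array([[100, 100], [400, 400]], dtype=np.int32)              # top-left and bottom-right corners

# 1 where the point lies inside the axis-aligned rectangle (inclusive), else 0.
inside = ((points >= rect[0]) & (points <= rect[1])).all(axis=1).astype(np.int32)
print(inside)  # expected: [1 1 0]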

simba/sandbox/test_static_directionality.py (new file)
@@ -0,0 +1,26 @@
"""Test directionality_to_static_targets function"""
import numpy as np
from simba.data_processors.cuda.geometry import directionality_to_static_targets

print("Testing directionality_to_static_targets...")

left_ear = np.random.randint(0, 500, (10, 2)).astype(np.int32)
right_ear = np.random.randint(0, 500, (10, 2)).astype(np.int32)
nose = np.random.randint(0, 500, (10, 2)).astype(np.int32)
target = np.array([250, 250], dtype=np.int32)

print(f"target shape: {target.shape}, dtype: {target.dtype}")

try:
    result = directionality_to_static_targets(
        left_ear=left_ear,
        right_ear=right_ear,
        nose=nose,
        target=target
    )
    print(f"✓ SUCCESS! Result shape: {result.shape}")
    print(f"Result:\n{result}")
except Exception as e:
    print(f"✗ Error: {e}")
    import traceback
    traceback.print_exc()

simba/sandbox/test_static_directionality_2d.py (new file)
@@ -0,0 +1,26 @@
"""Test directionality_to_static_targets with correct 1D target"""
import numpy as np
from simba.data_processors.cuda.geometry import directionality_to_static_targets

print("Testing directionality_to_static_targets with 1D target (correct)...")

left_ear = np.random.randint(0, 500, (100, 2)).astype(np.int32)
right_ear = np.random.randint(0, 500, (100, 2)).astype(np.int32)
nose = np.random.randint(0, 500, (100, 2)).astype(np.int32)
target = np.array([250, 250], dtype=np.int32)  # 1D array for static target

print(f"target shape: {target.shape}, dtype: {target.dtype}")

try:
    result = directionality_to_static_targets(
        left_ear=left_ear,
        right_ear=right_ear,
        nose=nose,
        target=target
    )
    print(f"✓ SUCCESS! Result shape: {result.shape}")
    print(f"First 5 results:\n{result[:5]}")
except Exception as e:
    print(f"✗ Error: {e}")
    import traceback
    traceback.print_exc()

simba/sandbox/verify_env.py (new file)
@@ -0,0 +1,42 @@
"""Verify CUDA environment is properly configured"""
from numba import cuda
import cupy as cp
import numba

print("="*60)
print("CUDA Environment Verification")
print("="*60)
print(f"Numba version: {numba.__version__}")
print(f"CuPy version: {cp.__version__}")
print(f"CUDA available: {cuda.is_available()}")
if cuda.is_available():
    print(f"CUDA devices: {len(cuda.gpus)}")
    for i, gpu in enumerate(cuda.gpus):
        print(f"  GPU {i}: {gpu}")

# Test the function
print("\n" + "="*60)
print("Testing directionality_to_nonstatic_target...")
try:
    from simba.data_processors.cuda.geometry import directionality_to_nonstatic_target
    import numpy as np

    left_ear = np.random.randint(0, 500, (10, 2)).astype(np.int32)
    right_ear = np.random.randint(0, 500, (10, 2)).astype(np.int32)
    nose = np.random.randint(0, 500, (10, 2)).astype(np.int32)
    target = np.random.randint(0, 500, (10, 2)).astype(np.int32)

    result = directionality_to_nonstatic_target(
        left_ear=left_ear,
        right_ear=right_ear,
        nose=nose,
        target=target
    )
    print(f"✓ Function works! Result shape: {result.shape}")
except Exception as e:
    print(f"✗ Error: {e}")
    import traceback
    traceback.print_exc()

print("\n" + "="*60)
print("Environment is ready!")