simba-uw-tf-dev 4.5.8-py3-none-any.whl → 4.7.1-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- simba/SimBA.py +2 -2
- simba/assets/.recent_projects.txt +1 -0
- simba/assets/icons/frames_2.png +0 -0
- simba/assets/lookups/tooptips.json +15 -1
- simba/data_processors/agg_clf_counter_mp.py +52 -53
- simba/data_processors/blob_location_computer.py +1 -1
- simba/data_processors/circling_detector.py +30 -13
- simba/data_processors/cuda/geometry.py +45 -27
- simba/data_processors/cuda/image.py +1648 -1598
- simba/data_processors/cuda/statistics.py +72 -26
- simba/data_processors/cuda/timeseries.py +1 -1
- simba/data_processors/cue_light_analyzer.py +5 -9
- simba/data_processors/egocentric_aligner.py +25 -7
- simba/data_processors/freezing_detector.py +55 -47
- simba/data_processors/kleinberg_calculator.py +61 -29
- simba/feature_extractors/feature_subsets.py +14 -7
- simba/feature_extractors/mitra_feature_extractor.py +2 -2
- simba/feature_extractors/straub_tail_analyzer.py +4 -6
- simba/labelling/standard_labeller.py +1 -1
- simba/mixins/config_reader.py +5 -2
- simba/mixins/geometry_mixin.py +22 -36
- simba/mixins/image_mixin.py +24 -28
- simba/mixins/plotting_mixin.py +28 -10
- simba/mixins/statistics_mixin.py +48 -11
- simba/mixins/timeseries_features_mixin.py +1 -1
- simba/mixins/train_model_mixin.py +67 -29
- simba/model/inference_batch.py +1 -1
- simba/model/yolo_seg_inference.py +3 -3
- simba/outlier_tools/skip_outlier_correction.py +1 -1
- simba/plotting/ROI_feature_visualizer_mp.py +3 -5
- simba/plotting/clf_validator_mp.py +4 -5
- simba/plotting/cue_light_visualizer.py +6 -7
- simba/plotting/directing_animals_visualizer_mp.py +2 -3
- simba/plotting/distance_plotter_mp.py +378 -378
- simba/plotting/frame_mergerer_ffmpeg.py +137 -196
- simba/plotting/gantt_creator.py +29 -10
- simba/plotting/gantt_creator_mp.py +96 -33
- simba/plotting/geometry_plotter.py +270 -272
- simba/plotting/heat_mapper_clf_mp.py +4 -6
- simba/plotting/heat_mapper_location_mp.py +2 -2
- simba/plotting/light_dark_box_plotter.py +2 -2
- simba/plotting/path_plotter_mp.py +26 -29
- simba/plotting/plot_clf_results_mp.py +455 -454
- simba/plotting/pose_plotter_mp.py +28 -29
- simba/plotting/probability_plot_creator_mp.py +288 -288
- simba/plotting/roi_plotter_mp.py +31 -31
- simba/plotting/single_run_model_validation_video_mp.py +427 -427
- simba/plotting/spontaneous_alternation_plotter.py +2 -3
- simba/plotting/yolo_pose_track_visualizer.py +32 -27
- simba/plotting/yolo_pose_visualizer.py +35 -36
- simba/plotting/yolo_seg_visualizer.py +2 -3
- simba/pose_importers/simba_blob_importer.py +3 -3
- simba/roi_tools/roi_aggregate_stats_mp.py +5 -4
- simba/roi_tools/roi_clf_calculator_mp.py +4 -4
- simba/sandbox/analyze_runtimes.py +30 -0
- simba/sandbox/cuda/egocentric_rotator.py +374 -0
- simba/sandbox/get_cpu_pool.py +5 -0
- simba/sandbox/proboscis_to_tip.py +28 -0
- simba/sandbox/test_directionality.py +47 -0
- simba/sandbox/test_nonstatic_directionality.py +27 -0
- simba/sandbox/test_pycharm_cuda.py +51 -0
- simba/sandbox/test_simba_install.py +41 -0
- simba/sandbox/test_static_directionality.py +26 -0
- simba/sandbox/test_static_directionality_2d.py +26 -0
- simba/sandbox/verify_env.py +42 -0
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo.py +3 -3
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo_bbox.py +2 -2
- simba/ui/pop_ups/clf_add_remove_print_pop_up.py +37 -30
- simba/ui/pop_ups/clf_plot_pop_up.py +2 -2
- simba/ui/pop_ups/egocentric_alignment_pop_up.py +20 -21
- simba/ui/pop_ups/fsttc_pop_up.py +27 -25
- simba/ui/pop_ups/gantt_pop_up.py +31 -6
- simba/ui/pop_ups/interpolate_pop_up.py +2 -4
- simba/ui/pop_ups/kleinberg_pop_up.py +39 -40
- simba/ui/pop_ups/multiple_videos_to_frames_popup.py +10 -11
- simba/ui/pop_ups/single_video_to_frames_popup.py +10 -10
- simba/ui/pop_ups/video_processing_pop_up.py +186 -174
- simba/ui/tkinter_functions.py +10 -1
- simba/utils/custom_feature_extractor.py +1 -1
- simba/utils/data.py +90 -14
- simba/utils/enums.py +1 -0
- simba/utils/errors.py +441 -440
- simba/utils/lookups.py +1203 -1203
- simba/utils/printing.py +124 -124
- simba/utils/read_write.py +3769 -3721
- simba/utils/yolo.py +10 -1
- simba/video_processors/blob_tracking_executor.py +2 -2
- simba/video_processors/clahe_ui.py +66 -23
- simba/video_processors/egocentric_video_rotator.py +46 -44
- simba/video_processors/multi_cropper.py +1 -1
- simba/video_processors/video_processing.py +5264 -5300
- simba/video_processors/videos_to_frames.py +43 -32
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/METADATA +4 -3
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/RECORD +98 -86
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.5.8.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/top_level.txt +0 -0
simba/utils/yolo.py
CHANGED
@@ -47,6 +47,9 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
     `Download initial weights <https://huggingface.co/Ultralytics>`__.
     `Example model_yaml <https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml>`__.

+    .. seealso::
+       For the recommended wrapper class with parameter validation, see :class:`simba.model.yolo_fit.FitYolo`.
+
     :param initial_weights: Path to the pre-trained YOLO model weights (usually a `.pt` file). Example weights can be found [here](https://huggingface.co/Ultralytics).
     :param model_yaml: YAML file containing paths to the training, validation, and testing datasets and the object class mappings. Example YAML file can be found [here](https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml).
     :param save_path: Directory path where the trained model, logs, and results will be saved.

@@ -55,7 +58,7 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
     :return: None. The trained model and associated training logs are saved in the specified `project_path`.

     :example:
-    >>> fit_yolo(initial_weights=r"C
+    >>> fit_yolo(initial_weights=r"C:/troubleshooting/coco_data/weights/yolov8n-obb.pt", data=r"C:/troubleshooting/coco_data/model.yaml", save_path=r"C:/troubleshooting/coco_data/mdl", batch=16)
     """

     if not _is_cuda_available()[0]:

@@ -83,6 +86,9 @@ def load_yolo_model(weights_path: Union[str, os.PathLike],
     """
     Load a YOLO model.

+    .. seealso::
+       For recommended wrapper classes that use this function, see :class:`simba.model.yolo_fit.FitYolo`, :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`, and :class:`simba.model.yolo_pose_track_inference.YOLOPoseTrackInference`.
+
     :param Union[str, os.PathLike] weights_path: Path to model weights (.pt, .engine, etc).
     :param bool verbose: Whether to print loading info.
     :param Optional[str] format: Export format, one of VALID_FORMATS or None to skip export.

@@ -169,6 +175,9 @@ def yolo_predict(model: YOLO,
     """
     Produce YOLO predictions.

+    .. seealso::
+       For recommended wrapper classes that use this function, see :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, and :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`.
+
     :param Union[str, os.PathLike] model: Loaded ultralytics.YOLO model. Returned by :func:`~simba.bounding_box_tools.yolo.model.load_yolo_model`.
     :param Union[str, os.PathLike, np.ndarray] source: Path to video, video stream, directory, image, or image as loaded array.
     :param bool half: Whether to use half precision (FP16) for inference to speed up processing.
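The `.. seealso::` notes added above point readers from the low-level functions to the wrapper classes. For orientation, here is a minimal usage sketch chaining `load_yolo_model` and `yolo_predict`; the keyword names follow the docstrings shown in this diff, while the weights and video paths are placeholders:

    from simba.utils.yolo import load_yolo_model, yolo_predict

    # Load .pt (or .engine, etc.) weights; per the docstring, format=None skips any export step.
    model = load_yolo_model(weights_path=r"C:/models/yolov8n.pt", verbose=True, format=None)
    # Run inference on a video file; half=True enables FP16 for speed.
    results = yolo_predict(model=model, source=r"C:/videos/clip.mp4", half=True)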

simba/video_processors/blob_tracking_executor.py
CHANGED

@@ -87,9 +87,9 @@ class BlobTrackingExecutor():
     :param bool center: If True, compute center coordinates. Default: True.

     :example:
-    >>> tracker = BlobTrackingExecutor(data=r"C
+    >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle")
     >>> tracker.run()
-    >>> tracker = BlobTrackingExecutor(data=r"C
+    >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle", batch_size=5000)
     >>> tracker.run()
     """

simba/video_processors/clahe_ui.py
CHANGED

@@ -1,4 +1,6 @@
 import os
+import threading
+import time
 from typing import Tuple, Union

 import cv2

@@ -27,34 +29,71 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
     :return Tuple[float, int]: Tuple containing the chosen clip limit and tile size.

     :example:
-    >>> video = cv2.imread(r"D
+    >>> video = cv2.imread(r"D:/EPM/sample_2/video_1.mp4")
     >>> interactive_clahe_ui(data=video)
     """
     global original_img, font_size, x_spacer, y_spacer, txt

+    callback_lock = threading.Lock()
+    last_update_time = [0]
+    update_delay = 0.05
+
     def _get_trackbar_values(v):
         global original_img, font_size, x_spacer, y_spacer, txt
-        …
+        nonlocal callback_lock, last_update_time, update_delay
+        current_time = time.time()
+        if current_time - last_update_time[0] < update_delay:
+            return
+        if not callback_lock.acquire(blocking=False):
+            return
+        try:
+            if cv2.getWindowProperty(WIN_NAME, cv2.WND_PROP_VISIBLE) < 1:
+                return
+            try:
+                clip_limit = cv2.getTrackbarPos(CLIP_LIMIT, WIN_NAME) / 10.0
+                tile_size = cv2.getTrackbarPos(TILE_SIZE, WIN_NAME)
+                if tile_size % 2 == 0: tile_size += 1
+                clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
+                img_clahe = clahe.apply(original_img)
+                cv2.putText(img_clahe, txt,(TextOptions.BORDER_BUFFER_X.value, TextOptions.BORDER_BUFFER_Y.value + y_spacer), TextOptions.FONT.value, font_size, (255, 255, 255), 3)
+                cv2.imshow(WIN_NAME, img_clahe)
+                cv2.waitKey(1)
+                last_update_time[0] = current_time
+            except cv2.error:
+                pass
+        finally:
+            callback_lock.release()

     def _change_img(v):
         global original_img, font_size, x_spacer, y_spacer, txt
-        …
+        current_time = time.time()
+        if current_time - last_update_time[0] < update_delay:
+            return
+        if not callback_lock.acquire(blocking=False):
+            return
+        try:
+            if cv2.getWindowProperty(WIN_NAME, cv2.WND_PROP_VISIBLE) < 1:
+                return
+            try:
+                new_frm_id = cv2.getTrackbarPos(SELECT_VIDEO_FRAME, WIN_NAME)
+                original_img = read_frm_of_video(video_path=data, frame_index=new_frm_id, greyscale=True)
+                clip_limit = cv2.getTrackbarPos(CLIP_LIMIT, WIN_NAME) / 10.0
+                tile_size = cv2.getTrackbarPos(TILE_SIZE, WIN_NAME)
+                if tile_size % 2 == 0: tile_size += 1
+                clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
+                img_clahe = clahe.apply(original_img)
+                cv2.putText(img_clahe, txt,(TextOptions.BORDER_BUFFER_X.value, TextOptions.BORDER_BUFFER_Y.value + y_spacer), TextOptions.FONT.value, font_size, (255, 255, 255), 3)
+                cv2.imshow(WIN_NAME, img_clahe)
+                cv2.waitKey(1)
+                last_update_time[0] = current_time
+            except cv2.error:
+                pass
+        finally:
+            callback_lock.release()

     check_instance(source=interactive_clahe_ui.__name__, instance=data, accepted_types=(np.ndarray, str))
     if isinstance(data, str):

@@ -80,14 +119,18 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
             break
         k = cv2.waitKey(1) & 0xFF
         if k == 27:
-            …
+            try:
+                clip_limit = cv2.getTrackbarPos(CLIP_LIMIT, WIN_NAME) / 10.0
+                tile_size = cv2.getTrackbarPos(TILE_SIZE, WIN_NAME)
+                if tile_size % 2 == 0: tile_size += 1
+            except cv2.error:
+                clip_limit = 1.0
+                tile_size = 8
             cv2.destroyAllWindows()
             return clip_limit, tile_size


-#interactive_clahe_ui(data=r"
+#interactive_clahe_ui(data=r"C:\troubleshooting\cue_light\t1\project_folder\videos\2025-05-21 16-10-06_cropped.mp4")

 # # Function to update CLAHE
 # def update_clahe(x):
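The reworked callbacks above follow a debounce-plus-lock pattern: HighGUI can fire trackbar events faster than CLAHE can be recomputed, so each callback drops events arriving within 50 ms of the last redraw and skips re-entrant calls via a non-blocking lock. The standalone sketch below distills that pattern; the window and trackbar names are illustrative, not SimBA's:

    import threading
    import time

    import cv2
    import numpy as np

    WIN = "clahe_demo"
    lock = threading.Lock()
    last_run = [0.0]        # boxed in a list so the callback can mutate it
    MIN_INTERVAL = 0.05     # seconds, matching update_delay in the diff

    img = np.random.randint(0, 255, (480, 640), dtype=np.uint8)

    def on_change(_):
        now = time.time()
        if now - last_run[0] < MIN_INTERVAL:   # debounce: drop rapid-fire events
            return
        if not lock.acquire(blocking=False):   # another redraw is still running
            return
        try:
            clip = max(cv2.getTrackbarPos("clip x10", WIN), 1) / 10.0
            tile = max(cv2.getTrackbarPos("tile", WIN), 2)
            if tile % 2 == 0: tile += 1        # keep the CLAHE tile size odd, as in the diff
            out = cv2.createCLAHE(clipLimit=clip, tileGridSize=(tile, tile)).apply(img)
            cv2.imshow(WIN, out)
            cv2.waitKey(1)
            last_run[0] = now
        finally:
            lock.release()

    cv2.namedWindow(WIN)
    cv2.createTrackbar("clip x10", WIN, 10, 100, on_change)
    cv2.createTrackbar("tile", WIN, 8, 64, on_change)
    cv2.imshow(WIN, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()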

simba/video_processors/egocentric_video_rotator.py
CHANGED

@@ -9,10 +9,12 @@ import numpy as np
 from simba.utils.checks import (check_file_exist_and_readable,
                                 check_if_dir_exists, check_if_valid_rgb_tuple,
                                 check_int, check_valid_array,
-                                check_valid_boolean,
+                                check_valid_boolean, check_valid_cpu_pool,
+                                check_valid_tuple)
 from simba.utils.data import (align_target_warpaffine_vectors,
                               center_rotation_warpaffine_vectors,
-                              egocentrically_align_pose
+                              egocentrically_align_pose, get_cpu_pool,
+                              terminate_cpu_pool)
 from simba.utils.enums import Defaults, Formats
 from simba.utils.printing import SimbaTimer, stdout_success
 from simba.utils.read_write import (concatenate_videos_in_folder,

@@ -35,7 +37,6 @@ def egocentric_video_aligner(frm_range: np.ndarray,
                              gpu: bool = True):

     video_meta = get_video_meta_data(video_path=video_path)
-
     batch, frm_range = frm_range[0], frm_range[1]
     save_path = os.path.join(temp_dir, f'{batch}.mp4')
     fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')

@@ -57,7 +58,7 @@ def egocentric_video_aligner(frm_range: np.ndarray,
             final_frame = cv2.warpAffine(rotated_frame, m_translations[img_counter],(video_meta['width'], video_meta['height']), borderValue=fill_clr)
             writer.write(final_frame)
             if verbose:
-                print(f'Creating frame {frame_id} ({video_name}, CPU core: {batch + 1}).')
+                print(f'Creating frame {frame_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
             img_counter+=1
     else:
         cap = cv2.VideoCapture(video_path)

@@ -67,7 +68,7 @@ def egocentric_video_aligner(frm_range: np.ndarray,
             final_frame = cv2.warpAffine(rotated_frame, m_translations[frm_idx], (video_meta['width'], video_meta['height']), borderValue=fill_clr)
             writer.write(final_frame)
             if verbose:
-                print(f'Creating frame {frm_id} ({video_name}, CPU core: {batch + 1}).')
+                print(f'Creating frame {frm_id}/{video_meta["frame_count"]} ({video_name}, CPU core: {batch + 1}).')
     writer.release()
     return batch + 1

@@ -93,9 +94,9 @@ class EgocentricVideoRotator():
     :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video as the same dir as the input video with the `_rotated` suffix.

     :example:
-    >>> DATA_PATH = "C
-    >>> VIDEO_PATH = "C
-    >>> SAVE_PATH = "C
+    >>> DATA_PATH = "C:/501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = "C:/501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = "C:/501_MA142_Gi_Saline_0513_rotated.mp4"
     >>> ANCHOR_LOC = np.array([250, 250])

     >>> df = read_df(file_path=DATA_PATH, file_type='csv')

@@ -115,7 +116,8 @@ class EgocentricVideoRotator():
                  fill_clr: Tuple[int, int, int] = (0, 0, 0),
                  core_cnt: int = -1,
                  save_path: Optional[Union[str, os.PathLike]] = None,
-                 gpu: Optional[bool] = True
+                 gpu: Optional[bool] = True,
+                 pool: bool = None):

         check_file_exist_and_readable(file_path=video_path)
         self.video_meta_data = get_video_meta_data(video_path=video_path)

@@ -126,10 +128,14 @@ class EgocentricVideoRotator():
         check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
         check_if_valid_rgb_tuple(data=fill_clr)
         check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
-        if core_cnt > find_core_cnt()[0] or core_cnt == -1:
-            …
+        if core_cnt > find_core_cnt()[0] or core_cnt == -1: self.core_cnt = find_core_cnt()[0]
+        else: self.core_cnt = core_cnt
+        if pool is not None:
+            check_valid_cpu_pool(value=pool, source=self.__class__.__name__, max_cores=find_core_cnt()[0], min_cores=2, raise_error=True)
+            self.pool_termination_flag = True
         else:
-            self.
+            self.pool_termination_flag = False
+        self.pool = get_cpu_pool(core_cnt=self.core_cnt, source=self.__class__.__name__) if pool is None else pool
         video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
         if save_path is not None:
             self.save_dir = os.path.dirname(save_path)

@@ -152,39 +158,35 @@ class EgocentricVideoRotator():
         frm_list = np.arange(0, self.video_meta_data['frame_count'])
         frm_list = np.array_split(frm_list, self.core_cnt)
         frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
-        if self.verbose:
-            …
-            print(f"Rotate batch {result}/{self.core_cnt} complete...")
-        pool.terminate()
-        pool.join()
+        if self.verbose: print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+
+        constants = functools.partial(egocentric_video_aligner,
+                                      temp_dir=temp_dir,
+                                      video_name=self.video_name,
+                                      video_path=self.video_path,
+                                      centers=self.centers,
+                                      rotation_vectors=self.rotation_vectors,
+                                      target=self.anchor_loc,
+                                      verbose=self.verbose,
+                                      fill_clr=self.fill_clr,
+                                      gpu=self.gpu)
+        for cnt, result in enumerate(self.pool.imap(constants, frm_list, chunksize=1)):
+            if self.verbose: print(f"Rotate batch {result}/{self.core_cnt} complete...")
+        if self.pool_termination_flag: terminate_cpu_pool(pool=self.pool, force=False)
         concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
         video_timer.stop_timer()
         stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)

-if __name__ == "__main__":
-    …
+# if __name__ == "__main__":
+#     DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+#     VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+#     SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+#     ANCHOR_LOC = np.array([250, 250])
+#
+#     df = read_df(file_path=DATA_PATH, file_type='csv')
+#     bp_cols = [x for x in df.columns if not x.endswith('_p')]
+#     data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+#
+#     _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+#     rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
+#     rotater.run()
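The new `pool` argument lets a caller build the worker pool up front with the new `get_cpu_pool` helper and hand it to the rotator, rather than the rotator always constructing its own. Below is a hedged sketch of that path, with placeholder paths and with `centers`/`rotation_vectors` assumed to come from `egocentrically_align_pose` as in the commented-out example above; note that per the constructor logic in this diff, a supplied pool sets `pool_termination_flag = True`, so `run()` terminates it on completion:

    from simba.utils.data import get_cpu_pool
    from simba.video_processors.egocentric_video_rotator import EgocentricVideoRotator

    # Pool is validated by check_valid_cpu_pool() inside the constructor.
    pool = get_cpu_pool(core_cnt=8, source="egocentric_rotation")
    rotater = EgocentricVideoRotator(video_path=r"C:/videos/clip.mp4",
                                     centers=centers,                    # from egocentrically_align_pose()
                                     rotation_vectors=rotation_vectors,  # from egocentrically_align_pose()
                                     anchor_location=(250, 250),
                                     core_cnt=8,
                                     pool=pool)
    rotater.run()  # per this diff, run() calls terminate_cpu_pool() on the supplied pool when done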

simba/video_processors/multi_cropper.py
CHANGED

@@ -49,7 +49,7 @@ class MultiCropper(object):


     :example:
-    >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C
+    >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C:/troubleshooting/mitra/test', output_folder=r'C:/troubleshooting/mitra/test/cropped', crop_cnt=2, gpu=True)
     >>> cropper.run()
     """