simba-uw-tf-dev 4.6.4__py3-none-any.whl → 4.6.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- simba/data_processors/blob_location_computer.py +1 -1
- simba/data_processors/cuda/geometry.py +45 -27
- simba/data_processors/cuda/image.py +1624 -1600
- simba/data_processors/cuda/statistics.py +72 -25
- simba/data_processors/cuda/timeseries.py +1 -1
- simba/data_processors/egocentric_aligner.py +25 -7
- simba/data_processors/kleinberg_calculator.py +6 -2
- simba/feature_extractors/feature_subsets.py +14 -7
- simba/feature_extractors/straub_tail_analyzer.py +4 -6
- simba/labelling/standard_labeller.py +1 -1
- simba/mixins/geometry_mixin.py +8 -8
- simba/mixins/image_mixin.py +14 -14
- simba/mixins/statistics_mixin.py +48 -11
- simba/mixins/timeseries_features_mixin.py +1 -1
- simba/mixins/train_model_mixin.py +65 -27
- simba/model/inference_batch.py +1 -1
- simba/model/yolo_seg_inference.py +3 -3
- simba/plotting/heat_mapper_clf_mp.py +2 -2
- simba/pose_importers/simba_blob_importer.py +3 -3
- simba/roi_tools/roi_aggregate_stats_mp.py +1 -1
- simba/roi_tools/roi_clf_calculator_mp.py +1 -1
- simba/sandbox/analyze_runtimes.py +30 -0
- simba/sandbox/cuda/egocentric_rotator.py +374 -374
- simba/sandbox/proboscis_to_tip.py +28 -0
- simba/sandbox/test_directionality.py +47 -0
- simba/sandbox/test_nonstatic_directionality.py +27 -0
- simba/sandbox/test_pycharm_cuda.py +51 -0
- simba/sandbox/test_simba_install.py +41 -0
- simba/sandbox/test_static_directionality.py +26 -0
- simba/sandbox/test_static_directionality_2d.py +26 -0
- simba/sandbox/verify_env.py +42 -0
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo.py +3 -3
- simba/third_party_label_appenders/transform/coco_keypoints_to_yolo_bbox.py +2 -2
- simba/ui/pop_ups/fsttc_pop_up.py +27 -25
- simba/ui/pop_ups/kleinberg_pop_up.py +3 -2
- simba/utils/custom_feature_extractor.py +1 -1
- simba/utils/data.py +2 -3
- simba/utils/errors.py +441 -440
- simba/utils/lookups.py +1203 -1203
- simba/utils/read_write.py +70 -31
- simba/utils/yolo.py +10 -1
- simba/video_processors/blob_tracking_executor.py +2 -2
- simba/video_processors/clahe_ui.py +1 -1
- simba/video_processors/egocentric_video_rotator.py +44 -39
- simba/video_processors/multi_cropper.py +1 -1
- simba/video_processors/video_processing.py +5264 -5233
- simba/video_processors/videos_to_frames.py +43 -33
- {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.7.dist-info}/METADATA +4 -3
- {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.7.dist-info}/RECORD +53 -44
- {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.7.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.7.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.7.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.7.dist-info}/top_level.txt +0 -0
simba/utils/read_write.py
CHANGED
```diff
@@ -97,13 +97,20 @@ def read_df(file_path: Union[str, os.PathLike],
     .. note::
        For improved runtime, defaults to :external:py:meth:`pyarrow.csv.write_cs` if file type is ``csv``.

-
-
-
-
-
-
-
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../docs/tables/read_df.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+    :param str file_path: Path to data file
+    :param str file_type: Type of data. OPTIONS: 'parquet', 'csv', 'pickle'.
+    :param Optional[bool]: If the input file has an initial index column. Default: True.
+    :param Optional[List[str]] remove_columns: If not None, then remove columns in lits.
+    :param Optional[List[str]] usecols: If not None, then keep columns in list.
+    :param bool check_multiindex: check file is multi-index headers. Default: False.
+    :param int multi_index_headers_to_keep: If reading multi-index file, and we want to keep one of the dropped multi-index levels as the header in the output file, specify the index of the multiindex hader as int.
     :return: Table data in pd.DataFrame format.
     :rtype: pd.DataFrame

@@ -207,11 +214,18 @@ def write_df(df: pd.DataFrame,
     .. note::
        For improved runtime, defaults to ``pyarrow.csv`` if file_type == ``csv``.

-
-
-
-
-
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../docs/tables/write_df.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+    :param pd.DataFrame df: Pandas dataframe to save to disk.
+    :param str file_type: Type of data. OPTIONS: ``parquet``, ``csv``, ``pickle``.
+    :param str save_path: Location where to store the data.
+    :param bool check_multiindex: check if input file is multi-index headers. Default: False.
+    :param bool verbose: Prints message on completion. Default: False.

     :example:
     >>> write_df(df=df, file_type='csv', save_path='project_folder/csv/input_csv/Video_1.csv')
```
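The two hunks above only change documentation, but they spell out the `read_df`/`write_df` parameters. As a rough illustration of how the two are typically paired (a sketch, not part of the diff; the project paths below are hypothetical):

```python
from simba.utils.read_write import read_df, write_df

# Hypothetical paths, for illustration only.
IN_PATH = 'project_folder/csv/input_csv/Video_1.csv'
OUT_PATH = 'project_folder/csv/machine_results/Video_1.csv'

# Read a CSV into a pandas DataFrame (file_type options per the docstring: 'parquet', 'csv', 'pickle').
df = read_df(file_path=IN_PATH, file_type='csv')

# ... transform df ...

# Write it back out; verbose=True prints a completion message per the updated docstring.
write_df(df=df, file_type='csv', save_path=OUT_PATH, verbose=True)
```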
```diff
@@ -1130,8 +1144,8 @@ def get_file_name_info_in_directory(directory: Union[str, os.PathLike], file_typ
     :return dict: All found files as values and file base names as keys.

     :example:
-    >>> get_file_name_info_in_directory(directory='C
-    >>> {'Video_1': 'C
+    >>> get_file_name_info_in_directory(directory='C:/project_folder/csv/machine_results', file_type='csv')
+    >>> {'Video_1': 'C:/project_folder/csv/machine_results/Video_1'}
     """

     results = {}

@@ -2460,6 +2474,13 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
     """
     Reads a batch of frames from a video file using GPU acceleration.

+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../docs/tables/read_img_batch_from_video_gpu.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
     This function uses FFmpeg with CUDA acceleration to read frames from a specified range in a video file. It supports both RGB and greyscale video formats. Frames are returned as a dictionary where the keys are
     frame indices and the values are NumPy arrays representing the image data.

@@ -2468,7 +2489,7 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
     If you expect that the video you are reading in is black and white, set ``black_and_white`` to True to round any of these wonly value sto 0 and 255.

     .. seealso::
-       For CPU multicore acceleration, see :func:`simba.mixins.image_mixin.ImageMixin.read_img_batch_from_video`
+       For CPU multicore acceleration, see :func:`simba.mixins.image_mixin.ImageMixin.read_img_batch_from_video` or :func:`simba.utils.read_write.read_img_batch_from_video`.

     :param video_path: Path to the video file. Can be a string or an os.PathLike object.
     :param start_frm: The starting frame index to read. If None, starts from the beginning of the video.

@@ -2479,6 +2500,7 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
     :return: A dictionary where keys are frame indices (integers) and values are NumPy arrays containing the image data of each frame.
     """

+    timer = SimbaTimer(start=True)
     check_file_exist_and_readable(file_path=video_path)
     video_meta_data = get_video_meta_data(video_path=video_path, fps_as_int=False)
     if start_frm is not None:

@@ -2551,6 +2573,10 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
                 binary_frms[frm_id] = np.where(frames[frm_id] > 127, 255, 0).astype(np.uint8)
         frames = binary_frms

+    timer.stop_timer()
+    if verbose:
+        print(f'[{get_current_time()}] Read frames {start_frm}-{end_frm} (video: {video_name}, elapsed time: {timer.elapsed_time_str}s)')
+
     return frames
```
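The GPU reader now times itself and reports progress when `verbose=True`. A rough usage sketch based on the parameters shown above (the video path is hypothetical, and an NVIDIA GPU plus a CUDA-enabled FFmpeg build is required):

```python
from simba.utils.read_write import read_img_batch_from_video_gpu

# Hypothetical path: any video readable by FFmpeg with CUDA acceleration.
frames = read_img_batch_from_video_gpu(video_path='project_folder/videos/Video_1.mp4',
                                        start_frm=0,
                                        end_frm=500,
                                        verbose=True)

# Keys are frame indices, values are NumPy image arrays.
print(len(frames), frames[0].shape)
```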
```diff
@@ -2618,7 +2644,7 @@ def bento_file_reader(file_path: Union[str, os.PathLike],
     :rtype: Dict[str, pd.DataFrame]

     :example:
-    >>> bento_file_reader(file_path=r"C
+    >>> bento_file_reader(file_path=r"C:/troubleshooting/bento_test/bento_files/20240812_crumpling3.annot")
     """

     def _orient_columns_melt(df: pd.DataFrame) -> pd.DataFrame:

@@ -2941,7 +2967,7 @@ def labelme_to_dlc(labelme_dir: Union[str, os.PathLike],
     :return: None

     :example:
-    >>> labelme_dir = r'D
+    >>> labelme_dir = r'D:/ts_annotations'
     >>> labelme_to_dlc(labelme_dir=labelme_dir)
     """

@@ -3156,7 +3182,7 @@ def _read_img_batch_from_video_helper(frm_idx: np.ndarray, video_path: Union[str
     cap.set(1, current_frm)
     while current_frm < end_frm:
         if verbose:
-            print(f'Reading frame {current_frm}
+            print(f'[{get_current_time()}] Reading frame {current_frm} ({video_meta_data["video_name"]})...')
         img = cap.read()[1]
         if img is not None:
             if greyscale or black_and_white or clahe:

@@ -3188,6 +3214,14 @@ def read_img_batch_from_video(video_path: Union[str, os.PathLike],
     """
     Read a batch of frames from a video file. This method reads frames from a specified range of frames within a video file using multiprocessing.

+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../docs/tables/read_img_batch_from_video.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+
     .. seealso::
        For GPU acceleration, see :func:`simba.utils.read_write.read_img_batch_from_video_gpu`

@@ -3209,6 +3243,8 @@ def read_img_batch_from_video(video_path: Union[str, os.PathLike],
     >>> read_img_batch_from_video(video_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/videos/Together_1.avi', start_frm=0, end_frm=50)
     """

+
+    timer = SimbaTimer(start=True)
     if platform.system() == "Darwin":
         if not multiprocessing.get_start_method(allow_none=True):
             multiprocessing.set_start_method("fork", force=True)

@@ -3230,19 +3266,22 @@ def read_img_batch_from_video(video_path: Union[str, os.PathLike],
     if end_frm <= start_frm:
         FrameRangeError(msg=f"Start frame ({start_frm}) has to be before end frame ({end_frm})", source=read_img_batch_from_video.__name__)
     frm_lst = np.array_split(np.arange(start_frm, end_frm + 1), core_cnt)
+    pool = multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value)
     results = {}
-
-
-
-
-
-
-
-
-        results.update(result)
-    pool.join()
+    constants = functools.partial(_read_img_batch_from_video_helper,
+                                  video_path=video_path,
+                                  greyscale=greyscale,
+                                  black_and_white=black_and_white,
+                                  clahe=clahe,
+                                  verbose=verbose)
+    for cnt, result in enumerate(pool.imap(constants, frm_lst, chunksize=1)):
+        results.update(result)
     pool.close()
-
+    pool.join()
+    pool.terminate()
+    timer.stop_timer()
+    if verbose:
+        print(f'[{get_current_time()}] Read frames {start_frm}-{end_frm} (video: {video_meta_data["video_name"]}, elapsed time: {timer.elapsed_time_str}s)')
     return results


 def read_yolo_bp_names_file(file_path: Union[str, os.PathLike]) -> Tuple[str]:
```
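The last hunk replaces the old frame-reading loop with a `functools.partial` worker consumed through `multiprocessing.Pool.imap`, merging the per-chunk dictionaries as they arrive. A self-contained toy sketch of that same pattern (stand-in worker, not SimBA code):

```python
import functools
import multiprocessing

import numpy as np


def _square_chunk(idxs: np.ndarray, offset: int) -> dict:
    # Stand-in for a per-chunk worker such as _read_img_batch_from_video_helper.
    return {int(i): int(i) ** 2 + offset for i in idxs}


if __name__ == "__main__":
    chunks = np.array_split(np.arange(0, 100), 4)          # split the work across cores
    worker = functools.partial(_square_chunk, offset=10)   # bind the fixed keyword arguments
    results = {}
    with multiprocessing.Pool(4) as pool:
        for chunk_result in pool.imap(worker, chunks, chunksize=1):
            results.update(chunk_result)                   # merge per-chunk dictionaries in order
    print(len(results))                                    # 100 entries
```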
```diff
@@ -3572,8 +3611,8 @@ def osf_download(project_id: str, save_dir: Union[str, os.PathLike], storage: st
     :param bool overwrite: If True, overwrite existing files. If False, skip existing files (default: False).

     :example:
-    >>> osf_download(project_id="7fgwn", save_dir=r'E
-    >>> osf_download(project_id="kym42", save_dir=r'E
+    >>> osf_download(project_id="7fgwn", save_dir=r'E:/rgb_white_vs_black_imgs')
+    >>> osf_download(project_id="kym42", save_dir=r'E:/crim13_imgs', overwrite=True)
     """

     _ = get_pkg_version(pkg='osfclient', raise_error=True)
```
simba/utils/yolo.py
CHANGED
```diff
@@ -47,6 +47,9 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
     `Download initial weights <https://huggingface.co/Ultralytics>`__.
     `Example model_yaml <https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml>`__.

+    .. seealso::
+       For the recommended wrapper class with parameter validation, see :class:`simba.model.yolo_fit.FitYolo`.
+
     :param initial_weights: Path to the pre-trained YOLO model weights (usually a `.pt` file). Example weights can be found [here](https://huggingface.co/Ultralytics).
     :param model_yaml: YAML file containing paths to the training, validation, and testing datasets and the object class mappings. Example YAML file can be found [here](https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml).
     :param save_path: Directory path where the trained model, logs, and results will be saved.

@@ -55,7 +58,7 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
     :return: None. The trained model and associated training logs are saved in the specified `project_path`.

     :example:
-    >>> fit_yolo(initial_weights=r"C
+    >>> fit_yolo(initial_weights=r"C:/troubleshooting/coco_data/weights/yolov8n-obb.pt", data=r"C:/troubleshooting/coco_data/model.yaml", save_path=r"C:/troubleshooting/coco_data/mdl", batch=16)
     """

     if not _is_cuda_available()[0]:

@@ -83,6 +86,9 @@ def load_yolo_model(weights_path: Union[str, os.PathLike],
     """
     Load a YOLO model.

+    .. seealso::
+       For recommended wrapper classes that use this function, see :class:`simba.model.yolo_fit.FitYolo`, :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`, and :class:`simba.model.yolo_pose_track_inference.YOLOPoseTrackInference`.
+
     :param Union[str, os.PathLike] weights_path: Path to model weights (.pt, .engine, etc).
     :param bool verbose: Whether to print loading info.
     :param Optional[str] format: Export format, one of VALID_FORMATS or None to skip export.

@@ -169,6 +175,9 @@ def yolo_predict(model: YOLO,
     """
     Produce YOLO predictions.

+    .. seealso::
+       For recommended wrapper classes that use this function, see :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, and :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`.
+
     :param Union[str, os.PathLike] model: Loaded ultralytics.YOLO model. Returned by :func:`~simba.bounding_box_tools.yolo.model.load_yolo_model`.
     :param Union[str, os.PathLike, np.ndarray] source: Path to video, video stream, directory, image, or image as loaded array.
     :param bool half: Whether to use half precision (FP16) for inference to speed up processing.
```
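The new ``seealso`` notes point to the wrapper classes, but the low-level functions can still be chained directly. A hedged sketch of that flow (the import path is assumed from this file being ``simba/utils/yolo.py``; the weights and video paths are hypothetical):

```python
from simba.utils.yolo import load_yolo_model, yolo_predict

# Hypothetical paths, for illustration only.
WEIGHTS_PATH = 'coco_data/mdl/train/weights/best.pt'
VIDEO_PATH = 'coco_data/videos/Video_1.mp4'

# Load the trained weights; format=None skips export, per the docstring.
model = load_yolo_model(weights_path=WEIGHTS_PATH, verbose=True, format=None)

# Run inference on a video source; half=False keeps full FP32 precision.
yolo_predict(model=model, source=VIDEO_PATH, half=False)
```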
simba/video_processors/blob_tracking_executor.py
CHANGED

```diff
@@ -87,9 +87,9 @@ class BlobTrackingExecutor():
     :param bool center: If True, compute center coordinates. Default: True.

     :example:
-    >>> tracker = BlobTrackingExecutor(data=r"C
+    >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle")
     >>> tracker.run()
-    >>> tracker = BlobTrackingExecutor(data=r"C
+    >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle", batch_size=5000)
     >>> tracker.run()
     """
```

simba/video_processors/clahe_ui.py
CHANGED

```diff
@@ -29,7 +29,7 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
     :return Tuple[float, int]: Tuple containing the chosen clip limit and tile size.

     :example:
-    >>> video = cv2.imread(r"D
+    >>> video = cv2.imread(r"D:/EPM/sample_2/video_1.mp4")
     >>> interactive_clahe_ui(data=video)
     """
     global original_img, font_size, x_spacer, y_spacer, txt
```
simba/video_processors/egocentric_video_rotator.py
CHANGED

```diff
@@ -9,10 +9,12 @@ import numpy as np
 from simba.utils.checks import (check_file_exist_and_readable,
                                 check_if_dir_exists, check_if_valid_rgb_tuple,
                                 check_int, check_valid_array,
-                                check_valid_boolean,
+                                check_valid_boolean, check_valid_cpu_pool,
+                                check_valid_tuple)
 from simba.utils.data import (align_target_warpaffine_vectors,
                               center_rotation_warpaffine_vectors,
-                              egocentrically_align_pose,
+                              egocentrically_align_pose, get_cpu_pool,
+                              terminate_cpu_pool)
 from simba.utils.enums import Defaults, Formats
 from simba.utils.printing import SimbaTimer, stdout_success
 from simba.utils.read_write import (concatenate_videos_in_folder,

@@ -92,9 +94,9 @@ class EgocentricVideoRotator():
     :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video as the same dir as the input video with the `_rotated` suffix.

     :example:
-    >>> DATA_PATH = "C
-    >>> VIDEO_PATH = "C
-    >>> SAVE_PATH = "C
+    >>> DATA_PATH = "C:/501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = "C:/501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = "C:/501_MA142_Gi_Saline_0513_rotated.mp4"
     >>> ANCHOR_LOC = np.array([250, 250])

     >>> df = read_df(file_path=DATA_PATH, file_type='csv')

@@ -114,7 +116,8 @@ class EgocentricVideoRotator():
                  fill_clr: Tuple[int, int, int] = (0, 0, 0),
                  core_cnt: int = -1,
                  save_path: Optional[Union[str, os.PathLike]] = None,
-                 gpu: Optional[bool] = True
+                 gpu: Optional[bool] = True,
+                 pool: bool = None):

         check_file_exist_and_readable(file_path=video_path)
         self.video_meta_data = get_video_meta_data(video_path=video_path)

@@ -125,10 +128,14 @@ class EgocentricVideoRotator():
         check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
         check_if_valid_rgb_tuple(data=fill_clr)
         check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
-        if core_cnt > find_core_cnt()[0] or core_cnt == -1:
-
+        if core_cnt > find_core_cnt()[0] or core_cnt == -1: self.core_cnt = find_core_cnt()[0]
+        else: self.core_cnt = core_cnt
+        if pool is not None:
+            check_valid_cpu_pool(value=pool, source=self.__class__.__name__, max_cores=find_core_cnt()[0], min_cores=2, raise_error=True)
+            self.pool_termination_flag = True
         else:
-            self.
+            self.pool_termination_flag = False
+        self.pool = get_cpu_pool(core_cnt=self.core_cnt, source=self.__class__.__name__) if pool is None else pool
         video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
         if save_path is not None:
             self.save_dir = os.path.dirname(save_path)

@@ -151,37 +158,35 @@ class EgocentricVideoRotator():
         frm_list = np.arange(0, self.video_meta_data['frame_count'])
         frm_list = np.array_split(frm_list, self.core_cnt)
         frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
-        if self.verbose:
-
-
-
-
-
-
-
-
-
-
-
-
-
-            print(f"Rotate batch {result}/{self.core_cnt} complete...")
-        terminate_cpu_pool(pool=pool, force=False)
+        if self.verbose: print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+
+        constants = functools.partial(egocentric_video_aligner,
+                                      temp_dir=temp_dir,
+                                      video_name=self.video_name,
+                                      video_path=self.video_path,
+                                      centers=self.centers,
+                                      rotation_vectors=self.rotation_vectors,
+                                      target=self.anchor_loc,
+                                      verbose=self.verbose,
+                                      fill_clr=self.fill_clr,
+                                      gpu=self.gpu)
+        for cnt, result in enumerate(self.pool.imap(constants, frm_list, chunksize=1)):
+            if self.verbose: print(f"Rotate batch {result}/{self.core_cnt} complete...")
+        if self.pool_termination_flag: terminate_cpu_pool(pool=self.pool, force=False)
         concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
         video_timer.stop_timer()
         stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)

-if __name__ == "__main__":
-
-
-
-
-
-
-
-
-
-
-
+# if __name__ == "__main__":
+#     DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+#     VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+#     SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+#     ANCHOR_LOC = np.array([250, 250])
+#
+#     df = read_df(file_path=DATA_PATH, file_type='csv')
+#     bp_cols = [x for x in df.columns if not x.endswith('_p')]
+#     data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+#
+#     _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+#     rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
+#     rotater.run()
```
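The new ``pool`` argument lets callers hand ``EgocentricVideoRotator`` an existing CPU pool and keep it alive across videos (when a pool is supplied, the rotator skips ``terminate_cpu_pool``). A hedged sketch of that batch pattern, with names taken from the hunks above; the ``jobs`` iterable and the data behind it are hypothetical and would be assembled by the caller (e.g., via ``egocentrically_align_pose``):

```python
from simba.utils.data import get_cpu_pool, terminate_cpu_pool
from simba.video_processors.egocentric_video_rotator import EgocentricVideoRotator

# Hypothetical: each job is a (video_path, centers, rotation_vectors) tuple computed beforehand.
jobs = [...]

shared_pool = get_cpu_pool(core_cnt=8, source='batch_rotation')
for video_path, centers, rotation_vectors in jobs:
    rotator = EgocentricVideoRotator(video_path=video_path,
                                     centers=centers,
                                     rotation_vectors=rotation_vectors,
                                     anchor_location=(250, 250),
                                     core_cnt=8,
                                     pool=shared_pool)   # reuse the pool; the rotator will not terminate it
    rotator.run()
terminate_cpu_pool(pool=shared_pool, force=False)        # the caller owns the pool, so the caller shuts it down
```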
simba/video_processors/multi_cropper.py
CHANGED

```diff
@@ -49,7 +49,7 @@ class MultiCropper(object):


     :example:
-    >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C
+    >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C:/troubleshooting/mitra/test', output_folder=r'C:/troubleshooting/mitra/test/cropped', crop_cnt=2, gpu=True)
     >>> cropper.run()
     """
```