simba-uw-tf-dev 4.6.2__py3-none-any.whl → 4.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. simba/assets/.recent_projects.txt +1 -0
  2. simba/assets/lookups/tooptips.json +6 -1
  3. simba/assets/lookups/yolo_schematics/yolo_mitra.csv +9 -0
  4. simba/data_processors/agg_clf_counter_mp.py +52 -53
  5. simba/data_processors/blob_location_computer.py +1 -1
  6. simba/data_processors/circling_detector.py +30 -13
  7. simba/data_processors/cuda/geometry.py +45 -27
  8. simba/data_processors/cuda/image.py +1648 -1598
  9. simba/data_processors/cuda/statistics.py +72 -26
  10. simba/data_processors/cuda/timeseries.py +1 -1
  11. simba/data_processors/cue_light_analyzer.py +5 -9
  12. simba/data_processors/egocentric_aligner.py +25 -7
  13. simba/data_processors/freezing_detector.py +55 -47
  14. simba/data_processors/kleinberg_calculator.py +61 -29
  15. simba/feature_extractors/feature_subsets.py +14 -7
  16. simba/feature_extractors/mitra_feature_extractor.py +2 -2
  17. simba/feature_extractors/straub_tail_analyzer.py +4 -6
  18. simba/labelling/standard_labeller.py +1 -1
  19. simba/mixins/config_reader.py +5 -2
  20. simba/mixins/geometry_mixin.py +22 -36
  21. simba/mixins/image_mixin.py +24 -28
  22. simba/mixins/plotting_mixin.py +28 -10
  23. simba/mixins/statistics_mixin.py +48 -11
  24. simba/mixins/timeseries_features_mixin.py +1 -1
  25. simba/mixins/train_model_mixin.py +68 -33
  26. simba/model/inference_batch.py +2 -2
  27. simba/model/yolo_seg_inference.py +3 -3
  28. simba/outlier_tools/skip_outlier_correction.py +1 -1
  29. simba/plotting/ROI_feature_visualizer_mp.py +3 -5
  30. simba/plotting/clf_validator_mp.py +4 -5
  31. simba/plotting/cue_light_visualizer.py +6 -7
  32. simba/plotting/directing_animals_visualizer_mp.py +2 -3
  33. simba/plotting/distance_plotter_mp.py +378 -378
  34. simba/plotting/gantt_creator.py +29 -10
  35. simba/plotting/gantt_creator_mp.py +96 -33
  36. simba/plotting/geometry_plotter.py +270 -272
  37. simba/plotting/heat_mapper_clf_mp.py +4 -6
  38. simba/plotting/heat_mapper_location_mp.py +2 -2
  39. simba/plotting/light_dark_box_plotter.py +2 -2
  40. simba/plotting/path_plotter_mp.py +26 -29
  41. simba/plotting/plot_clf_results_mp.py +455 -454
  42. simba/plotting/pose_plotter_mp.py +28 -29
  43. simba/plotting/probability_plot_creator_mp.py +288 -288
  44. simba/plotting/roi_plotter_mp.py +31 -31
  45. simba/plotting/single_run_model_validation_video_mp.py +427 -427
  46. simba/plotting/spontaneous_alternation_plotter.py +2 -3
  47. simba/plotting/yolo_pose_track_visualizer.py +32 -27
  48. simba/plotting/yolo_pose_visualizer.py +35 -36
  49. simba/plotting/yolo_seg_visualizer.py +2 -3
  50. simba/pose_importers/simba_blob_importer.py +3 -3
  51. simba/roi_tools/roi_aggregate_stats_mp.py +5 -4
  52. simba/roi_tools/roi_clf_calculator_mp.py +4 -4
  53. simba/sandbox/analyze_runtimes.py +30 -0
  54. simba/sandbox/cuda/egocentric_rotator.py +374 -374
  55. simba/sandbox/get_cpu_pool.py +5 -0
  56. simba/sandbox/proboscis_to_tip.py +28 -0
  57. simba/sandbox/test_directionality.py +47 -0
  58. simba/sandbox/test_nonstatic_directionality.py +27 -0
  59. simba/sandbox/test_pycharm_cuda.py +51 -0
  60. simba/sandbox/test_simba_install.py +41 -0
  61. simba/sandbox/test_static_directionality.py +26 -0
  62. simba/sandbox/test_static_directionality_2d.py +26 -0
  63. simba/sandbox/verify_env.py +42 -0
  64. simba/third_party_label_appenders/transform/coco_keypoints_to_yolo.py +3 -3
  65. simba/third_party_label_appenders/transform/coco_keypoints_to_yolo_bbox.py +2 -2
  66. simba/third_party_label_appenders/transform/simba_to_yolo.py +8 -5
  67. simba/ui/pop_ups/clf_plot_pop_up.py +2 -2
  68. simba/ui/pop_ups/fsttc_pop_up.py +27 -25
  69. simba/ui/pop_ups/gantt_pop_up.py +31 -6
  70. simba/ui/pop_ups/kleinberg_pop_up.py +39 -40
  71. simba/ui/pop_ups/run_machine_models_popup.py +21 -21
  72. simba/ui/pop_ups/simba_to_yolo_keypoints_popup.py +2 -2
  73. simba/ui/pop_ups/video_processing_pop_up.py +37 -29
  74. simba/ui/pop_ups/yolo_inference_popup.py +1 -1
  75. simba/ui/pop_ups/yolo_pose_train_popup.py +1 -1
  76. simba/ui/tkinter_functions.py +3 -0
  77. simba/utils/custom_feature_extractor.py +1 -1
  78. simba/utils/data.py +90 -14
  79. simba/utils/enums.py +1 -0
  80. simba/utils/errors.py +441 -440
  81. simba/utils/lookups.py +1203 -1203
  82. simba/utils/printing.py +124 -124
  83. simba/utils/read_write.py +3769 -3721
  84. simba/utils/yolo.py +10 -1
  85. simba/video_processors/blob_tracking_executor.py +2 -2
  86. simba/video_processors/clahe_ui.py +1 -1
  87. simba/video_processors/egocentric_video_rotator.py +44 -41
  88. simba/video_processors/multi_cropper.py +1 -1
  89. simba/video_processors/video_processing.py +75 -33
  90. simba/video_processors/videos_to_frames.py +43 -33
  91. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/METADATA +4 -3
  92. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/RECORD +96 -85
  93. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/LICENSE +0 -0
  94. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/WHEEL +0 -0
  95. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/entry_points.txt +0 -0
  96. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/top_level.txt +0 -0
simba/utils/yolo.py CHANGED
@@ -47,6 +47,9 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
  `Download initial weights <https://huggingface.co/Ultralytics>`__.
  `Example model_yaml <https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml>`__.

+ .. seealso::
+ For the recommended wrapper class with parameter validation, see :class:`simba.model.yolo_fit.FitYolo`.
+
  :param initial_weights: Path to the pre-trained YOLO model weights (usually a `.pt` file). Example weights can be found [here](https://huggingface.co/Ultralytics).
  :param model_yaml: YAML file containing paths to the training, validation, and testing datasets and the object class mappings. Example YAML file can be found [here](https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml).
  :param save_path: Directory path where the trained model, logs, and results will be saved.
@@ -55,7 +58,7 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
  :return: None. The trained model and associated training logs are saved in the specified `project_path`.

  :example:
- >>> fit_yolo(initial_weights=r"C:\troubleshooting\coco_data\weights\yolov8n-obb.pt", data=r"C:\troubleshooting\coco_data\model.yaml", save_path=r"C:\troubleshooting\coco_data\mdl", batch=16)
+ >>> fit_yolo(initial_weights=r"C:/troubleshooting/coco_data/weights/yolov8n-obb.pt", data=r"C:/troubleshooting/coco_data/model.yaml", save_path=r"C:/troubleshooting/coco_data/mdl", batch=16)
  """

  if not _is_cuda_available()[0]:
@@ -83,6 +86,9 @@ def load_yolo_model(weights_path: Union[str, os.PathLike],
  """
  Load a YOLO model.

+ .. seealso::
+ For recommended wrapper classes that use this function, see :class:`simba.model.yolo_fit.FitYolo`, :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`, and :class:`simba.model.yolo_pose_track_inference.YOLOPoseTrackInference`.
+
  :param Union[str, os.PathLike] weights_path: Path to model weights (.pt, .engine, etc).
  :param bool verbose: Whether to print loading info.
  :param Optional[str] format: Export format, one of VALID_FORMATS or None to skip export.
@@ -169,6 +175,9 @@ def yolo_predict(model: YOLO,
  """
  Produce YOLO predictions.

+ .. seealso::
+ For recommended wrapper classes that use this function, see :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, and :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`.
+
  :param Union[str, os.PathLike] model: Loaded ultralytics.YOLO model. Returned by :func:`~simba.bounding_box_tools.yolo.model.load_yolo_model`.
  :param Union[str, os.PathLike, np.ndarray] source: Path to video, video stream, directory, image, or image as loaded array.
  :param bool half: Whether to use half precision (FP16) for inference to speed up processing.
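Editor's note: the yolo.py hunks only add cross-references and normalize the docstring example paths. For orientation, a minimal sketch of how the two helpers are typically chained, assuming only the keyword names shown in the docstrings above (the weights and video paths are hypothetical):

>>> from simba.utils.yolo import load_yolo_model, yolo_predict
>>> model = load_yolo_model(weights_path=r"C:/yolo_mdl/train/weights/best.pt", verbose=True, format=None)  # hypothetical weights path
>>> results = yolo_predict(model=model, source=r"C:/videos/video_1.mp4", half=False)  # hypothetical video path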
simba/video_processors/blob_tracking_executor.py CHANGED
@@ -87,9 +87,9 @@ class BlobTrackingExecutor():
  :param bool center: If True, compute center coordinates. Default: True.

  :example:
- >>> tracker = BlobTrackingExecutor(data=r"C:\troubleshooting\mitra\test\.temp\blob_definitions.pickle")
+ >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle")
  >>> tracker.run()
- >>> tracker = BlobTrackingExecutor(data=r"C:\troubleshooting\mitra\test\.temp\blob_definitions.pickle", batch_size=5000)
+ >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle", batch_size=5000)
  >>> tracker.run()
  """

simba/video_processors/clahe_ui.py CHANGED
@@ -29,7 +29,7 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
  :return Tuple[float, int]: Tuple containing the chosen clip limit and tile size.

  :example:
- >>> video = cv2.imread(r"D:\EPM\sample_2\video_1.mp4")
+ >>> video = cv2.imread(r"D:/EPM/sample_2/video_1.mp4")
  >>> interactive_clahe_ui(data=video)
  """
  global original_img, font_size, x_spacer, y_spacer, txt
simba/video_processors/egocentric_video_rotator.py CHANGED
@@ -9,10 +9,12 @@ import numpy as np
  from simba.utils.checks import (check_file_exist_and_readable,
  check_if_dir_exists, check_if_valid_rgb_tuple,
  check_int, check_valid_array,
- check_valid_boolean, check_valid_tuple)
+ check_valid_boolean, check_valid_cpu_pool,
+ check_valid_tuple)
  from simba.utils.data import (align_target_warpaffine_vectors,
  center_rotation_warpaffine_vectors,
- egocentrically_align_pose)
+ egocentrically_align_pose, get_cpu_pool,
+ terminate_cpu_pool)
  from simba.utils.enums import Defaults, Formats
  from simba.utils.printing import SimbaTimer, stdout_success
  from simba.utils.read_write import (concatenate_videos_in_folder,
@@ -92,9 +94,9 @@ class EgocentricVideoRotator():
  :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video as the same dir as the input video with the `_rotated` suffix.

  :example:
- >>> DATA_PATH = "C:\501_MA142_Gi_Saline_0513.csv"
- >>> VIDEO_PATH = "C:\501_MA142_Gi_Saline_0513.mp4"
- >>> SAVE_PATH = "C:\501_MA142_Gi_Saline_0513_rotated.mp4"
+ >>> DATA_PATH = "C:/501_MA142_Gi_Saline_0513.csv"
+ >>> VIDEO_PATH = "C:/501_MA142_Gi_Saline_0513.mp4"
+ >>> SAVE_PATH = "C:/501_MA142_Gi_Saline_0513_rotated.mp4"
  >>> ANCHOR_LOC = np.array([250, 250])

  >>> df = read_df(file_path=DATA_PATH, file_type='csv')
@@ -114,7 +116,8 @@ class EgocentricVideoRotator():
  fill_clr: Tuple[int, int, int] = (0, 0, 0),
  core_cnt: int = -1,
  save_path: Optional[Union[str, os.PathLike]] = None,
- gpu: Optional[bool] = True):
+ gpu: Optional[bool] = True,
+ pool: bool = None):

  check_file_exist_and_readable(file_path=video_path)
  self.video_meta_data = get_video_meta_data(video_path=video_path)
@@ -125,10 +128,14 @@ class EgocentricVideoRotator():
  check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
  check_if_valid_rgb_tuple(data=fill_clr)
  check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
- if core_cnt > find_core_cnt()[0] or core_cnt == -1:
- self.core_cnt = find_core_cnt()[0]
+ if core_cnt > find_core_cnt()[0] or core_cnt == -1: self.core_cnt = find_core_cnt()[0]
+ else: self.core_cnt = core_cnt
+ if pool is not None:
+ check_valid_cpu_pool(value=pool, source=self.__class__.__name__, max_cores=find_core_cnt()[0], min_cores=2, raise_error=True)
+ self.pool_termination_flag = True
  else:
- self.core_cnt = core_cnt
+ self.pool_termination_flag = False
+ self.pool = get_cpu_pool(core_cnt=self.core_cnt, source=self.__class__.__name__) if pool is None else pool
  video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
  if save_path is not None:
  self.save_dir = os.path.dirname(save_path)
@@ -151,39 +158,35 @@ class EgocentricVideoRotator():
  frm_list = np.arange(0, self.video_meta_data['frame_count'])
  frm_list = np.array_split(frm_list, self.core_cnt)
  frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
- if self.verbose:
- print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
- with multiprocessing.Pool(self.core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
- constants = functools.partial(egocentric_video_aligner,
- temp_dir=temp_dir,
- video_name=self.video_name,
- video_path=self.video_path,
- centers=self.centers,
- rotation_vectors=self.rotation_vectors,
- target=self.anchor_loc,
- verbose=self.verbose,
- fill_clr=self.fill_clr,
- gpu=self.gpu)
- for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
- if self.verbose:
- print(f"Rotate batch {result}/{self.core_cnt} complete...")
- pool.terminate()
- pool.join()
-
+ if self.verbose: print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+
+ constants = functools.partial(egocentric_video_aligner,
+ temp_dir=temp_dir,
+ video_name=self.video_name,
+ video_path=self.video_path,
+ centers=self.centers,
+ rotation_vectors=self.rotation_vectors,
+ target=self.anchor_loc,
+ verbose=self.verbose,
+ fill_clr=self.fill_clr,
+ gpu=self.gpu)
+ for cnt, result in enumerate(self.pool.imap(constants, frm_list, chunksize=1)):
+ if self.verbose: print(f"Rotate batch {result}/{self.core_cnt} complete...")
+ if self.pool_termination_flag: terminate_cpu_pool(pool=self.pool, force=False)
  concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
  video_timer.stop_timer()
  stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)

- if __name__ == "__main__":
- DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
- VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
- SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
- ANCHOR_LOC = np.array([250, 250])
-
- df = read_df(file_path=DATA_PATH, file_type='csv')
- bp_cols = [x for x in df.columns if not x.endswith('_p')]
- data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
-
- _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
- rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
- rotater.run()
+ # if __name__ == "__main__":
+ # DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+ # VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+ # SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+ # ANCHOR_LOC = np.array([250, 250])
+ #
+ # df = read_df(file_path=DATA_PATH, file_type='csv')
+ # bp_cols = [x for x in df.columns if not x.endswith('_p')]
+ # data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+ #
+ # _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+ # rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
+ # rotater.run()
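Editor's note: the rotator hunks above add an optional pool argument so a caller can hand EgocentricVideoRotator a pre-built worker pool, with get_cpu_pool and terminate_cpu_pool (newly imported from simba.utils.data) managing the pool lifecycle. A minimal sketch of that call pattern, assuming only the constructor keywords shown in the hunks and centers/rotation_vectors produced by egocentrically_align_pose as in the docstring example (paths are hypothetical; note that, per the diff, a caller-supplied pool is terminated once run() finishes):

>>> from simba.utils.data import get_cpu_pool
>>> from simba.video_processors.egocentric_video_rotator import EgocentricVideoRotator
>>> pool = get_cpu_pool(core_cnt=8, source='egocentric_example')  # core_cnt/source kwargs as used in the diff
>>> rotater = EgocentricVideoRotator(video_path=r"C:/rotate_ex/videos/video_1.mp4", centers=centers, rotation_vectors=rotation_vectors, anchor_location=(250, 250), core_cnt=8, pool=pool)  # hypothetical path
>>> rotater.run()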
simba/video_processors/multi_cropper.py CHANGED
@@ -49,7 +49,7 @@ class MultiCropper(object):


  :example:
- >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C:\troubleshooting\mitra\test', output_folder=r'C:\troubleshooting\mitra\test\cropped', crop_cnt=2, gpu=True)
+ >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C:/troubleshooting/mitra/test', output_folder=r'C:/troubleshooting/mitra/test/cropped', crop_cnt=2, gpu=True)
  >>> cropper.run()
  """

simba/video_processors/video_processing.py CHANGED
@@ -41,7 +41,7 @@ from simba.utils.checks import (check_ffmpeg_available,
  check_valid_boolean, check_valid_codec,
  check_valid_cpu_pool, check_valid_lst,
  check_valid_tuple)
- from simba.utils.data import (find_frame_numbers_from_time_stamp,
+ from simba.utils.data import (find_frame_numbers_from_time_stamp, get_cpu_pool,
  terminate_cpu_pool)
  from simba.utils.enums import OS, ConfigKey, Defaults, Formats, Options, Paths
  from simba.utils.errors import (CountError, DirectoryExistError,
@@ -52,7 +52,8 @@ from simba.utils.errors import (CountError, DirectoryExistError,
  NoDataError, NoFilesFoundError,
  NotDirectoryError, ResolutionError,
  SimBAGPUError)
- from simba.utils.lookups import (get_ffmpeg_crossfade_methods, get_fonts,
+ from simba.utils.lookups import (get_current_time,
+ get_ffmpeg_crossfade_methods, get_fonts,
  get_named_colors, percent_to_crf_lookup,
  percent_to_qv_lk, quality_pct_to_crf,
  video_quality_to_preset_lookup)
@@ -408,6 +409,9 @@ def clahe_enhance_video(file_path: Union[str, os.PathLike],
  """
  Convert a single video file to clahe-enhanced greyscale .avi file.

+ .. seealso::
+ For multicore method, see :func:`simba.video_processors.video_processing.clahe_enhance_video_mp`.
+
  .. image:: _static/img/clahe_enhance_video.gif
  :width: 800
  :align: center
@@ -490,7 +494,7 @@ def _clahe_enhance_video_mp_helper(data: tuple,
  img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  clahe_frm = clahe_filter.apply(img)
  writer.write(clahe_frm)
- print(f"CLAHE converted frame {current_frm}/{video_meta_data['frame_count']} (core batch: {batch_id}, video name: {video_meta_data['video_name']})...")
+ print(f"[{get_current_time()}] CLAHE converted frame {current_frm}/{video_meta_data['frame_count']} (core batch: {batch_id}, video name: {video_meta_data['video_name']})...")
  else:
  FrameRangeWarning(msg=f'Could not read frame {current_frm} in video {video_meta_data["video_name"]}', source=_clahe_enhance_video_mp_helper.__name__)
  break
@@ -508,10 +512,20 @@ def clahe_enhance_video_mp(file_path: Union[str, os.PathLike],
  """
  Convert a single video file to clahe-enhanced greyscale file using multiprocessing.

+ .. seealso::
+ For single core method, see :func:`simba.video_processors.video_processing.clahe_enhance_video`.
+
  .. image:: _static/img/clahe_enhance_video.gif
  :width: 800
  :align: center

+ .. csv-table::
+ :header: EXPECTED RUNTIMES
+ :file: ../../docs/tables/clahe_enhance_video_mp.csv
+ :widths: 10, 45, 45
+ :align: center
+ :header-rows: 1
+
  :param Union[str, os.PathLike] file_path: Path to video file.
  :param Optional[int] clip_limit: CLAHE amplification limit. Inccreased clip limit reduce noise in output. Default: 2.
  :param Optional[Tuple[int]] tile_grid_size: The histogram kernel size.
@@ -549,20 +563,20 @@ def clahe_enhance_video_mp(file_path: Union[str, os.PathLike],
  frm_idx = list(range(0, video_meta_data['frame_count']))
  frm_idx = np.array_split(frm_idx, core_cnt)
  frm_idx = [(i, list(j)) for i, j in enumerate(frm_idx)]
- with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
- constants = functools.partial(_clahe_enhance_video_mp_helper,
- video_path=file_path,
- clip_limit=clip_limit,
- temp_dir=tempdir,
- tile_grid_size=tile_grid_size)
- for cnt, result in enumerate(pool.imap(constants, frm_idx, chunksize=1)):
- print(f'Batch {(result + 1)} / {core_cnt} complete...')
- pool.terminate()
- pool.join()
+ pool = get_cpu_pool(core_cnt=core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value, source=clahe_enhance_video_mp.__name__)
+ constants = functools.partial(_clahe_enhance_video_mp_helper,
+ video_path=file_path,
+ clip_limit=clip_limit,
+ temp_dir=tempdir,
+ tile_grid_size=tile_grid_size)
+ for cnt, result in enumerate(pool.imap(constants, frm_idx, chunksize=1)):
+ print(f'[{get_current_time()}] Batch {(result + 1)} / {core_cnt} complete...')
+
+ terminate_cpu_pool(pool=pool, force=False, source=clahe_enhance_video_mp.__name__)
  print(f"Joining {video_meta_data['video_name']} multiprocessed video...")
  concatenate_videos_in_folder(in_folder=tempdir, save_path=save_path, remove_splits=True, gpu=gpu)
  video_timer.stop_timer()
- print(f"CLAHE video {video_meta_data['video_name']} complete (elapsed time: {video_timer.elapsed_time_str}s) ...")
+ print(f"[{get_current_time()}] CLAHE video {video_meta_data['video_name']} complete (elapsed time: {video_timer.elapsed_time_str}s) ...")


  #_ = clahe_enhance_video_mp(file_path= r"D:\EPM_4\original\1.mp4")
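Editor's note: the clahe_enhance_video_mp hunks replace the inline multiprocessing.Pool context manager with the new get_cpu_pool / terminate_cpu_pool helpers and timestamp the progress prints via get_current_time. A minimal sketch of that pool lifecycle in isolation, assuming only the helper signatures used in the diff (my_frame_worker and batches are hypothetical placeholders):

>>> import functools
>>> from simba.utils.data import get_cpu_pool, terminate_cpu_pool
>>> pool = get_cpu_pool(core_cnt=8, source='example_job')                   # create the worker pool
>>> work = functools.partial(my_frame_worker, save_dir=r"C:/temp_frames")   # hypothetical per-batch worker
>>> for batch_id in pool.imap(work, batches, chunksize=1):                  # batches: pre-split frame-index chunks
...     print(f'Batch {batch_id} complete...')
>>> terminate_cpu_pool(pool=pool, force=False, source='example_job')        # release the workers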
@@ -662,7 +676,7 @@ def change_single_video_fps(file_path: Union[str, os.PathLike],

  :param Union[str, os.PathLike] file_path: Path to video file
  :param Union[int, float] fps: FPS of the new video file.
- :param bool gpu: If True, use NVIDEA GPU codecs. Default False.
+ :param bool gpu: If True, use NVIDEA GPU codecs. Default False. GPU can provide significant speedup (3-4x faster) for FPS conversion, especially for longer videos.
  :param Optional[str] codec: Video codec to use. If None, automatically selects based on file extension (libvpx-vp9 for .webm, mpeg4 for .avi, libx264 for others). Default None.
  :param Optional[Union[str, os.PathLike]] save_path: Path where to save the converted video. If None, saves in the same directory as input file with ``_fps_{fps}`` suffix. Default None.
  :param Optional[int] quality: Video quality (CRF value). Lower values = higher quality. Range 0-52. Default 23.
@@ -688,7 +702,7 @@ def change_single_video_fps(file_path: Union[str, os.PathLike],
  else:
  check_if_dir_exists(in_dir=os.path.dirname(save_path), raise_error=True)
  quality = 23 if not check_int(name='quality', value=quality, min_value=0, max_value=52, raise_error=False)[0] else int(quality)
- if verbose: print(f"Converting the FPS to {fps} for video {file_name} ...")
+ if verbose: print(f"Converting the FPS {video_meta_data['fps']} -> {fps} for video {file_name} ...")
  if codec is None:
  if ext.lower() == '.webm':
  codec = 'libvpx-vp9'
@@ -699,10 +713,14 @@ def change_single_video_fps(file_path: Union[str, os.PathLike],
  if os.path.isfile(save_path):
  FileExistWarning(msg=f"Overwriting existing file at {save_path}...", source=change_single_video_fps.__name__,)
  if gpu:
- cmd = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -vf "fps={fps}" -c:v h264_nvenc -rc vbr -cq {quality} -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y'
- else:
+ cmd = f'ffmpeg -hwaccel auto -i "{file_path}" -vf "fps={fps}" -c:v h264_nvenc -preset p4 -cq {quality} -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y'
+ result = subprocess.run(cmd, shell=True)
+ if result.returncode != 0:
+ if verbose: SimBAGPUError(msg=f'FPS convertion ({video_meta_data["fps"]}->{fps}) GPU for video {file_name} failed, using CPU instead...')
+ gpu = False
+ if not gpu:
  cmd = f'ffmpeg -i "{file_path}" -filter:v fps=fps={fps} -c:v {codec} -crf {quality} -c:a aac "{save_path}" -loglevel error -stats -hide_banner -y'
- subprocess.call(cmd, shell=True)
+ subprocess.call(cmd, shell=True)
  timer.stop_timer()
  if verbose: stdout_success(msg=f'SIMBA COMPLETE: FPS of video {file_name} changed from {str(video_meta_data["fps"])} to {str(fps)} and saved in directory {save_path}', elapsed_time=timer.elapsed_time_str, source=change_single_video_fps.__name__)

@@ -711,7 +729,8 @@ def change_fps_of_multiple_videos(path: Union[str, os.PathLike, List[Union[str,
  fps: int,
  quality: int = 23,
  save_dir: Optional[Union[str, os.PathLike]] = None,
- gpu: Optional[bool] = False) -> None:
+ gpu: Optional[bool] = False,
+ verbose: bool = True) -> None:
  """
  Change the fps of all video files in a folder. Results are stored in the same directory as in the input files with
  the suffix ``_fps_new_fps``.
@@ -721,6 +740,7 @@ def change_fps_of_multiple_videos(path: Union[str, os.PathLike, List[Union[str,
  :param int quality: Video quality (CRF value). Lower values = higher quality. Range 0-52. Default 23.
  :param Optional[Union[str, os.PathLike]] save_dir: If not None, then the directory where to store converted videos. If None, then stores the new videos in the same directory as the input video with the ``_fps_{fps}.file_extension`` suffix.
  :param Optional[bool] gpu: If True, use NVIDEA GPU codecs. Default False.
+ :param bool verbose: If True, prints conversion progress. Default True.
  :returns: None.

  :example:
@@ -750,7 +770,7 @@ def change_fps_of_multiple_videos(path: Union[str, os.PathLike, List[Union[str,
  video_meta_data = get_video_meta_data(video_path=file_path)
  if int(fps) == int(video_meta_data["fps"]):
  SameInputAndOutputWarning(msg=f"The new FPS ({fps}) is the same or lower than the original FPS ({video_meta_data['fps']}) for video {file_name}", source=change_fps_of_multiple_videos.__name__)
- print(f"Converting FPS from {video_meta_data['fps']} to {fps} for {file_name}...")
+ if verbose: print(f"Converting the FPS {video_meta_data['fps']} -> {fps} for video {file_name} ...")
  if save_dir is None:
  save_path = os.path.join(dir_name, file_name + f"_fps_{fps}{ext}")
  else:
@@ -758,15 +778,26 @@ def change_fps_of_multiple_videos(path: Union[str, os.PathLike, List[Union[str,
  if ext.lower() == '.webm': codec = 'libvpx-vp9'
  elif ext.lower() == '.avi': codec = 'mpeg4'
  else: codec = 'libx264'
+ if os.path.isfile(save_path):
+ FileExistWarning(msg=f"Overwriting existing file at {save_path}...", source=change_single_video_fps.__name__, )
+ if gpu:
+ cmd = f'ffmpeg -hwaccel auto -i "{file_path}" -vf "fps={fps}" -c:v h264_nvenc -preset p4 -cq {quality} -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y'
+ result = subprocess.run(cmd, shell=True)
+ if result.returncode != 0:
+ if verbose: SimBAGPUError(msg=f'FPS convertion ({video_meta_data["fps"]}->{fps}) GPU for video {file_name} failed, using CPU instead...')
+ gpu = False
+ if not gpu:
+ cmd = f'ffmpeg -i "{file_path}" -filter:v fps=fps={fps} -c:v {codec} -crf {quality} -c:a aac "{save_path}" -loglevel error -stats -hide_banner -y'
+ subprocess.call(cmd, shell=True)
  if gpu:
  command = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -vf "fps={fps}" -c:v h264_nvenc -rc vbr -cq {quality} -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y'
  else:
  command = f'ffmpeg -i "{file_path}" -filter:v fps=fps={fps} -c:v {codec} -crf {quality} -c:a aac "{save_path}" -loglevel error -stats -hide_banner -y'
  subprocess.call(command, shell=True)
  video_timer.stop_timer()
- print(f"Video {file_name} complete (saved at {save_path})... (elapsed time: {video_timer.elapsed_time_str}s)")
+ if verbose: print(f"Video {file_name} complete (saved at {save_path})... (elapsed time: {video_timer.elapsed_time_str}s)")
  timer.stop_timer()
- stdout_success(msg=f"SIMBA COMPLETE: FPS of {len(video_paths)} video(s) changed to {fps}", elapsed_time=timer.elapsed_time_str, source=change_fps_of_multiple_videos.__name__,)
+ if verbose: stdout_success(msg=f"SIMBA COMPLETE: FPS of {len(video_paths)} video(s) changed to {fps}", elapsed_time=timer.elapsed_time_str, source=change_fps_of_multiple_videos.__name__,)


  def convert_video_powerpoint_compatible_format(file_path: Union[str, os.PathLike], gpu: Optional[bool] = False) -> None:
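Editor's note: with the changes above, both change_single_video_fps and change_fps_of_multiple_videos first attempt an NVENC encode when gpu=True and, per the added returncode check, fall back to the CPU codec path if the ffmpeg call fails. A minimal call sketch using only parameters documented in the hunks above (the input path is hypothetical):

>>> from simba.video_processors.video_processing import change_single_video_fps
>>> change_single_video_fps(file_path=r"C:/videos/video_1.mp4", fps=15, gpu=True, quality=23, verbose=True)  # hypothetical path; drops to CPU encoding if the GPU encode fails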
@@ -1102,7 +1133,7 @@ def clip_video_in_range(file_path: Union[str, os.PathLike],
  end_time: str,
  out_dir: Optional[Union[str, os.PathLike]] = None,
  save_path: Optional[Union[str, os.PathLike]] = None,
- codec: str = 'libvpx-vp9',
+ codec: str = 'libx264',
  quality: int = 60,
  verbose: bool = True,
  overwrite: Optional[bool] = False,
@@ -1145,6 +1176,7 @@ def clip_video_in_range(file_path: Union[str, os.PathLike],
  check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=end_time, video_path=file_path)
  quality = 60 if not check_int(name='quality', value=quality, min_value=0, max_value=100, raise_error=False)[0] else int(quality)
  quality_crf = quality_pct_to_crf(pct=quality)
+ codec = 'libvpx-vp9' if ext.lower() == '.webm' else codec
  if not include_clip_time_in_filename:
  save_name = os.path.join(dir, file_name + "_clipped.mp4")
  else:
@@ -1154,13 +1186,12 @@ def clip_video_in_range(file_path: Union[str, os.PathLike],
  save_name = deepcopy(save_path)
  if os.path.isfile(save_name) and (not overwrite):
  raise FileExistError(msg=f"SIMBA ERROR: The outfile file already exist: {save_name}.", source=clip_video_in_range.__name__)
-
  if gpu:
- cmd = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -ss {start_time} -to {end_time} -async 1 -rc vbr -cq {quality_crf} "{save_name}" -loglevel error -stats -hide_banner -y'
+ cmd = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -ss {start_time} -to {end_time} -async 1 -c:v h264_nvenc -rc vbr -cq {quality_crf} "{save_name}" -loglevel error -stats -hide_banner -y'
  else:
  cmd = f'ffmpeg -i "{file_path}" -ss {start_time} -to {end_time} -async 1 -c:v {codec} -crf {quality_crf} "{save_name}" -loglevel error -stats -hide_banner -y'
  if verbose: print(f"Clipping video {file_name} between {start_time} and {end_time}... ")
- subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+ subprocess.call(cmd, shell=True)
  timer.stop_timer()
  if verbose: stdout_success(msg=f"Video converted! {save_name} generated!", elapsed_time=timer.elapsed_time_str, source=clip_video_in_range.__name__)

@@ -3857,6 +3888,18 @@ def create_average_frm(video_path: Union[str, os.PathLike],
  """
  Create an image representing the average frame of a segment in a video or an entire video.

+ .. seealso::
+ See :func:`simba.data_processors.cuda.image.create_average_frm_cupy`, :func:`simba.data_processors.cuda.image.create_average_frm_cuda` for GPU acceleration.
+ This one appears quicker than the GPU implementations...
+
+ .. csv-table::
+ :header: EXPECTED RUNTIMES
+ :file: ../../docs/tables/create_average_frame.csv
+ :widths: 10, 45, 45
+ :align: center
+ :class: simba-table
+ :header-rows: 1
+
  .. video:: _static/img/create_average_frm_1.webm
  :width: 800
  :autoplay:
@@ -3882,10 +3925,6 @@ def create_average_frm(video_path: Union[str, os.PathLike],
  Either pass ``start_frm`` and ``end_frm`` OR ``start_time`` and ``end_time`` OR pass all four arguments as None.
  If all are None, then the entire video will be used to create the average frame.

- .. seealso:
- See :func:`simba.data_processors.cuda.image.create_average_frm_cupy`, :func:`simba.data_processors.cuda.image.create_average_frm_cuda` for GPU acceleration.
- This one appears quicker than the GPU implementations...
-
  :param Union[str, os.PathLike] video_path: The path to the video to create the average frame from. Default: None.
  :param Optional[int] start_frm: The first frame in the segment to create the average frame from. Default: None.
  :param Optional[int] end_frm: The last frame in the segment to create the average frame from. Default: None.
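Editor's note: the create_average_frm hunks move the GPU cross-reference into a proper seealso block and add an expected-runtimes table. A minimal call sketch, assuming only the keyword names shown in the docstring above and a hypothetical input path (per the docstring, pass either the frame range, the timestamp range, or neither to average the whole video):

>>> from simba.video_processors.video_processing import create_average_frm
>>> avg_img = create_average_frm(video_path=r"C:/videos/20min.mp4", start_time='00:00:00', end_time='00:01:00', verbose=True)  # hypothetical path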
@@ -4844,8 +4883,7 @@ def get_video_slic(video_path: Union[str, os.PathLike],
  sigma=sigma)
  for cnt, core_batch in enumerate(pool.map(constants, frm_ranges, chunksize=1)):
  print(f'Core batch {core_batch} complete...')
- pool.join()
- pool.terminate()
+ terminate_cpu_pool(pool=pool, force=False)
  timer.stop_timer()
  concatenate_videos_in_folder(in_folder=temp_folder, save_path=save_path)
  stdout_success(msg=f'SLIC video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
@@ -5159,6 +5197,10 @@ def change_playback_speed_dir(data_dir: Union[str, os.PathLike],



+
+ #x = create_average_frm(video_path=r"D:\troubleshooting\mitra\project_folder\videos\average_cpu_test\20min.mp4", verbose=True)
+
+
  #change_playback_speed_dir(data_dir=r'E:\open_video\barnes_maze\test', speed=5)

simba/video_processors/videos_to_frames.py CHANGED
@@ -6,17 +6,17 @@ except:
  from typing_extensions import Literal

  import functools
- import multiprocessing
  import os
+ from datetime import datetime

  import cv2

  from simba.utils.checks import (check_if_dir_exists, check_int, check_str,
  check_valid_boolean)
+ from simba.utils.data import get_cpu_pool, terminate_cpu_pool
  from simba.utils.printing import SimbaTimer, stdout_success
- from simba.utils.read_write import (find_core_cnt, get_video_meta_data,
- read_frm_of_video)
- from simba.utils.data import terminate_cpu_pool
+ from simba.utils.read_write import (find_core_cnt, get_fn_ext,
+ get_video_meta_data, read_frm_of_video)

  JPEG, PNG, WEBP = 'jpeg', 'png', 'webp'

@@ -40,7 +40,7 @@ def _video_to_frms_helper(img_batch: Tuple[int, List[int]],
  else:
  save_path = os.path.join(save_dir, f'{frm_idx}.{img_format}')
  if verbose:
- print(f"Saving image {save_path} ...")
+ print(f"Saving image {save_path} ({frm_idx}/{video_meta_data['frame_count']})...")
  img = read_frm_of_video(video_path=cap, frame_index=frm_idx, greyscale=greyscale, clahe=clahe, black_and_white=black_and_white)
  if img_format == WEBP:
  cv2.imwrite(save_path, img, [cv2.IMWRITE_WEBP_QUALITY, quality])
@@ -51,7 +51,7 @@ def _video_to_frms_helper(img_batch: Tuple[int, List[int]],
  return batch_cnt

  def video_to_frames(video_path: Union[str, os.PathLike],
- save_dir: Union[str, os.PathLike],
+ save_dir: Optional[Union[str, os.PathLike]] = None,
  quality: Optional[int] = 95,
  img_format: Literal['png', 'webp'] = 'png',
  verbose: bool = True,
@@ -61,10 +61,16 @@ def video_to_frames(video_path: Union[str, os.PathLike],
  black_and_white: bool = False,
  include_video_name_in_filename: bool = True):

-
  """
  Extract all frames from a video file and save them as individual image files.

+ .. csv-table::
+ :header: EXPECTED RUNTIMES
+ :file: ../../docs/tables/video_to_frames.csv
+ :widths: 10, 45, 45
+ :align: center
+ :header-rows: 1
+
  .. note::
  Uses multiprocessing for faster frame extraction. Frames are saved with sequential numbering (0, 1, 2, ...).

@@ -81,8 +87,8 @@ def video_to_frames(video_path: Union[str, os.PathLike],
  :return: None. Frames are saved to disk in the specified directory.

  :example:
- >>> video_to_frames(video_path=r"C:\troubleshooting\SDS_pre_post\project_folder\videos\SDI100 x ALR2 post_d7.mp4",
- ... save_dir=r'C:\troubleshooting\SDS_pre_post\project_folder\videos\test',
+ >>> video_to_frames(video_path=r"C:/troubleshooting/SDS_pre_post/project_folder/videos/SDI100 x ALR2 post_d7.mp4",
+ ... save_dir=r'C:/troubleshooting/SDS_pre_post/project_folder/videos/test',
  ... black_and_white=False,
  ... verbose=True,
  ... img_format='webp',
@@ -91,7 +97,11 @@ def video_to_frames(video_path: Union[str, os.PathLike],

  timer = SimbaTimer(start=True)
  video_meta_data = get_video_meta_data(video_path=video_path)
- check_if_dir_exists(in_dir=save_dir, source=video_to_frames.__name__, raise_error=True)
+ if save_dir is not None:
+ check_if_dir_exists(in_dir=save_dir, source=video_to_frames.__name__, raise_error=True)
+ else:
+ save_dir = os.path.join(get_fn_ext(filepath=video_path)[0], f'{video_meta_data["video_name"]}_frames_{datetime.now().strftime("%Y%m%d%H%M%S")}')
+ os.makedirs(save_dir)
  check_valid_boolean(value=verbose, source=f'{video_to_frames.__name__} verbose')
  check_valid_boolean(value=clahe, source=f'{video_to_frames.__name__} clahe')
  check_valid_boolean(value=greyscale, source=f'{video_to_frames.__name__} greyscale')
@@ -99,35 +109,35 @@ def video_to_frames(video_path: Union[str, os.PathLike],
  check_valid_boolean(value=include_video_name_in_filename, source=f'{video_to_frames.__name__} include_video_name_in_filename')
  check_int(name=f'{video_to_frames.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0], raise_error=True)
  check_int(name=f'{video_to_frames.__name__} quality', value=quality, min_value=1, max_value=100, raise_error=True)
- core_cnt = find_core_cnt()[0] if core_cnt -1 or core_cnt > find_core_cnt()[0] else core_cnt
+ core_cnt = find_core_cnt()[0] if core_cnt == -1 or core_cnt > find_core_cnt()[0] else core_cnt
  check_str(name=f'{video_to_frames.__name__} img_format', value=img_format, options=('jpeg', 'png', 'webp'))
  frm_ids = list(range(0, video_meta_data['frame_count']))
  frm_ids = [frm_ids[i * len(frm_ids) // core_cnt: (i + 1) * len(frm_ids) // core_cnt] for i in range(core_cnt)]
  frm_ids = [(i, j) for i, j in enumerate(frm_ids)]
- with multiprocessing.Pool(core_cnt, maxtasksperchild=100) as pool:
- constants = functools.partial(_video_to_frms_helper,
- verbose=verbose,
- img_format=img_format,
- quality=quality,
- greyscale=greyscale,
- black_and_white=black_and_white,
- include_video_name_in_filename=include_video_name_in_filename,
- video_path=video_path,
- clahe=clahe,
- save_dir=save_dir)
- for cnt, batch_id in enumerate(pool.imap(constants, frm_ids, chunksize=1)):
- if verbose:
- print(f'Video frame batch {batch_id} (of {core_cnt}) complete...')
-
- terminate_cpu_pool(pool=pool, force=True)
+ pool = get_cpu_pool(core_cnt=core_cnt, source=video_to_frames.__name__)
+ constants = functools.partial(_video_to_frms_helper,
+ verbose=verbose,
+ img_format=img_format,
+ quality=quality,
+ greyscale=greyscale,
+ black_and_white=black_and_white,
+ include_video_name_in_filename=include_video_name_in_filename,
+ video_path=video_path,
+ clahe=clahe,
+ save_dir=save_dir)
+ for cnt, batch_id in enumerate(pool.imap(constants, frm_ids, chunksize=1)):
+ if verbose:
+ print(f'Video frame batch {batch_id} (of {core_cnt}) complete...')
+
+ terminate_cpu_pool(pool=pool, force=True, source=video_to_frames.__name__)
  timer.stop_timer()
- if verbose:
- stdout_success(msg=f'All frames for video {video_path} saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+ if verbose: stdout_success(msg=f'All frames ({video_meta_data["frame_count"]}) for video {video_path} saved in {save_dir}', elapsed_time=timer.elapsed_time_str)

  # if __name__ == "__main__":
- # video_to_frames(video_path=r"C:\troubleshooting\SDS_pre_post\project_folder\videos\SDI100 x ALR2 post_d7.mp4",
- # save_dir=r'C:\troubleshooting\SDS_pre_post\project_folder\videos\test',
+ # video_to_frames(video_path=r"D:\troubleshooting\mitra\project_folder\videos\average_cpu_test\15min.mp4",
+ # save_dir=None,
  # black_and_white=False,
  # verbose=True,
- # img_format='webp',
- # clahe=True)
+ # img_format='png',
+ # clahe=False,
+ # core_cnt=18)
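Editor's note: the video_to_frames changes make save_dir optional; when it is None, the diff shows a timestamped '<video_name>_frames_<YYYYmmddHHMMSS>' folder being created next to the input video. A minimal call sketch relying only on keywords present in the hunks above (the video path is hypothetical):

>>> from simba.video_processors.videos_to_frames import video_to_frames
>>> video_to_frames(video_path=r"C:/videos/video_1.mp4", save_dir=None, img_format='png', core_cnt=-1, verbose=True)  # hypothetical path; frames land in an auto-created timestamped folder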
{simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.2.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: simba-uw-tf-dev
- Version: 4.6.2
+ Version: 4.7.2
  Summary: Toolkit for computer classification and analysis of behaviors in experimental animals
  Home-page: https://github.com/sgoldenlab/simba
  Author: Simon Nilsson, Jia Jie Choong, Sophia Hwang
@@ -79,7 +79,7 @@ Requires-Dist: yellowbrick (==1.5.0) ; python_version >= "3.9"
  Requires-Dist: kaleido ; python_version >= "3.9"
  Requires-Dist: psutil ; python_version >= "3.9"
  Requires-Dist: h5py (==3.11.0) ; python_version >= "3.9"
- Requires-Dist: numba (==0.59.1) ; python_version >= "3.9"
+ Requires-Dist: numba (==0.63.1) ; python_version >= "3.9"
  Requires-Dist: numexpr (==2.10.0) ; python_version >= "3.9"
  Requires-Dist: statsmodels (==0.14.2) ; python_version >= "3.9"
  Requires-Dist: shap (==0.42.0) ; python_version >= "3.9"
@@ -138,11 +138,12 @@ Requires-Dist: numexpr (==2.10.0) ; (python_version >= "3.9") and extra == 'arm'
  Requires-Dist: statsmodels (==0.14.2) ; (python_version >= "3.9") and extra == 'arm'
  Requires-Dist: tables (==3.9.2) ; (python_version >= "3.9") and extra == 'arm'
  Provides-Extra: gpu
- Requires-Dist: cupy-cuda12x (==13.3.0) ; extra == 'gpu'
+ Requires-Dist: cupy-cuda12x (>=13.6.0) ; extra == 'gpu'
  Requires-Dist: shap (==0.46.1.dev78) ; extra == 'gpu'
  Requires-Dist: cuml-cu12 (==24.12.0) ; extra == 'gpu'
  Requires-Dist: torch (==2.5.0) ; extra == 'gpu'
  Requires-Dist: ultralytics (==8.3.156) ; extra == 'gpu'
+ Requires-Dist: nvidia-cuda-runtime-cu12 ; extra == 'gpu'

  # SimBA (Simple Behavioral Analysis)
  ![SimBA Splash](https://raw.githubusercontent.com/sgoldenlab/simba/master/docs/tutorials_rst/img/index/landing_page_1.png)