simba-uw-tf-dev 4.6.4__py3-none-any.whl → 4.6.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. simba/data_processors/cuda/geometry.py +45 -27
  2. simba/data_processors/cuda/image.py +1620 -1600
  3. simba/data_processors/cuda/statistics.py +17 -9
  4. simba/data_processors/egocentric_aligner.py +24 -6
  5. simba/data_processors/kleinberg_calculator.py +6 -2
  6. simba/feature_extractors/feature_subsets.py +12 -5
  7. simba/feature_extractors/straub_tail_analyzer.py +0 -2
  8. simba/mixins/statistics_mixin.py +9 -2
  9. simba/sandbox/analyze_runtimes.py +30 -0
  10. simba/sandbox/cuda/egocentric_rotator.py +374 -374
  11. simba/sandbox/proboscis_to_tip.py +28 -0
  12. simba/sandbox/test_directionality.py +47 -0
  13. simba/sandbox/test_nonstatic_directionality.py +27 -0
  14. simba/sandbox/test_pycharm_cuda.py +51 -0
  15. simba/sandbox/test_simba_install.py +41 -0
  16. simba/sandbox/test_static_directionality.py +26 -0
  17. simba/sandbox/test_static_directionality_2d.py +26 -0
  18. simba/sandbox/verify_env.py +42 -0
  19. simba/ui/pop_ups/fsttc_pop_up.py +27 -25
  20. simba/ui/pop_ups/kleinberg_pop_up.py +3 -2
  21. simba/utils/data.py +0 -1
  22. simba/utils/errors.py +441 -440
  23. simba/utils/lookups.py +1203 -1203
  24. simba/utils/read_write.py +38 -13
  25. simba/video_processors/egocentric_video_rotator.py +41 -36
  26. simba/video_processors/video_processing.py +5247 -5233
  27. simba/video_processors/videos_to_frames.py +41 -31
  28. {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.6.dist-info}/METADATA +2 -2
  29. {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.6.dist-info}/RECORD +33 -24
  30. {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.6.dist-info}/LICENSE +0 -0
  31. {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.6.dist-info}/WHEEL +0 -0
  32. {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.6.dist-info}/entry_points.txt +0 -0
  33. {simba_uw_tf_dev-4.6.4.dist-info → simba_uw_tf_dev-4.6.6.dist-info}/top_level.txt +0 -0
simba/utils/read_write.py CHANGED
@@ -2460,6 +2460,13 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
     """
     Reads a batch of frames from a video file using GPU acceleration.
 
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../docs/tables/read_img_batch_from_video_gpu.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
     This function uses FFmpeg with CUDA acceleration to read frames from a specified range in a video file. It supports both RGB and greyscale video formats. Frames are returned as a dictionary where the keys are
     frame indices and the values are NumPy arrays representing the image data.
 
@@ -2468,7 +2475,7 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
     If you expect that the video you are reading in is black and white, set ``black_and_white`` to True to round any of these wonky values to 0 and 255.
 
     .. seealso::
-       For CPU multicore acceleration, see :func:`simba.mixins.image_mixin.ImageMixin.read_img_batch_from_video`
+       For CPU multicore acceleration, see :func:`simba.mixins.image_mixin.ImageMixin.read_img_batch_from_video` or :func:`simba.utils.read_write.read_img_batch_from_video`.
 
     :param video_path: Path to the video file. Can be a string or an os.PathLike object.
     :param start_frm: The starting frame index to read. If None, starts from the beginning of the video.
@@ -2479,6 +2486,7 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
     :return: A dictionary where keys are frame indices (integers) and values are NumPy arrays containing the image data of each frame.
     """
 
+    timer = SimbaTimer(start=True)
     check_file_exist_and_readable(file_path=video_path)
     video_meta_data = get_video_meta_data(video_path=video_path, fps_as_int=False)
     if start_frm is not None:
@@ -2551,6 +2559,10 @@ def read_img_batch_from_video_gpu(video_path: Union[str, os.PathLike],
             binary_frms[frm_id] = np.where(frames[frm_id] > 127, 255, 0).astype(np.uint8)
         frames = binary_frms
 
+    timer.stop_timer()
+    if verbose:
+        print(f'[{get_current_time()}] Read frames {start_frm}-{end_frm} (video: {video_name}, elapsed time: {timer.elapsed_time_str}s)')
+
     return frames
 
 
@@ -3156,7 +3168,7 @@ def _read_img_batch_from_video_helper(frm_idx: np.ndarray, video_path: Union[str
     cap.set(1, current_frm)
     while current_frm < end_frm:
         if verbose:
-            print(f'Reading frame {current_frm}/{video_meta_data["frame_count"]} ({video_meta_data["video_name"]})...')
+            print(f'[{get_current_time()}] Reading frame {current_frm} ({video_meta_data["video_name"]})...')
         img = cap.read()[1]
         if img is not None:
             if greyscale or black_and_white or clahe:
@@ -3188,6 +3200,14 @@ def read_img_batch_from_video(video_path: Union[str, os.PathLike],
     """
     Read a batch of frames from a video file. This method reads frames from a specified range of frames within a video file using multiprocessing.
 
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../docs/tables/read_img_batch_from_video.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+
     .. seealso::
        For GPU acceleration, see :func:`simba.utils.read_write.read_img_batch_from_video_gpu`
 
@@ -3209,6 +3229,8 @@ def read_img_batch_from_video(video_path: Union[str, os.PathLike],
     >>> read_img_batch_from_video(video_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/videos/Together_1.avi', start_frm=0, end_frm=50)
     """
 
+
+    timer = SimbaTimer(start=True)
     if platform.system() == "Darwin":
         if not multiprocessing.get_start_method(allow_none=True):
             multiprocessing.set_start_method("fork", force=True)
@@ -3230,19 +3252,22 @@ def read_img_batch_from_video(video_path: Union[str, os.PathLike],
     if end_frm <= start_frm:
         FrameRangeError(msg=f"Start frame ({start_frm}) has to be before end frame ({end_frm})", source=read_img_batch_from_video.__name__)
     frm_lst = np.array_split(np.arange(start_frm, end_frm + 1), core_cnt)
+    pool = multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value)
     results = {}
-    with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
-        constants = functools.partial(_read_img_batch_from_video_helper,
-                                      video_path=video_path,
-                                      greyscale=greyscale,
-                                      black_and_white=black_and_white,
-                                      clahe=clahe,
-                                      verbose=verbose)
-        for cnt, result in enumerate(pool.imap(constants, frm_lst, chunksize=1)):
-            results.update(result)
-        pool.join()
+    constants = functools.partial(_read_img_batch_from_video_helper,
+                                  video_path=video_path,
+                                  greyscale=greyscale,
+                                  black_and_white=black_and_white,
+                                  clahe=clahe,
+                                  verbose=verbose)
+    for cnt, result in enumerate(pool.imap(constants, frm_lst, chunksize=1)):
+        results.update(result)
     pool.close()
-    #terminate_cpu_pool(pool=pool, force=False)
+    pool.join()
+    pool.terminate()
+    timer.stop_timer()
+    if verbose:
+        print(f'[{get_current_time()}] Read frames {start_frm}-{end_frm} (video: {video_meta_data["video_name"]}, elapsed time: {timer.elapsed_time_str}s)')
     return results
 
 def read_yolo_bp_names_file(file_path: Union[str, os.PathLike]) -> Tuple[str]:
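For orientation, a minimal usage sketch of the two readers changed above, based only on the signatures and the docstring example visible in this diff. The video path and frame range are placeholders, and verbose is assumed to be an accepted keyword on both functions, as the newly added timing prints suggest.

# Hypothetical usage sketch; video path and frame range are placeholders.
# Only names that appear in this diff are used; ``verbose`` is assumed to be
# accepted by both readers, as the newly added timing prints suggest.
from simba.utils.read_write import (read_img_batch_from_video,
                                    read_img_batch_from_video_gpu)

VIDEO_PATH = '/path/to/video.mp4'   # placeholder

if __name__ == "__main__":
    # CPU multicore reader: returns {frame index: np.ndarray image}.
    frames_cpu = read_img_batch_from_video(video_path=VIDEO_PATH, start_frm=0, end_frm=50, verbose=True)
    # GPU (FFmpeg + CUDA) reader: same dictionary-of-frames return type.
    frames_gpu = read_img_batch_from_video_gpu(video_path=VIDEO_PATH, start_frm=0, end_frm=50, verbose=True)
    print(len(frames_cpu), frames_cpu[0].shape)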
simba/video_processors/egocentric_video_rotator.py CHANGED
@@ -9,10 +9,12 @@ import numpy as np
 from simba.utils.checks import (check_file_exist_and_readable,
                                 check_if_dir_exists, check_if_valid_rgb_tuple,
                                 check_int, check_valid_array,
-                                check_valid_boolean, check_valid_tuple)
+                                check_valid_boolean, check_valid_cpu_pool,
+                                check_valid_tuple)
 from simba.utils.data import (align_target_warpaffine_vectors,
                               center_rotation_warpaffine_vectors,
-                              egocentrically_align_pose, terminate_cpu_pool)
+                              egocentrically_align_pose, get_cpu_pool,
+                              terminate_cpu_pool)
 from simba.utils.enums import Defaults, Formats
 from simba.utils.printing import SimbaTimer, stdout_success
 from simba.utils.read_write import (concatenate_videos_in_folder,
@@ -114,7 +116,8 @@ class EgocentricVideoRotator():
                  fill_clr: Tuple[int, int, int] = (0, 0, 0),
                  core_cnt: int = -1,
                  save_path: Optional[Union[str, os.PathLike]] = None,
-                 gpu: Optional[bool] = True):
+                 gpu: Optional[bool] = True,
+                 pool: bool = None):
 
         check_file_exist_and_readable(file_path=video_path)
         self.video_meta_data = get_video_meta_data(video_path=video_path)
@@ -125,10 +128,14 @@ class EgocentricVideoRotator():
         check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
         check_if_valid_rgb_tuple(data=fill_clr)
         check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
-        if core_cnt > find_core_cnt()[0] or core_cnt == -1:
-            self.core_cnt = find_core_cnt()[0]
+        if core_cnt > find_core_cnt()[0] or core_cnt == -1: self.core_cnt = find_core_cnt()[0]
+        else: self.core_cnt = core_cnt
+        if pool is not None:
+            check_valid_cpu_pool(value=pool, source=self.__class__.__name__, max_cores=find_core_cnt()[0], min_cores=2, raise_error=True)
+            self.pool_termination_flag = True
         else:
-            self.core_cnt = core_cnt
+            self.pool_termination_flag = False
+        self.pool = get_cpu_pool(core_cnt=self.core_cnt, source=self.__class__.__name__) if pool is None else pool
         video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
         if save_path is not None:
             self.save_dir = os.path.dirname(save_path)
@@ -151,37 +158,35 @@ class EgocentricVideoRotator():
         frm_list = np.arange(0, self.video_meta_data['frame_count'])
         frm_list = np.array_split(frm_list, self.core_cnt)
         frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
-        if self.verbose:
-            print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
-        with multiprocessing.Pool(self.core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
-            constants = functools.partial(egocentric_video_aligner,
-                                          temp_dir=temp_dir,
-                                          video_name=self.video_name,
-                                          video_path=self.video_path,
-                                          centers=self.centers,
-                                          rotation_vectors=self.rotation_vectors,
-                                          target=self.anchor_loc,
-                                          verbose=self.verbose,
-                                          fill_clr=self.fill_clr,
-                                          gpu=self.gpu)
-            for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
-                if self.verbose:
-                    print(f"Rotate batch {result}/{self.core_cnt} complete...")
-            terminate_cpu_pool(pool=pool, force=False)
+        if self.verbose: print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+
+        constants = functools.partial(egocentric_video_aligner,
+                                      temp_dir=temp_dir,
+                                      video_name=self.video_name,
+                                      video_path=self.video_path,
+                                      centers=self.centers,
+                                      rotation_vectors=self.rotation_vectors,
+                                      target=self.anchor_loc,
+                                      verbose=self.verbose,
+                                      fill_clr=self.fill_clr,
+                                      gpu=self.gpu)
+        for cnt, result in enumerate(self.pool.imap(constants, frm_list, chunksize=1)):
+            if self.verbose: print(f"Rotate batch {result}/{self.core_cnt} complete...")
+        if self.pool_termination_flag: terminate_cpu_pool(pool=self.pool, force=False)
         concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
         video_timer.stop_timer()
         stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)
 
-if __name__ == "__main__":
-    DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
-    VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
-    SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
-    ANCHOR_LOC = np.array([250, 250])
-
-    df = read_df(file_path=DATA_PATH, file_type='csv')
-    bp_cols = [x for x in df.columns if not x.endswith('_p')]
-    data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
-
-    _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
-    rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
-    rotater.run()
+# if __name__ == "__main__":
+#     DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+#     VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+#     SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+#     ANCHOR_LOC = np.array([250, 250])
+#
+#     df = read_df(file_path=DATA_PATH, file_type='csv')
+#     bp_cols = [x for x in df.columns if not x.endswith('_p')]
+#     data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+#
+#     _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+#     rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
+#     rotater.run()
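The new pool argument above lets a caller hand EgocentricVideoRotator an existing CPU pool instead of having it build one from core_cnt. Below is a rough, hypothetical sketch of that call pattern using only names visible in this diff; the file paths, anchor indices, and core count are placeholders. Note that, as written in this diff, run() itself terminates a caller-supplied pool, since pool_termination_flag is set to True when pool is not None.

# Hypothetical sketch of passing an externally created CPU pool to the rotator.
# Paths and anchor indices are placeholders; only names visible in this diff are used.
import numpy as np

from simba.utils.data import egocentrically_align_pose, get_cpu_pool
from simba.utils.read_write import read_df
from simba.video_processors.egocentric_video_rotator import EgocentricVideoRotator

DATA_PATH = '/path/to/pose_estimation.csv'    # placeholder
VIDEO_PATH = '/path/to/video.mp4'             # placeholder
SAVE_PATH = '/path/to/video_rotated.mp4'      # placeholder

if __name__ == "__main__":
    df = read_df(file_path=DATA_PATH, file_type='csv')
    bp_cols = [x for x in df.columns if not x.endswith('_p')]
    data = df[bp_cols].values.reshape(len(df), int(len(bp_cols) / 2), 2).astype(np.int32)
    _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=np.array([250, 250]), direction=0)
    pool = get_cpu_pool(core_cnt=8, source='egocentric_rotation_example')   # caller-created pool
    rotator = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(250, 250), save_path=SAVE_PATH, verbose=True, pool=pool)
    rotator.run()   # per this diff, run() terminates the supplied pool (pool_termination_flag=True)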