simba-uw-tf-dev 4.6.2__py3-none-any.whl → 4.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. simba/assets/.recent_projects.txt +1 -0
  2. simba/assets/lookups/tooptips.json +6 -1
  3. simba/data_processors/agg_clf_counter_mp.py +52 -53
  4. simba/data_processors/blob_location_computer.py +1 -1
  5. simba/data_processors/circling_detector.py +30 -13
  6. simba/data_processors/cuda/geometry.py +45 -27
  7. simba/data_processors/cuda/image.py +1648 -1598
  8. simba/data_processors/cuda/statistics.py +72 -26
  9. simba/data_processors/cuda/timeseries.py +1 -1
  10. simba/data_processors/cue_light_analyzer.py +5 -9
  11. simba/data_processors/egocentric_aligner.py +25 -7
  12. simba/data_processors/freezing_detector.py +55 -47
  13. simba/data_processors/kleinberg_calculator.py +61 -29
  14. simba/feature_extractors/feature_subsets.py +14 -7
  15. simba/feature_extractors/mitra_feature_extractor.py +2 -2
  16. simba/feature_extractors/straub_tail_analyzer.py +4 -6
  17. simba/labelling/standard_labeller.py +1 -1
  18. simba/mixins/config_reader.py +5 -2
  19. simba/mixins/geometry_mixin.py +22 -36
  20. simba/mixins/image_mixin.py +24 -28
  21. simba/mixins/plotting_mixin.py +28 -10
  22. simba/mixins/statistics_mixin.py +48 -11
  23. simba/mixins/timeseries_features_mixin.py +1 -1
  24. simba/mixins/train_model_mixin.py +67 -29
  25. simba/model/inference_batch.py +1 -1
  26. simba/model/yolo_seg_inference.py +3 -3
  27. simba/outlier_tools/skip_outlier_correction.py +1 -1
  28. simba/plotting/ROI_feature_visualizer_mp.py +3 -5
  29. simba/plotting/clf_validator_mp.py +4 -5
  30. simba/plotting/cue_light_visualizer.py +6 -7
  31. simba/plotting/directing_animals_visualizer_mp.py +2 -3
  32. simba/plotting/distance_plotter_mp.py +378 -378
  33. simba/plotting/gantt_creator.py +29 -10
  34. simba/plotting/gantt_creator_mp.py +96 -33
  35. simba/plotting/geometry_plotter.py +270 -272
  36. simba/plotting/heat_mapper_clf_mp.py +4 -6
  37. simba/plotting/heat_mapper_location_mp.py +2 -2
  38. simba/plotting/light_dark_box_plotter.py +2 -2
  39. simba/plotting/path_plotter_mp.py +26 -29
  40. simba/plotting/plot_clf_results_mp.py +455 -454
  41. simba/plotting/pose_plotter_mp.py +28 -29
  42. simba/plotting/probability_plot_creator_mp.py +288 -288
  43. simba/plotting/roi_plotter_mp.py +31 -31
  44. simba/plotting/single_run_model_validation_video_mp.py +427 -427
  45. simba/plotting/spontaneous_alternation_plotter.py +2 -3
  46. simba/plotting/yolo_pose_track_visualizer.py +32 -27
  47. simba/plotting/yolo_pose_visualizer.py +35 -36
  48. simba/plotting/yolo_seg_visualizer.py +2 -3
  49. simba/pose_importers/simba_blob_importer.py +3 -3
  50. simba/roi_tools/roi_aggregate_stats_mp.py +5 -4
  51. simba/roi_tools/roi_clf_calculator_mp.py +4 -4
  52. simba/sandbox/analyze_runtimes.py +30 -0
  53. simba/sandbox/cuda/egocentric_rotator.py +374 -374
  54. simba/sandbox/get_cpu_pool.py +5 -0
  55. simba/sandbox/proboscis_to_tip.py +28 -0
  56. simba/sandbox/test_directionality.py +47 -0
  57. simba/sandbox/test_nonstatic_directionality.py +27 -0
  58. simba/sandbox/test_pycharm_cuda.py +51 -0
  59. simba/sandbox/test_simba_install.py +41 -0
  60. simba/sandbox/test_static_directionality.py +26 -0
  61. simba/sandbox/test_static_directionality_2d.py +26 -0
  62. simba/sandbox/verify_env.py +42 -0
  63. simba/third_party_label_appenders/transform/coco_keypoints_to_yolo.py +3 -3
  64. simba/third_party_label_appenders/transform/coco_keypoints_to_yolo_bbox.py +2 -2
  65. simba/ui/pop_ups/clf_plot_pop_up.py +2 -2
  66. simba/ui/pop_ups/fsttc_pop_up.py +27 -25
  67. simba/ui/pop_ups/gantt_pop_up.py +31 -6
  68. simba/ui/pop_ups/kleinberg_pop_up.py +39 -40
  69. simba/ui/pop_ups/video_processing_pop_up.py +37 -29
  70. simba/ui/tkinter_functions.py +3 -0
  71. simba/utils/custom_feature_extractor.py +1 -1
  72. simba/utils/data.py +90 -14
  73. simba/utils/enums.py +1 -0
  74. simba/utils/errors.py +441 -440
  75. simba/utils/lookups.py +1203 -1203
  76. simba/utils/printing.py +124 -124
  77. simba/utils/read_write.py +3769 -3721
  78. simba/utils/yolo.py +10 -1
  79. simba/video_processors/blob_tracking_executor.py +2 -2
  80. simba/video_processors/clahe_ui.py +1 -1
  81. simba/video_processors/egocentric_video_rotator.py +44 -41
  82. simba/video_processors/multi_cropper.py +1 -1
  83. simba/video_processors/video_processing.py +5264 -5222
  84. simba/video_processors/videos_to_frames.py +43 -33
  85. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/METADATA +4 -3
  86. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/RECORD +90 -80
  87. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/LICENSE +0 -0
  88. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/WHEEL +0 -0
  89. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/entry_points.txt +0 -0
  90. {simba_uw_tf_dev-4.6.2.dist-info → simba_uw_tf_dev-4.7.1.dist-info}/top_level.txt +0 -0
simba/utils/yolo.py CHANGED
@@ -47,6 +47,9 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
  `Download initial weights <https://huggingface.co/Ultralytics>`__.
  `Example model_yaml <https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml>`__.

+ .. seealso::
+ For the recommended wrapper class with parameter validation, see :class:`simba.model.yolo_fit.FitYolo`.
+
  :param initial_weights: Path to the pre-trained YOLO model weights (usually a `.pt` file). Example weights can be found [here](https://huggingface.co/Ultralytics).
  :param model_yaml: YAML file containing paths to the training, validation, and testing datasets and the object class mappings. Example YAML file can be found [here](https://github.com/sgoldenlab/simba/blob/master/misc/ex_yolo_model.yaml).
  :param save_path: Directory path where the trained model, logs, and results will be saved.
@@ -55,7 +58,7 @@ def fit_yolo(weights_path: Union[str, os.PathLike],
  :return: None. The trained model and associated training logs are saved in the specified `project_path`.

  :example:
- >>> fit_yolo(initial_weights=r"C:\troubleshooting\coco_data\weights\yolov8n-obb.pt", data=r"C:\troubleshooting\coco_data\model.yaml", save_path=r"C:\troubleshooting\coco_data\mdl", batch=16)
+ >>> fit_yolo(initial_weights=r"C:/troubleshooting/coco_data/weights/yolov8n-obb.pt", data=r"C:/troubleshooting/coco_data/model.yaml", save_path=r"C:/troubleshooting/coco_data/mdl", batch=16)
  """

  if not _is_cuda_available()[0]:
@@ -83,6 +86,9 @@ def load_yolo_model(weights_path: Union[str, os.PathLike],
  """
  Load a YOLO model.

+ .. seealso::
+ For recommended wrapper classes that use this function, see :class:`simba.model.yolo_fit.FitYolo`, :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`, and :class:`simba.model.yolo_pose_track_inference.YOLOPoseTrackInference`.
+
  :param Union[str, os.PathLike] weights_path: Path to model weights (.pt, .engine, etc).
  :param bool verbose: Whether to print loading info.
  :param Optional[str] format: Export format, one of VALID_FORMATS or None to skip export.
@@ -169,6 +175,9 @@ def yolo_predict(model: YOLO,
  """
  Produce YOLO predictions.

+ .. seealso::
+ For recommended wrapper classes that use this function, see :class:`simba.model.yolo_inference.YoloInference`, :class:`simba.model.yolo_pose_inference.YOLOPoseInference`, and :class:`simba.model.yolo_seg_inference.YOLOSegmentationInference`.
+
  :param Union[str, os.PathLike] model: Loaded ultralytics.YOLO model. Returned by :func:`~simba.bounding_box_tools.yolo.model.load_yolo_model`.
  :param Union[str, os.PathLike, np.ndarray] source: Path to video, video stream, directory, image, or image as loaded array.
  :param bool half: Whether to use half precision (FP16) for inference to speed up processing.
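
The three functions touched in this file form the low-level YOLO path (train, load, predict). As orientation only — the hunks show docstring fragments, not full signatures — a chained call might look roughly like the sketch below. The keyword names are copied verbatim from the parameter descriptions above, the paths are placeholders, and additional parameters likely exist; the wrapper classes named in the new `.. seealso::` notes remain the recommended entry points.

# Hedged sketch; keyword names copied from the docstring fragments in this diff
# (weights_path/verbose/format, then model/source/half). Paths are placeholders.
from simba.utils.yolo import load_yolo_model, yolo_predict

model = load_yolo_model(weights_path="C:/coco_data/mdl/train/weights/best.pt",
                        verbose=True,
                        format=None)          # None skips export, per the param description
yolo_predict(model=model,
             source="C:/coco_data/videos/video_1.mp4",   # video, directory, image, or loaded array
             half=True)                       # FP16 inference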
simba/video_processors/blob_tracking_executor.py CHANGED
@@ -87,9 +87,9 @@ class BlobTrackingExecutor():
  :param bool center: If True, compute center coordinates. Default: True.

  :example:
- >>> tracker = BlobTrackingExecutor(data=r"C:\troubleshooting\mitra\test\.temp\blob_definitions.pickle")
+ >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle")
  >>> tracker.run()
- >>> tracker = BlobTrackingExecutor(data=r"C:\troubleshooting\mitra\test\.temp\blob_definitions.pickle", batch_size=5000)
+ >>> tracker = BlobTrackingExecutor(data=r"C:/troubleshooting/mitra/test/.temp/blob_definitions.pickle", batch_size=5000)
  >>> tracker.run()
  """

simba/video_processors/clahe_ui.py CHANGED
@@ -29,7 +29,7 @@ def interactive_clahe_ui(data: Union[str, os.PathLike]) -> Tuple[float, int]:
  :return Tuple[float, int]: Tuple containing the chosen clip limit and tile size.

  :example:
- >>> video = cv2.imread(r"D:\EPM\sample_2\video_1.mp4")
+ >>> video = cv2.imread(r"D:/EPM/sample_2/video_1.mp4")
  >>> interactive_clahe_ui(data=video)
  """
  global original_img, font_size, x_spacer, y_spacer, txt
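
Note that the shipped example (unchanged here apart from the slashes) builds an array with `cv2.imread` and passes that, while the hunk header annotates `data` as `Union[str, os.PathLike]`. If the annotation is taken at face value, a path-based call would be the safer pattern — a hypothetical sketch, not verified against the implementation:

# Hypothetical call matching the type annotation in the hunk header; the
# in-file docstring passes a cv2.imread array instead, so check the
# implementation before relying on either form.
from simba.video_processors.clahe_ui import interactive_clahe_ui

clip_limit, tile_size = interactive_clahe_ui(data="D:/EPM/sample_2/video_1.mp4")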
simba/video_processors/egocentric_video_rotator.py CHANGED
@@ -9,10 +9,12 @@ import numpy as np
  from simba.utils.checks import (check_file_exist_and_readable,
  check_if_dir_exists, check_if_valid_rgb_tuple,
  check_int, check_valid_array,
- check_valid_boolean, check_valid_tuple)
+ check_valid_boolean, check_valid_cpu_pool,
+ check_valid_tuple)
  from simba.utils.data import (align_target_warpaffine_vectors,
  center_rotation_warpaffine_vectors,
- egocentrically_align_pose)
+ egocentrically_align_pose, get_cpu_pool,
+ terminate_cpu_pool)
  from simba.utils.enums import Defaults, Formats
  from simba.utils.printing import SimbaTimer, stdout_success
  from simba.utils.read_write import (concatenate_videos_in_folder,
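
check_valid_cpu_pool, get_cpu_pool, and terminate_cpu_pool are the new shared-pool plumbing in 4.7.1 (see also simba/sandbox/get_cpu_pool.py in the file list). Their signatures are not shown beyond how this file calls them, so the following standalone sketch is an assumption: get_cpu_pool is presumed to return a multiprocessing-style pool, since run() further down calls .imap() on it.

# Assumed standalone usage of the new pool helpers; keyword arguments mirror
# the calls visible later in this file (core_cnt/source and pool/force).
from simba.utils.data import get_cpu_pool, terminate_cpu_pool

pool = get_cpu_pool(core_cnt=4, source='example')
try:
    results = list(pool.imap(abs, [-1, -2, -3]))   # any picklable work
finally:
    terminate_cpu_pool(pool=pool, force=False)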
@@ -92,9 +94,9 @@ class EgocentricVideoRotator():
  :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video as the same dir as the input video with the `_rotated` suffix.

  :example:
- >>> DATA_PATH = "C:\501_MA142_Gi_Saline_0513.csv"
- >>> VIDEO_PATH = "C:\501_MA142_Gi_Saline_0513.mp4"
- >>> SAVE_PATH = "C:\501_MA142_Gi_Saline_0513_rotated.mp4"
+ >>> DATA_PATH = "C:/501_MA142_Gi_Saline_0513.csv"
+ >>> VIDEO_PATH = "C:/501_MA142_Gi_Saline_0513.mp4"
+ >>> SAVE_PATH = "C:/501_MA142_Gi_Saline_0513_rotated.mp4"
  >>> ANCHOR_LOC = np.array([250, 250])

  >>> df = read_df(file_path=DATA_PATH, file_type='csv')
@@ -114,7 +116,8 @@ class EgocentricVideoRotator():
  fill_clr: Tuple[int, int, int] = (0, 0, 0),
  core_cnt: int = -1,
  save_path: Optional[Union[str, os.PathLike]] = None,
- gpu: Optional[bool] = True):
+ gpu: Optional[bool] = True,
+ pool: bool = None):

  check_file_exist_and_readable(file_path=video_path)
  self.video_meta_data = get_video_meta_data(video_path=video_path)
@@ -125,10 +128,14 @@ class EgocentricVideoRotator():
  check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
  check_if_valid_rgb_tuple(data=fill_clr)
  check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
- if core_cnt > find_core_cnt()[0] or core_cnt == -1:
- self.core_cnt = find_core_cnt()[0]
+ if core_cnt > find_core_cnt()[0] or core_cnt == -1: self.core_cnt = find_core_cnt()[0]
+ else: self.core_cnt = core_cnt
+ if pool is not None:
+ check_valid_cpu_pool(value=pool, source=self.__class__.__name__, max_cores=find_core_cnt()[0], min_cores=2, raise_error=True)
+ self.pool_termination_flag = True
  else:
- self.core_cnt = core_cnt
+ self.pool_termination_flag = False
+ self.pool = get_cpu_pool(core_cnt=self.core_cnt, source=self.__class__.__name__) if pool is None else pool
  video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
  if save_path is not None:
  self.save_dir = os.path.dirname(save_path)
@@ -151,39 +158,35 @@ class EgocentricVideoRotator():
  frm_list = np.arange(0, self.video_meta_data['frame_count'])
  frm_list = np.array_split(frm_list, self.core_cnt)
  frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
- if self.verbose:
- print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
- with multiprocessing.Pool(self.core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
- constants = functools.partial(egocentric_video_aligner,
- temp_dir=temp_dir,
- video_name=self.video_name,
- video_path=self.video_path,
- centers=self.centers,
- rotation_vectors=self.rotation_vectors,
- target=self.anchor_loc,
- verbose=self.verbose,
- fill_clr=self.fill_clr,
- gpu=self.gpu)
- for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
- if self.verbose:
- print(f"Rotate batch {result}/{self.core_cnt} complete...")
- pool.terminate()
- pool.join()
-
+ if self.verbose: print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+
+ constants = functools.partial(egocentric_video_aligner,
+ temp_dir=temp_dir,
+ video_name=self.video_name,
+ video_path=self.video_path,
+ centers=self.centers,
+ rotation_vectors=self.rotation_vectors,
+ target=self.anchor_loc,
+ verbose=self.verbose,
+ fill_clr=self.fill_clr,
+ gpu=self.gpu)
+ for cnt, result in enumerate(self.pool.imap(constants, frm_list, chunksize=1)):
+ if self.verbose: print(f"Rotate batch {result}/{self.core_cnt} complete...")
+ if self.pool_termination_flag: terminate_cpu_pool(pool=self.pool, force=False)
  concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=self.gpu, verbose=self.verbose)
  video_timer.stop_timer()
  stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)

- if __name__ == "__main__":
- DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
- VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
- SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
- ANCHOR_LOC = np.array([250, 250])
-
- df = read_df(file_path=DATA_PATH, file_type='csv')
- bp_cols = [x for x in df.columns if not x.endswith('_p')]
- data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
-
- _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
- rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
- rotater.run()
+ # if __name__ == "__main__":
+ # DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+ # VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+ # SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+ # ANCHOR_LOC = np.array([250, 250])
+ #
+ # df = read_df(file_path=DATA_PATH, file_type='csv')
+ # bp_cols = [x for x in df.columns if not x.endswith('_p')]
+ # data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+ #
+ # _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+ # rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(400, 100), save_path=SAVE_PATH, verbose=True, core_cnt=16)
+ # rotater.run()
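
The removed __main__ smoke test survives above only as comments; with the new `pool` argument, a batch-style call might look like the sketch below. Everything except the names that appear in this diff (read_df, egocentrically_align_pose, get_cpu_pool, EgocentricVideoRotator and their keyword arguments) is a placeholder, and whether run() terminates a supplied pool is governed by pool_termination_flag in the constructor hunk above, so verify that before reusing one pool across several rotators.

# Hypothetical workflow for the new `pool` argument; paths and keypoint
# indices are placeholders, keyword names are copied from this diff.
import numpy as np

from simba.utils.data import egocentrically_align_pose, get_cpu_pool
from simba.utils.read_write import read_df
from simba.video_processors.egocentric_video_rotator import EgocentricVideoRotator

DATA_PATH = "C:/rotate_ex/data/video_1.csv"
VIDEO_PATH = "C:/rotate_ex/videos/video_1.mp4"
SAVE_PATH = "C:/rotate_ex/videos/video_1_rotated.mp4"
ANCHOR_LOC = np.array([250, 250])

df = read_df(file_path=DATA_PATH, file_type='csv')
bp_cols = [x for x in df.columns if not x.endswith('_p')]
data = df[bp_cols].values.reshape(len(df), int(len(bp_cols) / 2), 2).astype(np.int32)
_, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)

pool = get_cpu_pool(core_cnt=8, source='rotation_example')          # pool created up front...
rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors,
                                 anchor_location=(250, 250), save_path=SAVE_PATH, verbose=True, pool=pool)
rotater.run()                                                       # ...and handed to the rotator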
simba/video_processors/multi_cropper.py CHANGED
@@ -49,7 +49,7 @@ class MultiCropper(object):


  :example:
- >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C:\troubleshooting\mitra\test', output_folder=r'C:\troubleshooting\mitra\test\cropped', crop_cnt=2, gpu=True)
+ >>> cropper = MultiCropper(file_type='mp4', input_folder=r'C:/troubleshooting/mitra/test', output_folder=r'C:/troubleshooting/mitra/test/cropped', crop_cnt=2, gpu=True)
  >>> cropper.run()
  """