sports2d 0.5.5.tar.gz → 0.5.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. {sports2d-0.5.5 → sports2d-0.5.6}/PKG-INFO +3 -2
  2. {sports2d-0.5.5 → sports2d-0.5.6}/README.md +2 -1
  3. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Demo/Config_demo.toml +1 -1
  4. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/process.py +19 -16
  5. {sports2d-0.5.5 → sports2d-0.5.6}/setup.cfg +1 -1
  6. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/PKG-INFO +3 -2
  7. {sports2d-0.5.5 → sports2d-0.5.6}/LICENSE +0 -0
  8. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Demo/demo.mp4 +0 -0
  9. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Sports2D.py +0 -0
  10. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Utilities/__init__.py +0 -0
  11. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Utilities/common.py +0 -0
  12. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Utilities/filter.py +0 -0
  13. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Utilities/skeletons.py +0 -0
  14. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/Utilities/tests.py +0 -0
  15. {sports2d-0.5.5 → sports2d-0.5.6}/Sports2D/__init__.py +0 -0
  16. {sports2d-0.5.5 → sports2d-0.5.6}/pyproject.toml +0 -0
  17. {sports2d-0.5.5 → sports2d-0.5.6}/setup.py +0 -0
  18. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/SOURCES.txt +0 -0
  19. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/dependency_links.txt +0 -0
  20. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/entry_points.txt +0 -0
  21. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/not-zip-safe +0 -0
  22. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/requires.txt +0 -0
  23. {sports2d-0.5.5 → sports2d-0.5.6}/sports2d.egg-info/top_level.txt +0 -0
--- sports2d-0.5.5/PKG-INFO
+++ sports2d-0.5.6/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sports2d
- Version: 0.5.5
+ Version: 0.5.6
  Summary: Detect pose and compute 2D joint angles from a video.
  Home-page: https://github.com/davidpagnon/Sports2D
  Author: David Pagnon
@@ -233,9 +233,10 @@ Note that it does not take distortions into account, and that it will be less ac
  ### Too slow for you?

  **Quick fixes:**
- - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
+ - Use ` --save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
  - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.
  - Use `--det_frequency 50`: Will detect poses only every 50 frames, and track keypoints in between, which is faster.
+ - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
  - Use `--load_trc <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.

  <br>
--- sports2d-0.5.5/README.md
+++ sports2d-0.5.6/README.md
@@ -193,9 +193,10 @@ Note that it does not take distortions into account, and that it will be less ac
  ### Too slow for you?

  **Quick fixes:**
- - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
+ - Use ` --save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
  - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.
  - Use `--det_frequency 50`: Will detect poses only every 50 frames, and track keypoints in between, which is faster.
+ - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
  - Use `--load_trc <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.

  <br>
--- sports2d-0.5.5/Sports2D/Demo/Config_demo.toml
+++ sports2d-0.5.6/Sports2D/Demo/Config_demo.toml
@@ -21,7 +21,7 @@ compare = false # Not implemented yet

  # Video parameters
  time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
- video_dir = '' # If empty, result dir is current dir
+ video_dir = '' # If empty, video dir is current dir

  # Webcam parameters
  webcam_id = 0 # your webcam id (0 is default)
--- sports2d-0.5.5/Sports2D/process.py
+++ sports2d-0.5.6/Sports2D/process.py
@@ -1118,9 +1118,8 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
      - xy_origin: list. The origin of the floor line
      '''

-
      # Remove frames where the person is mostly not moving (outlier)
-     av_speeds = np.nanmean([np.linalg.norm(trc_data[kpt].diff(), axis=1) for kpt in trc_data.columns.unique()[1:]], axis=0)
+     av_speeds = np.nanmean([np.insert(np.linalg.norm(trc_data[kpt].diff(), axis=1)[1:],0,0) for kpt in trc_data.columns.unique()[1:]], axis=0)
      trc_data = trc_data[av_speeds>tot_speed_above]

      # Retrieve zero-speed coordinates for the foot
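A note on the `av_speeds` change above: `DataFrame.diff()` has no previous frame for row 0, so the old expression produced a NaN speed there, which then fed NaN into `np.nanmean` and the `av_speeds>tot_speed_above` comparison. The new expression drops that leading NaN and re-inserts an explicit 0. A minimal sketch with made-up keypoint coordinates (not Sports2D's TRC data):

```python
import numpy as np
import pandas as pd

# Hypothetical x/y trajectory (in px) for one keypoint, one row per frame
kpt = pd.DataFrame({'x': [100.0, 102.0, 105.0, 105.0],
                    'y': [50.0, 50.5, 51.0, 51.0]})

# 0.5.5: row 0 of diff() is NaN, so the first speed is NaN
speed_old = np.linalg.norm(kpt.diff(), axis=1)                        # [nan, 2.06, 3.04, 0.]

# 0.5.6: drop the leading NaN and insert an explicit 0 px/frame at frame 0
speed_new = np.insert(np.linalg.norm(kpt.diff(), axis=1)[1:], 0, 0)   # [0., 2.06, 3.04, 0.]

print(speed_old, speed_new)
```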
@@ -1421,7 +1420,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      gaussian_filter_kernel = config_dict.get('post-processing').get('gaussian').get('sigma_kernel')
      loess_filter_kernel = config_dict.get('post-processing').get('loess').get('nb_values_used')
      median_filter_kernel = config_dict.get('post-processing').get('median').get('kernel_size')
-     butterworth_filter_cutoff /= slowmo_factor
      filter_options = [do_filter, filter_type,
                        butterworth_filter_order, butterworth_filter_cutoff, frame_rate,
                        gaussian_filter_kernel, loess_filter_kernel, median_filter_kernel]
@@ -1458,6 +1456,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          logging.warning('Webcam input: the framerate may vary. If results are filtered, Sports2D will use the average framerate as input.')
      else:
          cap, out_vid, cam_width, cam_height, fps = setup_video(video_file_path, save_vid, vid_output_path)
+         fps *= slowmo_factor
          start_time = get_start_time_ffmpeg(video_file_path)
          frame_range = [int((time_range[0]-start_time) * frame_rate), int((time_range[1]-start_time) * frame_rate)] if time_range else [0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))]
          frame_iterator = tqdm(range(*frame_range)) # use a progress bar
@@ -1471,7 +1470,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      if load_trc:
          if not '_px' in str(load_trc):
              logging.error(f'\n{load_trc} file needs to be in px, not in meters.')
-         logging.info(f'\nUsing a pose file instead of running pose tracking {load_trc}.')
+         logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc}.')
          # Load pose file in px
          Q_coords, _, _, keypoints_names, _ = read_trc(load_trc)
          keypoints_ids = [i for i in range(len(keypoints_names))]
@@ -1644,7 +1643,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      all_frames_scores = make_homogeneous(all_frames_scores)

      frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
-     all_frames_time = pd.Series(np.linspace(frame_range[0]/fps/slowmo_factor, frame_range[1]/fps/slowmo_factor, frame_count+1), name='time')
+     all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1), name='time')
      if not multiperson:
          calib_on_person_id = get_personID_with_highest_scores(all_frames_scores)
          detected_persons = [calib_on_person_id]
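The `all_frames_time` edit is one half of a slow-motion refactor: rather than dividing by `slowmo_factor` at every place a time or frequency is derived from `fps` (as 0.5.5 did here, for the Butterworth cut-off, and for the toe-speed threshold), 0.5.6 multiplies `fps` by `slowmo_factor` once after `setup_video` and then uses the scaled value directly. A sketch with illustrative numbers only, showing the timestamps come out identical:

```python
import numpy as np

fps_read = 30          # playback framerate reported by the video container
slowmo_factor = 8      # e.g. captured at 240 fps, saved as a 30 fps slow-motion clip
frame_range = [0, 240]
frame_count = 240

# 0.5.5: divide by slowmo_factor at the point of use
t_old = np.linspace(frame_range[0]/fps_read/slowmo_factor,
                    frame_range[1]/fps_read/slowmo_factor, frame_count+1)

# 0.5.6: fps *= slowmo_factor once, then use fps everywhere
fps = fps_read * slowmo_factor
t_new = np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1)

assert np.allclose(t_old, t_new)   # both span 0..1 s of real (capture-time) motion
```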
@@ -1698,15 +1697,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          else:
              filter_type = filter_options[1]
              if filter_type == 'butterworth':
+                 cutoff = filter_options[3]
                  if video_file == 'webcam':
-                     cutoff = filter_options[3]
                      if cutoff / (fps / 2) >= 1:
                          cutoff_old = cutoff
                          cutoff = fps/(2+0.001)
                          args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
                          filter_options[3] = cutoff
-                 else:
-                     args = ''
                  args = f'Butterworth filter, {filter_options[2]}th order, {filter_options[3]} Hz.'
                  filter_options[4] = fps
              if filter_type == 'gaussian':
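In the Butterworth branch above, `cutoff = filter_options[3]` is hoisted out of the webcam-only block and the `else: args = ''` (immediately overwritten on the next line) is dropped. The webcam guard itself is about filter validity: a low-pass cut-off at or above the Nyquist frequency (`fps / 2`) cannot be designed, so the cut-off is clamped just below it. A rough, standalone sketch of that guard, assuming a SciPy Butterworth design rather than Sports2D's own filtering code:

```python
import numpy as np
from scipy import signal

def clamped_butter(order, cutoff_hz, fps):
    """Clamp the cut-off below Nyquist before designing the filter (sketch of the guard above)."""
    if cutoff_hz / (fps / 2) >= 1:
        cutoff_hz = fps / (2 + 0.001)            # same clamp value as in process.py
    return signal.butter(order, cutoff_hz / (fps / 2), 'low')

# Hypothetical webcam running at 15 fps with a 10 Hz cut-off requested
b, a = clamped_butter(4, 10, 15)                          # silently clamped to ~7.5 Hz
smoothed = signal.filtfilt(b, a, np.random.randn(100))    # zero-phase filtering of a dummy signal
```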
@@ -1743,6 +1740,9 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

      else:
          # Compute calibration parameters
+         if not multiperson:
+             selected_person_id = calib_on_person_id
+             calib_on_person_id = 0
          height_px = compute_height(trc_data[calib_on_person_id].iloc[:,1:], keypoints_names,
                                     fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_px, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)

@@ -1750,7 +1750,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          # estimated from the line formed by the toes when they are on the ground (where speed = 0)
          toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
          px_per_m = height_px/person_height_m
-         toe_speed_below_px_frame = toe_speed_below * px_per_m / (fps*slowmo_factor)
+         toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
          floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
          if not floor_angle == 'auto':
              floor_angle_estim = floor_angle
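With `fps` already scaled by `slowmo_factor`, the stationary-toe threshold needs only one division to go from m/s to px/frame. A worked example with assumed numbers (not taken from the demo video):

```python
person_height_m = 1.76     # declared participant height
height_px = 880            # person height estimated in the image
fps = 60                   # capture framerate (already includes slowmo_factor)

px_per_m = height_px / person_height_m                         # 500 px per metre
toe_speed_below = 1                                            # m/s, as in process.py
toe_speed_below_px_frame = toe_speed_below * px_per_m / fps    # 500 px/s / 60 ≈ 8.3 px per frame
print(toe_speed_below_px_frame)
```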
@@ -1774,9 +1774,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i)

          # Write to trc file
-         pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{i:02d}.trc'))
+         idx_path = selected_person_id if not multiperson and not calib_file else i
+         pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
          make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i)
-         logging.info(f'Person {i}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')
+         logging.info(f'Person {idx_path}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')



@@ -1832,7 +1833,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      all_frames_angles = make_homogeneous(all_frames_angles)

      # unwrap angles
-     all_frames_angles = np.unwrap(all_frames_angles, axis=0, period=180)
+     # all_frames_angles = np.unwrap(all_frames_angles, axis=0, period=180) # This give all nan values -> need to mask nans
+     for i in range(all_frames_angles.shape[1]): # for each person
+         for j in range(all_frames_angles.shape[2]): # for each angle
+             valid_mask = ~np.isnan(all_frames_angles[:, i, j])
+             all_frames_angles[valid_mask, i, j] = np.unwrap(all_frames_angles[valid_mask, i, j], period=180)

      # Process angles for each person
      for i in detected_persons:
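The loop above replaces the single `np.unwrap` call because unwrapping is cumulative: one NaN in a column poisons every later sample, and angle series routinely contain NaN frames where a keypoint was missed. Unwrapping only the valid samples of each person/angle series avoids that. A minimal sketch (full-circle 360° period here for readability; the code above uses `period=180`):

```python
import numpy as np

# One angle series in degrees with a missing frame (keypoint briefly undetected)
angles = np.array([170.0, 175.0, np.nan, -178.0, -173.0])

naive = np.unwrap(angles, period=360)        # NaN propagates to every frame from index 2 on

masked = angles.copy()
valid = ~np.isnan(masked)
masked[valid] = np.unwrap(masked[valid], period=360)   # [170., 175., nan, 182., 187.]

print(naive)
print(masked)
```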
@@ -1864,16 +1869,14 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          else:
              filter_type = filter_options[1]
              if filter_type == 'butterworth':
+                 cutoff = filter_options[3]
                  if video_file == 'webcam':
-                     cutoff = filter_options[3]
                      if cutoff / (fps / 2) >= 1:
                          cutoff_old = cutoff
                          cutoff = fps/(2+0.001)
                          args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
                          filter_options[3] = cutoff
-                 else:
-                     args = ''
-                 args = f'Butterworth filter, {filter_options[2]}th order, {filter_options[3]} Hz. ' + args
+                 args = f'Butterworth filter, {filter_options[2]}th order, {filter_options[3]} Hz.'
                  filter_options[4] = fps
              if filter_type == 'gaussian':
                  args = f'Gaussian filter, Sigma kernel {filter_options[5]}.'
--- sports2d-0.5.5/setup.cfg
+++ sports2d-0.5.6/setup.cfg
@@ -1,6 +1,6 @@
  [metadata]
  name = sports2d
- version = 0.5.5
+ version = 0.5.6
  author = David Pagnon
  author_email = contact@david-pagnon.com
  description = Detect pose and compute 2D joint angles from a video.
--- sports2d-0.5.5/sports2d.egg-info/PKG-INFO
+++ sports2d-0.5.6/sports2d.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sports2d
- Version: 0.5.5
+ Version: 0.5.6
  Summary: Detect pose and compute 2D joint angles from a video.
  Home-page: https://github.com/davidpagnon/Sports2D
  Author: David Pagnon
@@ -233,9 +233,10 @@ Note that it does not take distortions into account, and that it will be less ac
  ### Too slow for you?

  **Quick fixes:**
- - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
+ - Use ` --save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
  - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.
  - Use `--det_frequency 50`: Will detect poses only every 50 frames, and track keypoints in between, which is faster.
+ - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
  - Use `--load_trc <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.

  <br>