sports2d 0.8.0.tar.gz → 0.8.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sports2d
- Version: 0.8.0
+ Version: 0.8.1
  Summary: Compute 2D human pose and angles from a video or a webcam.
  Author-email: David Pagnon <contact@david-pagnon.com>
  License: BSD 3-Clause License
@@ -105,7 +105,7 @@ Dynamic: license-file
  > - Better visualization output
  > - More flexible, easier to run
  >
- > Run `pip install sports2d -U` to get the latest version.
+ > Run `pip install sports2d pose2sim -U` to get the latest version.

  ***N.B.:*** As always, I am more than happy to welcome contributions (see [How to contribute](#how-to-contribute-and-to-do-list))!
  <!--User-friendly Colab version released! (and latest issues fixed, too)\
@@ -208,7 +208,7 @@ pip install .

  - **Install Sports2D with Pose2Sim**:
  ``` cmd
- pip install sports2d pose2sim
+ pip install sports2d
  ```


@@ -33,7 +33,7 @@
  > - Better visualization output
  > - More flexible, easier to run
  >
- > Run `pip install sports2d -U` to get the latest version.
+ > Run `pip install sports2d pose2sim -U` to get the latest version.

  ***N.B.:*** As always, I am more than happy to welcome contributions (see [How to contribute](#how-to-contribute-and-to-do-list))!
  <!--User-friendly Colab version released! (and latest issues fixed, too)\
@@ -136,7 +136,7 @@ pip install .

  - **Install Sports2D with Pose2Sim**:
  ``` cmd
- pip install sports2d pose2sim
+ pip install sports2d
  ```


@@ -642,7 +642,7 @@ def make_trc_with_trc_data(trc_data, trc_path, fps=30):

  INPUTS:
  - trc_data: pd.DataFrame. The time and coordinates of the keypoints.
- The column names must be 't', 'kpt1', 'kpt1', 'kpt1', 'kpt2', 'kpt2', 'kpt2', ...
+ The column names must be 'time', 'kpt1', 'kpt1', 'kpt1', 'kpt2', 'kpt2', 'kpt2', ...
  - trc_path: Path. The path to the TRC file to save
  - fps: float. The framerate of the video

@@ -1349,10 +1349,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  trimmed_extrema_percent = config_dict.get('kinematics').get('trimmed_extrema_percent')
  close_to_zero_speed_px = config_dict.get('kinematics').get('close_to_zero_speed_px')
  close_to_zero_speed_m = config_dict.get('kinematics').get('close_to_zero_speed_m')
- if do_ik:
+ if do_ik or use_augmentation:
  if use_augmentation:
  from Pose2Sim.markerAugmentation import augment_markers_all
- from Pose2Sim.kinematics import kinematics_all
+ if do_ik:
+ from Pose2Sim.kinematics import kinematics_all
  # Create a Pose2Sim dictionary and fill in missing keys
  recursivedict = lambda: defaultdict(recursivedict)
  Pose2Sim_config_dict = recursivedict()
@@ -1494,7 +1495,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  logging.warning(f"Skipping {ang_name} angle computation because at least one of the following keypoints is not provided by the model: {ang_params[0]}.")


- # ====================================================
+ #%% ==================================================
  # Process video or webcam feed
  # ====================================================
  logging.info(f"\nProcessing video stream...")
@@ -1528,10 +1529,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  else:
  frames.append(frame.copy())

- cv2.putText(frame, f"Press 'q' to quit", (cam_width-int(400*fontSize), cam_height-20), cv2.FONT_HERSHEY_SIMPLEX, fontSize+0.2, (255,255,255), thickness+1, cv2.LINE_AA)
- cv2.putText(frame, f"Press 'q' to quit", (cam_width-int(400*fontSize), cam_height-20), cv2.FONT_HERSHEY_SIMPLEX, fontSize+0.2, (0,0,255), thickness, cv2.LINE_AA)
-
-
  # Retrieve pose or Estimate pose and track people
  if load_trc_px:
  if frame_nb >= len(keypoints_all):
@@ -1607,6 +1604,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  # Draw keypoints and skeleton
  if show_realtime_results:
  img = frame.copy()
+ cv2.putText(img, f"Press 'q' to quit", (cam_width-int(600*fontSize), cam_height-20), cv2.FONT_HERSHEY_SIMPLEX, fontSize+0.2, (255,255,255), thickness+1, cv2.LINE_AA)
+ cv2.putText(img, f"Press 'q' to quit", (cam_width-int(600*fontSize), cam_height-20), cv2.FONT_HERSHEY_SIMPLEX, fontSize+0.2, (0,0,255), thickness, cv2.LINE_AA)
  img = draw_bounding_box(img, valid_X, valid_Y, colors=colors, fontSize=fontSize, thickness=thickness)
  img = draw_keypts(img, valid_X, valid_Y, valid_scores, cmap_str='RdYlGn')
  img = draw_skel(img, valid_X, valid_Y, pose_model)
@@ -1636,7 +1635,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  cv2.destroyAllWindows()


- # ====================================================
+ #%% ==================================================
  # Post-processing: Select persons, Interpolate, filter, and save pose and angles
  # ====================================================
  all_frames_X_homog = make_homogeneous(all_frames_X)
@@ -1651,12 +1650,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  all_frames_angles_homog = make_homogeneous(all_frames_angles)

  frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
+ all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count-frame_range[0]), name='time')
  if load_trc_px:
- all_frames_time = time_col
  selected_persons = [0]
  else:
  # Select persons
- all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count-frame_range[0]), name='time')
  nb_detected_persons = all_frames_scores_homog.shape[1]
  if nb_persons_to_detect == 'all':
  nb_persons_to_detect = all_frames_scores_homog.shape[1]
@@ -1687,7 +1685,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  logging.info(f'Reordered persons: IDs of persons {selected_persons} become {list(range(len(selected_persons)))}.')


- # ====================================================
+ #%% ==================================================
  # Post-processing pose
  # ====================================================
  all_frames_X_processed, all_frames_X_flipped_processed, all_frames_Y_processed, all_frames_scores_processed, all_frames_angles_processed = all_frames_X_homog.copy(), all_frames_X_flipped_homog.copy(), all_frames_Y_homog.copy(), all_frames_scores_homog.copy(), all_frames_angles_homog.copy()
@@ -1706,8 +1704,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  if frame_count - frame_range[0] - pose_nan_count <= 10:
  all_frames_X_processed[:,idx_person,:], all_frames_X_flipped_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = np.nan, np.nan, np.nan
  columns=np.array([[c]*3 for c in all_frames_X_person.columns]).flatten()
- trc_data_i = pd.DataFrame(0, index=all_frames_X_person.index, columns=['t']+list(columns))
- trc_data_i['t'] = all_frames_time
+ trc_data_i = pd.DataFrame(0, index=all_frames_X_person.index, columns=['time']+list(columns))
+ trc_data_i['time'] = all_frames_time
  trc_data.append(trc_data_i)
  trc_data_unfiltered_i = trc_data_i.copy()
  trc_data_unfiltered.append(trc_data_unfiltered_i)
@@ -1777,7 +1775,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  all_frames_X_processed[:,idx_person,:], all_frames_X_flipped_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_X_flipped_person, all_frames_Y_person_filt


- # Convert px to meters
+ #%% Convert px to meters
  trc_data_m = []
  if to_meters and save_pose:
  logging.info('\nConverting pose to meters:')
@@ -1796,17 +1794,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
  if floor_angle == 'auto' or xy_origin == 'auto':
  # estimated from the line formed by the toes when they are on the ground (where speed = 0)
- # try:
+ try:
  if all(key in trc_data[0] for key in ['LBigToe', 'RBigToe']):
  floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
  else:
  floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
  xy_origin_estim[0] = xy_origin_estim[0]-0.13
  logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
- # except:
- # floor_angle_estim = 0
- # xy_origin_estim = cam_width/2, cam_height/2
- # logging.warning(f'Could not estimate the floor angle and xy_origin from person {0}. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}] px.')
+ except:
+ floor_angle_estim = 0
+ xy_origin_estim = cam_width/2, cam_height/2
+ logging.warning(f'Could not estimate the floor angle and xy_origin from person {0}. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}] px.')
  if not floor_angle == 'auto':
  floor_angle_estim = floor_angle
  if xy_origin == 'auto':
@@ -1845,9 +1843,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

  # Convert to meters
  px_to_m_i = [convert_px_to_meters(trc_data[i][kpt_name], first_person_height, height_px, cx, cy, -floor_angle_estim, visible_side=visible_side_i) for kpt_name in new_keypoints_names]
- trc_data_m_i = pd.concat([all_frames_time.rename('t')]+px_to_m_i, axis=1)
+ trc_data_m_i = pd.concat([all_frames_time.rename('time')]+px_to_m_i, axis=1)
+ trc_data_m_i = trc_data_m_i.ffill(axis=0).bfill(axis=0)
  px_to_m_unfiltered_i = [convert_px_to_meters(trc_data_unfiltered[i][kpt_name], first_person_height, height_px, cx, cy, -floor_angle_estim) for kpt_name in new_keypoints_names]
- trc_data_unfiltered_m_i = pd.concat([all_frames_time.rename('t')]+px_to_m_unfiltered_i, axis=1)
+ trc_data_unfiltered_m_i = pd.concat([all_frames_time.rename('time')]+px_to_m_unfiltered_i, axis=1)

  if to_meters and show_plots:
  pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i)
@@ -1904,7 +1903,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):



- # ====================================================
+ #%% ==================================================
  # Post-processing angles
  # ====================================================
  if save_angles and calculate_angles:
@@ -1984,11 +1983,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

  # Plotting angles before and after interpolation and filtering
  if show_plots:
- all_frames_angles_person.insert(0, 't', all_frames_time)
+ all_frames_angles_person.insert(0, 'time', all_frames_time)
  angle_plots(all_frames_angles_person, angle_data, i) # i = current person


- # ====================================================
+ #%% ==================================================
  # Save images/video with processed pose and angles
  # ====================================================
  if save_vid or save_img:
@@ -2043,7 +2042,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  logging.info(f"Processed images saved to {img_output_dir.resolve()}.")


- # ====================================================
+ #%% ==================================================
  # OpenSim inverse kinematics (and optional marker augmentation)
  # ====================================================
  if do_ik or use_augmentation:
@@ -2064,6 +2063,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

  heights_m, masses = [], []
  for i in range(len(trc_data_m)):
+ trc_data_m_i = trc_data_m[i]
  if do_ik and not use_augmentation:
  logging.info(f'- Person {i}: Running scaling and inverse kinematics without marker augmentation. Set use_augmentation to True if you need it.')
  elif not do_ik and use_augmentation:
@@ -2098,6 +2098,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  Pose2Sim_config_dict['project']['participant_mass'] = masses
  Pose2Sim_config_dict['pose']['pose_model'] = pose_model_name.upper()
  Pose2Sim_config_dict = to_dict(Pose2Sim_config_dict)
+ print(Pose2Sim_config_dict)

  # Marker augmentation
  if use_augmentation:
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "sports2d"
- version = "0.8.0"
+ version = "0.8.1"
  authors = [
  {name = "David Pagnon", email = "contact@david-pagnon.com"},
  ]
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sports2d
- Version: 0.8.0
+ Version: 0.8.1
  Summary: Compute 2D human pose and angles from a video or a webcam.
  Author-email: David Pagnon <contact@david-pagnon.com>
  License: BSD 3-Clause License
@@ -105,7 +105,7 @@ Dynamic: license-file
  > - Better visualization output
  > - More flexible, easier to run
  >
- > Run `pip install sports2d -U` to get the latest version.
+ > Run `pip install sports2d pose2sim -U` to get the latest version.

  ***N.B.:*** As always, I am more than happy to welcome contributions (see [How to contribute](#how-to-contribute-and-to-do-list))!
  <!--User-friendly Colab version released! (and latest issues fixed, too)\
@@ -208,7 +208,7 @@ pip install .

  - **Install Sports2D with Pose2Sim**:
  ``` cmd
- pip install sports2d pose2sim
+ pip install sports2d
  ```


File without changes
File without changes
File without changes
File without changes