sports2d 0.6.3__py3-none-any.whl → 0.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,10 +18,10 @@ video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp
18
18
  px_to_m_from_person_id = 2 # Person to use for pixels to meters conversion (not used if a calibration file is provided)
19
19
  px_to_m_person_height = 1.65 # Height of the reference person in meters (for pixels -> meters conversion).
20
20
  visible_side = ['front', 'none', 'auto'] # Choose visible side among ['right', 'left', 'front', 'back', 'auto', 'none']. String or list of strings.
21
- # if 'auto', will be either 'left', 'right', or 'front' depending on the direction of the motion
22
- # if 'none', no processing will be performed on the corresponding person
21
+ # if 'auto', will be either 'left', 'right', or 'front' depending on the direction of the motion
22
+ # if 'none', coordinates will be left in 2D rather than 3D
23
23
  load_trc_px = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
24
- compare = false # Not implemented yet
24
+ compare = false # Not implemented yet
25
25
 
26
26
  # Video parameters
27
27
  time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
@@ -53,7 +53,16 @@ result_dir = '' # If empty, project dir is current dir
53
53
  slowmo_factor = 1 # 1 for normal speed. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8
54
54
 
55
55
  # Pose detection parameters
56
- pose_model = 'Body_with_feet' #With RTMLib: Body_with_feet (default HALPE_26 model), Whole_body (COCO_133: body + feet + hands), Body (COCO_17), CUSTOM (see example at the end of the file), or any from skeletons.py
56
+ pose_model = 'Body_with_feet' #With RTMLib:
57
+ # - Body_with_feet (default HALPE_26 model),
58
+ # - Whole_body_wrist (COCO_133_WRIST: body + feet + 2 hand_points),
59
+ # - Whole_body (COCO_133: body + feet + hands),
60
+ # - Body (COCO_17). Marker augmentation won't work, Kinematic analysis will work,
61
+ # - Hand (HAND_21, only lightweight mode. Potentially better results with Whole_body),
62
+ # - Face (FACE_106),
63
+ # - Animal (ANIMAL2D_17)
64
+ # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
65
+ # /!\ For Face and Animal, use mode="""{dictionary}""", and find the corresponding .onnx model there https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose
57
66
  mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
58
67
 
59
68
  # A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
@@ -81,7 +90,7 @@ det_frequency = 4 # Run person detection only every N frames, and inbetwee
81
90
  device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
82
91
  backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
83
92
  tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
84
- deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
93
+ # deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
85
94
  # More robust in crowded scenes but tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
86
95
  # Requires `pip install torch torchvision torchreid gdown tensorboard`
87
96
 
@@ -105,11 +114,6 @@ xy_origin = ['auto'] # ['auto'] or [px_x,px_y]. N.B.: px_y points downwards.
105
114
  # If conversion from a calibration file
106
115
  calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
107
116
 
108
- fastest_frames_to_remove_percent = 0.1 # Frames with high speed are considered as outliers
109
- close_to_zero_speed_px = 50 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
110
- large_hip_knee_angles = 45 # Hip and knee angles below this value are considered as imprecise
111
- trimmed_extrema_percent = 0.5 # Proportion of the most extreme segment values to remove before calculating their mean)
112
-
113
117
 
114
118
  [angles]
115
119
  display_angle_values_on = ['body', 'list'] # 'body', 'list', ['body', 'list'], 'none'. Display angle values on the body, as a list in the upper left of the image, both, or do not display them.
@@ -147,19 +151,21 @@ filter_type = 'butterworth' # butterworth, gaussian, LOESS, median
147
151
 
148
152
 
149
153
  [kinematics]
150
- do_ik = false # Do scaling and inverse kinematics?
154
+ do_ik = true # Do scaling and inverse kinematics?
151
155
  use_augmentation = true # true or false (lowercase) # Set to true if you want to use the model with augmented markers
152
156
  use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
153
-
154
- osim_setup_path = '../OpenSim_setup' # Path to the OpenSim setup folder
157
+ participant_mass = [67.0, 55.0] # kg # defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)
155
158
  right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
156
- # default_height = 1.7 # meters # If automatic height calculation did not work, this value is used to scale the model
157
- remove_individual_scaling_setup = true # true or false (lowercase) # If true, the individual scaling setup files are removed to avoid cluttering
158
- remove_individual_ik_setup = true # true or false (lowercase) # If true, the individual IK setup files are removed to avoid cluttering
159
+
160
+ # Choosing best frames to scale the model
161
+ default_height = 1.7 # meters # If automatic height calculation did not work, this value is used to scale the model
159
162
  fastest_frames_to_remove_percent = 0.1 # Frames with high speed are considered as outliers
160
- close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
163
+ close_to_zero_speed_px = 50 # Sum for all keypoints: about 50 px/frame
164
+ close_to_zero_speed_m = 0.2 # Sum for all keypoints: 0.2 m/frame
161
165
  large_hip_knee_angles = 45 # Hip and knee angles below this value are considered as imprecise
162
166
  trimmed_extrema_percent = 0.5 # Proportion of the most extreme segment values to remove before calculating their mean
167
+ remove_individual_scaling_setup = true # true or false (lowercase) # If true, the individual scaling setup files are removed to avoid cluttering
168
+ remove_individual_ik_setup = true # true or false (lowercase) # If true, the individual IK setup files are removed to avoid cluttering
163
169
 
164
170
 
165
171
  [logging]
Sports2D/Sports2D.py CHANGED
@@ -124,7 +124,7 @@ from Sports2D import Sports2D
124
124
  DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
125
125
  'px_to_m_from_person_id': 2,
126
126
  'px_to_m_person_height': 1.65,
127
- 'visible_side': ['front', 'auto'],
127
+ 'visible_side': ['front', 'none', 'auto'],
128
128
  'load_trc_px': '',
129
129
  'compare': False,
130
130
  'time_range': [],
@@ -159,11 +159,7 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
159
159
  'calib_file': '',
160
160
  'floor_angle': 'auto',
161
161
  'xy_origin': ['auto'],
162
- 'save_calib': True,
163
- 'fastest_frames_to_remove_percent': 0.1,
164
- 'close_to_zero_speed_px': 50,
165
- 'large_hip_knee_angles': 45,
166
- 'trimmed_extrema_percent': 0.5
162
+ 'save_calib': True
167
163
  },
168
164
  'angles': {'display_angle_values_on': ['body', 'list'],
169
165
  'fontSize': 0.3,
@@ -209,12 +205,14 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
209
205
  },
210
206
  'kinematics':{'do_ik': False,
211
207
  'use_augmentation': False,
212
- 'use_contacts_muscles': False,
208
+ 'use_contacts_muscles': True,
209
+ 'participant_mass': [67.0, 55.0],
213
210
  'right_left_symmetry': True,
214
211
  'default_height': 1.70,
215
212
  'remove_individual_scaling_setup': True,
216
213
  'remove_individual_ik_setup': True,
217
214
  'fastest_frames_to_remove_percent': 0.1,
215
+ 'close_to_zero_speed_px': 50,
218
216
  'close_to_zero_speed_m': 0.2,
219
217
  'large_hip_knee_angles': 45,
220
218
  'trimmed_extrema_percent': 0.5,
@@ -226,7 +224,7 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
226
224
  CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
227
225
  'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
228
226
  'px_to_m_person_height': ["H", "height of the person in meters. 1.70 if not specified"],
229
- 'visible_side': ["", "front, back, left, right, auto, or none. 'front auto' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
227
+ 'visible_side': ["", "front, back, left, right, auto, or none. 'front none auto' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
230
228
  'load_trc_px': ["", "load trc file to avaid running pose estimation again. false if not specified"],
231
229
  'compare': ["", "visually compare motion with trc file. false if not specified"],
232
230
  'webcam_id': ["w", "webcam ID. 0 if not specified"],
@@ -260,6 +258,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
260
258
  'do_ik': ["", "do inverse kinematics. false if not specified"],
261
259
  'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
262
260
  'use_contacts_muscles': ["", "Use model with contact spheres and muscles. false if not specified"],
261
+ 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
263
262
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
264
263
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
265
264
  'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
@@ -270,10 +269,6 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
270
269
  'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
271
270
  'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
272
271
  'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
273
- 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
274
- 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
275
- 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
276
- 'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
277
272
  'fontSize': ["", "font size for angle values. 0.3 if not specified"],
278
273
  'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participent switches their stance. true if not specified"],
279
274
  'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false is the floor is tilted instead. True if not specified"],
@@ -293,7 +288,8 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
293
288
  'remove_individual_scaling_setup': ["", "remove individual scaling setup files generated during scaling. true if not specified"],
294
289
  'remove_individual_ik_setup': ["", "remove individual IK setup files generated during IK. true if not specified"],
295
290
  'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
296
- 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
291
+ 'close_to_zero_speed_m': ["","Sum for all keypoints: about 0.2 m/frame. Defaults to 0.2"],
292
+ 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame. Defaults to 50"],
297
293
  'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise and ignored. Defaults to 45"],
298
294
  'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
299
295
  'use_custom_logging': ["", "use custom logging. false if not specified"]
@@ -22,6 +22,7 @@ import subprocess
22
22
  from pathlib import Path
23
23
  import itertools as it
24
24
  import logging
25
+ from collections import defaultdict
25
26
  from anytree import PreOrderIter
26
27
 
27
28
  import numpy as np
@@ -32,9 +33,9 @@ import cv2
32
33
  import c3d
33
34
 
34
35
  import matplotlib.pyplot as plt
35
- from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QVBoxLayout
36
36
  from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
37
37
  from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
38
+ from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QVBoxLayout
38
39
 
39
40
 
40
41
  ## AUTHORSHIP INFORMATION
@@ -64,7 +65,7 @@ angle_dict = { # lowercase!
64
65
  'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
65
66
  'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
66
67
  'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
67
- 'left wrist': [['LElbow', 'LIndex', 'LWrist'], 'flexion', -180, 1],
68
+ 'left wrist': [['LElbow', 'LWrist', 'LIndex'], 'flexion', -180, 1],
68
69
 
69
70
  # segment angles
70
71
  'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
@@ -97,18 +98,18 @@ marker_Z_positions = {'right':
97
98
  "LHip": 0.105, "LKnee": 0.0886, "LAnkle": 0.0972, "LBigToe":0.0766, "LHeel":0.0883, "LSmallToe": 0.1200,
98
99
  "LShoulder": 0.2016, "LElbow": 0.1613, "LWrist": 0.120, "LThumb": 0.1625, "LIndex": 0.1735, "LPinky": 0.1740, "LEye": 0.0311,
99
100
  "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
100
- 'front':
101
- {"RHip": 0.0301, "RKnee": 0.0179, "RAnkle": 0.0230, "RBigToe": 0.2179, "RHeel": -0.0119, "RSmallToe": 0.1804,
102
- "RShoulder": -0.01275, "RElbow": 0.0119, "RWrist": 0.0002, "RThumb": 0.0106, "RIndex": -0.0004, "RPinky": -0.0009, "REye": 0.0702,
103
- "LHip": -0.0301, "LKnee": -0.0179, "LAnkle": 0.0230, "LBigToe": 0.2179, "LHeel": -0.0119, "LSmallToe": 0.1804,
104
- "LShoulder": 0.01275, "LElbow": -0.0119, "LWrist": -0.0002, "LThumb": -0.0106, "LIndex": 0.0004, "LPinky": 0.0009, "LEye": -0.0702,
105
- "Hip": 0.0301, "Neck": -0.0008, "Head": 0.0655, "Nose": 0.1076},
101
+ 'front': # original knee:0.0179
102
+ {"RHip": 0.0301, "RKnee": 0.129, "RAnkle": 0.0230, "RBigToe": 0.2179, "RHeel": -0.0119, "RSmallToe": 0.1804,
103
+ "RShoulder": -0.01275, "RElbow": 0.0702, "RWrist": 0.1076, "RThumb": 0.0106, "RIndex": -0.0004, "RPinky": -0.0009, "REye": 0.0702,
104
+ "LHip": 0.0301, "LKnee": 0.129, "LAnkle": 0.0230, "LBigToe": 0.2179, "LHeel": -0.0119, "LSmallToe": 0.1804,
105
+ "LShoulder": -0.01275, "LElbow": 0.0702, "LWrist": 0.1076, "LThumb": 0.0106, "LIndex": -0.0004, "LPinky": -0.0009, "LEye": 0.0702,
106
+ "Hip": 0.0301, "Neck": 0.0008, "Head": 0.0655, "Nose": 0.1076},
106
107
  'back':
107
- {"RHip": -0.0301, "RKnee": -0.0179, "RAnkle": -0.0230, "RBigToe": -0.2179, "RHeel": 0.0119, "RSmallToe": -0.1804,
108
- "RShoulder": 0.01275, "RElbow": -0.0119, "RWrist": -0.0002, "RThumb": -0.0106, "RIndex": 0.0004, "RPinky": 0.0009, "REye": -0.0702,
109
- "LHip": 0.0301, "LKnee": 0.0179, "LAnkle": -0.0230, "LBigToe": -0.2179, "LHeel": 0.0119, "LSmallToe": -0.1804,
110
- "LShoulder": -0.01275, "LElbow": 0.0119, "LWrist": 0.0002, "LThumb": 0.0106, "LIndex": -0.0004, "LPinky": -0.0009, "LEye": 0.0702,
111
- "Hip": 0.0301, "Neck": -0.0008, "Head": -0.0655, "Nose": 0.1076},
108
+ {"RHip": -0.0301, "RKnee": -0.129, "RAnkle": -0.0230, "RBigToe": -0.2179, "RHeel": 0.0119, "RSmallToe": -0.1804,
109
+ "RShoulder": 0.01275, "RElbow": 0.0702, "RWrist": -1076.0002, "RThumb": -0.0106, "RIndex": 0.0004, "RPinky": 0.0009, "REye": -0.0702,
110
+ "LHip": -0.0301, "LKnee": -0.129, "LAnkle": -0.0230, "LBigToe": -0.2179, "LHeel": 0.0119, "LSmallToe": -0.1804,
111
+ "LShoulder": 0.01275, "LElbow": 0.0702, "LWrist": -0.1076, "LThumb": -0.0106, "LIndex": 0.0004, "LPinky": 0.0009, "LEye": -0.0702,
112
+ "Hip": -0.0301, "Neck": -0.0008, "Head": -0.0655, "Nose": -0.1076},
112
113
  }
113
114
 
114
115
  colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
@@ -170,6 +171,15 @@ class plotWindow():
170
171
  self.app.exec_()
171
172
 
172
173
  ## FUNCTIONS
174
+ def to_dict(d):
175
+ '''
176
+ Convert a defaultdict to a dict.
177
+ '''
178
+ if isinstance(d, defaultdict):
179
+ return {k: to_dict(v) for k, v in d.items()}
180
+ return d
181
+
182
+
173
183
  def read_trc(trc_path):
174
184
  '''
175
185
  Read a TRC file and extract its contents.
@@ -575,7 +585,7 @@ def add_neck_hip_coords(kpt_name, p_X, p_Y, p_scores, kpt_ids, kpt_names):
575
585
  return p_X, p_Y, p_scores
576
586
 
577
587
 
578
- def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
588
+ def best_coords_for_measurements(Q_coords, keypoints_names, beginning_frames_to_remove_percent=0.2, end_frames_to_remove_percent=0.2, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
579
589
  '''
580
590
  Compute the best coordinates for measurements, after removing:
581
591
  - 20% fastest frames (may be outliers)
@@ -585,6 +595,8 @@ def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_re
585
595
  INPUTS:
586
596
  - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
587
597
  - keypoints_names: list. The list of marker names
598
+ - beginning_frames_to_remove_percent: float
599
+ - end_frames_to_remove_percent: float
588
600
  - fastest_frames_to_remove_percent: float
589
601
  - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
590
602
  - large_hip_knee_angles: int
@@ -607,6 +619,9 @@ def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_re
607
619
  Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_Hip), axis=1)
608
620
  n_markers = len(keypoints_names)
609
621
 
622
+ # Removing first and last frames
623
+ # Q_coords = Q_coords.iloc[int(len(Q_coords) * beginning_frames_to_remove_percent):int(len(Q_coords) * (1-end_frames_to_remove_percent))]
624
+
610
625
  # Using 80% slowest frames
611
626
  sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
612
627
  sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
@@ -643,7 +643,6 @@ FACE_106 = Node("root", id=None, children=[
643
643
  ]),
644
644
  ])
645
645
 
646
-
647
646
  '''ANIMAL2D_17 (full-body animal)
648
647
  https://github.com/AlexTheBad/AP-10K/'''
649
648
  ANIMAL2D_17 = Node("Hip", id=4, children=[
@@ -58,13 +58,23 @@ def test_workflow():
58
58
  demo_cmd = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False"]
59
59
  subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')
60
60
 
61
- # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, time range and slowmo factor
62
- demo_cmd2 = ["sports2d", "--to_meters", "False", "--multiperson", "False", "--mode", "lightweight", "--det_frequency", "50", "--time_range", "1.2", "2.7", "--slowmo_factor", "4", "--show_realtime_results", "False", "--show_graphs", "False"]
61
+ # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
62
+ demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
63
+ "--to_meters", "False",
64
+ "--multiperson", "False",
65
+ "--mode", "lightweight", "--det_frequency", "50",
66
+ "--slowmo_factor", "4",
67
+ "--filter_type", "gaussian",
68
+ "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
63
69
  subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
64
70
 
65
- # With inverse kinematics, body pose_model and custom RTMO mode
66
- # demo_cmd3 = ["sports2d", "--do_ik", "--person_orientation", "front none left", "--pose_model", "body", "--mode", "{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}", "--show_realtime_results", "False", "--show_graphs", "False"]
67
- # subprocess.run(demo_cmd3, check=True, capture_output=True, text=True)
71
+ # With a time range, inverse kinematics, marker augmentation, body pose_model and custom RTMO mode
72
+ demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
73
+ "--time_range", "1.2", "2.7",
74
+ "--do_ik", "True", "--use_augmentation", "True",
75
+ "--px_to_m_from_person_id", "1", "--px_to_m_person_height", "1.65",
76
+ "--visible_side", "front", "auto", "--participant_mass", "55.0", "67.0"]
77
+ subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')
68
78
 
69
79
  # From config file
70
80
  cli_config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
@@ -72,6 +82,5 @@ def test_workflow():
72
82
  cli_video_dir = Path(__file__).resolve().parent.parent / 'Demo'
73
83
  config_dict.get("project").update({"video_dir": str(cli_video_dir)})
74
84
  with open(cli_config_path, 'w') as f: toml.dump(config_dict, f)
75
-
76
85
  demo_cmd4 = ["sports2d", "--config", str(cli_config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
77
86
  subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8')
Sports2D/process.py CHANGED
@@ -56,10 +56,13 @@ import sys
56
56
  import logging
57
57
  import json
58
58
  import ast
59
+ import shutil
60
+ import os
59
61
  from functools import partial
60
62
  from datetime import datetime
61
63
  import itertools as it
62
64
  from tqdm import tqdm
65
+ from collections import defaultdict
63
66
  from anytree import RenderTree
64
67
 
65
68
  import numpy as np
@@ -69,11 +72,14 @@ import matplotlib as mpl
69
72
  import matplotlib.pyplot as plt
70
73
  from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom
71
74
  from deep_sort_realtime.deepsort_tracker import DeepSort
75
+ import opensim as osim
72
76
 
73
77
  from Sports2D.Utilities import filter
74
78
  from Sports2D.Utilities.common import *
75
79
  from Sports2D.Utilities.skeletons import *
76
80
 
81
+ DEFAULT_MASS = 70
82
+ DEFAULT_HEIGHT = 1.7
77
83
 
78
84
  ## AUTHORSHIP INFORMATION
79
85
  __author__ = "David Pagnon, HunMin Kim"
@@ -592,8 +598,8 @@ def load_pose_file(Q_coords):
592
598
  - scores_all: np.array. The scores in the format (Nframes, 1, Nmarkers)
593
599
  '''
594
600
 
595
- Z_cols = [3*i+2 for i in range(len(Q_coords.columns)//3)]
596
- Q_coords_xy = Q_coords.drop(Q_coords.columns[Z_cols], axis=1)
601
+ Z_cols = np.array([[3*i,3*i+1] for i in range(len(Q_coords.columns)//3)]).ravel()
602
+ Q_coords_xy = Q_coords.iloc[:,Z_cols]
597
603
  kpt_number = len(Q_coords_xy.columns)//2
598
604
 
599
605
  # shape (Nframes, 2*Nmarkers) --> (Nframes, 1, Nmarkers, 2)
@@ -619,7 +625,7 @@ def trc_data_from_XYZtime(X, Y, Z, time):
619
625
  '''
620
626
 
621
627
  trc_data = pd.concat([pd.concat([X.iloc[:,kpt], Y.iloc[:,kpt], Z.iloc[:,kpt]], axis=1) for kpt in range(len(X.columns))], axis=1)
622
- trc_data.insert(0, 't', time)
628
+ trc_data.insert(0, 'time', time)
623
629
 
624
630
  return trc_data
625
631
 
@@ -922,6 +928,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
922
928
  px_to_m_from_person_id = int(config_dict.get('project').get('px_to_m_from_person_id'))
923
929
  px_to_m_person_height_m = config_dict.get('project').get('px_to_m_person_height')
924
930
  visible_side = config_dict.get('project').get('visible_side')
931
+ if isinstance(visible_side, str): visible_side = [visible_side]
925
932
  # Pose from file
926
933
  load_trc_px = config_dict.get('project').get('load_trc_px')
927
934
  if load_trc_px == '': load_trc_px = None
@@ -973,11 +980,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
973
980
  xy_origin = config_dict.get('px_to_meters_conversion').get('xy_origin') # ['auto'] or [x, y]
974
981
  xy_origin = [float(o) for o in xy_origin] if xy_origin != ['auto'] else 'auto'
975
982
 
976
- fastest_frames_to_remove_percent = config_dict.get('px_to_meters_conversion').get('fastest_frames_to_remove_percent')
977
- large_hip_knee_angles = config_dict.get('px_to_meters_conversion').get('large_hip_knee_angles')
978
- trimmed_extrema_percent = config_dict.get('px_to_meters_conversion').get('trimmed_extrema_percent')
979
- close_to_zero_speed_px = config_dict.get('px_to_meters_conversion').get('close_to_zero_speed_px')
980
-
981
983
  keypoint_likelihood_threshold = config_dict.get('pose').get('keypoint_likelihood_threshold')
982
984
  average_likelihood_threshold = config_dict.get('pose').get('average_likelihood_threshold')
983
985
  keypoint_number_threshold = config_dict.get('pose').get('keypoint_number_threshold')
@@ -1010,23 +1012,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1010
1012
  butterworth_filter_order, butterworth_filter_cutoff, frame_rate,
1011
1013
  gaussian_filter_kernel, loess_filter_kernel, median_filter_kernel]
1012
1014
 
1013
- # Inverse kinematics settings
1014
- do_ik = config_dict.get('kinematics').get('do_ik')
1015
- use_augmentation = config_dict.get('kinematics').get('use_augmentation')
1016
- use_contacts_muscles = config_dict.get('kinematics').get('use_contacts_muscles')
1017
-
1018
- osim_setup_path = config_dict.get('kinematics').get('osim_setup_path')
1019
- right_left_symmetry = config_dict.get('kinematics').get('right_left_symmetry')
1020
- default_height = config_dict.get('kinematics').get('default_height')
1021
- remove_scaling_setup = config_dict.get('kinematics').get('remove_individual_scaling_setup')
1022
- remove_ik_setup = config_dict.get('kinematics').get('remove_individual_ik_setup')
1023
- fastest_frames_to_remove_percent = config_dict.get('kinematics').get('fastest_frames_to_remove_percent')
1024
- large_hip_knee_angles = config_dict.get('kinematics').get('large_hip_knee_angles')
1025
- trimmed_extrema_percent = config_dict.get('kinematics').get('trimmed_extrema_percent')
1026
- close_to_zero_speed = config_dict.get('kinematics').get('close_to_zero_speed_m')
1027
-
1028
- if do_ik: from Pose2Sim import Pose2Sim
1029
-
1030
1015
  # Create output directories
1031
1016
  if video_file == "webcam":
1032
1017
  current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -1045,6 +1030,32 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1045
1030
  if save_img:
1046
1031
  img_output_dir.mkdir(parents=True, exist_ok=True)
1047
1032
 
1033
+ # Inverse kinematics settings
1034
+ do_ik = config_dict.get('kinematics').get('do_ik')
1035
+ use_augmentation = config_dict.get('kinematics').get('use_augmentation')
1036
+ participant_masses = config_dict.get('kinematics').get('participant_mass')
1037
+ participant_masses = participant_masses if isinstance(participant_masses, list) else [participant_masses]
1038
+ fastest_frames_to_remove_percent = config_dict.get('kinematics').get('fastest_frames_to_remove_percent')
1039
+ large_hip_knee_angles = config_dict.get('kinematics').get('large_hip_knee_angles')
1040
+ trimmed_extrema_percent = config_dict.get('kinematics').get('trimmed_extrema_percent')
1041
+ close_to_zero_speed_px = config_dict.get('kinematics').get('close_to_zero_speed_px')
1042
+ close_to_zero_speed_m = config_dict.get('kinematics').get('close_to_zero_speed_m')
1043
+ if do_ik:
1044
+ from Pose2Sim.markerAugmentation import augment_markers_all
1045
+ from Pose2Sim.kinematics import kinematics_all
1046
+ # Create a Pose2Sim dictionary and fill in missing keys
1047
+ recursivedict = lambda: defaultdict(recursivedict)
1048
+ Pose2Sim_config_dict = recursivedict()
1049
+ # Fill Pose2Sim dictionary (height and mass will be filled later)
1050
+ Pose2Sim_config_dict['project']['project_dir'] = str(output_dir)
1051
+ Pose2Sim_config_dict['markerAugmentation']['make_c3d'] = make_c3d
1052
+ Pose2Sim_config_dict['kinematics'] = config_dict.get('kinematics')
1053
+ # Temporarily recreate Pose2Sim file hierarchy
1054
+ pose3d_dir = Path(output_dir) / 'pose-3d'
1055
+ pose3d_dir.mkdir(parents=True, exist_ok=True)
1056
+ kinematics_dir = Path(output_dir) / 'kinematics'
1057
+ kinematics_dir.mkdir(parents=True, exist_ok=True)
1058
+
1048
1059
 
1049
1060
  # Set up video capture
1050
1061
  if video_file == "webcam":
@@ -1067,7 +1078,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1067
1078
  model_name = 'HALPE_26'
1068
1079
  ModelClass = BodyWithFeet # 26 keypoints(halpe26)
1069
1080
  logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
1070
- elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY', 'WHOLE_BODY_WRIST'):
1081
+ elif pose_model.upper() == 'WHOLE_BODY_WRIST':
1082
+ model_name = 'COCO_133_WRIST'
1083
+ ModelClass = Wholebody
1084
+ logging.info(f"Using COCO_133 model (body, feet, 2 hand points) for pose estimation.")
1085
+ elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY'):
1071
1086
  model_name = 'COCO_133'
1072
1087
  ModelClass = Wholebody
1073
1088
  logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
@@ -1113,12 +1128,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1113
1128
  logging.error(f'\n{load_trc_px} file needs to be in px, not in meters.')
1114
1129
  logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc_px}.')
1115
1130
  # Load pose file in px
1116
- Q_coords, _, _, keypoints_names, _ = read_trc(load_trc_px)
1131
+ Q_coords, _, time_col, keypoints_names, _ = read_trc(load_trc_px)
1132
+
1117
1133
  keypoints_ids = [i for i in range(len(keypoints_names))]
1118
1134
  keypoints_all, scores_all = load_pose_file(Q_coords)
1119
- for pre, _, node in RenderTree(model_name):
1135
+ for pre, _, node in RenderTree(pose_model):
1120
1136
  if node.name in keypoints_names:
1121
1137
  node.id = keypoints_names.index(node.name)
1138
+ if time_range:
1139
+ frame_range = [abs(time_col - time_range[0]).idxmin(), abs(time_col - time_range[1]).idxmin()+1]
1140
+ else:
1141
+ frame_range = [0, len(Q_coords)]
1142
+ frame_iterator = tqdm(range(*frame_range))
1122
1143
 
1123
1144
  else:
1124
1145
  # Retrieve keypoint names from model
@@ -1241,7 +1262,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1241
1262
  person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)
1242
1263
  else:
1243
1264
  person_X_flipped = person_X.copy()
1244
-
1265
+
1245
1266
  # Compute angles
1246
1267
  person_angles = []
1247
1268
  # Add Neck and Hip if not provided
@@ -1322,7 +1343,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1322
1343
  all_frames_scores = make_homogeneous(all_frames_scores)
1323
1344
 
1324
1345
  frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
1325
- all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1), name='time')
1346
+ if not load_trc_px:
1347
+ all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count-frame_range[0]+1), name='time')
1348
+ else:
1349
+ all_frames_time = time_col
1326
1350
  if not multiperson:
1327
1351
  px_to_m_from_person_id = get_personID_with_highest_scores(all_frames_scores)
1328
1352
  detected_persons = [px_to_m_from_person_id]
@@ -1343,7 +1367,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1343
1367
 
1344
1368
  # Delete person if less than 4 valid frames
1345
1369
  pose_nan_count = len(np.where(all_frames_X_person.sum(axis=1)==0)[0])
1346
- if frame_count - pose_nan_count <= 4:
1370
+ if frame_count - frame_range[0] - pose_nan_count <= 4:
1347
1371
  trc_data_i = pd.DataFrame(0, index=all_frames_X_person.index, columns=np.array([[c]*3 for c in all_frames_X_person.columns]).flatten())
1348
1372
  trc_data_i.insert(0, 't', all_frames_time)
1349
1373
  trc_data.append(trc_data_i)
@@ -1411,6 +1435,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1411
1435
 
1412
1436
 
1413
1437
  # Convert px to meters
1438
+ trc_data_m = []
1414
1439
  if to_meters:
1415
1440
  logging.info('\nConverting pose to meters:')
1416
1441
  if px_to_m_from_person_id>=len(trc_data):
@@ -1444,7 +1469,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1444
1469
  except:
1445
1470
  floor_angle_estim = 0
1446
1471
  xy_origin_estim = cam_width/2, cam_height/2
1447
- logging.warning(f'Could not estimate the floor angle and xy_origin for person {px_to_m_from_person_id}. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}].')
1472
+ logging.warning(f'Could not estimate the floor angle and xy_origin for person {px_to_m_from_person_id}. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}] px.')
1448
1473
  if not floor_angle == 'auto':
1449
1474
  floor_angle_estim = floor_angle
1450
1475
  if xy_origin == 'auto':
@@ -1453,15 +1478,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1453
1478
  cx, cy = xy_origin
1454
1479
  logging.info(f'Using height of person #{px_to_m_from_person_id} ({px_to_m_person_height_m}m) to convert coordinates in meters. '
1455
1480
  f'Floor angle: {np.degrees(floor_angle_estim) if not floor_angle=="auto" else f"auto (estimation: {round(np.degrees(floor_angle_estim),2)}°)"}, '
1456
- f'xy_origin: {xy_origin if not xy_origin=="auto" else f"auto (estimation: {[round(c) for c in xy_origin_estim]})"}.')
1481
+ f'xy_origin: {xy_origin if not xy_origin=="auto" else f"auto (estimation: {[round(c) for c in xy_origin_estim]})"} px.')
1457
1482
 
1458
1483
  # Coordinates in m
1459
1484
  for i in range(len(trc_data)):
1460
- # print(i)
1461
1485
  if not np.array(trc_data[i].iloc[:,1:] ==0).all():
1462
1486
  # Automatically determine visible side
1463
1487
  visible_side_i = visible_side[i] if len(visible_side)>i else 'auto' # set to 'auto' if list too short
1464
-
1465
1488
  # Set to 'front' if slope of X values between [-5,5]
1466
1489
  if visible_side_i == 'auto':
1467
1490
  try:
@@ -1473,14 +1496,15 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1473
1496
  visible_side_i = 'right' if gait_direction > 0.6 \
1474
1497
  else 'left' if gait_direction < -0.6 \
1475
1498
  else 'front'
1499
+ logging.info(f'- Person {i}: Seen from the {visible_side_i}.')
1476
1500
  except:
1477
1501
  visible_side_i = 'none'
1478
- logging.warning(f'Could not automatically find gait direction for person {i}. Please set visible_side to "front", "back", "left", or "right" for this person. Setting to "none".')
1479
-
1502
+ logging.warning(f'- Person {i}: Could not automatically find gait direction. Please set visible_side to "front", "back", "left", or "right" for this person. Setting to "none".')
1480
1503
  # skip if none
1481
- if visible_side_i == 'none':
1482
- logging.info(f'Skipping because "visible_side" is set to none for person {i}.')
1483
- continue
1504
+ elif visible_side_i == 'none':
1505
+ logging.info(f'- Person {i}: Keeping output in 2D because "visible_side" is set to "none" for person {i}.')
1506
+ else:
1507
+ logging.info(f'- Person {i}: Seen from the {visible_side_i}.')
1484
1508
 
1485
1509
  # Convert to meters
1486
1510
  trc_data_m_i = pd.concat([convert_px_to_meters(trc_data[i][kpt_name], px_to_m_person_height_m, height_px, cx, cy, -floor_angle_estim, visible_side=visible_side_i) for kpt_name in keypoints_names], axis=1)
@@ -1492,12 +1516,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1492
1516
  pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i)
1493
1517
 
1494
1518
  # Write to trc file
1519
+ trc_data_m.append(trc_data_m_i)
1495
1520
  idx_path = selected_person_id if not multiperson and not calib_file else i
1496
1521
  pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
1497
1522
  make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
1498
1523
  if make_c3d:
1499
1524
  c3d_path = convert_to_c3d(pose_path_person_m_i)
1500
- logging.info(f'Person {idx_path}: Pose in meters saved to {pose_path_person_m_i.resolve()}. {"Also saved in c3d format." if make_c3d else ""}')
1525
+ logging.info(f'Pose in meters saved to {pose_path_person_m_i.resolve()}. {"Also saved in c3d format." if make_c3d else ""}')
1501
1526
 
1502
1527
 
1503
1528
 
@@ -1505,9 +1530,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1505
1530
 
1506
1531
 
1507
1532
 
1508
-
1509
-
1510
-
1511
1533
  # # plt.plot(trc_data_m.iloc[:,0], trc_data_m.iloc[:,1])
1512
1534
  # # plt.ylim([0,2])
1513
1535
  # # plt.show()
@@ -1542,7 +1564,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1542
1564
 
1543
1565
 
1544
1566
 
1545
-
1567
+
1546
1568
 
1547
1569
 
1548
1570
 
@@ -1566,7 +1588,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1566
1588
 
1567
1589
  # Delete person if less than 4 valid frames
1568
1590
  angle_nan_count = len(np.where(all_frames_angles_person.sum(axis=1)==0)[0])
1569
- if frame_count - angle_nan_count <= 4:
1591
+ if frame_count - frame_range[0] - angle_nan_count <= 4:
1570
1592
  logging.info(f'- Person {i}: Less than 4 valid frames. Deleting person.')
1571
1593
 
1572
1594
  else:
@@ -1628,16 +1650,83 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
1628
1650
  angle_plots(all_frames_angles_person, angle_data, i) # i = current person
1629
1651
 
1630
1652
 
1631
- # # Run scaling and inverse kinematics
1632
- # if save_angles and calculate_angles and do_ik:
1633
- # logging.info('\nPost-processing angles (with inverse kinematics):')
1634
- # if not to_meters:
1635
- # logging.error('IK requires positions in meters rather than in pixels. Set to_meters to True.')
1636
- # raise ValueError('IK requires positions in meters rather than in pixels. Set to_meters to True.')
1637
-
1638
-
1639
- # marker_Z_positions
1640
- # if 'none': No IK possible.
1641
- # visible_side=='auto'
1642
-
1643
- # convert_to_c3d(trc_path)
1653
+ # OpenSim inverse kinematics (and optional marker augmentation)
1654
+ if do_ik or use_augmentation:
1655
+ logging.info('\nPost-processing angles (with inverse kinematics):')
1656
+ if not to_meters:
1657
+ logging.warning('Skipping marker augmentation and inverse kinematics as to_meters was set to False.')
1658
+ else:
1659
+ # move all trc files containing _m_ string to pose3d_dir
1660
+ if not load_trc_px:
1661
+ trc_list = output_dir.glob('*_m_*.trc')
1662
+ else:
1663
+ trc_list = [pose_path_person_m_i]
1664
+ for trc_file in trc_list:
1665
+ if (pose3d_dir/trc_file.name).exists():
1666
+ os.remove(pose3d_dir/trc_file.name)
1667
+ shutil.move(trc_file, pose3d_dir)
1668
+
1669
+ heights_m, masses = [], []
1670
+ for i in range(len(trc_data_m)):
1671
+ if do_ik and not use_augmentation:
1672
+ logging.info(f'- Person {i}: Running scaling and inverse kinematics without marker augmentation. Set use_augmentation to True if you need it.')
1673
+ elif not do_ik and use_augmentation:
1674
+ logging.info(f'- Person {i}: Running marker augmentation without inverse kinematics. Set do_ik to True if you need it.')
1675
+ else:
1676
+ logging.info(f'- Person {i}: Running marker augmentation and inverse kinematics.')
1677
+
1678
+ # Delete person if less than 4 valid frames
1679
+ pose_path_person = pose_output_path.parent / (pose_output_path.stem + f'_person{i:02d}.trc')
1680
+ all_frames_X_person = pd.DataFrame(all_frames_X_homog[:,i,:], columns=keypoints_names)
1681
+ pose_nan_count = len(np.where(all_frames_X_person.sum(axis=1)==0)[0])
1682
+ if frame_count - frame_range[0] - pose_nan_count <= 4:
1683
+ # heights_m.append(DEFAULT_HEIGHT)
1684
+ # masses.append(DEFAULT_MASS)
1685
+ logging.info(f'Less than 4 valid frames. Deleting person.')
1686
+ else:
1687
+ if visible_side[i] == 'none':
1688
+ logging.info(f'Skipping marker augmentation and inverse kinematics because visible_side is "none".')
1689
+ # heights_m.append(DEFAULT_HEIGHT)
1690
+ # masses.append(DEFAULT_MASS)
1691
+ else:
1692
+ # Provide missing data to Pose2Sim_config_dict
1693
+ height_m_i = compute_height(trc_data_m_i.iloc[:,1:], keypoints_names,
1694
+ fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_m, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
1695
+ mass_i = participant_masses[i] if len(participant_masses)>i else 70
1696
+ if len(participant_masses)<=i:
1697
+ logging.warning(f'No mass provided. Using 70 kg as default.')
1698
+ heights_m.append(height_m_i)
1699
+ masses.append(mass_i)
1700
+
1701
+ Pose2Sim_config_dict['project']['participant_height'] = heights_m
1702
+ Pose2Sim_config_dict['project']['participant_mass'] = masses
1703
+ Pose2Sim_config_dict['pose']['pose_model'] = pose_model_name.upper()
1704
+ Pose2Sim_config_dict = to_dict(Pose2Sim_config_dict)
1705
+
1706
+ # Marker augmentation
1707
+ if use_augmentation:
1708
+ logging.info('Running marker augmentation...')
1709
+ augment_markers_all(Pose2Sim_config_dict)
1710
+ logging.info(f'Augmented trc results saved to {pose3d_dir.resolve()}.\n')
1711
+
1712
+ if do_ik:
1713
+ if not save_angles or not calculate_angles:
1714
+ logging.warning(f'Skipping inverse kinematics because save_angles or calculate_angles is set to False.')
1715
+ else:
1716
+ logging.info('Running inverse kinematics...')
1717
+ kinematics_all(Pose2Sim_config_dict)
1718
+ for mot_file in kinematics_dir.glob('*.mot'):
1719
+ if (mot_file.parent/(mot_file.stem+'_ik.mot')).exists():
1720
+ os.remove(mot_file.parent/(mot_file.stem+'_ik.mot'))
1721
+ os.rename(mot_file, mot_file.parent/(mot_file.stem+'_ik.mot'))
1722
+ logging.info(f'.osim model and .mot motion file results saved to {kinematics_dir.resolve()}.\n')
1723
+
1724
+ # Move all files in pose-3d and kinematics to the output_dir
1725
+ osim.Logger.removeFileSink()
1726
+ for directory in [pose3d_dir, kinematics_dir]:
1727
+ for file in directory.glob('*'):
1728
+ if (output_dir/file.name).exists():
1729
+ os.remove(output_dir/file.name)
1730
+ shutil.move(file, output_dir)
1731
+ pose3d_dir.rmdir()
1732
+ kinematics_dir.rmdir()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: sports2d
3
- Version: 0.6.3
3
+ Version: 0.7.2
4
4
  Summary: Detect pose and compute 2D joint angles from a video.
5
5
  Home-page: https://github.com/davidpagnon/Sports2D
6
6
  Author: David Pagnon
@@ -39,6 +39,7 @@ Requires-Dist: openvino
39
39
  Requires-Dist: tqdm
40
40
  Requires-Dist: imageio_ffmpeg
41
41
  Requires-Dist: deep-sort-realtime
42
+ Requires-Dist: Pose2Sim
42
43
 
43
44
 
44
45
  [![Continuous integration](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml/badge.svg?branch=main)](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml)
@@ -64,14 +65,13 @@ Requires-Dist: deep-sort-realtime
64
65
 
65
66
  > **`Announcement:`\
66
67
  > Complete rewriting of the code!** Run `pip install sports2d -U` to get the latest version.
68
+ > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
69
+ > - Any detector and pose estimation model can be used. **New in v0.6!**
70
+ > - Results in meters rather than pixels. **New in v0.5!**
67
71
  > - Faster, more accurate
68
72
  > - Works from a webcam
69
- > - Results in meters rather than pixels. **New in v0.5!**
70
73
  > - Better visualization output
71
74
  > - More flexible, easier to run
72
- > - Batch process multiple videos at once
73
- >
74
- > Note: Colab version broken for now. I'll fix it in the next few weeks.
75
75
 
76
76
  ***N.B.:*** As always, I am more than happy to welcome contributions (see [How to contribute](#how-to-contribute-and-to-do-list))!
77
77
  <!--User-friendly Colab version released! (and latest issues fixed, too)\
@@ -220,10 +220,7 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
220
220
 
221
221
  The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)**
222
222
 
223
- <!-- IMAGE ICI
224
- -->
225
-
226
-
223
+ <img src="Content/sports2d_blender.gif" width="760">
227
224
 
228
225
  <br>
229
226
 
@@ -238,10 +235,9 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
238
235
  - **File -> Open Model:** Open your scaled model (e.g., `Model_Pose2Sim_LSTM.osim`).
239
236
  - **File -> Load Motion:** Open your motion file (e.g., `angles.mot`).
240
237
 
241
- <br>
238
+ <img src="Content/sports2d_opensim.gif" width="760">
242
239
 
243
- <!-- IMAGE ICI
244
- -->
240
+ <br>
245
241
 
246
242
 
247
243
 
@@ -273,21 +269,26 @@ sports2d --time_range 1.2 2.7
273
269
 
274
270
 
275
271
  #### Get coordinates in meters:
272
+ > **N.B.:** Depth is estimated from a neutral pose.
276
273
 
277
274
  <!-- You either need to provide a calibration file, or simply the height of a person (Note that the latter will not take distortions into account, and that it will be less accurate for motion in the frontal plane).\-->
278
275
  You may need to convert pixel coordinates to meters.\
279
- Just provide the height of the reference person (and their ID in case of multiple person detection).\
280
- The floor angle and the origin of the xy axis are computed automatically from gait. If you analyze another type of motion, you can manually specify them.\
281
- Note that it does not take distortions into account, and that it will be less accurate for motions in the frontal plane.
276
+ Just provide the height of the reference person (and their ID in case of multiple person detection).
282
277
 
283
- ``` cmd
278
 + You can also specify whether the visible side of the person is left, right, front, or back. Set it to 'auto' if you want it to be found automatically (only works for motion in the sagittal plane), or to 'none' if you want to keep 2D instead of 3D coordinates (if the person goes right, and then left for example).
279
+
280
+ The floor angle and the origin of the xy axis are computed automatically from gait. If you analyze another type of motion, you can manually specify them. Note that `y` points down.\
281
+ Also note that distortions are not taken into account, and that results will be less accurate for motions in the frontal plane.
282
+
283
+ <!-- ``` cmd
284
284
  sports2d --to_meters True --calib_file calib_demo.toml
285
- ```
285
+ ``` -->
286
286
  ``` cmd
287
287
  sports2d --to_meters True --px_to_m_person_height 1.65 --px_to_m_from_person_id 2
288
288
  ```
289
289
  ``` cmd
290
- sports2d --to_meters True --px_to_m_person_height 1.65 --px_to_m_from_person_id 2 --floor_angle 0 --xy_origin 0 940
290
+ sports2d --to_meters True --px_to_m_person_height 1.65 --px_to_m_from_person_id 2 `
291
+ --visible_side front none auto --floor_angle 0 --xy_origin 0 940
291
292
  ```
292
293
 
293
294
  <br>
@@ -296,24 +297,28 @@ sports2d --to_meters True --px_to_m_person_height 1.65 --px_to_m_from_person_id
296
297
  #### Run inverse kinematics:
297
298
  > N.B.: [Full install](#full-install) required.
298
299
 
299
- > N.B.: The person needs to be moving on a single plane for the whole selected time range.
300
-
301
- Analyzed persons can be showing their left, right, front, or back side. If you want to ignore a certain person, set `--visible_side none`.
302
-
303
-
304
-
305
-
306
- Why IK?
307
- Add section in how it works
300
+ > **N.B.:** The person needs to be moving on a single plane for the whole selected time range.
308
301
 
302
 + OpenSim inverse kinematics allows you to enforce joint constraints and joint angle limits, to constrain the bones to keep the same length throughout the motion, and optionally to enforce equal segment sizes on the left and right sides. Most generally, it gives more biomechanically accurate results. It can also give you the opportunity to compute joint torques, muscle forces, ground reaction forces, and more, [with MoCo](https://opensim-org.github.io/opensim-moco-site/) for example.
309
303
 
304
+ This is done via [Pose2Sim](https://github.com/perfanalytics/pose2sim).\
305
+ Model scaling is done according to the mean of the segment lengths, across a subset of frames. We remove the 10% fastest frames (potential outliers), the frames where the speed is 0 (person probably out of frame), the frames where the average knee and hip flexion angles are above 45° (pose estimation is not precise when the person is crouching) and the 20% most extreme segment values after the previous operations (potential outliers). All these parameters can be edited in your Config.toml file.
310
306
 
311
307
  ```cmd
312
- sports2d --time_range 1.2 2.7 --do_ik true --visible_side front left
308
+ sports2d --time_range 1.2 2.7 `
309
+ --do_ik true `
310
+ --px_to_m_from_person_id 1 --px_to_m_person_height 1.65 `
311
+ --visible_side front auto
313
312
  ```
314
313
 
314
+ You can optionally use the LSTM marker augmentation to improve the quality of the output motion.\
315
+ You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis).
316
+
315
317
  ```cmd
316
- sports2d --time_range 1.2 2.7 --do_ik true --visible_side front left --use_augmentation True
318
+ sports2d --time_range 1.2 2.7 `
319
+ --do_ik true --use_augmentation True `
320
+ --px_to_m_from_person_id 1 --px_to_m_person_height 1.65 `
321
+ --visible_side front left --participant_mass 67.0 55.0
317
322
  ```
318
323
 
319
324
  <br>
@@ -379,7 +384,9 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
379
384
  ```
380
385
  - Choose whether you want video, images, trc pose file, angle mot file, real-time display, and plots:
381
386
  ```cmd
382
- sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false --show_graphs false
387
+ sports2d --save_vid false --save_img true `
388
+ --save_pose false --save_angles true `
389
+ --show_realtime_results false --show_graphs false
383
390
  ```
384
391
  - Save results to a custom directory, specify the slow-motion factor:
385
392
  ``` cmd
@@ -396,12 +403,12 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
396
403
  ```
397
404
  - Use any custom (deployed) MMPose model
398
405
  ``` cmd
399
- sports2d --pose_model BodyWithFeet :
400
- --mode """{'det_class':'YOLOX',
401
- 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
402
- 'det_input_size':[640, 640],
403
- 'pose_class':'RTMPose',
404
- 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
406
+ sports2d --pose_model BodyWithFeet : `
407
+ --mode """{'det_class':'YOLOX', `
408
+ 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip', `
409
+ 'det_input_size':[640, 640], `
410
+ 'pose_class':'RTMPose', `
411
+ 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip', `
405
412
  'pose_input_size':[192,256]}"""
406
413
  ```
407
414
 
@@ -418,9 +425,10 @@ sports2d --help
418
425
 
419
426
  ```
420
427
  'config': ["C", "path to a toml configuration file"],
428
+
421
429
  'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
422
430
  'px_to_m_person_height': ["H", "height of the person in meters. 1.70 if not specified"],
423
- 'visible_side': ["", "front, back, left, right, auto, or none. 'front auto' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
431
+ 'visible_side': ["", "front, back, left, right, auto, or none. 'front none auto' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
424
432
  'load_trc_px': ["", "load trc file to avoid running pose estimation again. false if not specified"],
425
433
  'compare': ["", "visually compare motion with trc file. false if not specified"],
426
434
  'webcam_id': ["w", "webcam ID. 0 if not specified"],
@@ -454,6 +462,7 @@ sports2d --help
454
462
  'do_ik': ["", "do inverse kinematics. false if not specified"],
455
463
  'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
456
464
  'use_contacts_muscles': ["", "Use model with contact spheres and muscles. false if not specified"],
465
+ 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
457
466
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
458
467
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
459
468
  'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
@@ -586,7 +595,7 @@ Sports2D:
586
595
 
587
596
  4. **Chooses the right persons to keep.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, only retrieves the keypoints with high enough confidence, and only keeps the persons with high enough average confidence over each frame.
588
597
 
589
- 4. **Converts the pixel coordinates to meters.** The user can provide a calibration file, or simply the size of a specified person. The floor angle and the coordinate origin can either be detected automatically from the gait sequence, or be manually specified.
598
+ 4. **Converts the pixel coordinates to meters.** The user can provide a calibration file, or simply the size of a specified person. The floor angle and the coordinate origin can either be detected automatically from the gait sequence, or be manually specified. The depth coordinates are set to normative values, depending on whether the person is going left, right, facing the camera, or looking away.
590
599
 
591
600
  5. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
592
601
 
@@ -0,0 +1,16 @@
1
+ Sports2D/Sports2D.py,sha256=eWOz-7HQiwRu7Xl0_bPTlg9meOW635x197WL9QrKfoU,29719
2
+ Sports2D/__init__.py,sha256=TyCP7Uuuy6CNklhPf8W84MbYoO1_-1dxowSYAJyk_OI,102
3
+ Sports2D/process.py,sha256=4Ce22jOTutlGZoPEz5vMp2g4RQUGQSByKQj_QV3UHzo,87916
4
+ Sports2D/Demo/Config_demo.toml,sha256=S7cBtdob9zxA6deicPY1ZEQicTYeaByet5gSvRmkG00,13854
5
+ Sports2D/Demo/demo.mp4,sha256=2aZkFxhWR7ESMEtXCT8MGA83p2jmoU2sp1ylQfO3gDk,3968304
6
+ Sports2D/Utilities/__init__.py,sha256=TyCP7Uuuy6CNklhPf8W84MbYoO1_-1dxowSYAJyk_OI,102
7
+ Sports2D/Utilities/common.py,sha256=OKyjBuXoZK0O34vuGeXzVrWpsyx6DI219L-yuS-iQTU,48254
8
+ Sports2D/Utilities/filter.py,sha256=8mVefMjDzxmh9a30eNtIrUuK_mUKoOJ2Nr-OzcQKkKM,4922
9
+ Sports2D/Utilities/skeletons.py,sha256=WObRPHpCj5Q2WpspzFRy1gvAX-EZD9WyA9K-kqL4YRo,40076
10
+ Sports2D/Utilities/tests.py,sha256=mzs69p5ZIGiOX6co2qwQmO09LhJLex3yujcUWC6p4Bw,3573
11
+ sports2d-0.7.2.dist-info/LICENSE,sha256=f4qe3nE0Y7ltJho5w-xAR0jI5PUox5Xl-MsYiY7ZRM8,1521
12
+ sports2d-0.7.2.dist-info/METADATA,sha256=YoQQ1p6p0fOEk12nVhaVvnkjo0dleyG46L-vsLTpxJw,38518
13
+ sports2d-0.7.2.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
14
+ sports2d-0.7.2.dist-info/entry_points.txt,sha256=h2CJTuydtNf8JyaLoWxWl5HTSIxx5Ra_FSiSGQsf7Sk,52
15
+ sports2d-0.7.2.dist-info/top_level.txt,sha256=DoURf9UDB8lQ_9lMUPQMQqhXCvWPFFjJco9NzPlHJ6I,9
16
+ sports2d-0.7.2.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.8.0)
2
+ Generator: setuptools (75.8.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,16 +0,0 @@
1
- Sports2D/Sports2D.py,sha256=VWlQiv2tE926mEp2hdDp86G4iymykBNKCNZLJ0jdGPQ,30065
2
- Sports2D/__init__.py,sha256=TyCP7Uuuy6CNklhPf8W84MbYoO1_-1dxowSYAJyk_OI,102
3
- Sports2D/process.py,sha256=WvCkYl4Bz3NbTzNPns7k0TCB-QqpNnnDxVhMDaLZs-I,82432
4
- Sports2D/Demo/Config_demo.toml,sha256=v03yDYkSUqfqPo6jgvMgFNLKrfxSZqmnyu9W7eEAq-Q,13301
5
- Sports2D/Demo/demo.mp4,sha256=2aZkFxhWR7ESMEtXCT8MGA83p2jmoU2sp1ylQfO3gDk,3968304
6
- Sports2D/Utilities/__init__.py,sha256=TyCP7Uuuy6CNklhPf8W84MbYoO1_-1dxowSYAJyk_OI,102
7
- Sports2D/Utilities/common.py,sha256=UxL5ztrEkvTXlRyRFzi7FlG7IeUMH9EpKvEd0Gzg2d8,47681
8
- Sports2D/Utilities/filter.py,sha256=8mVefMjDzxmh9a30eNtIrUuK_mUKoOJ2Nr-OzcQKkKM,4922
9
- Sports2D/Utilities/skeletons.py,sha256=ljCaXrkbI2m_xp_hXxcRBzhL6rPgcmib9xixWuIt624,40077
10
- Sports2D/Utilities/tests.py,sha256=yG8CNPB7AJPhVSuiCCeKtQsgdys9GHntUlzF4ECq838,3158
11
- sports2d-0.6.3.dist-info/LICENSE,sha256=f4qe3nE0Y7ltJho5w-xAR0jI5PUox5Xl-MsYiY7ZRM8,1521
12
- sports2d-0.6.3.dist-info/METADATA,sha256=9P1TAELNlLldoOA-sWUf3Q8xS42RXGQwkDhCNVldw_s,36269
13
- sports2d-0.6.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
14
- sports2d-0.6.3.dist-info/entry_points.txt,sha256=h2CJTuydtNf8JyaLoWxWl5HTSIxx5Ra_FSiSGQsf7Sk,52
15
- sports2d-0.6.3.dist-info/top_level.txt,sha256=DoURf9UDB8lQ_9lMUPQMQqhXCvWPFFjJco9NzPlHJ6I,9
16
- sports2d-0.6.3.dist-info/RECORD,,