sports2d 0.5.6__py3-none-any.whl → 0.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Sports2D/Demo/Config_demo.toml +110 -5
- Sports2D/Sports2D.py +18 -9
- Sports2D/Utilities/common.py +291 -0
- Sports2D/Utilities/skeletons.py +654 -143
- Sports2D/Utilities/tests.py +12 -5
- Sports2D/process.py +205 -295
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/METADATA +100 -9
- sports2d-0.6.1.dist-info/RECORD +16 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/WHEEL +1 -1
- sports2d-0.5.6.dist-info/RECORD +0 -16
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/LICENSE +0 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/entry_points.txt +0 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/top_level.txt +0 -0
Sports2D/Demo/Config_demo.toml
CHANGED
|
@@ -13,7 +13,7 @@
|
|
|
13
13
|
|
|
14
14
|
[project]
|
|
15
15
|
video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi', ...]
|
|
16
|
-
#
|
|
16
|
+
# On Windows, replace '\' with '/'
|
|
17
17
|
# Beware that images won't be saved if paths contain non ASCII characters.
|
|
18
18
|
person_height = 1.70 # Height of the person in meters (for pixels -> meters conversion)
|
|
19
19
|
load_trc = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
|
|
@@ -21,6 +21,7 @@ compare = false # Not implemented yet
|
|
|
21
21
|
|
|
22
22
|
# Video parameters
|
|
23
23
|
time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
|
|
24
|
+
# Time ranges can be different for each video.
|
|
24
25
|
video_dir = '' # If empty, video dir is current dir
|
|
25
26
|
|
|
26
27
|
# Webcam parameters
|
|
@@ -48,12 +49,32 @@ result_dir = '' # If empty, project dir is current dir
|
|
|
48
49
|
slowmo_factor = 1 # 1 for normal speed. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8
|
|
49
50
|
|
|
50
51
|
# Pose detection parameters
|
|
51
|
-
pose_model = '
|
|
52
|
-
mode = 'balanced'
|
|
52
|
+
pose_model = 'Body_with_feet' # With RTMLib: Body_with_feet (default HALPE_26 model), Whole_body (COCO_133: body + feet + hands), Body (COCO_17), CUSTOM (see example at the end of the file), or any from skeletons.py
|
|
53
|
+
mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
|
|
54
|
+
|
|
55
|
+
# A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
|
|
56
|
+
# Models can be local paths or URLs.
|
|
57
|
+
# Make sure the input_sizes are within triple quotes, and that they are in the opposite order from the one in the model path (for example, it would be [192,256] for rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip).
|
|
58
|
+
# If your pose_model is not provided in skeletons.py, you may have to create your own one (see example at the end of the file).
|
|
59
|
+
# Example, equivalent to mode='balanced':
|
|
60
|
+
# mode = """{'det_class':'YOLOX',
|
|
61
|
+
# 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
|
|
62
|
+
# 'det_input_size':[640, 640],
|
|
63
|
+
# 'pose_class':'RTMPose',
|
|
64
|
+
# 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
|
|
65
|
+
# 'pose_input_size':[192,256]}"""
|
|
66
|
+
# Example with one-stage RTMO model (Requires pose_model = 'Body'):
|
|
67
|
+
# mode = """{'pose_class':'RTMO',
|
|
68
|
+
# 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
|
|
69
|
+
# 'pose_input_size':[640, 640]}"""
|
|
70
|
+
|
|
53
71
|
det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
|
|
54
72
|
# Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
|
|
73
|
+
device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
|
|
74
|
+
backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
|
|
55
75
|
tracking_mode = 'sports2d' # 'rtmlib' or 'sports2d'. 'sports2d' is generally much more accurate and comparable in speed
|
|
56
76
|
|
|
77
|
+
|
|
57
78
|
# Processing parameters
|
|
58
79
|
keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
|
|
59
80
|
average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
|
|
@@ -83,13 +104,14 @@ fontSize = 0.3
|
|
|
83
104
|
|
|
84
105
|
# Select joint angles among
|
|
85
106
|
# ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
|
|
86
|
-
joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow']
|
|
107
|
+
joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
|
|
87
108
|
# Select segment angles among
|
|
88
109
|
# ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
|
|
89
110
|
segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
|
|
90
111
|
|
|
91
112
|
# Processing parameters
|
|
92
113
|
flip_left_right = true # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
|
|
114
|
+
correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead
|
|
93
115
|
|
|
94
116
|
|
|
95
117
|
[post-processing]
|
|
@@ -121,5 +143,88 @@ person_orientation = ['front', 'none', 'left'] # Choose among 'auto', 'none', 'f
|
|
|
121
143
|
osim_setup_path = '../OpenSim_setup' # Path to the OpenSim setup folder
|
|
122
144
|
close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
|
|
123
145
|
|
|
146
|
+
|
|
124
147
|
[logging]
|
|
125
|
-
use_custom_logging = false # if integrated in an API that already has logging
|
|
148
|
+
use_custom_logging = false # if integrated in an API that already has logging
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
# CUSTOM skeleton
|
|
153
|
+
# If you use a model with different keypoints and/or different ordering
|
|
154
|
+
# Useful if you trained your own model, from DeepLabCut or MMPose for example.
|
|
155
|
+
# Make sure the ids are set in the right order and start from zero.
|
|
156
|
+
#
|
|
157
|
+
# If you want to perform inverse kinematics, you will also need to create an OpenSim model
|
|
158
|
+
# and add to its markerset the location where you expect the triangulated keypoints to be detected.
|
|
159
|
+
#
|
|
160
|
+
# In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
|
|
161
|
+
# You can create as many custom skeletons as you want, just add them further down and rename them.
|
|
162
|
+
#
|
|
163
|
+
# Check your model hierarchy with: for pre, _, node in RenderTree(model):
|
|
164
|
+
# print(f'{pre}{node.name} id={node.id}')
|
|
165
|
+
[pose.CUSTOM]
|
|
166
|
+
name = "Hip"
|
|
167
|
+
id = 19
|
|
168
|
+
[[pose.CUSTOM.children]]
|
|
169
|
+
name = "RHip"
|
|
170
|
+
id = 12
|
|
171
|
+
[[pose.CUSTOM.children.children]]
|
|
172
|
+
name = "RKnee"
|
|
173
|
+
id = 14
|
|
174
|
+
[[pose.CUSTOM.children.children.children]]
|
|
175
|
+
name = "RAnkle"
|
|
176
|
+
id = 16
|
|
177
|
+
[[pose.CUSTOM.children.children.children.children]]
|
|
178
|
+
name = "RBigToe"
|
|
179
|
+
id = 21
|
|
180
|
+
[[pose.CUSTOM.children.children.children.children.children]]
|
|
181
|
+
name = "RSmallToe"
|
|
182
|
+
id = 23
|
|
183
|
+
[[pose.CUSTOM.children.children.children.children]]
|
|
184
|
+
name = "RHeel"
|
|
185
|
+
id = 25
|
|
186
|
+
[[pose.CUSTOM.children]]
|
|
187
|
+
name = "LHip"
|
|
188
|
+
id = 11
|
|
189
|
+
[[pose.CUSTOM.children.children]]
|
|
190
|
+
name = "LKnee"
|
|
191
|
+
id = 13
|
|
192
|
+
[[pose.CUSTOM.children.children.children]]
|
|
193
|
+
name = "LAnkle"
|
|
194
|
+
id = 15
|
|
195
|
+
[[pose.CUSTOM.children.children.children.children]]
|
|
196
|
+
name = "LBigToe"
|
|
197
|
+
id = 20
|
|
198
|
+
[[pose.CUSTOM.children.children.children.children.children]]
|
|
199
|
+
name = "LSmallToe"
|
|
200
|
+
id = 22
|
|
201
|
+
[[pose.CUSTOM.children.children.children.children]]
|
|
202
|
+
name = "LHeel"
|
|
203
|
+
id = 24
|
|
204
|
+
[[pose.CUSTOM.children]]
|
|
205
|
+
name = "Neck"
|
|
206
|
+
id = 18
|
|
207
|
+
[[pose.CUSTOM.children.children]]
|
|
208
|
+
name = "Head"
|
|
209
|
+
id = 17
|
|
210
|
+
[[pose.CUSTOM.children.children.children]]
|
|
211
|
+
name = "Nose"
|
|
212
|
+
id = 0
|
|
213
|
+
[[pose.CUSTOM.children.children]]
|
|
214
|
+
name = "RShoulder"
|
|
215
|
+
id = 6
|
|
216
|
+
[[pose.CUSTOM.children.children.children]]
|
|
217
|
+
name = "RElbow"
|
|
218
|
+
id = 8
|
|
219
|
+
[[pose.CUSTOM.children.children.children.children]]
|
|
220
|
+
name = "RWrist"
|
|
221
|
+
id = 10
|
|
222
|
+
[[pose.CUSTOM.children.children]]
|
|
223
|
+
name = "LShoulder"
|
|
224
|
+
id = 5
|
|
225
|
+
[[pose.CUSTOM.children.children.children]]
|
|
226
|
+
name = "LElbow"
|
|
227
|
+
id = 7
|
|
228
|
+
[[pose.CUSTOM.children.children.children.children]]
|
|
229
|
+
name = "LWrist"
|
|
230
|
+
id = 9
|
Sports2D/Sports2D.py
CHANGED
|
@@ -143,6 +143,8 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
|
|
|
143
143
|
'pose_model': 'body_with_feet',
|
|
144
144
|
'mode': 'balanced',
|
|
145
145
|
'det_frequency': 4,
|
|
146
|
+
'device': 'auto',
|
|
147
|
+
'backend': 'auto',
|
|
146
148
|
'tracking_mode': 'sports2d',
|
|
147
149
|
'keypoint_likelihood_threshold': 0.3,
|
|
148
150
|
'average_likelihood_threshold': 0.5,
|
|
@@ -171,7 +173,9 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
|
|
|
171
173
|
'Right shoulder',
|
|
172
174
|
'Left shoulder',
|
|
173
175
|
'Right elbow',
|
|
174
|
-
'Left elbow'
|
|
176
|
+
'Left elbow',
|
|
177
|
+
'Right wrist',
|
|
178
|
+
'Left wrist'],
|
|
175
179
|
'segment_angles': [ 'Right foot',
|
|
176
180
|
'Left foot',
|
|
177
181
|
'Right shank',
|
|
@@ -186,7 +190,8 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
|
|
|
186
190
|
'Left arm',
|
|
187
191
|
'Right forearm',
|
|
188
192
|
'Left forearm'],
|
|
189
|
-
'flip_left_right': True
|
|
193
|
+
'flip_left_right': True,
|
|
194
|
+
'correct_segment_angles_with_floor_angle': True
|
|
190
195
|
},
|
|
191
196
|
'post-processing': {'interpolate': True,
|
|
192
197
|
'interp_gap_smaller_than': 10,
|
|
@@ -228,9 +233,11 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
|
|
|
228
233
|
'save_angles': ["A", "save angles as mot files. true if not specified"],
|
|
229
234
|
'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
|
|
230
235
|
'pose_model': ["p", "only body_with_feet is available for now. body_with_feet if not specified"],
|
|
231
|
-
'mode': ["m",
|
|
236
|
+
'mode': ["m", 'light, balanced, performance, or a """{dictionary within triple quote}""". balanced if not specified. Use a dictionary to specify your own detection and/or pose estimation models (more about in the documentation).'],
|
|
232
237
|
'det_frequency': ["f", "run person detection only every N frames, and inbetween track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
|
|
233
238
|
Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
|
|
239
|
+
'backend': ["", "Backend for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
|
|
240
|
+
'device': ["", "Device for pose estimatino can be 'auto', 'openvino', 'onnxruntime', 'opencv'"],
|
|
234
241
|
'to_meters': ["M", "convert pixels to meters. true if not specified"],
|
|
235
242
|
'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
|
|
236
243
|
'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
|
|
@@ -252,6 +259,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
|
|
|
252
259
|
'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
|
|
253
260
|
'fontSize': ["", "font size for angle values. 0.3 if not specified"],
|
|
254
261
|
'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participent switches their stance. true if not specified"],
|
|
262
|
+
'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false is the floor is tilted instead. True if not specified"],
|
|
255
263
|
'interpolate': ["", "interpolate missing data. true if not specified"],
|
|
256
264
|
'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
|
|
257
265
|
'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
|
|
@@ -320,7 +328,7 @@ def base_params(config_dict):
|
|
|
320
328
|
video = cv2.VideoCapture(str(video_dir / video_file)) if video_dir else cv2.VideoCapture(str(video_file))
|
|
321
329
|
if not video.isOpened():
|
|
322
330
|
raise FileNotFoundError(f'Error: Could not open {video_dir/video_file}. Check that the file exists.')
|
|
323
|
-
frame_rate = video.get(cv2.CAP_PROP_FPS)
|
|
331
|
+
frame_rate = round(video.get(cv2.CAP_PROP_FPS))
|
|
324
332
|
if frame_rate == 0:
|
|
325
333
|
frame_rate = 30
|
|
326
334
|
logging.warning(f'Error: Could not retrieve frame rate from {video_dir/video_file}. Defaulting to 30fps.')
|
|
@@ -435,7 +443,7 @@ def process(config='Config_demo.toml'):
|
|
|
435
443
|
|
|
436
444
|
process_fun(config_dict, video_file, time_range, frame_rate, result_dir)
|
|
437
445
|
|
|
438
|
-
elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
|
|
446
|
+
elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
|
|
439
447
|
logging.info(f'\nProcessing {video_file} took {elapsed_time:.2f} s.')
|
|
440
448
|
|
|
441
449
|
logging.shutdown()
|
|
@@ -501,10 +509,11 @@ def main():
|
|
|
501
509
|
# Override dictionary with command-line arguments if provided
|
|
502
510
|
leaf_keys = get_leaf_keys(new_config)
|
|
503
511
|
for leaf_key, default_value in leaf_keys.items():
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
512
|
+
if not 'CUSTOM' in leaf_key:
|
|
513
|
+
leaf_name = leaf_key.split('.')[-1]
|
|
514
|
+
cli_value = getattr(args, leaf_name)
|
|
515
|
+
if cli_value is not None:
|
|
516
|
+
set_nested_value(new_config, leaf_key, cli_value)
|
|
508
517
|
|
|
509
518
|
# Run process with the new configuration dictionary
|
|
510
519
|
Sports2D.process(new_config)
|
Sports2D/Utilities/common.py
CHANGED
|
@@ -23,6 +23,7 @@ from pathlib import Path
|
|
|
23
23
|
import logging
|
|
24
24
|
|
|
25
25
|
import numpy as np
|
|
26
|
+
import pandas as pd
|
|
26
27
|
from scipy import interpolate
|
|
27
28
|
import imageio_ffmpeg as ffmpeg
|
|
28
29
|
import cv2
|
|
@@ -43,6 +44,49 @@ __email__ = "contact@david-pagnon.com"
|
|
|
43
44
|
__status__ = "Development"
|
|
44
45
|
|
|
45
46
|
|
|
47
|
+
## CONSTANTS
|
|
48
|
+
angle_dict = { # lowercase!
|
|
49
|
+
# joint angles
|
|
50
|
+
'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
|
|
51
|
+
'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
|
|
52
|
+
'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
|
|
53
|
+
'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
|
|
54
|
+
'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
|
|
55
|
+
'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
|
|
56
|
+
# 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
|
|
57
|
+
# 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
|
|
58
|
+
'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
|
|
59
|
+
'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
|
|
60
|
+
'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
|
|
61
|
+
'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
|
|
62
|
+
'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
|
|
63
|
+
'left wrist': [['LElbow', 'LIndex', 'LWrist'], 'flexion', -180, 1],
|
|
64
|
+
|
|
65
|
+
# segment angles
|
|
66
|
+
'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
|
|
67
|
+
'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
|
|
68
|
+
'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
|
|
69
|
+
'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
|
|
70
|
+
'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
|
|
71
|
+
'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
|
|
72
|
+
'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
|
|
73
|
+
'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
|
|
74
|
+
'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
|
|
75
|
+
'head': [['Head', 'Neck'], 'horizontal', 0, -1],
|
|
76
|
+
'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
|
|
77
|
+
'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
|
|
78
|
+
'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
|
|
79
|
+
'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
|
|
80
|
+
'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
|
|
81
|
+
'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
|
|
85
|
+
(125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
|
|
86
|
+
(255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
|
|
87
|
+
(255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
|
|
88
|
+
thickness = 1
|
|
89
|
+
|
|
46
90
|
## CLASSES
|
|
47
91
|
class plotWindow():
|
|
48
92
|
'''
|
|
@@ -96,6 +140,35 @@ class plotWindow():
|
|
|
96
140
|
self.app.exec_()
|
|
97
141
|
|
|
98
142
|
## FUNCTIONS
|
|
143
|
+
def read_trc(trc_path):
|
|
144
|
+
'''
|
|
145
|
+
Read a TRC file and extract its contents.
|
|
146
|
+
|
|
147
|
+
INPUTS:
|
|
148
|
+
- trc_path (str): The path to the TRC file.
|
|
149
|
+
|
|
150
|
+
OUTPUTS:
|
|
151
|
+
- tuple: A tuple containing the Q coordinates, frames column, time column, marker names, and header.
|
|
152
|
+
'''
|
|
153
|
+
|
|
154
|
+
try:
|
|
155
|
+
with open(trc_path, 'r') as trc_file:
|
|
156
|
+
header = [next(trc_file) for _ in range(5)]
|
|
157
|
+
markers = header[3].split('\t')[2::3]
|
|
158
|
+
markers = [m.strip() for m in markers if m.strip()] # remove last \n character
|
|
159
|
+
|
|
160
|
+
trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4, encoding='utf-8')
|
|
161
|
+
frames_col, time_col = trc_df.iloc[:, 0], trc_df.iloc[:, 1]
|
|
162
|
+
Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
|
|
163
|
+
Q_coords = Q_coords.loc[:, ~Q_coords.columns.str.startswith('Unnamed')] # remove unnamed columns
|
|
164
|
+
Q_coords.columns = np.array([[m,m,m] for m in markers]).ravel().tolist()
|
|
165
|
+
|
|
166
|
+
return Q_coords, frames_col, time_col, markers, header
|
|
167
|
+
|
|
168
|
+
except Exception as e:
|
|
169
|
+
raise ValueError(f"Error reading TRC file at {trc_path}: {e}")
|
|
170
|
+
|
|
171
|
+
|
|
99
172
|
def interpolate_zeros_nans(col, *args):
|
|
100
173
|
'''
|
|
101
174
|
Interpolate missing points (of value zero),
|
|
@@ -247,6 +320,10 @@ def points_to_angles(points_list):
|
|
|
247
320
|
If parameters are arrays, returns an array of floats between 0.0 and 360.0
|
|
248
321
|
|
|
249
322
|
INPUTS:
|
|
323
|
+
- points_list: list of arrays of points
|
|
324
|
+
|
|
325
|
+
OUTPUTS:
|
|
326
|
+
- ang_deg: float or array of floats. The angle(s) in degrees.
|
|
250
327
|
'''
|
|
251
328
|
|
|
252
329
|
if len(points_list) < 2: # if not enough points, return None
|
|
@@ -288,6 +365,220 @@ def points_to_angles(points_list):
|
|
|
288
365
|
return ang_deg
|
|
289
366
|
|
|
290
367
|
|
|
368
|
+
def fixed_angles(points_list, ang_name):
|
|
369
|
+
'''
|
|
370
|
+
Add offset and multiplying factor to angles
|
|
371
|
+
|
|
372
|
+
INPUTS:
|
|
373
|
+
- points_list: list of arrays of points
|
|
374
|
+
- ang_name: str. The name of the angle to consider.
|
|
375
|
+
|
|
376
|
+
OUTPUTS:
|
|
377
|
+
- ang: float. The angle in degrees.
|
|
378
|
+
'''
|
|
379
|
+
|
|
380
|
+
ang_params = angle_dict[ang_name]
|
|
381
|
+
ang = points_to_angles(points_list)
|
|
382
|
+
ang += ang_params[2]
|
|
383
|
+
ang *= ang_params[3]
|
|
384
|
+
if ang_name in ['pelvis', 'shoulders']:
|
|
385
|
+
ang = np.where(ang>90, ang-180, ang)
|
|
386
|
+
ang = np.where(ang<-90, ang+180, ang)
|
|
387
|
+
else:
|
|
388
|
+
ang = np.where(ang>180, ang-360, ang)
|
|
389
|
+
ang = np.where(ang<-180, ang+360, ang)
|
|
390
|
+
|
|
391
|
+
return ang
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def mean_angles(trc_data, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
|
|
395
|
+
'''
|
|
396
|
+
Compute the mean angle time series from 3D points for a given list of angles.
|
|
397
|
+
|
|
398
|
+
INPUTS:
|
|
399
|
+
- trc_data (DataFrame): The triangulated coordinates of the markers.
|
|
400
|
+
- ang_to_consider (list): The list of angles to consider (requires angle_dict).
|
|
401
|
+
|
|
402
|
+
OUTPUTS:
|
|
403
|
+
- ang_mean: The mean angle time series.
|
|
404
|
+
'''
|
|
405
|
+
|
|
406
|
+
ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']
|
|
407
|
+
|
|
408
|
+
angs = []
|
|
409
|
+
for ang_name in ang_to_consider:
|
|
410
|
+
ang_params = angle_dict[ang_name]
|
|
411
|
+
ang_mk = ang_params[0]
|
|
412
|
+
if 'Neck' not in trc_data.columns:
|
|
413
|
+
df_MidShoulder = pd.DataFrame((trc_data['RShoulder'].values + trc_data['LShoulder'].values) /2)
|
|
414
|
+
df_MidShoulder.columns = ['Neck']*3
|
|
415
|
+
trc_data = pd.concat((trc_data.reset_index(drop=True), df_MidShoulder), axis=1)
|
|
416
|
+
|
|
417
|
+
pts_for_angles = []
|
|
418
|
+
for pt in ang_mk:
|
|
419
|
+
# pts_for_angles.append(trc_data.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
|
|
420
|
+
pts_for_angles.append(trc_data[pt])
|
|
421
|
+
|
|
422
|
+
ang = fixed_angles(pts_for_angles, ang_name)
|
|
423
|
+
ang = np.abs(ang)
|
|
424
|
+
angs.append(ang)
|
|
425
|
+
|
|
426
|
+
ang_mean = np.mean(angs, axis=0)
|
|
427
|
+
|
|
428
|
+
return ang_mean
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
def add_neck_hip_coords(kpt_name, p_X, p_Y, p_scores, kpt_ids, kpt_names):
|
|
432
|
+
'''
|
|
433
|
+
Add neck (midshoulder) and hip (midhip) coordinates if neck and hip are not available
|
|
434
|
+
|
|
435
|
+
INPUTS:
|
|
436
|
+
- kpt_name: name of the keypoint to add (neck, hip)
|
|
437
|
+
- p_X: list of x coordinates after flipping if needed
|
|
438
|
+
- p_Y: list of y coordinates
|
|
439
|
+
- p_scores: list of confidence scores
|
|
440
|
+
- kpt_ids: list of keypoint ids (see skeletons.py)
|
|
441
|
+
- kpt_names: list of keypoint names (see skeletons.py)
|
|
442
|
+
|
|
443
|
+
OUTPUTS:
|
|
444
|
+
- p_X: list of x coordinates with added missing coordinate
|
|
445
|
+
- p_Y: list of y coordinates with added missing coordinate
|
|
446
|
+
- p_scores: list of confidence scores with added missing score
|
|
447
|
+
'''
|
|
448
|
+
|
|
449
|
+
names, ids = kpt_names.copy(), kpt_ids.copy()
|
|
450
|
+
names.append(kpt_name)
|
|
451
|
+
ids.append(len(p_X))
|
|
452
|
+
if kpt_name == 'Neck':
|
|
453
|
+
mid_X = (np.abs(p_X[ids[names.index('LShoulder')]]) + np.abs(p_X[ids[names.index('RShoulder')]])) /2
|
|
454
|
+
mid_Y = (p_Y[ids[names.index('LShoulder')]] + p_Y[ids[names.index('RShoulder')]])/2
|
|
455
|
+
mid_score = (p_scores[ids[names.index('LShoulder')]] + p_scores[ids[names.index('RShoulder')]])/2
|
|
456
|
+
elif kpt_name == 'Hip':
|
|
457
|
+
mid_X = (np.abs(p_X[ids[names.index('LHip')]]) + np.abs(p_X[ids[names.index('RHip')]]) ) /2
|
|
458
|
+
mid_Y = (p_Y[ids[names.index('LHip')]] + p_Y[ids[names.index('RHip')]])/2
|
|
459
|
+
mid_score = (p_scores[ids[names.index('LHip')]] + p_scores[ids[names.index('RHip')]])/2
|
|
460
|
+
else:
|
|
461
|
+
raise ValueError("kpt_name must be 'Neck' or 'Hip'")
|
|
462
|
+
p_X = np.append(p_X, mid_X)
|
|
463
|
+
p_Y = np.append(p_Y, mid_Y)
|
|
464
|
+
p_scores = np.append(p_scores, mid_score)
|
|
465
|
+
|
|
466
|
+
return p_X, p_Y, p_scores
|
|
467
|
+
|
|
468
|
+
|
|
469
|
+
def best_coords_for_measurements(trc_data, keypoints_names, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
|
|
470
|
+
'''
|
|
471
|
+
Compute the best coordinates for measurements, after removing:
|
|
472
|
+
- 20% fastest frames (may be outliers)
|
|
473
|
+
- frames when speed is close to zero (person is out of frame): 0.2 m/frame, or 50 px/frame
|
|
474
|
+
- frames when hip and knee angle below 45° (imprecise coordinates when person is crouching)
|
|
475
|
+
|
|
476
|
+
INPUTS:
|
|
477
|
+
- trc_data: pd.DataFrame. The XYZ coordinates of each marker
|
|
478
|
+
- keypoints_names: list. The list of marker names
|
|
479
|
+
- fastest_frames_to_remove_percent: float
|
|
480
|
+
- close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
|
|
481
|
+
- large_hip_knee_angles: int
|
|
482
|
+
- trimmed_extrema_percent
|
|
483
|
+
|
|
484
|
+
OUTPUT:
|
|
485
|
+
- trc_data_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
|
|
486
|
+
'''
|
|
487
|
+
|
|
488
|
+
# Add MidShoulder column
|
|
489
|
+
df_MidShoulder = pd.DataFrame((trc_data['RShoulder'].values + trc_data['LShoulder'].values) /2)
|
|
490
|
+
df_MidShoulder.columns = ['MidShoulder']*3
|
|
491
|
+
trc_data = pd.concat((trc_data.reset_index(drop=True), df_MidShoulder), axis=1)
|
|
492
|
+
|
|
493
|
+
# Add Hip column if not present
|
|
494
|
+
n_markers_init = len(keypoints_names)
|
|
495
|
+
if 'Hip' not in keypoints_names:
|
|
496
|
+
df_Hip = pd.DataFrame((trc_data['RHip'].values + trc_data['LHip'].values) /2)
|
|
497
|
+
df_Hip.columns = ['Hip']*3
|
|
498
|
+
trc_data = pd.concat((trc_data.reset_index(drop=True), df_Hip), axis=1)
|
|
499
|
+
n_markers = len(keypoints_names)
|
|
500
|
+
|
|
501
|
+
# Using 80% slowest frames
|
|
502
|
+
sum_speeds = pd.Series(np.nansum([np.linalg.norm(trc_data.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
|
|
503
|
+
sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
|
|
504
|
+
if len(sum_speeds)==0:
|
|
505
|
+
raise ValueError('All frames have speed close to zero. Make sure the person is moving and correctly detected, or change close_to_zero_speed to a lower value.')
|
|
506
|
+
min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
|
|
507
|
+
trc_data_low_speeds = trc_data.iloc[min_speed_indices].reset_index(drop=True)
|
|
508
|
+
|
|
509
|
+
# Only keep frames with hip and knee flexion angles below 45°
|
|
510
|
+
# (if more than 50 of them, else take 50 smallest values)
|
|
511
|
+
try:
|
|
512
|
+
ang_mean = mean_angles(trc_data_low_speeds, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
|
|
513
|
+
trc_data_low_speeds_low_angles = trc_data_low_speeds[ang_mean < large_hip_knee_angles]
|
|
514
|
+
if len(trc_data_low_speeds_low_angles) < 50:
|
|
515
|
+
trc_data_low_speeds_low_angles = trc_data_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
|
|
516
|
+
except:
|
|
517
|
+
logging.warning(f"At least one among the RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder markers is missing for computing the knee and hip angles. Not restricting these agles to be below {large_hip_knee_angles}°.")
|
|
518
|
+
|
|
519
|
+
if n_markers_init < n_markers:
|
|
520
|
+
trc_data_low_speeds_low_angles = trc_data_low_speeds_low_angles.iloc[:,:-3]
|
|
521
|
+
|
|
522
|
+
return trc_data_low_speeds_low_angles
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def compute_height(trc_data, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
|
|
526
|
+
'''
|
|
527
|
+
Compute the height of the person from the trc data.
|
|
528
|
+
|
|
529
|
+
INPUTS:
|
|
530
|
+
- trc_data: pd.DataFrame. The XYZ coordinates of each marker
|
|
531
|
+
- keypoints_names: list. The list of marker names
|
|
532
|
+
- fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
|
|
533
|
+
- close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
|
|
534
|
+
- large_hip_knee_angles: float. Hip and knee angles below this value are considered as imprecise
|
|
535
|
+
- trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean)
|
|
536
|
+
|
|
537
|
+
OUTPUT:
|
|
538
|
+
- height: float. The estimated height of the person
|
|
539
|
+
'''
|
|
540
|
+
|
|
541
|
+
# Retrieve most reliable coordinates, adding MidShoulder and Hip columns if not present
|
|
542
|
+
trc_data_low_speeds_low_angles = best_coords_for_measurements(trc_data, keypoints_names,
|
|
543
|
+
fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
|
|
544
|
+
|
|
545
|
+
# Automatically compute the height of the person
|
|
546
|
+
feet_pairs = [['RHeel', 'RAnkle'], ['LHeel', 'LAnkle']]
|
|
547
|
+
try:
|
|
548
|
+
rfoot, lfoot = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in feet_pairs]
|
|
549
|
+
except:
|
|
550
|
+
rfoot, lfoot = 10, 10
|
|
551
|
+
logging.warning('The Heel marker is missing from your model. Considering Foot to Heel size as 10 cm.')
|
|
552
|
+
|
|
553
|
+
ankle_to_shoulder_pairs = [['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
|
|
554
|
+
['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
|
|
555
|
+
try:
|
|
556
|
+
rshank, rfemur, rback, lshank, lfemur, lback = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in ankle_to_shoulder_pairs]
|
|
557
|
+
except:
|
|
558
|
+
logging.error('At least one of the following markers is missing for computing the height of the person:\
|
|
559
|
+
RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\n\
|
|
560
|
+
Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
|
|
561
|
+
raise ValueError('At least one of the following markers is missing for computing the height of the person:\
|
|
562
|
+
RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\
|
|
563
|
+
Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
|
|
564
|
+
|
|
565
|
+
try:
|
|
566
|
+
head_pair = [['MidShoulder', 'Head']]
|
|
567
|
+
head = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in head_pair][0]
|
|
568
|
+
except:
|
|
569
|
+
head_pair = [['MidShoulder', 'Nose']]
|
|
570
|
+
head = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in head_pair][0]\
|
|
571
|
+
*1.33
|
|
572
|
+
logging.warning('The Head marker is missing from your model. Considering Neck to Head size as 1.33 times Neck to MidShoulder size.')
|
|
573
|
+
|
|
574
|
+
heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
|
|
575
|
+
|
|
576
|
+
# Remove the 20% most extreme values
|
|
577
|
+
height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
|
|
578
|
+
|
|
579
|
+
return height
|
|
580
|
+
|
|
581
|
+
|
|
291
582
|
def euclidean_distance(q1, q2):
|
|
292
583
|
'''
|
|
293
584
|
Euclidean distance between 2 points (N-dim).
|