sports2d-0.8.20-py3-none-any.whl → sports2d-0.8.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Sports2D/Demo/Calib_demo.toml +12 -0
- Sports2D/Demo/Config_demo.toml +7 -6
- Sports2D/Sports2D.py +17 -4
- Sports2D/Utilities/tests.py +30 -8
- Sports2D/process.py +308 -237
- {sports2d-0.8.20.dist-info → sports2d-0.8.22.dist-info}/METADATA +67 -37
- {sports2d-0.8.20.dist-info → sports2d-0.8.22.dist-info}/RECORD +11 -10
- {sports2d-0.8.20.dist-info → sports2d-0.8.22.dist-info}/WHEEL +0 -0
- {sports2d-0.8.20.dist-info → sports2d-0.8.22.dist-info}/entry_points.txt +0 -0
- {sports2d-0.8.20.dist-info → sports2d-0.8.22.dist-info}/licenses/LICENSE +0 -0
- {sports2d-0.8.20.dist-info → sports2d-0.8.22.dist-info}/top_level.txt +0 -0
Sports2D/Demo/Calib_demo.toml
ADDED

@@ -0,0 +1,12 @@
+[demo]
+name = "demo"
+size = [ 1768, 994]
+matrix = [ [ 2520.0897058227038, 0.0, 884.0], [ 0.0, 2520.0897058227038, 497.0], [ 0.0, 0.0, 1.0]]
+distortions = [ 0.0, 0.0, 0.0, 0.0]
+rotation = [ 1.2082126924727719, 1.2098328575850605, -1.2082126924727719]
+translation = [ -3.510103521992233, 1.7079310029359385, 10.0]
+fisheye = false
+
+[metadata]
+adjusted = false
+error = 0.0
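The new calibration file follows the Pose2Sim single-camera convention: `size` is the image resolution, `matrix` the 3×3 intrinsic matrix (focal lengths on the diagonal, principal point in the last column), and `rotation`/`translation` the extrinsics. A minimal sketch of loading it back, assuming the `toml` package used elsewhere in Sports2D (reading `rotation` as an axis-angle/Rodrigues vector is an assumption based on that convention, not stated in the diff):

```python
import numpy as np
import toml

calib = toml.load('Calib_demo.toml')
cam = calib['demo']

K = np.array(cam['matrix'])          # 3x3 intrinsic matrix
fx, fy = K[0, 0], K[1, 1]            # focal lengths in px (equal here)
cx, cy = K[0, 2], K[1, 2]            # principal point = image center (884, 497)
dist = np.array(cam['distortions'])  # no distortion in this demo file

print(f"size={cam['size']}, f={fx:.1f} px, principal point=({cx:.0f}, {cy:.0f})")
print(f"fisheye={cam['fisheye']}, reprojection error={calib['metadata']['error']}")
```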
Sports2D/Demo/Config_demo.toml
CHANGED
@@ -97,13 +97,13 @@ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, har
 keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
 average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
 keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
-
+max_distance = 100 # in px or None # If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one
 
 [px_to_meters_conversion]
 # Pixel to meters conversion
 to_meters = true
 make_c3d = true
-save_calib =
+save_calib = false
 
 # If conversion from first_person_height
 floor_angle = 'auto' # 'auto' or a value in degrees, eg 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)

@@ -131,15 +131,16 @@ correct_segment_angles_with_floor_angle = true # If the camera is tilted, correc
 
 [post-processing]
 interpolate = true
-interp_gap_smaller_than = 10 #
+interp_gap_smaller_than = 10 # Do not interpolate larger gaps
 fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
 sections_to_keep = 'all' # 'all', 'largest', 'first', 'last'
-#
-
+# Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one
+min_chunk_size = 10 # Minimum number of valid frames in a row to keep a chunk of data for a person
+reject_outliers = true # Hampel filter for outlier rejection before other filtering methods. Rejects outliers that are outside of a 95% confidence interval from the median in a sliding window of size 7.
 
 filter = true
 show_graphs = true # Show plots of raw and processed results
-save_graphs =
+save_graphs = true # Save position and angle plots of raw and processed results
 filter_type = 'butterworth' # butterworth, kalman, gcv_spline, gaussian, loess, median, butterworth_on_speed
 
 # Most intuitive and standard filter in biomechanics
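The new `reject_outliers` option is the most substantial of these additions: a Hampel filter compares each sample to the median of a sliding window (size 7 above) and replaces it with that median when it deviates by more than roughly two scaled median absolute deviations, i.e. outside a ≈95% interval. A minimal sketch of the principle (an illustration only, not Sports2D's actual `hampel_filter` implementation, which comes from Pose2Sim):

```python
import numpy as np
import pandas as pd

def hampel(series, window_size=7, n_sigmas=2):
    '''Replace outliers with the rolling median. A point is an outlier when it
    deviates from the window median by more than n_sigmas * 1.4826 * MAD
    (the MAD scaled into a standard-deviation estimate).'''
    s = pd.Series(series, dtype=float)
    med = s.rolling(window_size, center=True, min_periods=1).median()
    mad = (s - med).abs().rolling(window_size, center=True, min_periods=1).median()
    outliers = (s - med).abs() > n_sigmas * 1.4826 * mad
    return s.where(~outliers, med)

noisy = pd.Series([0.0, 0.1, 0.2, 15.0, 0.4, 0.5, 0.6])  # one spike at index 3
print(hampel(noisy).tolist())  # the spike is replaced by its local median
```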
Sports2D/Sports2D.py
CHANGED
@@ -152,6 +152,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 'keypoint_likelihood_threshold': 0.3,
 'average_likelihood_threshold': 0.5,
 'keypoint_number_threshold': 0.3,
+'max_distance': 100,
 'CUSTOM': { 'name': 'Hip',
 'id': 19,
 'children': [{'name': 'RHip',

@@ -196,7 +197,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 'calib_file': '',
 'floor_angle': 'auto',
 'xy_origin': ['auto'],
-'save_calib':
+'save_calib': False
 },
 'angles': {'display_angle_values_on': ['body', 'list'],
 'fontSize': 0.3,

@@ -233,10 +234,11 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 'interp_gap_smaller_than': 10,
 'fill_large_gaps_with': 'last_value',
 'sections_to_keep':'all',
+'min_chunk_size': 10,
 'reject_outliers': True,
 'filter': True,
 'show_graphs': True,
-'save_graphs':
+'save_graphs': True,
 'filter_type': 'butterworth',
 'butterworth': {'order': 4, 'cut_off_frequency': 6.0},
 'kalman': {'trust_ratio': 500.0, 'smooth':True},

@@ -280,7 +282,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
-'save_graphs': ["", "save position and angle plots of raw and processed results.
+'save_graphs': ["", "save position and angle plots of raw and processed results. true if not specified"],
 'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
 'save_vid': ["V", "save processed video. true if not specified"],

@@ -315,6 +317,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
 'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
+'max_distance': ["", "If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one. in px or None, 100 by default."],
 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],

@@ -326,6 +329,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
 'sections_to_keep': ["", "all, largest, first, or last. Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one"],
+'min_chunk_size': ["", "Minimum number of valid frames in a row to keep a chunk of data for a person. 10 if not specified"],
 'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
 'filter': ["", "filter results. true if not specified"],
 'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],

@@ -473,6 +477,14 @@ def set_nested_value(config, flat_key, value):
     d[keys[-1]] = value
 
 
+def merge_dicts(original, overrides):
+    for key, value in overrides.items():
+        if isinstance(value, dict) and isinstance(original.get(key), dict):
+            merge_dicts(original[key], value)
+        else:
+            original[key] = value
+
+
 def str2bool(v):
     '''
     Convert a string to a boolean value.

@@ -500,7 +512,8 @@ def process(config='Config_demo.toml'):
     from Sports2D.process import process_fun
 
     if type(config) == dict:
-        config_dict =
+        config_dict = DEFAULT_CONFIG.copy()
+        merge_dicts(config_dict, config)
     else:
         config_dict = read_config_file(config)
     video_dir, video_files, frame_rates, time_ranges, result_dir = base_params(config_dict)
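The practical effect of `merge_dicts` is that `Sports2D.process()` now accepts a *partial* configuration dict and recursively overlays it on `DEFAULT_CONFIG`, so callers only specify what they change (exactly what the new test in tests.py exercises). A small standalone illustration of the merge semantics, using toy dicts rather than the real `DEFAULT_CONFIG`:

```python
def merge_dicts(original, overrides):
    # Same recursion as in Sports2D.py: descend into sub-dicts that exist
    # on both sides, otherwise overwrite (or add) the leaf value.
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(original.get(key), dict):
            merge_dicts(original[key], value)
        else:
            original[key] = value

defaults = {'pose': {'mode': 'balanced', 'det_frequency': 4},
            'post-processing': {'filter': True}}
overrides = {'pose': {'mode': 'lightweight'}}       # only one nested key

config = {k: dict(v) for k, v in defaults.items()}  # copy nested dicts too
merge_dicts(config, overrides)
print(config['pose'])             # {'mode': 'lightweight', 'det_frequency': 4}
print(config['post-processing'])  # untouched: {'filter': True}
```

One caveat, hedged since only the diff is visible here: `DEFAULT_CONFIG.copy()` is a shallow copy, so the recursion appears to write into the nested dicts of `DEFAULT_CONFIG` itself; a deep copy (e.g. `copy.deepcopy`) would keep overrides from leaking between successive `process()` calls.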
Sports2D/Utilities/tests.py
CHANGED
@@ -45,7 +45,7 @@ def test_workflow():
     ## From Python ##
     #############################
 
-    # Default
+    # Default from the demo config file
     config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
     config_dict = toml.load(config_path)
     video_dir = Path(__file__).resolve().parent.parent / 'Demo'

@@ -53,6 +53,28 @@ def test_workflow():
     config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
     config_dict.get("base").update({"show_realtime_results":False})
     config_dict.get("post-processing").update({"show_graphs":False})
+    config_dict.get("post-processing").update({"save_graphs":False})
+
+    Sports2D.process(config_dict)
+
+
+    # Only passing the updated values
+    video_dir = Path(__file__).resolve().parent.parent / 'Demo'
+    config_dict = {
+        'base': {
+            'nb_persons_to_detect': 1,
+            'person_ordering_method': 'greatest_displacement',
+            "show_realtime_results":False
+        },
+        'pose': {
+            'mode': 'lightweight',
+            'det_frequency': 50
+        },
+        'post-processing': {
+            'show_graphs':False,
+            'save_graphs':False
+        }
+    }
 
     Sports2D.process(config_dict)

@@ -62,28 +84,28 @@ def test_workflow():
     #############################
 
     # Default
-    demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False"]
+    demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False"]
     subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # With loading a trc file, visible_side 'front', first_person_height 1.76, floor_angle 0, xy_origin [0, 928]
-    demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
+    demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
                  "--load_trc_px", os.path.join(root_dir, "demo_Sports2D", "demo_Sports2D_px_person01.trc"),
                  "--visible_side", "front", "--first_person_height", "1.76", "--time_range", "1.2", "2.7",
                  "--floor_angle", "0", "--xy_origin", "0", "928"]
     subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # With no pixels to meters conversion, one person to select, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
-    demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
-
+    demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
+                 # "--calib_file", "calib_demo.toml",
                  "--nb_persons_to_detect", "1", "--person_ordering_method", "greatest_displacement",
                  "--mode", "lightweight", "--det_frequency", "50",
                  "--slowmo_factor", "4",
-                 "--filter_type", "gaussian",
+                 "--filter_type", "gaussian", "--use_augmentation", "False",
                  "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
     subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # With a time range, inverse kinematics, marker augmentation
-    demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False",
+    demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
                  "--time_range", "1.2", "2.7",
                  "--do_ik", "True", "--use_augmentation", "True",
                  "--nb_persons_to_detect", "all", "--first_person_height", "1.65",

@@ -97,7 +119,7 @@ def test_workflow():
     config_dict.get("base").update({"video_dir": str(video_dir)})
     config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
     with open(config_path, 'w') as f: toml.dump(config_dict, f)
-    demo_cmd5 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
+    demo_cmd5 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False"]
     subprocess.run(demo_cmd5, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
Sports2D/process.py
CHANGED
@@ -87,12 +87,15 @@ from Pose2Sim.triangulation import indices_of_first_last_non_nan_chunks
 from Pose2Sim.personAssociation import *
 from Pose2Sim.filtering import *
 
+# Silence numpy "RuntimeWarning: Mean of empty slice"
+import warnings
+warnings.filterwarnings("ignore", category=RuntimeWarning, message="Mean of empty slice")
+
 # Not safe, but to be used until OpenMMLab/RTMlib's SSL certificates are updated
 import ssl
 ssl._create_default_https_context = ssl._create_unverified_context
 
 
-
 DEFAULT_MASS = 70
 DEFAULT_HEIGHT = 1.7

@@ -798,6 +801,8 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
 INPUTS:
 - trc_data_unfiltered: pd.DataFrame. The unfiltered trc data
 - trc_data: pd.DataFrame. The filtered trc data
+- person_id: int. The ID of the person
+- show: bool. Whether to show the plots
 
 OUTPUT:
 - matplotlib window with tabbed figures for each keypoint

@@ -806,7 +811,6 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
 os_name = platform.system()
 if os_name == 'Windows':
     mpl.use('qt5agg') # windows
-
 mpl.rc('figure', max_open_warning=0)
 
 keypoints_names = trc_data.columns[1::3]

@@ -1142,6 +1146,8 @@ def select_persons_on_vid(video_file_path, frame_range, all_pose_coords):
 
 # Change color on hover
 for person_idx, bbox in enumerate(all_bboxes[frame_idx]):
+    if person_idx >= len(rects): # Skip if rect doesn't exist
+        continue
     if ~np.isnan(bbox).any():
         x_min, y_min, x_max, y_max = bbox.astype(int)
         if x_min <= x <= x_max and y_min <= y <= y_max:

@@ -1269,7 +1275,7 @@ def select_persons_on_vid(video_file_path, frame_range, all_pose_coords):
 return selected_persons
 
 
-def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_speed_below = 7,
+def compute_floor_line(trc_data, score_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_speed_below = 7, score_threshold=0.5):
 '''
 Compute the floor line equation, angle, and direction
 from the feet keypoints when they have zero speed.

@@ -1287,20 +1293,25 @@
 - gait_direction: float. Left if < 0, 'right' otherwise
 '''
 
-# Remove frames where the person is mostly not moving (outlier)
-speeds_kpts = np.array([np.insert(np.linalg.norm(trc_data[kpt].diff(), axis=1)[1:],0,0)
-                        for kpt in trc_data.columns.unique()[1:]]).T
-av_speeds = np.array([np.nanmean(speed_kpt) if not np.isnan(speed_kpt).all() else 0 for speed_kpt in speeds_kpts])
-trc_data = trc_data[av_speeds>tot_speed_above]
-
 # Retrieve zero-speed coordinates for the foot
 low_speeds_X, low_speeds_Y = [], []
 gait_direction_val = []
 for kpt in keypoint_names:
-
+    # Remove frames without data
+    trc_data_kpt = trc_data[kpt].iloc[:,:2]
+    score_data_kpt = score_data[kpt]
+    start, end = indices_of_first_last_non_nan_chunks(score_data_kpt, chunk_choice_method='all')
+    trc_data_kpt_trim = trc_data_kpt.iloc[start:end].reset_index(drop=True)
+    score_data_kpt_trim = score_data_kpt.iloc[start:end].reset_index(drop=True)
+
+    # Compute speeds
+    speeds = np.linalg.norm(trc_data_kpt_trim.diff(), axis=1)
+
+    # Remove speeds with low confidence
+    speeds = np.where(score_data_kpt_trim>score_threshold, speeds, np.nan)
 
-
-    low_speeds_coords =
+    # Get coordinates with low speeds, high confidence
+    low_speeds_coords = trc_data_kpt_trim[speeds<toe_speed_below]
     low_speeds_coords = low_speeds_coords[low_speeds_coords!=0]
 
     low_speeds_X_kpt = low_speeds_coords.iloc[:,0].tolist()
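In the rewritten `compute_floor_line`, each foot keypoint's trajectory is trimmed to its valid chunks, frames below `score_threshold` are masked out, and only near-stationary positions (`speeds < toe_speed_below`) are kept as stance points; the floor line is then fitted through those points. A simplified sketch of that final fitting step, with hypothetical stance coordinates (the real function additionally derives a gait direction):

```python
import numpy as np

# Hypothetical stance-phase toe positions in px, already filtered for
# low speed and sufficient confidence as compute_floor_line does
xs = np.array([210.0, 340.0, 470.0, 605.0, 730.0])
ys = np.array([900.0, 903.0, 905.0, 908.0, 911.0])

slope, intercept = np.polyfit(xs, ys, 1)  # least-squares line y = slope*x + intercept
floor_angle = np.arctan(slope)            # tilt of the floor in the image, in radians
xy_origin = (xs.mean(), slope * xs.mean() + intercept)  # a point on the line

print(f'floor angle: {np.degrees(floor_angle):.2f} deg, origin: {xy_origin}')
```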
@@ -1445,6 +1456,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 mode = config_dict.get('pose').get('mode')
 det_frequency = config_dict.get('pose').get('det_frequency')
 tracking_mode = config_dict.get('pose').get('tracking_mode')
+max_distance = config_dict.get('pose').get('max_distance', None)
 if tracking_mode == 'deepsort':
     deepsort_params = config_dict.get('pose').get('deepsort_params')
     try:
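`max_distance` is later forwarded to `sort_people_sports2d` as `max_dist`: during frame-to-frame association, a detection whose nearest previous position lies farther than `max_distance` pixels is treated as a new person instead of being re-identified. A toy sketch of that gating idea (a hypothetical helper, not the actual Pose2Sim implementation, which matches whole skeletons rather than single centers):

```python
import numpy as np

def gate_matches(prev_centers, new_centers, max_dist=100):
    '''Greedy nearest-neighbour association with a distance gate. For each new
    detection, return the index of the matched previous person, or None when
    even the closest candidate is farther than max_dist (i.e. a new person).'''
    matches = []
    for c in new_centers:
        dists = np.linalg.norm(prev_centers - c, axis=1)
        best = int(np.argmin(dists))
        matches.append(best if dists[best] <= max_dist else None)
    return matches

prev_centers = np.array([[100.0, 200.0], [400.0, 220.0]])
new_centers = np.array([[108.0, 203.0], [800.0, 210.0]])
print(gate_matches(prev_centers, new_centers))  # [0, None]
```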
@@ -1492,7 +1504,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 interp_gap_smaller_than = config_dict.get('post-processing').get('interp_gap_smaller_than')
 fill_large_gaps_with = config_dict.get('post-processing').get('fill_large_gaps_with')
 sections_to_keep = config_dict.get('post-processing').get('sections_to_keep')
-
+min_chunk_size = config_dict.get('post-processing').get('min_chunk_size')
 do_filter = config_dict.get('post-processing').get('filter')
 handle_LR_swap = config_dict.get('post-processing').get('handle_LR_swap', False)
 reject_outliers = config_dict.get('post-processing').get('reject_outliers', False)

@@ -1514,7 +1526,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 # Create output directories
 if video_file == "webcam":
     current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
-
+    video_file_stem = f'webcam_{current_date}'
+    output_dir_name = f'{video_file_stem}_Sports2D'
     video_file_path = result_dir / output_dir_name / f'webcam_{current_date}_raw.mp4'
 else:
     video_file_stem = video_file.stem

@@ -1623,6 +1636,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 # Load pose file in px
 Q_coords, _, time_col, keypoints_names, _ = read_trc(load_trc_px)
 t0 = time_col[0]
+tf = time_col.iloc[-1]
 keypoints_ids = [i for i in range(len(keypoints_names))]
 keypoints_all, scores_all = load_pose_file(Q_coords)

@@ -1640,6 +1654,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 keypoints_ids = [node.id for _, _, node in RenderTree(pose_model) if node.id!=None]
 keypoints_names = [node.name for _, _, node in RenderTree(pose_model) if node.id!=None]
 t0 = 0
+tf = (cap.get(cv2.CAP_PROP_FRAME_COUNT)-1) / fps if cap.get(cv2.CAP_PROP_FRAME_COUNT)>0 else float('inf')
 
 # Set up pose tracker
 try:

@@ -1652,14 +1667,12 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 except:
     logging.error('Error: Pose estimation failed. Check in Config.toml that pose_model and mode are valid.')
     raise ValueError('Error: Pose estimation failed. Check in Config.toml that pose_model and mode are valid.')
-
-# if tracking_mode not in ['deepsort', 'sports2d']:
-#     logging.warning(f"Tracking mode {tracking_mode} not recognized. Using sports2d method.")
-#     tracking_mode = 'sports2d'
-# logging.info(f'Pose tracking set up for "{pose_model_name}" model.')
-# logging.info(f'Mode: {mode}.\n')
 logging.info(f'Persons are detected every {det_frequency} frames and tracked in between. Tracking is done with {tracking_mode}.')
-
+
+if tracking_mode == 'deepsort':
+    logging.info(f'Deepsort parameters: {deepsort_params}.')
+if tracking_mode not in ['deepsort', 'sports2d']:
+    logging.warning(f"Tracking mode {tracking_mode} is not implemented. 'sports2d' is recommended.")
 logging.info(f'{"All persons are" if nb_persons_to_detect=="all" else f"{nb_persons_to_detect} persons are" if nb_persons_to_detect>1 else "1 person is"} analyzed. Person ordering method is {person_ordering_method}.')
 logging.info(f"{keypoint_likelihood_threshold=}, {average_likelihood_threshold=}, {keypoint_number_threshold=}")

@@ -1691,7 +1704,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 frame_processing_times = []
 frame_count = 0
 first_frame = max(int(t0 * fps), frame_range[0])
-
+last_frame = min(int(tf * fps), frame_range[1]-1)
+if first_frame >= last_frame:
+    logging.error('Error: No frames to process. Check that your time_range is coherent with the video duration.')
+    raise ValueError('Error: No frames to process. Check that your time_range is coherent with the video duration.')
+
 while cap.isOpened():
     # Skip to the starting frame
     if frame_count < first_frame:

@@ -1714,9 +1731,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 if save_angles:
     all_frames_angles.append([])
 continue
-# else: # does not store all frames in memory if they are not saved or used for ordering
-#     if save_img or save_vid or person_ordering_method == 'on_click':
-#         frames.append(frame.copy())
 
 # Retrieve pose or Estimate pose and track people
 if load_trc_px:

@@ -1732,22 +1746,57 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 # Detect poses
 keypoints, scores = pose_tracker(frame)
 
-# Non maximum suppression (at pose level, not detection)
+# Non maximum suppression (at pose level, not detection, and only using likely keypoints)
 frame_shape = frame.shape
-[… 5 removed lines not rendered in the diff source …]
+mask_scores = np.mean(scores, axis=1) > 0.2
+
+likely_keypoints = np.where(mask_scores[:, np.newaxis, np.newaxis], keypoints, np.nan)
+likely_scores = np.where(mask_scores[:, np.newaxis], scores, np.nan)
+likely_bboxes = bbox_xyxy_compute(frame_shape, likely_keypoints, padding=0)
+score_likely_bboxes = np.nanmean(likely_scores, axis=1)
+
+valid_indices = np.where(~np.isnan(score_likely_bboxes))[0]
+if len(valid_indices) > 0:
+    valid_bboxes = likely_bboxes[valid_indices]
+    valid_scores = score_likely_bboxes[valid_indices]
+    keep_valid = nms(valid_bboxes, valid_scores, nms_thr=0.45)
+    keep = valid_indices[keep_valid]
+else:
+    keep = []
+keypoints, scores = likely_keypoints[keep], likely_scores[keep]
+
+# # Debugging: display detected keypoints on the frame
+# colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0), (0,0,128), (128,128,0), (128,0,128), (0,128,128)]
+# bboxes = likely_bboxes[keep]
+# for person_idx in range(len(keypoints)):
+#     for kpt_idx, kpt in enumerate(keypoints[person_idx]):
+#         if not np.isnan(kpt).any():
+#             cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[person_idx%len(colors)], -1)
+#     if not np.isnan(bboxes[person_idx]).any():
+#         cv2.rectangle(frame, (int(bboxes[person_idx][0]), int(bboxes[person_idx][1])), (int(bboxes[person_idx][2]), int(bboxes[person_idx][3])), colors[person_idx%len(colors)], 1)
+# cv2.imshow(f'{video_file} Sports2D', frame)
 
 # Track poses across frames
 if tracking_mode == 'deepsort':
     keypoints, scores = sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count)
 if tracking_mode == 'sports2d':
     if 'prev_keypoints' not in locals(): prev_keypoints = keypoints
-    prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores=scores)
+    prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores=scores, max_dist=max_distance)
 else:
     pass
-
+
+# # Debugging: display detected keypoints on the frame
+# colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0), (0,0,128), (128,128,0), (128,0,128), (0,128,128)]
+# for person_idx in range(len(keypoints)):
+#     for kpt_idx, kpt in enumerate(keypoints[person_idx]):
+#         if not np.isnan(kpt).any():
+#             cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[person_idx%len(colors)], -1)
+#     # if not np.isnan(bboxes[person_idx]).any():
+#     #     cv2.rectangle(frame, (int(bboxes[person_idx][0]), int(bboxes[person_idx][1])), (int(bboxes[person_idx][2]), int(bboxes[person_idx][3])), colors[person_idx%len(colors)], 1)
+# cv2.imshow(f'{video_file} Sports2D', frame)
+# # if (cv2.waitKey(1) & 0xFF) == ord('q') or (cv2.waitKey(1) & 0xFF) == 27:
+# #     break
+# # input()
 
 # Process coordinates and compute angles
 valid_X, valid_Y, valid_scores = [], [], []
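The rewritten suppression step builds one bounding box per skeleton from its *likely* keypoints only (detections whose mean keypoint score exceeds 0.2), scores each box with the mean of its valid keypoint scores, and removes overlapping duplicates with `nms` at an IoU threshold of 0.45. The `nms` and `bbox_xyxy_compute` helpers come from the surrounding codebase; below is a self-contained sketch of the standard IoU-based NMS the call presumably performs:

```python
import numpy as np

def nms(boxes, scores, nms_thr=0.45):
    '''Keep the highest-scoring boxes, dropping any box whose IoU with an
    already-kept box exceeds nms_thr. boxes: (N, 4) as x1, y1, x2, y2.'''
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Intersection of the current box with the remaining candidates
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_o = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
        iou = inter / (area_i + area_o - inter)
        order = order[1:][iou <= nms_thr]   # drop overlapping candidates
    return keep

boxes = np.array([[0, 0, 100, 200], [10, 5, 105, 210], [300, 0, 380, 180]], float)
scores = np.array([0.9, 0.8, 0.7])
print(nms(boxes, scores))  # [0, 2] – the near-duplicate box 1 is suppressed
```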
@@ -1771,6 +1820,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 person_Y = np.full_like(person_Y, np.nan)
 person_scores = np.full_like(person_scores, np.nan)
 
+
+
+## RECREATE KEYPOINTS, SCORES
+[… 9 added lines not rendered in the diff source …]
 
 # Check whether the person is looking to the left or right
 if flip_left_right:
     person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)

@@ -1915,16 +1976,38 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 if save_pose:
     logging.info('\nPost-processing pose:')
     # Process pose for each person
-    trc_data, trc_data_unfiltered = [], []
+    trc_data, trc_data_unfiltered, score_data = [], [], []
+    first_run_starts_everyone, last_run_ends_everyone = [], []
     for i, idx_person in enumerate(selected_persons):
         pose_path_person = pose_output_path.parent / (pose_output_path.stem + f'_person{i:02d}.trc')
         all_frames_X_person = pd.DataFrame(all_frames_X_processed[:,idx_person,:], columns=new_keypoints_names)
         all_frames_Y_person = pd.DataFrame(all_frames_Y_processed[:,idx_person,:], columns=new_keypoints_names)
+        score_data.append(pd.DataFrame(all_frames_scores_processed[:,idx_person,:], columns=new_keypoints_names))
         if calculate_angles or save_angles:
             all_frames_X_flipped_person = pd.DataFrame(all_frames_X_flipped_processed[:,idx_person,:], columns=new_keypoints_names)
-
-
-        if
+
+        # Interpolate
+        if not interpolate:
+            logging.info(f'- Person {i}: No interpolation.')
+            all_frames_X_person_interp = all_frames_X_person
+            all_frames_Y_person_interp = all_frames_Y_person
+        else:
+            logging.info(f'- Person {i}: Interpolating missing sequences if they are smaller than {interp_gap_smaller_than} frames. Large gaps filled with {fill_large_gaps_with}.')
+            all_frames_X_person_interp = all_frames_X_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
+            all_frames_Y_person_interp = all_frames_Y_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
+
+        # Find the first and last valid chunks of data
+        first_run_starts, last_run_ends = [], []
+        for col in all_frames_X_person.columns:
+            first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(all_frames_X_person_interp[col], min_chunk_size=min_chunk_size, chunk_choice_method=sections_to_keep)
+            first_run_starts += [first_run_start]
+            last_run_ends += [last_run_end]
+        first_run_start_min, last_run_end_max = min(first_run_starts), max(last_run_ends)
+        first_run_starts_everyone += [first_run_starts]
+        last_run_ends_everyone += [last_run_ends]
+
+        # Do not process person if no section of min_chunk_size valid frames in a row
+        if (first_run_start_min, last_run_end_max) == (0,0):
             all_frames_X_processed[:,idx_person,:], all_frames_X_flipped_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = np.nan, np.nan, np.nan
             columns=np.array([[c]*3 for c in all_frames_X_person.columns]).flatten()
             trc_data_i = pd.DataFrame(0, index=all_frames_X_person.index, columns=['time']+list(columns))

@@ -1932,105 +2015,92 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             trc_data.append(trc_data_i)
             trc_data_unfiltered_i = trc_data_i.copy()
             trc_data_unfiltered.append(trc_data_unfiltered_i)
-
-
+            logging.info(f'  Person {i}: Less than {min_chunk_size} valid frames in a row. Deleting person.')
+            continue
 
+        # Fill remaining gaps
+        if fill_large_gaps_with.lower() == 'last_value':
+            for col_id, col in enumerate(all_frames_X_person_interp.columns):
+                first_run_start, last_run_end = first_run_starts[col_id], last_run_ends[col_id]
+                for coord_df in [all_frames_X_person_interp, all_frames_Y_person_interp, all_frames_Z_homog]:
+                    coord_df.loc[:first_run_start, col] = np.nan
+                    coord_df.loc[last_run_end:, col] = np.nan
+                    coord_df.loc[first_run_start:last_run_end, col] = coord_df.loc[first_run_start:last_run_end, col].ffill().bfill()
+        elif fill_large_gaps_with.lower() == 'zeros':
+            all_frames_X_person_interp.replace(np.nan, 0, inplace=True)
+            all_frames_Y_person_interp.replace(np.nan, 0, inplace=True)
+
+        # if handle_LR_swap:
+        #     logging.info(f'Handling left-right swaps.')
+        #     all_frames_X_person_interp = all_frames_X_person_interp.apply(LR_unswap, axis=0)
+        #     all_frames_Y_person_interp = all_frames_Y_person_interp.apply(LR_unswap, axis=0)
+
+        if reject_outliers:
+            logging.info('Rejecting outliers with a Hampel filter.')
+            all_frames_X_person_interp = all_frames_X_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
+            all_frames_Y_person_interp = all_frames_Y_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
+
+        if not do_filter:
+            logging.info(f'No filtering.')
+            all_frames_X_person_filt = all_frames_X_person_interp
+            all_frames_Y_person_filt = all_frames_Y_person_interp
         else:
-[… 5 removed lines not rendered in the diff source …]
+            if filter_type == ('butterworth' or 'butterworth_on_speed'):
+                cutoff = butterworth_filter_cutoff
+                if video_file == 'webcam':
+                    if cutoff / (fps / 2) >= 1:
+                        cutoff_old = cutoff
+                        cutoff = fps/(2+0.001)
+                        args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
+                        butterworth_filter_cutoff = cutoff
+                filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
+                args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
+                frame_rate = fps
+            elif filter_type == 'gcv_spline':
+                args = f'GCV Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
+            elif filter_type == 'kalman':
+                args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
+            elif filter_type == 'gaussian':
+                args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
+            elif filter_type == 'loess':
+                args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
+            elif filter_type == 'median':
+                args = f'Median filter, kernel of {median_filter_kernel}.'
             else:
-logging.
-[… 21 removed lines not rendered in the diff source …]
-if
-[… 11 removed lines not rendered in the diff source …]
-if video_file == 'webcam':
-    if cutoff / (fps / 2) >= 1:
-        cutoff_old = cutoff
-        cutoff = fps/(2+0.001)
-        args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
-        butterworth_filter_cutoff = cutoff
-filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
-args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
-frame_rate = fps
-elif filter_type == 'gcv_spline':
-    args = f'GCV Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
-elif filter_type == 'kalman':
-    args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
-elif filter_type == 'gaussian':
-    args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
-elif filter_type == 'loess':
-    args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
-elif filter_type == 'median':
-    args = f'Median filter, kernel of {median_filter_kernel}.'
-else:
-    logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
-    raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
-
-logging.info(f'Filtering with {args}')
-all_frames_X_person_filt = all_frames_X_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
-all_frames_Y_person_filt = all_frames_Y_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
-
-
-# Build TRC file
-trc_data_i = trc_data_from_XYZtime(all_frames_X_person_filt, all_frames_Y_person_filt, all_frames_Z_homog, all_frames_time)
-trc_data.append(trc_data_i)
-if not load_trc_px:
-    make_trc_with_trc_data(trc_data_i, str(pose_path_person), fps=fps)
-    logging.info(f'Pose in pixels saved to {pose_path_person.resolve()}.')
-
-# Plotting coordinates before and after interpolation and filtering
-columns_to_concat = []
-for kpt in range(len(all_frames_X_person.columns)):
-    columns_to_concat.extend([all_frames_X_person.iloc[:,kpt], all_frames_Y_person.iloc[:,kpt], all_frames_Z_homog.iloc[:,kpt]])
-trc_data_unfiltered_i = pd.concat([all_frames_time] + columns_to_concat, axis=1)
-trc_data_unfiltered.append(trc_data_unfiltered_i)
-if not to_meters and (show_plots or save_plots):
-    pw = pose_plots(trc_data_unfiltered_i, trc_data_i, i, show=show_plots)
-    if save_plots:
-        for n, f in enumerate(pw.figure_handles):
-            dpi = pw.canvases[i].figure.dpi
-            f.set_size_inches(1280/dpi, 720/dpi)
-            title = pw.tabs.tabText(n)
-            plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
-            f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
-        logging.info(f'Pose plots (px) saved in {plots_output_dir}.')
-
-all_frames_X_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_Y_person_filt
-if calculate_angles or save_angles:
-    all_frames_X_flipped_processed[:,idx_person,:] = all_frames_X_flipped_person
+                logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+                raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+
+            logging.info(f'Filtering with {args}')
+            all_frames_X_person_filt = all_frames_X_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
+            all_frames_Y_person_filt = all_frames_Y_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
+
+        # Build TRC file
+        trc_data_i = trc_data_from_XYZtime(all_frames_X_person_filt, all_frames_Y_person_filt, all_frames_Z_homog, all_frames_time)
+        trc_data.append(trc_data_i)
+        if not load_trc_px:
+            make_trc_with_trc_data(trc_data_i, str(pose_path_person), fps=fps)
+            logging.info(f'Pose in pixels saved to {pose_path_person.resolve()}.')
+
+        # Plotting coordinates before and after interpolation and filtering
+        columns_to_concat = []
+        for kpt in range(len(all_frames_X_person.columns)):
+            columns_to_concat.extend([all_frames_X_person.iloc[:,kpt], all_frames_Y_person.iloc[:,kpt], all_frames_Z_homog.iloc[:,kpt]])
+        trc_data_unfiltered_i = pd.concat([all_frames_time] + columns_to_concat, axis=1)
+        trc_data_unfiltered.append(trc_data_unfiltered_i)
+        if not to_meters and (show_plots or save_plots):
+            pw = pose_plots(trc_data_unfiltered_i, trc_data_i, i, show=show_plots)
+            if save_plots:
+                for n, f in enumerate(pw.figure_handles):
+                    dpi = pw.canvases[i].figure.dpi
+                    f.set_size_inches(1280/dpi, 720/dpi)
+                    title = pw.tabs.tabText(n)
+                    plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
+                    f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                logging.info(f'Pose plots (px) saved in {plots_output_dir}.')
+
+        all_frames_X_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_Y_person_filt
+        if calculate_angles or save_angles:
+            all_frames_X_flipped_processed[:,idx_person,:] = all_frames_X_flipped_person
 
 
 #%% Convert px to meters
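Gap handling now hinges on `indices_of_first_last_non_nan_chunks` (imported from Pose2Sim.triangulation at the top of the file): per column it returns the boundaries of the valid data sections, honouring `min_chunk_size` and the `sections_to_keep` policy, and a person is dropped when the returned span is (0, 0). A simplified sketch of that kind of run detection, assuming the 'all' policy (keep everything from the first to the last sufficiently long run); this is an illustration, not Pose2Sim's exact implementation:

```python
import numpy as np
import pandas as pd

def first_last_valid_chunk(series, min_chunk_size=10):
    '''Return (start, end) spanning all non-NaN runs of at least
    min_chunk_size frames; (0, 0) if there is no such run.'''
    valid = series.notna().to_numpy()
    # Pad with zeros so every run produces a rising and a falling edge
    v = np.concatenate(([0], valid.astype(int), [0]))
    edges = np.flatnonzero(np.diff(v))      # alternating run starts/ends
    starts, ends = edges[::2], edges[1::2]  # ends are exclusive
    long_enough = (ends - starts) >= min_chunk_size
    if not long_enough.any():
        return 0, 0
    return int(starts[long_enough][0]), int(ends[long_enough][-1])

s = pd.Series([np.nan]*5 + [1.0]*12 + [np.nan]*8 + [2.0]*3)
print(first_last_valid_chunk(s))  # (5, 17): the 3-frame tail run is too short
```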
@@ -2054,11 +2124,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2054
2124
|
# estimated from the line formed by the toes when they are on the ground (where speed = 0)
|
|
2055
2125
|
try:
|
|
2056
2126
|
if all(key in trc_data[0] for key in ['LBigToe', 'RBigToe']):
|
|
2057
|
-
floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
|
|
2127
|
+
floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], score_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
|
|
2058
2128
|
else:
|
|
2059
|
-
floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
|
|
2060
|
-
xy_origin_estim[
|
|
2061
|
-
logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
|
|
2129
|
+
floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], score_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
|
|
2130
|
+
xy_origin_estim[1] = xy_origin_estim[1] + 0.13*px_per_m # approx. height of the ankle above the floor
|
|
2131
|
+
logging.warning(f'The RBigToe and LBigToe are missing from your pose estimation model. Using ankles - 13 cm to compute the floor line.')
|
|
2062
2132
|
except:
|
|
2063
2133
|
floor_angle_estim = 0
|
|
2064
2134
|
xy_origin_estim = cam_width/2, cam_height/2
|
|
@@ -2069,7 +2139,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2069
2139
|
cx, cy = xy_origin_estim
|
|
2070
2140
|
else:
|
|
2071
2141
|
cx, cy = xy_origin
|
|
2072
|
-
logging.info(f'Using height of person #0 ({first_person_height}m) to convert coordinates in meters
|
|
2142
|
+
logging.info(f'Using height of person #0 ({first_person_height}m) to convert coordinates in meters.\n'
|
|
2073
2143
|
f'Floor angle: {np.degrees(floor_angle_estim) if not floor_angle=="auto" else f"auto (estimation: {round(np.degrees(floor_angle_estim),2)}°)"}, '
|
|
2074
2144
|
f'xy_origin: {xy_origin if not xy_origin=="auto" else f"auto (estimation: {[round(c) for c in xy_origin_estim]})"} px.')
|
|
2075
2145
|
|
|
@@ -2083,9 +2153,9 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2083
2153
|
if visible_side_i == 'auto':
|
|
2084
2154
|
try:
|
|
2085
2155
|
if all(key in trc_data[i] for key in ['LBigToe', 'RBigToe']):
|
|
2086
|
-
_, _, gait_direction = compute_floor_line(trc_data[i], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
|
|
2156
|
+
_, _, gait_direction = compute_floor_line(trc_data[i], score_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
|
|
2087
2157
|
else:
|
|
2088
|
-
_, _, gait_direction = compute_floor_line(trc_data[i], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
|
|
2158
|
+
_, _, gait_direction = compute_floor_line(trc_data[i], score_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
|
|
2089
2159
|
logging.warning(f'The RBigToe and LBigToe are missing from your model. Gait direction will be determined from the ankle points.')
|
|
2090
2160
|
visible_side_i = 'right' if gait_direction > 0.3 \
|
|
2091
2161
|
else 'left' if gait_direction < -0.3 \
|
|
@@ -2103,8 +2173,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2103
2173
|
# Convert to meters
|
|
2104
2174
|
px_to_m_i = [convert_px_to_meters(trc_data[i][kpt_name], first_person_height, height_px, cx, cy, -floor_angle_estim, visible_side=visible_side_i) for kpt_name in new_keypoints_names]
|
|
2105
2175
|
trc_data_m_i = pd.concat([all_frames_time.rename('time')]+px_to_m_i, axis=1)
|
|
2106
|
-
for c in 3*np.arange(len(trc_data_m_i.columns[3::3]))+1: # only X coordinates
|
|
2107
|
-
first_run_start, last_run_end =
|
|
2176
|
+
for c_id, c in enumerate(3*np.arange(len(trc_data_m_i.columns[3::3]))+1): # only X coordinates
|
|
2177
|
+
first_run_start, last_run_end = first_run_starts_everyone[i][c_id], last_run_ends_everyone[i][c_id]
|
|
2108
2178
|
trc_data_m_i.iloc[:first_run_start,c+2] = np.nan
|
|
2109
2179
|
trc_data_m_i.iloc[last_run_end:,c+2] = np.nan
|
|
2110
2180
|
trc_data_m_i.iloc[first_run_start:last_run_end,c+2] = trc_data_m_i.iloc[first_run_start:last_run_end,c+2].ffill().bfill()
|
|
@@ -2205,86 +2275,95 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2205
2275
|
if new_visible_side[i] == 'left' and not flip_left_right:
|
|
2206
2276
|
all_frames_angles_homog[:, idx_person, :] = -all_frames_angles_homog[:, idx_person, :]
|
|
2207
2277
|
|
|
2208
|
-
|
|
2209
|
-
|
|
2210
|
-
|
|
2211
|
-
all_frames_angles_processed[:,idx_person,:] = np.nan
|
|
2212
|
-
logging.info(f'- Person {i}: Less than 4 valid frames. Deleting person.')
|
|
2213
|
-
|
|
2278
|
+
if not interpolate:
|
|
2279
|
+
logging.info(f'- Person {i}: No interpolation.')
|
|
2280
|
+
all_frames_angles_person_interp = all_frames_angles_person
|
|
2214
2281
|
else:
|
|
2215
|
-
|
|
2216
|
-
|
|
2217
|
-
|
|
2218
|
-
|
|
2219
|
-
|
|
2220
|
-
|
|
2221
|
-
|
|
2222
|
-
|
|
2223
|
-
|
|
2224
|
-
|
|
2225
|
-
|
|
2226
|
-
|
|
2227
|
-
|
|
2228
|
-
|
|
2229
|
-
|
|
2282
|
+
logging.info(f'- Person {i}: Interpolating missing sequences if they are smaller than {interp_gap_smaller_than} frames. Large gaps filled with {fill_large_gaps_with}.')
|
|
2283
|
+
all_frames_angles_person_interp = all_frames_angles_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
|
|
2284
|
+
|
|
2285
|
+
# Find the first and last valid chunks of data
|
|
2286
|
+
first_run_starts, last_run_ends = [], []
|
|
2287
|
+
for col in all_frames_angles_person.columns:
|
|
2288
|
+
first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(all_frames_angles_person_interp[col], min_chunk_size=min_chunk_size, chunk_choice_method=sections_to_keep)
|
|
2289
|
+
first_run_starts += [first_run_start]
|
|
2290
|
+
last_run_ends += [last_run_end]
|
|
2291
|
+
first_run_start_min, last_run_end_max = min(first_run_starts), max(last_run_ends)
|
|
2292
|
+
|
|
2293
|
+
# Do not process person if no section of min_chunk_size valid frames in a row
|
|
2294
|
+
if (first_run_start_min, last_run_end_max) == (0,0):
|
|
2295
|
+
all_frames_angles_processed[:,idx_person,:]= np.nan
|
|
2296
|
+
logging.info(f' Person {i}: Less than {min_chunk_size} valid frames in a row. Deleting person.')
|
|
2297
|
+
continue
|
|
2298
|
+
|
|
2299
|
+
# Fill remaining gaps
|
|
2300
|
+
if fill_large_gaps_with == 'last_value':
|
|
2301
|
+
for col_id, col in enumerate(all_frames_angles_person_interp.columns):
|
|
2302
|
+
first_run_start, last_run_end = first_run_starts[col_id], last_run_ends[col_id]
|
|
2303
|
+
all_frames_angles_person_interp.loc[:first_run_start, col] = np.nan
|
|
2304
|
+
all_frames_angles_person_interp.loc[last_run_end:, col] = np.nan
|
|
2305
|
+
all_frames_angles_person_interp.loc[first_run_start:last_run_end, col] = all_frames_angles_person_interp.loc[first_run_start:last_run_end, col].ffill().bfill()
|
|
2306
|
+
elif fill_large_gaps_with == 'zeros':
|
|
2307
|
+
all_frames_angles_person_interp.replace(np.nan, 0, inplace=True)
|
|
2230
2308
|
|
|
2231
|
-
|
|
2232
|
-
|
|
2233
|
-
|
|
2234
|
-
|
|
2235
|
-
|
|
2236
|
-
|
|
2237
|
-
|
|
2238
|
-
|
|
2309
|
+
# Filter
|
|
2310
|
+
if reject_outliers:
|
|
2311
|
+
logging.info(f'Rejecting outliers with a Hampel filter.')
|
|
2312
|
+
all_frames_angles_person_interp = all_frames_angles_person_interp.apply(hampel_filter, axis=0)
|
|
2313
|
+
|
|
2314
|
+
if not do_filter:
|
|
2315
|
+
logging.info(f'No filtering.')
|
|
2316
|
+
all_frames_angles_person_filt = all_frames_angles_person_interp
|
|
2317
|
+
else:
|
|
2318
|
+
if filter_type == ('butterworth' or 'butterworth_on_speed'):
|
|
2319
|
+
cutoff = butterworth_filter_cutoff
|
|
2320
|
+
if video_file == 'webcam':
|
|
2321
|
+
if cutoff / (fps / 2) >= 1:
|
|
2322
|
+
cutoff_old = cutoff
|
|
2323
|
+
cutoff = fps/(2+0.001)
|
|
2324
|
+
args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
|
|
2325
|
+
butterworth_filter_cutoff = cutoff
|
|
2326
|
+
filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
|
|
2327
|
+
args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
|
|
2328
|
+
frame_rate = fps
|
|
2329
|
+
elif filter_type == 'gcv_spline':
|
|
2330
|
+
args = f'GVC Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
|
|
2331
|
+
elif filter_type == 'kalman':
|
|
2332
|
+
args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
|
|
2333
|
+
elif filter_type == 'gaussian':
|
|
2334
|
+
args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
|
|
2335
|
+
elif filter_type == 'loess':
|
|
2336
|
+
args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
|
|
2337
|
+
elif filter_type == 'median':
|
|
2338
|
+
  args = f'Median filter, kernel of {median_filter_kernel}.'
  else:
- […28 removed lines (old 2240-2267) not rendered in this diff view…]
- # Add floor_angle_estim to segment angles
- if correct_segment_angles_with_floor_angle and to_meters:
- logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
- for ang_name in all_frames_angles_person_filt.columns:
- if 'horizontal' in angle_dict[ang_name][1]:
- all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
-
- # Remove columns with all nan values
- all_frames_angles_processed[:,idx_person,:] = all_frames_angles_person_filt
- all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
- all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
-
- # Build mot file
- angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
- logging.info(f'Angles saved to {angles_path_person.resolve()}.')
-
- # Plotting angles before and after interpolation and filtering
- all_frames_angles_person.insert(0, 'time', all_frames_time)
- if save_plots and (show_plots or save_plots):
- pw = angle_plots(all_frames_angles_person, angle_data, i, show=show_plots) # i = current person
+ logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+ raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+
+ logging.info(f'Filtering with {args}')
+ all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
+
+ # Add floor_angle_estim to segment angles
+ if correct_segment_angles_with_floor_angle and to_meters:
+ logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
+ for ang_name in all_frames_angles_person_filt.columns:
+ if 'horizontal' in angle_dict[ang_name][1]:
+ all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
+
+ # Remove columns with all nan values
+ all_frames_angles_processed[:,idx_person,:] = all_frames_angles_person_filt
+ all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
+ all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
+
+ # Build mot file
+ angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
+ logging.info(f'Angles saved to {angles_path_person.resolve()}.')
+
+ # Plotting angles before and after interpolation and filtering
+ all_frames_angles_person.insert(0, 'time', all_frames_time)
+ if show_plots or save_plots:
+ pw = angle_plots(all_frames_angles_person, angle_data, i, show=show_plots) # i = current person
+ if save_plots:
  for n, f in enumerate(pw.figure_handles):
  dpi = pw.canvases[i].figure.dpi
  f.set_size_inches(1280/dpi, 720/dpi)
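The new code above dispatches on `filter_type`, then filters every angle column at once with `DataFrame.apply`. A minimal, hedged sketch of that column-wise pattern, with scipy's median filter standing in for Sports2D's `filter1d` (whose exact signature is assumed here):

```python
# Minimal sketch of the column-wise filtering pattern used above.
# scipy's medfilt stands in for Sports2D's filter1d; data are illustrative.
import numpy as np
import pandas as pd
from scipy.signal import medfilt

def filter_column(col, kernel_size=7):
    # Filter one angle time series (gaps are assumed already interpolated)
    return pd.Series(medfilt(col.to_numpy(), kernel_size=kernel_size), index=col.index)

rng = np.random.default_rng(0)
angles = pd.DataFrame({'Right knee': rng.normal(20, 5, 100),
                       'Left knee': rng.normal(25, 5, 100)})
# Same pattern as: all_frames_angles_person_interp.apply(filter1d, axis=0, args=[...])
angles_filt = angles.apply(filter_column, axis=0)
print(angles_filt.head())
```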
@@ -2392,25 +2471,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir)
  # Delete person if less than 4 valid frames
  pose_path_person = pose_output_path.parent / (pose_output_path.stem + f'_person{i:02d}.trc')
  all_frames_X_person = pd.DataFrame(all_frames_X_homog[:,i,:], columns=new_keypoints_names)
-
-
- # heights_m.append(DEFAULT_HEIGHT)
- # masses.append(DEFAULT_MASS)
- logging.info(f'Less than 4 valid frames. Deleting person.')
+ if new_visible_side[i] == 'none':
+ logging.info(f'Skipping marker augmentation and inverse kinematics because visible_side is "none".')
  else:
- […8 removed lines (old 2401-2408) not rendered in this diff view…]
- mass_i = participant_masses[i] if len(participant_masses)>i else 70
- if len(participant_masses)<=i:
- logging.warning(f'No mass provided. Using 70 kg as default.')
- heights_m.append(height_m_i)
- masses.append(mass_i)
+ # Provide missing data to Pose2Sim_config_dict
+ height_m_i = compute_height(trc_data_m_i.iloc[:,1:], keypoints_names,
+ fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_m, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
+ mass_i = participant_masses[i] if len(participant_masses)>i else DEFAULT_MASS
+ if len(participant_masses)<=i:
+ logging.warning(f'No mass provided. Using {DEFAULT_MASS} kg as default.')
+ heights_m.append(height_m_i)
+ masses.append(mass_i)

  Pose2Sim_config_dict['project']['participant_height'] = heights_m
  Pose2Sim_config_dict['project']['participant_mass'] = masses
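The fallback logic added above reduces to a simple per-person pattern: take the user-provided value when it exists, otherwise warn and use the default. A sketch (the value of `DEFAULT_MASS` is an assumption; Sports2D defines it elsewhere in `process.py`):

```python
# Sketch of the per-person default-mass fallback shown in the hunk above.
# DEFAULT_MASS = 70 is an assumed value for illustration.
import logging

DEFAULT_MASS = 70  # kg (assumed)
participant_masses = [55.0]  # a mass was provided for only one of two persons

masses = []
for i in range(2):
    mass_i = participant_masses[i] if len(participant_masses) > i else DEFAULT_MASS
    if len(participant_masses) <= i:
        logging.warning(f'No mass provided for person {i}. Using {DEFAULT_MASS} kg as default.')
    masses.append(mass_i)

print(masses)  # [55.0, 70]
```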
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sports2d
- Version: 0.8.20
+ Version: 0.8.22
  Summary: Compute 2D human pose and angles from a video or a webcam.
  Author-email: David Pagnon <contact@david-pagnon.com>
  Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -38,7 +38,7 @@ Requires-Dist: openvino
  Requires-Dist: opencv-python<4.12
  Requires-Dist: imageio_ffmpeg
  Requires-Dist: deep-sort-realtime
- Requires-Dist: Pose2Sim>=0.10.
+ Requires-Dist: Pose2Sim>=0.10.38
  Dynamic: license-file

@@ -67,6 +67,7 @@ Dynamic: license-file
  </br>

  > **`Announcements:`**
+ > - Generate or import a calibration file, OpenSim skeleton overlay **New in v0.9!**
  > - Select only the persons you want to analyze **New in v0.8!**
  > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
  > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -218,16 +219,19 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p

  1. **Install the Pose2Sim_Blender add-on.**\
  Follow instructions on the [Pose2Sim_Blender](https://github.com/davidpagnon/Pose2Sim_Blender) add-on page.
+ 2. **Import the camera and video.**
+ - **Cameras -> Import**: Open your `demo_calib.toml` file from your `result_dir` folder.
+ - **Images/Videos -> Show**: Open your video file (e.g., `demo_Sports2D.mp4`).\
+ -> **Other tools -> See through camera**
  2. **Open your point coordinates.**\
- **
-
+ **OpenSim data -> Markers**: Open your trc file (e.g., `demo_Sports2D_m_person00.trc`) from your `result_dir` folder.\
  This will optionally create **an animated rig** based on the motion of the captured person.
  3. **Open your animated skeleton:**\
  Make sure you first set `--do_ik True` ([full install](#full-install) required). See the [inverse kinematics](#run-inverse-kinematics) section for more details.
- - **
- - **
+ - **OpenSim data -> Model**: Open your scaled model (e.g., `demo_Sports2D_m_person00_LSTM.osim`).
+ - **OpenSim data -> Motion**: Open your motion file (e.g., `demo_Sports2D_m_person00_LSTM_ik.mot`).

- The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)**
+ The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)** [](https://discord.com/invite/4mXUdSFjmt)

  <img src="Content/sports2d_blender.gif" width="760">

@@ -284,7 +288,7 @@ If you only want to analyze a subset of the detected persons, you can use the `-
  sports2d --nb_persons_to_detect 2 --person_ordering_method highest_likelihood
  ```

- We recommend
+ We recommend using the `on_click` method if you can afford manual input. This lets you handle both the number of persons and their order in a single step. When prompted, select the persons you are interested in, in the desired order. In our case, let's slide to a frame where both people are visible, and select the woman first, then the man.

  Otherwise, if you want to run Sports2D automatically, for example, you can choose other ordering methods such as 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'.

@@ -301,28 +305,32 @@ sports2d --person_ordering_method on_click


  #### Get coordinates in meters:
- > **N.B.:**
+ > **N.B.:** The Z coordinate (depth) should not be overly trusted.

-
- You may need to convert pixel coordinates to meters.\
- Just provide the height of the reference person (and their ID in case of multiple person detection).
+ You may want coordinates in meters rather than pixels. There are two ways to do so:

-
+ 1. **Just provide the height of a reference person**:
+ - Their height in meters is compared with their height in pixels to get a pixel-to-meter conversion factor.
+ - To estimate the depth coordinates, specify which side of the person is visible: `left`, `right`, `front`, or `back`. Use `auto` if you want it to be automatically determined (this only works for motions in the sagittal plane), or `none` if you want to keep 2D coordinates instead of 3D (if the person turns around, for example).
+ - The floor angle and the origin of the xy axes are automatically estimated from gait, and the person's trajectory is corrected accordingly. You can use the `--floor_angle` and `--xy_origin` parameters to specify them manually if your subject is not travelling horizontally, or if you do not want the origin to be under their feet (note that the `y` axis points down).
+
+ **N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.

-
-
+ ``` cmd
+ sports2d --first_person_height 1.65 --visible_side auto front none
+ ```
+ ``` cmd
+ sports2d --first_person_height 1.65 --visible_side auto front none `
+ --person_ordering_method on_click `
+ --floor_angle 0 --xy_origin 0 940
+ ```

-
-
-
- ``` cmd
- sports2d --
- ```
- ``` cmd
- sports2d --to_meters True --first_person_height 1.65 --visible_side auto front none `
- --person_ordering_method on_click `
- --floor_angle 0 --xy_origin 0 940
- ```
+ 2. **Or use a calibration file**:\
+ It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
+
+ ``` cmd
+ sports2d --calib_file Calib_demo.toml --visible_side auto front none
+ ```

  <br>

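The first option boils down to a single scale factor: the reference person's known height in meters divided by their measured height in pixels. A hedged sketch of the idea (all numbers are illustrative; Sports2D's actual estimation also handles the floor angle, the xy origin, and depth from `visible_side`):

```python
# Illustrative pixel-to-meter conversion from a reference person's height.
first_person_height_m = 1.65   # user-provided height (m)
person_height_px = 412.0       # head-to-toe extent measured in the image (assumed)
px_to_m = first_person_height_m / person_height_px

x_px, y_px = 300.0, 940.0      # one keypoint, in pixels
x_m, y_m = x_px * px_to_m, y_px * px_to_m   # note: the y axis points down
print(f'{px_to_m:.5f} m/px -> ({x_m:.2f} m, {y_m:.2f} m)')
```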
@@ -337,18 +345,22 @@ OpenSim inverse kinematics allows you to set joint constraints, joint angle limi
  This is done via [Pose2Sim](https://github.com/perfanalytics/pose2sim).\
  Model scaling is done according to the mean of the segment lengths, across a subset of frames. We remove the 10% fastest frames (potential outliers), the frames where the speed is 0 (person probably out of frame), the frames where the average knee and hip flexion angles are above 45° (pose estimation is not precise when the person is crouching), and the 20% most extreme segment values after the previous operations (potential outliers). All these parameters can be edited in your Config.toml file.

+ **N.B.: This will not work on sections where the person is not moving in a single plane. You can split your video into several time ranges if needed.**
+
  ```cmd
  sports2d --time_range 1.2 2.7 `
  --do_ik true --first_person_height 1.65 --visible_side auto front
  ```

  You can optionally use the LSTM marker augmentation to improve the quality of the output motion.\
- You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis)
+ You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis).\
+ Optionally again, you can [visualize the overlaid results in Blender](#visualize-in-blender). The automatic calibration won't be accurate with such a small time range, so you need to use the provided calibration file (or one that has been generated from the full walk).

  ```cmd
  sports2d --time_range 1.2 2.7 `
  --do_ik true --first_person_height 1.65 --visible_side left front `
- --use_augmentation True --participant_mass 55.0 67.0
+ --use_augmentation True --participant_mass 55.0 67.0 `
+ --calib_file Calib_demo.toml
  ```

  <br>
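The frame selection described above for model scaling can be sketched as follows. Thresholds and `scipy.stats.trim_mean` are stand-ins for the actual Config.toml parameters, and the knee/hip flexion-angle criterion is omitted for brevity:

```python
# Hedged sketch of the frame selection used for model scaling, as described above.
import numpy as np
from scipy.stats import trim_mean

rng = np.random.default_rng(1)
seg_len = rng.normal(0.40, 0.02, 500)       # per-frame segment length (m), illustrative
speed = np.abs(rng.normal(1.0, 0.5, 500))   # per-frame keypoint speed, illustrative

keep = speed > 0.05                               # drop frames where speed is ~0 (out of frame)
keep &= speed <= np.quantile(speed[keep], 0.90)   # drop the 10% fastest frames
mean_len = trim_mean(seg_len[keep], 0.20)         # trim extreme values (20% per tail here)
print(f'scaled segment length: {mean_len:.3f} m')
```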
@@ -376,14 +388,31 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
  ``` cmd
  sports2d --config Config_demo.toml
  ```
- - Run within Python
- […7 removed lines (old 380-386) not rendered in this diff view…]
+ - Run within Python, for example:\
+ - Edit `Demo/Config_demo.toml` and run:
+ ```python
+ from Sports2D import Sports2D
+ from pathlib import Path
+ import toml
+
+ config_path = Path(Sports2D.__file__).parent / 'Demo' / 'Config_demo.toml'
+ config_dict = toml.load(config_path)
+ Sports2D.process(config_dict)
+ ```
+ - Or pass only the non-default values:
+ ```python
+ from Sports2D import Sports2D
+ config_dict = {
+     'base': {
+         'nb_persons_to_detect': 1,
+         'person_ordering_method': 'greatest_displacement'
+     },
+     'pose': {
+         'mode': 'lightweight',
+         'det_frequency': 50
+     }}
+ Sports2D.process(config_dict)
+ ```

  <br>

@@ -407,7 +436,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
  ```cmd
  sports2d --flip_left_right true # Default
  ```
- - Correct segment angles according to the estimated camera
+ - Correct segment angles according to the estimated camera tilt angle.\
  **N.B.:** *The camera tilt angle is automatically estimated. Set to false if it is actually the floor that is tilted rather than the camera.*
  ```cmd
  sports2d --correct_segment_angles_with_floor_angle true # Default
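This toggle applies the same subtraction seen in the `process.py` hunk earlier: segment angles measured against the image horizontal are offset by the estimated floor/camera angle. A short sketch with illustrative values:

```python
# Sketch of the tilt correction: subtract the estimated floor angle from
# angles measured relative to the image horizontal. Values are illustrative.
import numpy as np

floor_angle_estim = np.radians(2.3)   # estimated camera/floor tilt (rad)
trunk_angle_deg = 12.0                # segment angle vs. image horizontal (deg)
corrected_deg = trunk_angle_deg - np.degrees(floor_angle_estim)
print(f'{corrected_deg:.1f}° relative to the true horizontal')
```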
@@ -477,6 +506,7 @@ sports2d --help
  'show_realtime_results': ["R", "show results in real-time. true if not specified"],
  'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
  'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
+ 'save_graphs': ["", "save position and angle plots of raw and processed results. false if not specified"],
  'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
  'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
  'save_vid': ["V", "save processed video. true if not specified"],
@@ -9,17 +9,18 @@ Content/paper.md,sha256=8rWSOLrKTysloZv0Fz2lr3nayxtHi7GlFMqwdgDVggY,11333
  Content/sports2d_blender.gif,sha256=wgMuPRxhja3XtQn76_okGXsNnUT9Thp0pnD36GdW5_E,448786
  Content/sports2d_opensim.gif,sha256=XP1AcjqhbGcJknXUoNJjPWAwaM9ahZafbDgLWvzKJs4,376656
  Sports2D/Sports2D.ipynb,sha256=VnOVjIl6ndnCJTT13L4W5qTw4T-TQDF3jt3-wxnXDqM,2427047
- Sports2D/Sports2D.py,sha256=
+ Sports2D/Sports2D.py,sha256=mV46Gh6I5OSjkFkfoQl5K2-RQIT4M-cAwWEQl7TSsXA,36346
  Sports2D/__init__.py,sha256=BuUkPEdItxlkeqz4dmoiPwZLkgAfABJK3KWQ1ujTGwE,153
- Sports2D/process.py,sha256=
- Sports2D/Demo/
+ Sports2D/process.py,sha256=aIqyGOa1AZUOkiuq0IiHDVM6DYQMBECgHHZycSFudr0,129389
+ Sports2D/Demo/Calib_demo.toml,sha256=d6myoOkhhz3c5LOwCEJQBWT9eyqr6RSYoaPbFjBMizc,369
+ Sports2D/Demo/Config_demo.toml,sha256=fq9j-KKAm6rnVGkA38PydBu2ZyZfihx0LBp5QM1U-e8,15913
  Sports2D/Demo/demo.mp4,sha256=2aZkFxhWR7ESMEtXCT8MGA83p2jmoU2sp1ylQfO3gDk,3968304
  Sports2D/Utilities/__init__.py,sha256=BuUkPEdItxlkeqz4dmoiPwZLkgAfABJK3KWQ1ujTGwE,153
  Sports2D/Utilities/common.py,sha256=idMRmesFv5BPX-5g3z5dOVa7SpS_8tNgijvGrOZlR-k,11185
- Sports2D/Utilities/tests.py,sha256=
- sports2d-0.8.
- sports2d-0.8.
- sports2d-0.8.
- sports2d-0.8.
- sports2d-0.8.
- sports2d-0.8.
+ Sports2D/Utilities/tests.py,sha256=BcZ02nVcX04PpTQ2t0g5dPGHbAXY_028Y2lxn_3udjQ,5570
+ sports2d-0.8.22.dist-info/licenses/LICENSE,sha256=f4qe3nE0Y7ltJho5w-xAR0jI5PUox5Xl-MsYiY7ZRM8,1521
+ sports2d-0.8.22.dist-info/METADATA,sha256=xdNcXme_pKvpQ8FTbMgeGhPw4wi-i2KHTUv6G1LRPgg,43216
+ sports2d-0.8.22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ sports2d-0.8.22.dist-info/entry_points.txt,sha256=V8dFDIXatz9VvoGgoHzb2wE71C9-f85K6_OjnEQlxww,108
+ sports2d-0.8.22.dist-info/top_level.txt,sha256=cWWBiDD2WbQXMoIoN6-9et9U2t2c_ZKo2JtBqO5uN-k,17
+ sports2d-0.8.22.dist-info/RECORD,,
File without changes
File without changes
File without changes
File without changes