sports2d-0.5.6-py3-none-any.whl → sports2d-0.6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Sports2D/Demo/Config_demo.toml +110 -5
- Sports2D/Sports2D.py +18 -9
- Sports2D/Utilities/common.py +291 -0
- Sports2D/Utilities/skeletons.py +654 -143
- Sports2D/Utilities/tests.py +12 -5
- Sports2D/process.py +205 -295
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/METADATA +100 -9
- sports2d-0.6.1.dist-info/RECORD +16 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/WHEEL +1 -1
- sports2d-0.5.6.dist-info/RECORD +0 -16
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/LICENSE +0 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/entry_points.txt +0 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.1.dist-info}/top_level.txt +0 -0
Sports2D/process.py
CHANGED
```diff
@@ -54,6 +54,9 @@
 from pathlib import Path
 import sys
 import logging
+import json
+import ast
+from functools import partial
 from datetime import datetime
 import itertools as it
 from tqdm import tqdm
```
```diff
@@ -64,57 +67,13 @@ import pandas as pd
 import cv2
 import matplotlib as mpl
 import matplotlib.pyplot as plt
-from rtmlib import PoseTracker, BodyWithFeet
+from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom
 
 from Sports2D.Utilities import filter
 from Sports2D.Utilities.common import *
 from Sports2D.Utilities.skeletons import *
 
 
-## CONSTANTS
-angle_dict = { # lowercase!
-    # joint angles
-    'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
-    'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
-    'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
-    'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
-    'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
-    'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
-    # 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
-    # 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
-    'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
-    'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
-    'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
-    'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
-    'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
-    'left wrist': [['LElbow', 'LIndex', 'LWrist'], 'flexion', -180, 1],
-
-    # segment angles
-    'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
-    'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
-    'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
-    'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
-    'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
-    'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
-    'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
-    'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
-    'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
-    'head': [['Head', 'Neck'], 'horizontal', 0, -1],
-    'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
-    'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
-    'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
-    'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
-    'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
-    'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
-    }
-
-colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
-        (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
-        (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
-        (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
-thickness = 1
-
-
 ## AUTHORSHIP INFORMATION
 __author__ = "David Pagnon, HunMin Kim"
 __copyright__ = "Copyright 2023, Sports2D"
```
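The constants block leaves process.py; the same tables presumably now ship in Sports2D/Utilities/common.py, which grows by 291 lines in this release. Each angle_dict entry reads as [keypoints, angle type, offset, sign]. A minimal sketch of that convention, inferred from the removed mean_angles further down (which applies `ang += params[2]; ang *= params[3]` and then takes the absolute value):

```python
import numpy as np

# Sketch, not shipped code: how an angle_dict entry's offset and sign are applied.
def apply_angle_params(raw_angle_deg, offset, sign):
    return (raw_angle_deg + offset) * sign

# 'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1]
# A raw RAnkle-RKnee-RHip angle of 175 deg gives (175 - 180) * 1 = -5 deg,
# i.e. 5 deg of knee flexion once the absolute value is taken:
print(np.abs(apply_angle_params(175, -180, 1)))  # 5
```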
```diff
@@ -153,7 +112,7 @@ def setup_webcam(webcam_id, save_vid, vid_output_path, input_size):
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, input_size[1])
     cam_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = cap.get(cv2.CAP_PROP_FPS)
+    fps = round(cap.get(cv2.CAP_PROP_FPS))
     if fps == 0: fps = 30
 
     if cam_width != input_size[0] or cam_height != input_size[1]:
```
```diff
@@ -205,7 +164,7 @@ def setup_video(video_file_path, save_vid, vid_output_path):
     cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
     out_vid = None
-    fps = cap.get(cv2.CAP_PROP_FPS)
+    fps = round(cap.get(cv2.CAP_PROP_FPS))
     if fps == 0: fps = 30
     if save_vid:
         # try:
```
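Both capture paths now round the reported frame rate. OpenCV often reports non-integer rates such as 29.97, and returns 0 when the backend cannot determine one; a minimal sketch of the behavior (hypothetical input file):

```python
import cv2

cap = cv2.VideoCapture('input.mp4')      # hypothetical file name
fps = round(cap.get(cv2.CAP_PROP_FPS))   # e.g. 29.97 -> 30; 0 if unknown
if fps == 0: fps = 30                    # same default fallback as in the hunks above
```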
```diff
@@ -221,67 +180,78 @@ def setup_video(video_file_path, save_vid, vid_output_path):
     return cap, out_vid, cam_width, cam_height, fps
 
 
-def setup_backend_device():
+def setup_backend_device(backend='auto', device='auto'):
     '''
     Set up the backend and device for the pose tracker based on the availability of hardware acceleration.
     TensorRT is not supported by RTMLib yet: https://github.com/Tau-J/rtmlib/issues/12
 
-
+    If device and backend are not specified, they are automatically set up in the following order of priority:
     1. GPU with CUDA and ONNXRuntime backend (if CUDAExecutionProvider is available)
     2. GPU with ROCm and ONNXRuntime backend (if ROCMExecutionProvider is available, for AMD GPUs)
     3. GPU with MPS or CoreML and ONNXRuntime backend (for macOS systems)
     4. CPU with OpenVINO backend (default fallback)
     '''
 
-    try:
-        import onnxruntime as ort
-        if 'CUDAExecutionProvider' in ort.get_available_providers():
-            device = 'cuda'
-            backend = 'onnxruntime'
-            logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
-        elif 'ROCMExecutionProvider' in ort.get_available_providers():
-            device = 'rocm'
-            backend = 'onnxruntime'
-            logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
-        else:
-            raise
-    except:
-        try:
-            import onnxruntime as ort
-            if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
-                device = 'mps'
-                backend = 'onnxruntime'
-                logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
-            else:
-                raise
-        except:
-            device = 'cpu'
-            backend = 'openvino'
-            logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")
+    if device!='auto' and backend!='auto':
+        device = device.lower()
+        backend = backend.lower()
+
+    if device=='auto' or backend=='auto':
+        if device=='auto' and backend!='auto' or device!='auto' and backend=='auto':
+            logging.warning(f"If you set device or backend to 'auto', you must set the other to 'auto' as well. Both device and backend will be determined automatically.")
+
+        try:
+            import torch
+            import onnxruntime as ort
+            if torch.cuda.is_available() == True and 'CUDAExecutionProvider' in ort.get_available_providers():
+                device = 'cuda'
+                backend = 'onnxruntime'
+                logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
+            elif torch.cuda.is_available() == True and 'ROCMExecutionProvider' in ort.get_available_providers():
+                device = 'rocm'
+                backend = 'onnxruntime'
+                logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
+            else:
+                raise
+        except:
+            try:
+                import onnxruntime as ort
+                if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
+                    device = 'mps'
+                    backend = 'onnxruntime'
+                    logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
+                else:
+                    raise
+            except:
+                device = 'cpu'
+                backend = 'openvino'
+                logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")
 
     return backend, device
 
 
-def setup_pose_tracker(det_frequency, mode, tracking):
+def setup_pose_tracker(ModelClass, det_frequency, mode, tracking, backend, device):
     '''
     Set up the RTMLib pose tracker with the appropriate model and backend.
     If CUDA is available, use it with ONNXRuntime backend; else use CPU with openvino
 
     INPUTS:
+    - ModelClass: class. The RTMlib model class to use for pose detection (Body, BodyWithFeet, Wholebody)
     - det_frequency: int. The frequency of pose detection (every N frames)
     - mode: str. The mode of the pose tracker ('lightweight', 'balanced', 'performance')
     - tracking: bool. Whether to track persons across frames with RTMlib tracker
+    - backend: str. The backend to use for pose detection (onnxruntime, openvino, opencv)
+    - device: str. The device to use for pose detection (cpu, cuda, rocm, mps)
 
     OUTPUTS:
     - pose_tracker: PoseTracker. The initialized pose tracker object
     '''
 
-    backend, device = setup_backend_device()
+    backend, device = setup_backend_device(backend=backend, device=device)
 
     # Initialize the pose tracker with Halpe26 model
     pose_tracker = PoseTracker(
-        BodyWithFeet,
+        ModelClass,
         det_frequency=det_frequency,
         mode=mode,
         backend=backend,
```
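With the new signature, callers can pin the hardware explicitly or leave either value on 'auto'. A standalone sketch of the resolution order as I read the hunk above (not the shipped code):

```python
import logging

def resolve_backend_device(backend='auto', device='auto'):
    # Explicit (backend, device) pairs pass through; any 'auto' triggers probing.
    if backend != 'auto' and device != 'auto':
        return backend.lower(), device.lower()
    if (backend == 'auto') != (device == 'auto'):
        logging.warning("Only one of backend/device is 'auto': both will be auto-detected.")
    try:
        import torch
        import onnxruntime as ort
        providers = ort.get_available_providers()
        if torch.cuda.is_available() and 'CUDAExecutionProvider' in providers:
            return 'onnxruntime', 'cuda'    # NVIDIA GPU
        if torch.cuda.is_available() and 'ROCMExecutionProvider' in providers:
            return 'onnxruntime', 'rocm'    # AMD GPU
    except ImportError:
        pass
    try:
        import onnxruntime as ort
        if {'MPSExecutionProvider', 'CoreMLExecutionProvider'} & set(ort.get_available_providers()):
            return 'onnxruntime', 'mps'     # Apple silicon
    except ImportError:
        pass
    return 'openvino', 'cpu'                # CPU fallback
```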
```diff
@@ -353,22 +323,17 @@ def compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, keypoints_id
 
     ang_params = angle_dict.get(ang_name)
     if ang_params is not None:
-        if ang_name in ['pelvis', 'trunk', 'shoulders']:
-            angle_coords = [[np.abs(person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]]), person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
-        else:
-            angle_coords = [[person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]], person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
-        ang = points_to_angles(angle_coords)
-        ang += ang_params[2]
-        ang *= ang_params[3]
-        if ang_name in ['pelvis', 'shoulders']:
-            ang = ang-180 if ang>90 else ang
-            ang = ang+180 if ang<-90 else ang
-        else:
-            ang = ang-360 if ang>180 else ang
-            ang = ang+360 if ang<-180 else ang
+        try:
+            if ang_name in ['pelvis', 'trunk', 'shoulders']:
+                angle_coords = [[np.abs(person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]]), person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
+            else:
+                angle_coords = [[person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]], person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
+            ang = fixed_angles(angle_coords, ang_name)
+        except:
+            ang = np.nan
     else:
         ang = np.nan
 
     return ang
```
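The per-angle math now routes through fixed_angles (a helper added in this release, presumably among the new functions in common.py), with a try/except guarding missing keypoints. The np.abs() on the flipped X for pelvis, trunk, and shoulders presumably keeps the left-right flip from mirroring segments that span both body halves; a minimal illustration:

```python
import numpy as np

# When a person faces left, X coordinates are negated ("flipped") so that joint
# angles keep a consistent sign. Bilateral segments span both body halves, so
# their endpoints are mapped back with np.abs() before the angle is measured:
person_X_flipped = np.array([-120.0, -130.0])  # e.g. LHip, RHip after flipping
pelvis_endpoints_x = np.abs(person_X_flipped)  # array([120., 130.])
```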
```diff
@@ -618,7 +583,7 @@ def draw_skel(img, X, Y, model, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)]):
     [cv2.line(img,
         (int(x[n[0]]), int(y[n[0]])), (int(x[n[1]]), int(y[n[1]])), c, thickness)
         for n in node_pairs
-        if not (np.isnan(x[n[0]]) or np.isnan(y[n[0]]) or np.isnan(x[n[1]]) or np.isnan(y[n[1]]))]
+        if not None in n and not (np.isnan(x[n[0]]) or np.isnan(y[n[0]]) or np.isnan(x[n[1]]) or np.isnan(y[n[1]]))] # IF NOT NONE
 
     return img
```
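The added guard matters because skeleton node pairs can now carry None ids (keypoints present in the reworked skeletons.py trees but absent from the detected set). A minimal illustration of the short-circuiting filter:

```python
import numpy as np

x = y = np.array([10.0, 20.0])
node_pairs = [(0, 1), (1, None)]   # second edge has no detected endpoint
drawable = [n for n in node_pairs
            if not None in n and not (np.isnan(x[n[0]]) or np.isnan(y[n[0]]))]
print(drawable)                    # [(0, 1)] -- the None edge is never indexed
```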
```diff
@@ -692,31 +657,34 @@ def draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, keypoints_
         ang_name = angle_names[k]
         ang_params = angle_dict.get(ang_name)
         if ang_params is not None:
-            ang_coords = np.array([[X[keypoints_ids[keypoints_names.index(kpt)]], Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]])
-            X_flipped_coords = [X_flipped[keypoints_ids[keypoints_names.index(kpt)]] for kpt in ang_params[0]]
-            flip = -1 if any(x_flipped < 0 for x_flipped in X_flipped_coords) else 1
-            flip = 1 if ang_name in ['pelvis', 'trunk', 'shoulders'] else flip
-            right_angle = True if ang_params[2]==90 else False
-
-            # Draw angle
-            if len(ang_coords) == 2: # segment angle
-                app_point, vec = draw_segment_angle(img, ang_coords, flip)
-            else: # joint angle
-                app_point, vec1, vec2 = draw_joint_angle(img, ang_coords, flip, right_angle)
-
-            # Write angle on body
-            if 'body' in display_angle_values_on:
+            kpts = ang_params[0]
+            if not any(item not in keypoints_names+['Neck', 'Hip'] for item in kpts):
+                ang_coords = np.array([[X[keypoints_ids[keypoints_names.index(kpt)]], Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0] if kpt in keypoints_names])
+                X_flipped = np.append(X_flipped, X[len(X_flipped):])
+                X_flipped_coords = [X_flipped[keypoints_ids[keypoints_names.index(kpt)]] for kpt in ang_params[0] if kpt in keypoints_names]
+                flip = -1 if any(x_flipped < 0 for x_flipped in X_flipped_coords) else 1
+                flip = 1 if ang_name in ['pelvis', 'trunk', 'shoulders'] else flip
+                right_angle = True if ang_params[2]==90 else False
+
+                # Draw angle
                 if len(ang_coords) == 2: # segment angle
-                    write_angle_on_body(img, ang, app_point, vec, np.array([1,0]), dist=20, color=(255,255,255), fontSize=fontSize, thickness=thickness)
+                    app_point, vec = draw_segment_angle(img, ang_coords, flip)
                 else: # joint angle
-                    write_angle_on_body(img, ang, app_point, vec1, vec2, dist=40, color=(0,255,0), fontSize=fontSize, thickness=thickness)
-
-            # Write angle as a list on image with progress bar
-            if 'list' in display_angle_values_on:
-                ...
+                    app_point, vec1, vec2 = draw_joint_angle(img, ang_coords, flip, right_angle)
+
+                # Write angle on body
+                if 'body' in display_angle_values_on:
+                    if len(ang_coords) == 2: # segment angle
+                        write_angle_on_body(img, ang, app_point, vec, np.array([1,0]), dist=20, color=(255,255,255), fontSize=fontSize, thickness=thickness)
+                    else: # joint angle
+                        write_angle_on_body(img, ang, app_point, vec1, vec2, dist=40, color=(0,255,0), fontSize=fontSize, thickness=thickness)
+
+                # Write angle as a list on image with progress bar
+                if 'list' in display_angle_values_on:
+                    if len(ang_coords) == 2: # segment angle
+                        ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (255,255,255), fontSize=fontSize, thickness=thickness)
+                    else:
+                        ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (0,255,0), fontSize=fontSize, thickness=thickness)
 
     return img
```
```diff
@@ -869,32 +837,6 @@ def write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line
     return ang_label_line
 
 
-def read_trc(trc_path):
-    '''
-    Read trc file
-
-    INPUTS:
-    - trc_path: path to the trc file
-
-    OUTPUTS:
-    - Q_coords: dataframe of coordinates
-    - frames_col: series of frames
-    - time_col: series of time
-    - markers: list of marker names
-    - header: list of header lines
-    '''
-
-    with open(trc_path, 'r') as trc_file:
-        header = [next(trc_file) for line in range(5)]
-    markers = header[3].split('\t')[2::3]
-
-    trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4)
-    frames_col, time_col = pd.Series(trc_df.iloc[:,0], name='frames'), pd.Series(trc_df.iloc[:,1], name='time')
-    Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
-
-    return Q_coords, frames_col, time_col, markers, header
-
-
 def load_pose_file(Q_coords):
     '''
     Load 2D keypoints from a dataframe of XYZ coordinates
```
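read_trc leaves process.py but is still called further down this diff (`Q_coords, _, _, keypoints_names, _ = read_trc(load_trc)`), so it presumably moved to Sports2D/Utilities/common.py with the other relocated helpers; the call signature is unchanged. The removed body also documents the TRC layout it expects: five header lines, then tab-separated data with frame and time in the first two columns and one X/Y/Z triple per marker. A minimal sketch of the marker-name extraction, with a hypothetical header line:

```python
# Marker names sit on TRC header line 4, every third tab-separated field
# starting at index 2 (the X column of each marker triple):
header_line_4 = "Frame#\tTime\tRHip\t\t\tLHip\t\t\n"   # hypothetical example
markers = header_line_4.split('\t')[2::3]
print(markers)   # ['RHip', 'LHip']
```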
```diff
@@ -1075,6 +1017,7 @@ def angle_plots(angle_data_unfiltered, angle_data, person_id):
     ax = plt.subplot(111)
     plt.plot(angle_data_unfiltered.iloc[:,0], angle_data_unfiltered.iloc[:,id+1], label='unfiltered')
     plt.plot(angle_data.iloc[:,0], angle_data.iloc[:,id+1], label='filtered')
+
     ax.set_xlabel('Time (seconds)')
     ax.set_ylabel(angle+' (°)')
     plt.legend()
```
```diff
@@ -1144,138 +1087,6 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
     return angle, xy_origin
 
 
-def mean_angles(Q_coords, markers, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
-    '''
-    Compute the mean angle time series from 3D points for a given list of angles.
-
-    INPUTS:
-    - Q_coords (DataFrame): The triangulated coordinates of the markers.
-    - markers (list): The list of marker names.
-    - ang_to_consider (list): The list of angles to consider (requires angle_dict).
-
-    OUTPUTS:
-    - ang_mean: The mean angle time series.
-    '''
-
-    ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']
-
-    angs = []
-    for ang_name in ang_to_consider:
-        ang_params = angle_dict[ang_name]
-        ang_mk = ang_params[0]
-
-        pts_for_angles = []
-        for pt in ang_mk:
-            pts_for_angles.append(Q_coords.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
-        ang = points_to_angles(pts_for_angles)
-
-        ang += ang_params[2]
-        ang *= ang_params[3]
-        ang = np.abs(ang)
-
-        angs.append(ang)
-
-    ang_mean = np.mean(angs, axis=0)
-
-    return ang_mean
-
-
-def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
-    '''
-    Compute the best coordinates for measurements, after removing:
-    - 20% fastest frames (may be outliers)
-    - frames when speed is close to zero (person is out of frame): 0.2 m/frame, or 50 px/frame
-    - frames when hip and knee angle below 45° (imprecise coordinates when person is crouching)
-
-    INPUTS:
-    - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-    - keypoints_names: list. The list of marker names
-    - fastest_frames_to_remove_percent: float
-    - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
-    - large_hip_knee_angles: int
-    - trimmed_extrema_percent
-
-    OUTPUT:
-    - Q_coords_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
-    '''
-
-    # Add Hip column if not present
-    n_markers_init = len(keypoints_names)
-    if 'Hip' not in keypoints_names:
-        RHip_df = Q_coords.iloc[:,keypoints_names.index('RHip')*3:keypoints_names.index('RHip')*3+3]
-        LHip_df = Q_coords.iloc[:,keypoints_names.index('LHip')*3:keypoints_names.index('RHip')*3+3]
-        Hip_df = RHip_df.add(LHip_df, fill_value=0) /2
-        Hip_df.columns = [col+ str(int(Q_coords.columns[-1][1:])+1) for col in ['X','Y','Z']]
-        keypoints_names += ['Hip']
-        Q_coords = pd.concat([Q_coords, Hip_df], axis=1)
-    n_markers = len(keypoints_names)
-
-    # Using 80% slowest frames
-    sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
-    sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
-    min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
-    Q_coords_low_speeds = Q_coords.iloc[min_speed_indices].reset_index(drop=True)
-
-    # Only keep frames with hip and knee flexion angles below 45%
-    # (if more than 50 of them, else take 50 smallest values)
-    ang_mean = mean_angles(Q_coords_low_speeds, keypoints_names, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
-    Q_coords_low_speeds_low_angles = Q_coords_low_speeds[ang_mean < large_hip_knee_angles]
-    if len(Q_coords_low_speeds_low_angles) < 50:
-        Q_coords_low_speeds_low_angles = Q_coords_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
-
-    if n_markers_init < n_markers:
-        Q_coords_low_speeds_low_angles = Q_coords_low_speeds_low_angles.iloc[:,:-3]
-
-    return Q_coords_low_speeds_low_angles
-
-
-def compute_height(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
-    '''
-    Compute the height of the person from the trc data.
-
-    INPUTS:
-    - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-    - keypoints_names: list. The list of marker names
-    - fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
-    - close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
-    - large_hip_knee_angles5: float. Hip and knee angles below this value are considered as imprecise
-    - trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean)
-
-    OUTPUT:
-    - height: float. The estimated height of the person
-    '''
-
-    # Retrieve most reliable coordinates
-    Q_coords_low_speeds_low_angles = best_coords_for_measurements(Q_coords, keypoints_names,
-                fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
-    Q_coords_low_speeds_low_angles.columns = np.array([[m]*3 for m in keypoints_names]).flatten()
-
-    # Add MidShoulder column
-    df_MidShoulder = pd.DataFrame((Q_coords_low_speeds_low_angles['RShoulder'].values + Q_coords_low_speeds_low_angles['LShoulder'].values) /2)
-    df_MidShoulder.columns = ['MidShoulder']*3
-    Q_coords_low_speeds_low_angles = pd.concat((Q_coords_low_speeds_low_angles.reset_index(drop=True), df_MidShoulder), axis=1)
-
-    # Automatically compute the height of the person
-    pairs_up_to_shoulders = [['RHeel', 'RAnkle'], ['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
-                            ['LHeel', 'LAnkle'], ['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
-    try:
-        rfoot, rshank, rfemur, rback, lfoot, lshank, lfemur, lback = [euclidean_distance(Q_coords_low_speeds_low_angles[pair[0]],Q_coords_low_speeds_low_angles[pair[1]]) for pair in pairs_up_to_shoulders]
-    except:
-        raise ValueError('At least one of the following markers is missing for computing the height of the person:\
-                RHeel, RAnkle, RKnee, RHip, RShoulder, LHeel, LAnkle, LKnee, LHip, LShoulder.\
-                Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-    if 'Head' in keypoints_names:
-        head = euclidean_distance(Q_coords_low_speeds_low_angles['MidShoulder'], Q_coords_low_speeds_low_angles['Head'])
-    else:
-        head = euclidean_distance(Q_coords_low_speeds_low_angles['MidShoulder'], Q_coords_low_speeds_low_angles['Nose'])*1.33
-    heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
-
-    # Remove the 20% most extreme values
-    height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
-
-    return height
-
-
 def convert_px_to_meters(Q_coords_kpt, person_height_m, height_px, cx, cy, floor_angle):
     '''
     Convert pixel coordinates to meters.
```
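These three measurement helpers leave process.py as well, again presumably into common.py. The height estimate sums averaged left/right segment lengths per frame, then condenses the per-frame values with trimmed_mean. A sketch of that last step under my reading of `trimmed_extrema_percent=0.5` (drop the most extreme half, split across both tails; trimmed_mean itself is defined elsewhere):

```python
import numpy as np

def trimmed_mean_sketch(values, trimmed_extrema_percent=0.5):
    # Keep the central (1 - trimmed_extrema_percent) share of sorted values,
    # then average them -- one number from noisy per-frame height estimates.
    values = np.sort(np.asarray(values, dtype=float))
    k = int(len(values) * trimmed_extrema_percent / 2)
    return values[k:len(values) - k].mean()

heights = [1.69, 1.70, 1.71, 1.74, 1.90, 1.20]   # per-frame estimates (illustrative)
print(trimmed_mean_sketch(heights))               # 1.71, outliers trimmed away
```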
```diff
@@ -1373,6 +1184,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     mode = config_dict.get('pose').get('mode')
     det_frequency = config_dict.get('pose').get('det_frequency')
     tracking_mode = config_dict.get('pose').get('tracking_mode')
+    backend = config_dict.get('pose').get('backend')
+    device = config_dict.get('pose').get('device')
 
     # Pixel to meters conversion
     to_meters = config_dict.get('px_to_meters_conversion').get('to_meters')
```
```diff
@@ -1406,6 +1219,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     fontSize = config_dict.get('angles').get('fontSize')
     thickness = 1 if fontSize < 0.8 else 2
     flip_left_right = config_dict.get('angles').get('flip_left_right')
+    correct_segment_angles_with_floor_angle = config_dict.get('angles').get('correct_segment_angles_with_floor_angle')
 
     # Post-processing settings
     interpolate = config_dict.get('post-processing').get('interpolate')
```
```diff
@@ -1464,9 +1278,52 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         cv2.namedWindow(f'{video_file} Sports2D', cv2.WINDOW_NORMAL + cv2.WINDOW_KEEPRATIO)
         cv2.setWindowProperty(f'{video_file} Sports2D', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_FULLSCREEN)
 
+    # Select the appropriate model based on the model_type
+    if pose_model.upper() in ('HALPE_26', 'BODY_WITH_FEET'):
+        model_name = 'HALPE_26'
+        ModelClass = BodyWithFeet # 26 keypoints (halpe26)
+        logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
+    elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY', 'WHOLE_BODY_WRIST'):
+        model_name = 'COCO_133'
+        ModelClass = Wholebody
+        logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
+    elif pose_model.upper() in ('COCO_17', 'BODY'):
+        model_name = 'COCO_17'
+        ModelClass = Body
+        logging.info(f"Using COCO_17 model (body) for pose estimation.")
+    else:
+        raise ValueError(f"Invalid model_type: {model_name}. Must be 'HALPE_26', 'COCO_133', or 'COCO_17'. Use another network (MMPose, DeepLabCut, OpenPose, AlphaPose, BlazePose...) and convert the output files if you need another model. See documentation.")
+    pose_model_name = pose_model
+    pose_model = eval(model_name)
+
+    # Manually select the models if mode is a dictionary rather than 'lightweight', 'balanced', or 'performance'
+    if not mode in ['lightweight', 'balanced', 'performance']:
+        try:
+            try:
+                mode = ast.literal_eval(mode)
+            except: # if within single quotes instead of double quotes when run with sports2d --mode """{dictionary}"""
+                mode = mode.strip("'").replace('\n', '').replace(" ", "").replace(",", '", "').replace(":", '":"').replace("{", '{"').replace("}", '"}').replace('":"/',':/').replace('":"\\',':\\')
+                mode = re.sub(r'"\[([^"]+)",\s?"([^"]+)\]"', r'[\1,\2]', mode) # changes "[640", "640]" to [640,640]
+                mode = json.loads(mode)
+            det_class = mode.get('det_class')
+            det = mode.get('det_model')
+            det_input_size = mode.get('det_input_size')
+            pose_class = mode.get('pose_class')
+            pose = mode.get('pose_model')
+            pose_input_size = mode.get('pose_input_size')
+
+            ModelClass = partial(Custom,
+                        det_class=det_class, det=det, det_input_size=det_input_size,
+                        pose_class=pose_class, pose=pose, pose_input_size=pose_input_size,
+                        backend=backend, device=device)
+
+        except (json.JSONDecodeError, TypeError):
+            logging.warning("\nInvalid mode. Must be 'lightweight', 'balanced', 'performance', or '''{dictionary}''' of parameters within triple quotes. Make sure input_sizes are within square brackets.")
+            logging.warning('Using the default "balanced" mode.')
+            mode = 'balanced'
 
+
     # Skip pose estimation or set it up:
-    model = eval(pose_model)
     if load_trc:
         if not '_px' in str(load_trc):
             logging.error(f'\n{load_trc} file needs to be in px, not in meters.')
```
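The dictionary form of `mode` wires RTMLib's `Custom` solution to arbitrary detector and pose checkpoints via `functools.partial`. A hypothetical dictionary built from the keys the parser reads above (class names and URLs are illustrative placeholders, not verified endpoints):

```python
custom_mode = {
    'det_class': 'YOLOX',                                # detector class name (illustrative)
    'det_model': 'https://example.com/yolox_m.onnx',     # placeholder checkpoint URL
    'det_input_size': [640, 640],
    'pose_class': 'RTMPose',                             # pose class name (illustrative)
    'pose_model': 'https://example.com/rtmpose_m.onnx',  # placeholder checkpoint URL
    'pose_input_size': [192, 256],
}
# Per the comment in the hunk, this is passed on the command line inside
# triple quotes, e.g.: sports2d --mode """{'det_class':'YOLOX', ...}"""
```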
```diff
@@ -1475,26 +1332,39 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         Q_coords, _, _, keypoints_names, _ = read_trc(load_trc)
         keypoints_ids = [i for i in range(len(keypoints_names))]
         keypoints_all, scores_all = load_pose_file(Q_coords)
-        for pre, _, node in RenderTree(model):
+        for pre, _, node in RenderTree(model_name):
             if node.name in keypoints_names:
                 node.id = keypoints_names.index(node.name)
 
     else:
         # Retrieve keypoint names from model
-        keypoints_ids = [node.id for _, _, node in RenderTree(model)]
-        keypoints_names = [node.name for _, _, node in RenderTree(model)]
+        keypoints_ids = [node.id for _, _, node in RenderTree(pose_model) if node.id!=None]
+        keypoints_names = [node.name for _, _, node in RenderTree(pose_model) if node.id!=None]
 
     tracking_rtmlib = True if (tracking_mode == 'rtmlib' and multiperson) else False
-    pose_tracker = setup_pose_tracker(det_frequency, mode, tracking_rtmlib)
-    logging.info(f'\nPose tracking set up for "{pose_model}" model.')
+    pose_tracker = setup_pose_tracker(ModelClass, det_frequency, mode, tracking_rtmlib, backend, device)
+    logging.info(f'\nPose tracking set up for "{pose_model_name}" model.')
+    logging.info(f'Mode: {mode}.\n')
     logging.info(f'Persons are detected every {det_frequency} frames and tracked inbetween. Multi-person is {"" if multiperson else "not "}selected.')
     logging.info(f"Parameters: {keypoint_likelihood_threshold=}, {average_likelihood_threshold=}, {keypoint_number_threshold=}")
 
-    Ltoe_idx = keypoints_ids[keypoints_names.index('LBigToe')]
-    LHeel_idx = keypoints_ids[keypoints_names.index('LHeel')]
-    Rtoe_idx = keypoints_ids[keypoints_names.index('RBigToe')]
-    RHeel_idx = keypoints_ids[keypoints_names.index('RHeel')]
-    L_R_direction_idx = [Ltoe_idx, LHeel_idx, Rtoe_idx, RHeel_idx]
+    if flip_left_right:
+        try:
+            Ltoe_idx = keypoints_ids[keypoints_names.index('LBigToe')]
+            LHeel_idx = keypoints_ids[keypoints_names.index('LHeel')]
+            Rtoe_idx = keypoints_ids[keypoints_names.index('RBigToe')]
+            RHeel_idx = keypoints_ids[keypoints_names.index('RHeel')]
+            L_R_direction_idx = [Ltoe_idx, LHeel_idx, Rtoe_idx, RHeel_idx]
+        except ValueError:
+            logging.warning(f"Missing 'LBigToe', 'LHeel', 'RBigToe', 'RHeel' keypoints. flip_left_right will be set to False")
+            flip_left_right = False
+
+    if calculate_angles:
+        for ang_name in angle_names:
+            ang_params = angle_dict.get(ang_name)
+            kpts = ang_params[0]
+            if any(item not in keypoints_names+['Neck', 'Hip'] for item in kpts):
+                logging.warning(f"Skipping {ang_name} angle computation because at least one of the following keypoints is not provided by the model: {ang_params[0]}.")
 
 
     # Process video or webcam feed
```
```diff
@@ -1568,9 +1438,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                 person_X = np.full_like(person_X, np.nan)
                 person_Y = np.full_like(person_Y, np.nan)
                 person_scores = np.full_like(person_scores, np.nan)
-            valid_X.append(person_X)
-            valid_Y.append(person_Y)
-            valid_scores.append(person_scores)
 
 
         # Compute angles
```
```diff
@@ -1580,24 +1447,40 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                 person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)
             else:
                 person_X_flipped = person_X.copy()
-            valid_X_flipped.append(person_X_flipped)
 
             # Compute angles
             person_angles = []
+            # Add Neck and Hip if not provided
+            new_keypoints_names, new_keypoints_ids = keypoints_names.copy(), keypoints_ids.copy()
+            for kpt in ['Neck', 'Hip']:
+                if kpt not in new_keypoints_names:
+                    person_X_flipped, person_Y, person_scores = add_neck_hip_coords(kpt, person_X_flipped, person_Y, person_scores, new_keypoints_ids, new_keypoints_names)
+                    person_X, _, _ = add_neck_hip_coords(kpt, person_X, person_Y, person_scores, new_keypoints_ids, new_keypoints_names)
+                    new_keypoints_names.append(kpt)
+                    new_keypoints_ids.append(len(person_X_flipped)-1)
+
             for ang_name in angle_names:
-                ang = compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, keypoints_ids, keypoints_names)
+                ang_params = angle_dict.get(ang_name)
+                kpts = ang_params[0]
+                if not any(item not in new_keypoints_names for item in kpts):
+                    ang = compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, new_keypoints_ids, new_keypoints_names)
+                else:
+                    ang = np.nan
                 person_angles.append(ang)
             valid_angles.append(person_angles)
-
+            valid_X_flipped.append(person_X_flipped)
+            valid_X.append(person_X)
+            valid_Y.append(person_Y)
+            valid_scores.append(person_scores)
 
         # Draw keypoints and skeleton
         if show_realtime_results or save_vid or save_img:
             img = frame.copy()
             img = draw_bounding_box(img, valid_X, valid_Y, colors=colors, fontSize=fontSize, thickness=thickness)
-            img = draw_keypts(img, valid_X, valid_Y, scores, cmap_str='RdYlGn')
-            img = draw_skel(img, valid_X, valid_Y, model, colors=colors)
+            img = draw_keypts(img, valid_X, valid_Y, valid_scores, cmap_str='RdYlGn')
+            img = draw_skel(img, valid_X, valid_Y, pose_model, colors=colors)
             if calculate_angles:
-                img = draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, keypoints_ids, keypoints_names, angle_names, display_angle_values_on=display_angle_values_on, colors=colors, fontSize=fontSize, thickness=thickness)
+                img = draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, new_keypoints_ids, new_keypoints_names, angle_names, display_angle_values_on=display_angle_values_on, colors=colors, fontSize=fontSize, thickness=thickness)
 
         if show_realtime_results:
             cv2.imshow(f'{video_file} Sports2D', img)
```
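add_neck_hip_coords itself is defined elsewhere (presumably among the new helpers in common.py). A hypothetical sketch of the midpoint construction it most likely performs, with the new point appended last so new_keypoints_ids can reference it:

```python
import numpy as np

def midpoint_keypoint(X, Y, scores, idx_a, idx_b):
    # Hypothetical: e.g. Neck as the LShoulder/RShoulder midpoint, Hip as the
    # LHip/RHip midpoint, appended at the end of each per-person array.
    X = np.append(X, (X[idx_a] + X[idx_b]) / 2)
    Y = np.append(Y, (Y[idx_a] + Y[idx_b]) / 2)
    scores = np.append(scores, (scores[idx_a] + scores[idx_b]) / 2)
    return X, Y, scores
```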
```diff
@@ -1618,6 +1501,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             elapsed_time = (datetime.now() - start_time).total_seconds()
             frame_processing_times.append(elapsed_time)
 
+
+    # End of the video is reached
     cap.release()
     logging.info(f"Video processing completed.")
     if save_vid:
```
```diff
@@ -1730,9 +1615,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         if show_plots and not to_meters:
             pose_plots(trc_data_unfiltered_i, trc_data_i, i)
 
+
     # Convert px to meters
     if to_meters:
         logging.info('\nConverting pose to meters:')
+        if calib_on_person_id>=len(trc_data):
+            logging.warning(f'Person #{calib_on_person_id} not detected in the video. Calibrating on person #0 instead.')
+            calib_on_person_id = 0
         if calib_file:
             logging.info(f'Using calibration file to convert coordinates in meters: {calib_file}.')
             calib_params_dict = retrieve_calib_params(calib_file)
```
```diff
@@ -1748,10 +1637,20 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
         if floor_angle == 'auto' or xy_origin == 'auto':
             # estimated from the line formed by the toes when they are on the ground (where speed = 0)
-            toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
-            px_per_m = height_px/person_height_m
-            toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
-            floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+            try:
+                toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
+                px_per_m = height_px/person_height_m
+                toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
+                try:
+                    floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                except: # no feet points
+                    floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
+                    xy_origin_estim[0] = xy_origin_estim[0]-0.13
+                    logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
+            except:
+                floor_angle_estim = 0
+                xy_origin_estim = cam_width/2, cam_height/2
+                logging.warning(f'Could not estimate the floor angle and xy_origin. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}].')
             if not floor_angle == 'auto':
                 floor_angle_estim = floor_angle
             if xy_origin == 'auto':
```
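The stationary-toe threshold is specified in m/s, but compute_floor_line works in pixels per frame, so it is rescaled using the person's height as the reference length. A worked instance of the conversion above, with illustrative numbers:

```python
person_height_m = 1.75   # from the config
height_px = 700.0        # person's height measured in the image (illustrative)
fps = 30
toe_speed_below = 1      # m/s, below which the foot counts as stationary

px_per_m = height_px / person_height_m                       # 400.0 px per meter
toe_speed_below_px_frame = toe_speed_below * px_per_m / fps  # ~13.33 px/frame
print(round(toe_speed_below_px_frame, 2))
```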
```diff
@@ -1776,7 +1675,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             # Write to trc file
             idx_path = selected_person_id if not multiperson and not calib_file else i
             pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
-            make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i)
+            make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
             logging.info(f'Person {idx_path}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')
```
```diff
@@ -1887,6 +1786,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             logging.info(f'Filtering with {args}')
             all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter.filter1d, axis=0, args=filter_options)
 
+        # Remove columns with all nan values
+        all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
+        all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
+
+        # Add floor_angle_estim to segment angles
+        if correct_segment_angles_with_floor_angle and to_meters:
+            logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
+            for ang_name in all_frames_angles_person_filt.columns:
+                if 'horizontal' in angle_dict[ang_name][1]:
+                    all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
+
         # Build mot file
         angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
         logging.info(f'Angles saved to {angles_path_person.resolve()}.')
```
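Only segment angles are corrected: the check on `angle_dict[ang_name][1]` selects entries measured against the 'horizontal' reference, while joint angles are relative and left untouched. A numeric illustration:

```python
import numpy as np

floor_angle_estim = np.radians(2.5)  # camera tilt estimated from the floor line
trunk = 87.0                         # segment angle vs the image horizontal
trunk -= np.degrees(floor_angle_estim)
print(trunk)                         # 84.5, now measured against the actual floor
```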