sports2d 0.5.5-py3-none-any.whl → 0.6.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Sports2D/process.py CHANGED
@@ -54,6 +54,9 @@
  from pathlib import Path
  import sys
  import logging
+ import json
+ import ast
+ from functools import partial
  from datetime import datetime
  import itertools as it
  from tqdm import tqdm
@@ -64,57 +67,13 @@ import pandas as pd
  import cv2
  import matplotlib as mpl
  import matplotlib.pyplot as plt
- from rtmlib import PoseTracker, BodyWithFeet
+ from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom

  from Sports2D.Utilities import filter
  from Sports2D.Utilities.common import *
  from Sports2D.Utilities.skeletons import *


- ## CONSTANTS
- angle_dict = { # lowercase!
-     # joint angles
-     'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
-     'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
-     'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
-     'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
-     'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
-     'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
-     # 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
-     # 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
-     'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
-     'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
-     'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
-     'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
-     'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
-     'left wrist': [['LElbow', 'LIndex', 'LWrist'], 'flexion', -180, 1],
-
-     # segment angles
-     'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
-     'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
-     'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
-     'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
-     'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
-     'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
-     'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
-     'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
-     'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
-     'head': [['Head', 'Neck'], 'horizontal', 0, -1],
-     'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
-     'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
-     'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
-     'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
-     'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
-     'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
-     }
-
- colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
-           (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
-           (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
-           (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
- thickness = 1
-
-
  ## AUTHORSHIP INFORMATION
  __author__ = "David Pagnon, HunMin Kim"
  __copyright__ = "Copyright 2023, Sports2D"
@@ -153,7 +112,7 @@ def setup_webcam(webcam_id, save_vid, vid_output_path, input_size):
      cap.set(cv2.CAP_PROP_FRAME_HEIGHT, input_size[1])
      cam_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
      cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-     fps = cap.get(cv2.CAP_PROP_FPS)
+     fps = round(cap.get(cv2.CAP_PROP_FPS))
      if fps == 0: fps = 30

      if cam_width != input_size[0] or cam_height != input_size[1]:
@@ -205,7 +164,7 @@ def setup_video(video_file_path, save_vid, vid_output_path):
      cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

      out_vid = None
-     fps = cap.get(cv2.CAP_PROP_FPS)
+     fps = round(cap.get(cv2.CAP_PROP_FPS))
      if fps == 0: fps = 30
      if save_vid:
          # try:
@@ -221,67 +180,78 @@ def setup_video(video_file_path, save_vid, vid_output_path):
      return cap, out_vid, cam_width, cam_height, fps


- def setup_backend_device():
+ def setup_backend_device(backend='auto', device='auto'):
      '''
      Set up the backend and device for the pose tracker based on the availability of hardware acceleration.
      TensorRT is not supported by RTMLib yet: https://github.com/Tau-J/rtmlib/issues/12

-     Selects the best option in the following order of priority:
+     If device and backend are not specified, they are automatically set up in the following order of priority:
      1. GPU with CUDA and ONNXRuntime backend (if CUDAExecutionProvider is available)
      2. GPU with ROCm and ONNXRuntime backend (if ROCMExecutionProvider is available, for AMD GPUs)
      3. GPU with MPS or CoreML and ONNXRuntime backend (for macOS systems)
      4. CPU with OpenVINO backend (default fallback)
      '''

-     try:
-         import torch
-         import onnxruntime as ort
-         if torch.cuda.is_available() == True and 'CUDAExecutionProvider' in ort.get_available_providers():
-             device = 'cuda'
-             backend = 'onnxruntime'
-             logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
-         elif torch.cuda.is_available() == True and 'ROCMExecutionProvider' in ort.get_available_providers():
-             device = 'rocm'
-             backend = 'onnxruntime'
-             logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
-         else:
-             raise
-     except:
+     if device!='auto' and backend!='auto':
+         device = device.lower()
+         backend = backend.lower()
+
+     if device=='auto' or backend=='auto':
+         if device=='auto' and backend!='auto' or device!='auto' and backend=='auto':
+             logging.warning(f"If you set device or backend to 'auto', you must set the other to 'auto' as well. Both device and backend will be determined automatically.")
+
          try:
+             import torch
              import onnxruntime as ort
-             if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
-                 device = 'mps'
+             if torch.cuda.is_available() == True and 'CUDAExecutionProvider' in ort.get_available_providers():
+                 device = 'cuda'
                  backend = 'onnxruntime'
-                 logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
+                 logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
+             elif torch.cuda.is_available() == True and 'ROCMExecutionProvider' in ort.get_available_providers():
+                 device = 'rocm'
+                 backend = 'onnxruntime'
+                 logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
              else:
-                 raise
+                 raise
          except:
-             device = 'cpu'
-             backend = 'openvino'
-             logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")
-
+             try:
+                 import onnxruntime as ort
+                 if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
+                     device = 'mps'
+                     backend = 'onnxruntime'
+                     logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
+                 else:
+                     raise
+             except:
+                 device = 'cpu'
+                 backend = 'openvino'
+                 logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")
+
      return backend, device


- def setup_pose_tracker(det_frequency, mode, tracking):
+ def setup_pose_tracker(ModelClass, det_frequency, mode, tracking, backend, device):
      '''
      Set up the RTMLib pose tracker with the appropriate model and backend.
      If CUDA is available, use it with ONNXRuntime backend; else use CPU with openvino

      INPUTS:
+     - ModelClass: class. The RTMlib model class to use for pose detection (Body, BodyWithFeet, Wholebody)
      - det_frequency: int. The frequency of pose detection (every N frames)
      - mode: str. The mode of the pose tracker ('lightweight', 'balanced', 'performance')
      - tracking: bool. Whether to track persons across frames with RTMlib tracker
+     - backend: str. The backend to use for pose detection (onnxruntime, openvino, opencv)
+     - device: str. The device to use for pose detection (cpu, cuda, rocm, mps)

      OUTPUTS:
      - pose_tracker: PoseTracker. The initialized pose tracker object
      '''

-     backend, device = setup_backend_device()
+     backend, device = setup_backend_device(backend=backend, device=device)

      # Initialize the pose tracker with Halpe26 model
      pose_tracker = PoseTracker(
-         BodyWithFeet,
+         ModelClass,
          det_frequency=det_frequency,
          mode=mode,
          backend=backend,
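For orientation, here is a minimal sketch of how the two refactored functions above now compose; the argument values are illustrative, not defaults taken from the package:

    # Hypothetical call site: auto-detect the hardware, then build a tracker
    backend, device = setup_backend_device(backend='auto', device='auto')
    pose_tracker = setup_pose_tracker(
        BodyWithFeet,        # any RTMlib model class: Body, BodyWithFeet, Wholebody
        det_frequency=4,     # run detection every 4 frames, track in between
        mode='balanced',
        tracking=False,
        backend=backend,
        device=device)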
@@ -353,22 +323,17 @@ def compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, keypoints_id

      ang_params = angle_dict.get(ang_name)
      if ang_params is not None:
-         if ang_name in ['pelvis', 'trunk', 'shoulders']:
-             angle_coords = [[np.abs(person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]]), person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0] if kpt in keypoints_names]
-         else:
-             angle_coords = [[person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]], person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0] if kpt in keypoints_names]
-         ang = points_to_angles(angle_coords)
-         ang += ang_params[2]
-         ang *= ang_params[3]
-         if ang_name in ['pelvis', 'shoulders']:
-             ang = ang-180 if ang>90 else ang
-             ang = ang+180 if ang<-90 else ang
-         else:
-             ang = ang-360 if ang>180 else ang
-             ang = ang+360 if ang<-180 else ang
+         try:
+             if ang_name in ['pelvis', 'trunk', 'shoulders']:
+                 angle_coords = [[np.abs(person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]]), person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
+             else:
+                 angle_coords = [[person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]], person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
+             ang = fixed_angles(angle_coords, ang_name)
+         except:
+             ang = np.nan
      else:
          ang = np.nan
-
+
      return ang

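The inline normalization (offset, sign, wrapping) has moved into fixed_angles, which is defined elsewhere in Sports2D. A runnable re-statement of the wrapping step alone, reconstructed from the removed lines above (an assumption about fixed_angles' behavior, not its actual code):

    def wrap_angle(ang, ang_name):
        # pelvis/shoulders angles are kept within [-90, 90], others within [-180, 180]
        if ang_name in ['pelvis', 'shoulders']:
            ang = ang-180 if ang>90 else ang
            ang = ang+180 if ang<-90 else ang
        else:
            ang = ang-360 if ang>180 else ang
            ang = ang+360 if ang<-180 else ang
        return ang

    print(wrap_angle(250, 'right knee'))  # -110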
@@ -618,7 +583,7 @@ def draw_skel(img, X, Y, model, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)]):
          [cv2.line(img,
              (int(x[n[0]]), int(y[n[0]])), (int(x[n[1]]), int(y[n[1]])), c, thickness)
              for n in node_pairs
-             if not (np.isnan(x[n[0]]) or np.isnan(y[n[0]]) or np.isnan(x[n[1]]) or np.isnan(y[n[1]]))]
+             if not None in n and not (np.isnan(x[n[0]]) or np.isnan(y[n[0]]) or np.isnan(x[n[1]]) or np.isnan(y[n[1]]))] # IF NOT NONE

      return img

@@ -692,31 +657,34 @@ def draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, keypoints_
          ang_name = angle_names[k]
          ang_params = angle_dict.get(ang_name)
          if ang_params is not None:
-             ang_coords = np.array([[X[keypoints_ids[keypoints_names.index(kpt)]], Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0] if kpt in keypoints_names])
-             X_flipped_coords = [X_flipped[keypoints_ids[keypoints_names.index(kpt)]] for kpt in ang_params[0] if kpt in keypoints_names]
-             flip = -1 if any(x_flipped < 0 for x_flipped in X_flipped_coords) else 1
-             flip = 1 if ang_name in ['pelvis', 'trunk', 'shoulders'] else flip
-             right_angle = True if ang_params[2]==90 else False
-
-             # Draw angle
-             if len(ang_coords) == 2: # segment angle
-                 app_point, vec = draw_segment_angle(img, ang_coords, flip)
-             else: # joint angle
-                 app_point, vec1, vec2 = draw_joint_angle(img, ang_coords, flip, right_angle)
-
-             # Write angle on body
-             if 'body' in display_angle_values_on:
+             kpts = ang_params[0]
+             if not any(item not in keypoints_names+['Neck', 'Hip'] for item in kpts):
+                 ang_coords = np.array([[X[keypoints_ids[keypoints_names.index(kpt)]], Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0] if kpt in keypoints_names])
+                 X_flipped = np.append(X_flipped, X[len(X_flipped):])
+                 X_flipped_coords = [X_flipped[keypoints_ids[keypoints_names.index(kpt)]] for kpt in ang_params[0] if kpt in keypoints_names]
+                 flip = -1 if any(x_flipped < 0 for x_flipped in X_flipped_coords) else 1
+                 flip = 1 if ang_name in ['pelvis', 'trunk', 'shoulders'] else flip
+                 right_angle = True if ang_params[2]==90 else False
+
+                 # Draw angle
                  if len(ang_coords) == 2: # segment angle
-                     write_angle_on_body(img, ang, app_point, vec, np.array([1,0]), dist=20, color=(255,255,255), fontSize=fontSize, thickness=thickness)
+                     app_point, vec = draw_segment_angle(img, ang_coords, flip)
                  else: # joint angle
-                     write_angle_on_body(img, ang, app_point, vec1, vec2, dist=40, color=(0,255,0), fontSize=fontSize, thickness=thickness)
-
-             # Write angle as a list on image with progress bar
-             if 'list' in display_angle_values_on:
-                 if len(ang_coords) == 2: # segment angle
-                     ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (255,255,255), fontSize=fontSize, thickness=thickness)
-                 else:
-                     ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (0,255,0), fontSize=fontSize, thickness=thickness)
+                     app_point, vec1, vec2 = draw_joint_angle(img, ang_coords, flip, right_angle)
+
+                 # Write angle on body
+                 if 'body' in display_angle_values_on:
+                     if len(ang_coords) == 2: # segment angle
+                         write_angle_on_body(img, ang, app_point, vec, np.array([1,0]), dist=20, color=(255,255,255), fontSize=fontSize, thickness=thickness)
+                     else: # joint angle
+                         write_angle_on_body(img, ang, app_point, vec1, vec2, dist=40, color=(0,255,0), fontSize=fontSize, thickness=thickness)
+
+                 # Write angle as a list on image with progress bar
+                 if 'list' in display_angle_values_on:
+                     if len(ang_coords) == 2: # segment angle
+                         ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (255,255,255), fontSize=fontSize, thickness=thickness)
+                     else:
+                         ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (0,255,0), fontSize=fontSize, thickness=thickness)

      return img

@@ -869,32 +837,6 @@ def write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_lin
      return ang_label_line


- def read_trc(trc_path):
-     '''
-     Read trc file
-
-     INPUTS:
-     - trc_path: path to the trc file
-
-     OUTPUTS:
-     - Q_coords: dataframe of coordinates
-     - frames_col: series of frames
-     - time_col: series of time
-     - markers: list of marker names
-     - header: list of header lines
-     '''
-
-     with open(trc_path, 'r') as trc_file:
-         header = [next(trc_file) for line in range(5)]
-         markers = header[3].split('\t')[2::3]
-
-     trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4)
-     frames_col, time_col = pd.Series(trc_df.iloc[:,0], name='frames'), pd.Series(trc_df.iloc[:,1], name='time')
-     Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
-
-     return Q_coords, frames_col, time_col, markers, header
-
-
  def load_pose_file(Q_coords):
      '''
      Load 2D keypoints from a dataframe of XYZ coordinates
@@ -1075,6 +1017,7 @@ def angle_plots(angle_data_unfiltered, angle_data, person_id):
          ax = plt.subplot(111)
          plt.plot(angle_data_unfiltered.iloc[:,0], angle_data_unfiltered.iloc[:,id+1], label='unfiltered')
          plt.plot(angle_data.iloc[:,0], angle_data.iloc[:,id+1], label='filtered')
+
          ax.set_xlabel('Time (seconds)')
          ax.set_ylabel(angle+' (°)')
          plt.legend()
@@ -1118,9 +1061,8 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
      - xy_origin: list. The origin of the floor line
      '''

-
      # Remove frames where the person is mostly not moving (outlier)
-     av_speeds = np.nanmean([np.linalg.norm(trc_data[kpt].diff(), axis=1) for kpt in trc_data.columns.unique()[1:]], axis=0)
+     av_speeds = np.nanmean([np.insert(np.linalg.norm(trc_data[kpt].diff(), axis=1)[1:],0,0) for kpt in trc_data.columns.unique()[1:]], axis=0)
      trc_data = trc_data[av_speeds>tot_speed_above]

      # Retrieve zero-speed coordinates for the foot
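The only behavioral change is frame 0: DataFrame.diff() leaves NaN in its first row, so the norm of that row was NaN; the new expression replaces it with 0 instead of letting it propagate. A toy illustration:

    import numpy as np
    import pandas as pd

    xy = pd.DataFrame({'X': [0.0, 1.0, 3.0], 'Y': [0.0, 0.0, 0.0]})
    speeds = np.linalg.norm(xy.diff(), axis=1)  # array([nan, 1., 2.])
    speeds = np.insert(speeds[1:], 0, 0)        # array([0., 1., 2.])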
@@ -1145,138 +1087,6 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
      return angle, xy_origin


- def mean_angles(Q_coords, markers, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
-     '''
-     Compute the mean angle time series from 3D points for a given list of angles.
-
-     INPUTS:
-     - Q_coords (DataFrame): The triangulated coordinates of the markers.
-     - markers (list): The list of marker names.
-     - ang_to_consider (list): The list of angles to consider (requires angle_dict).
-
-     OUTPUTS:
-     - ang_mean: The mean angle time series.
-     '''
-
-     ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']
-
-     angs = []
-     for ang_name in ang_to_consider:
-         ang_params = angle_dict[ang_name]
-         ang_mk = ang_params[0]
-
-         pts_for_angles = []
-         for pt in ang_mk:
-             pts_for_angles.append(Q_coords.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
-         ang = points_to_angles(pts_for_angles)
-
-         ang += ang_params[2]
-         ang *= ang_params[3]
-         ang = np.abs(ang)
-
-         angs.append(ang)
-
-     ang_mean = np.mean(angs, axis=0)
-
-     return ang_mean
-
-
- def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
-     '''
-     Compute the best coordinates for measurements, after removing:
-     - 20% fastest frames (may be outliers)
-     - frames when speed is close to zero (person is out of frame): 0.2 m/frame, or 50 px/frame
-     - frames when hip and knee angle below 45° (imprecise coordinates when person is crouching)
-
-     INPUTS:
-     - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-     - keypoints_names: list. The list of marker names
-     - fastest_frames_to_remove_percent: float
-     - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
-     - large_hip_knee_angles: int
-     - trimmed_extrema_percent
-
-     OUTPUT:
-     - Q_coords_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
-     '''
-
-     # Add Hip column if not present
-     n_markers_init = len(keypoints_names)
-     if 'Hip' not in keypoints_names:
-         RHip_df = Q_coords.iloc[:,keypoints_names.index('RHip')*3:keypoints_names.index('RHip')*3+3]
-         LHip_df = Q_coords.iloc[:,keypoints_names.index('LHip')*3:keypoints_names.index('RHip')*3+3]
-         Hip_df = RHip_df.add(LHip_df, fill_value=0) /2
-         Hip_df.columns = [col+ str(int(Q_coords.columns[-1][1:])+1) for col in ['X','Y','Z']]
-         keypoints_names += ['Hip']
-         Q_coords = pd.concat([Q_coords, Hip_df], axis=1)
-     n_markers = len(keypoints_names)
-
-     # Using 80% slowest frames
-     sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
-     sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
-     min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
-     Q_coords_low_speeds = Q_coords.iloc[min_speed_indices].reset_index(drop=True)
-
-     # Only keep frames with hip and knee flexion angles below 45%
-     # (if more than 50 of them, else take 50 smallest values)
-     ang_mean = mean_angles(Q_coords_low_speeds, keypoints_names, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
-     Q_coords_low_speeds_low_angles = Q_coords_low_speeds[ang_mean < large_hip_knee_angles]
-     if len(Q_coords_low_speeds_low_angles) < 50:
-         Q_coords_low_speeds_low_angles = Q_coords_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
-
-     if n_markers_init < n_markers:
-         Q_coords_low_speeds_low_angles = Q_coords_low_speeds_low_angles.iloc[:,:-3]
-
-     return Q_coords_low_speeds_low_angles
-
-
- def compute_height(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
-     '''
-     Compute the height of the person from the trc data.
-
-     INPUTS:
-     - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-     - keypoints_names: list. The list of marker names
-     - fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
-     - close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
-     - large_hip_knee_angles5: float. Hip and knee angles below this value are considered as imprecise
-     - trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean)
-
-     OUTPUT:
-     - height: float. The estimated height of the person
-     '''
-
-     # Retrieve most reliable coordinates
-     Q_coords_low_speeds_low_angles = best_coords_for_measurements(Q_coords, keypoints_names,
-                                                                   fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
-     Q_coords_low_speeds_low_angles.columns = np.array([[m]*3 for m in keypoints_names]).flatten()
-
-     # Add MidShoulder column
-     df_MidShoulder = pd.DataFrame((Q_coords_low_speeds_low_angles['RShoulder'].values + Q_coords_low_speeds_low_angles['LShoulder'].values) /2)
-     df_MidShoulder.columns = ['MidShoulder']*3
-     Q_coords_low_speeds_low_angles = pd.concat((Q_coords_low_speeds_low_angles.reset_index(drop=True), df_MidShoulder), axis=1)
-
-     # Automatically compute the height of the person
-     pairs_up_to_shoulders = [['RHeel', 'RAnkle'], ['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
-                              ['LHeel', 'LAnkle'], ['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
-     try:
-         rfoot, rshank, rfemur, rback, lfoot, lshank, lfemur, lback = [euclidean_distance(Q_coords_low_speeds_low_angles[pair[0]],Q_coords_low_speeds_low_angles[pair[1]]) for pair in pairs_up_to_shoulders]
-     except:
-         raise ValueError('At least one of the following markers is missing for computing the height of the person:\
-                          RHeel, RAnkle, RKnee, RHip, RShoulder, LHeel, LAnkle, LKnee, LHip, LShoulder.\
-                          Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-     if 'Head' in keypoints_names:
-         head = euclidean_distance(Q_coords_low_speeds_low_angles['MidShoulder'], Q_coords_low_speeds_low_angles['Head'])
-     else:
-         head = euclidean_distance(Q_coords_low_speeds_low_angles['MidShoulder'], Q_coords_low_speeds_low_angles['Nose'])*1.33
-     heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
-
-     # Remove the 20% most extreme values
-     height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
-
-     return height
-
-
  def convert_px_to_meters(Q_coords_kpt, person_height_m, height_px, cx, cy, floor_angle):
      '''
      Convert pixel coordinates to meters.
@@ -1374,6 +1184,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      mode = config_dict.get('pose').get('mode')
      det_frequency = config_dict.get('pose').get('det_frequency')
      tracking_mode = config_dict.get('pose').get('tracking_mode')
+     backend = config_dict.get('pose').get('backend')
+     device = config_dict.get('pose').get('device')

      # Pixel to meters conversion
      to_meters = config_dict.get('px_to_meters_conversion').get('to_meters')
@@ -1407,6 +1219,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      fontSize = config_dict.get('angles').get('fontSize')
      thickness = 1 if fontSize < 0.8 else 2
      flip_left_right = config_dict.get('angles').get('flip_left_right')
+     correct_segment_angles_with_floor_angle = config_dict.get('angles').get('correct_segment_angles_with_floor_angle')

      # Post-processing settings
      interpolate = config_dict.get('post-processing').get('interpolate')
@@ -1421,7 +1234,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      gaussian_filter_kernel = config_dict.get('post-processing').get('gaussian').get('sigma_kernel')
      loess_filter_kernel = config_dict.get('post-processing').get('loess').get('nb_values_used')
      median_filter_kernel = config_dict.get('post-processing').get('median').get('kernel_size')
-     butterworth_filter_cutoff /= slowmo_factor
      filter_options = [do_filter, filter_type,
                        butterworth_filter_order, butterworth_filter_cutoff, frame_rate,
                        gaussian_filter_kernel, loess_filter_kernel, median_filter_kernel]
@@ -1458,6 +1270,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          logging.warning('Webcam input: the framerate may vary. If results are filtered, Sports2D will use the average framerate as input.')
      else:
          cap, out_vid, cam_width, cam_height, fps = setup_video(video_file_path, save_vid, vid_output_path)
+         fps *= slowmo_factor
          start_time = get_start_time_ffmpeg(video_file_path)
          frame_range = [int((time_range[0]-start_time) * frame_rate), int((time_range[1]-start_time) * frame_rate)] if time_range else [0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))]
          frame_iterator = tqdm(range(*frame_range)) # use a progress bar
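Together with the removal of butterworth_filter_cutoff /= slowmo_factor above, this moves the slow-motion correction from the filter cutoff to the framerate itself. Assuming slowmo_factor is the ratio of capture rate to playback rate, as its use here suggests:

    # e.g. a clip captured at 240 fps but saved as a 30 fps file:
    fps = 30 * 8  # slowmo_factor = 8, so times and cutoffs stay in real-world seconds/Hz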
@@ -1465,37 +1278,93 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          cv2.namedWindow(f'{video_file} Sports2D', cv2.WINDOW_NORMAL + cv2.WINDOW_KEEPRATIO)
          cv2.setWindowProperty(f'{video_file} Sports2D', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_FULLSCREEN)

+     # Select the appropriate model based on the model_type
+     if pose_model.upper() in ('HALPE_26', 'BODY_WITH_FEET'):
+         model_name = 'HALPE_26'
+         ModelClass = BodyWithFeet # 26 keypoints(halpe26)
+         logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
+     elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY', 'WHOLE_BODY_WRIST'):
+         model_name = 'COCO_133'
+         ModelClass = Wholebody
+         logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
+     elif pose_model.upper() in ('COCO_17', 'BODY'):
+         model_name = 'COCO_17'
+         ModelClass = Body
+         logging.info(f"Using COCO_17 model (body) for pose estimation.")
+     else:
+         raise ValueError(f"Invalid model_type: {model_name}. Must be 'HALPE_26', 'COCO_133', or 'COCO_17'. Use another network (MMPose, DeepLabCut, OpenPose, AlphaPose, BlazePose...) and convert the output files if you need another model. See documentation.")
+     pose_model_name = pose_model
+     pose_model = eval(model_name)
+
+     # Manually select the models if mode is a dictionary rather than 'lightweight', 'balanced', or 'performance'
+     if not mode in ['lightweight', 'balanced', 'performance']:
+         try:
+             try:
+                 mode = ast.literal_eval(mode)
+             except: # if within single quotes instead of double quotes when run with sports2d --mode """{dictionary}"""
+                 mode = mode.strip("'").replace('\n', '').replace(" ", "").replace(",", '", "').replace(":", '":"').replace("{", '{"').replace("}", '"}').replace('":"/',':/').replace('":"\\',':\\')
+                 mode = re.sub(r'"\[([^"]+)",\s?"([^"]+)\]"', r'[\1,\2]', mode) # changes "[640", "640]" to [640,640]
+                 mode = json.loads(mode)
+             det_class = mode.get('det_class')
+             det = mode.get('det_model')
+             det_input_size = mode.get('det_input_size')
+             pose_class = mode.get('pose_class')
+             pose = mode.get('pose_model')
+             pose_input_size = mode.get('pose_input_size')
+
+             ModelClass = partial(Custom,
+                                  det_class=det_class, det=det, det_input_size=det_input_size,
+                                  pose_class=pose_class, pose=pose, pose_input_size=pose_input_size,
+                                  backend=backend, device=device)
+
+         except (json.JSONDecodeError, TypeError):
+             logging.warning("\nInvalid mode. Must be 'lightweight', 'balanced', 'performance', or '''{dictionary}''' of parameters within triple quotes. Make sure input_sizes are within square brackets.")
+             logging.warning('Using the default "balanced" mode.')
+             mode = 'balanced'

+
      # Skip pose estimation or set it up:
-     model = eval(pose_model)
      if load_trc:
          if not '_px' in str(load_trc):
              logging.error(f'\n{load_trc} file needs to be in px, not in meters.')
-         logging.info(f'\nUsing a pose file instead of running pose tracking {load_trc}.')
+         logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc}.')
          # Load pose file in px
          Q_coords, _, _, keypoints_names, _ = read_trc(load_trc)
          keypoints_ids = [i for i in range(len(keypoints_names))]
          keypoints_all, scores_all = load_pose_file(Q_coords)
-         for pre, _, node in RenderTree(model):
+         for pre, _, node in RenderTree(model_name):
              if node.name in keypoints_names:
                  node.id = keypoints_names.index(node.name)

      else:
          # Retrieve keypoint names from model
-         keypoints_ids = [node.id for _, _, node in RenderTree(model) if node.id!=None]
-         keypoints_names = [node.name for _, _, node in RenderTree(model) if node.id!=None]
+         keypoints_ids = [node.id for _, _, node in RenderTree(pose_model) if node.id!=None]
+         keypoints_names = [node.name for _, _, node in RenderTree(pose_model) if node.id!=None]

          tracking_rtmlib = True if (tracking_mode == 'rtmlib' and multiperson) else False
-         pose_tracker = setup_pose_tracker(det_frequency, mode, tracking_rtmlib)
-         logging.info(f'\nPose tracking set up for BodyWithFeet model in {mode} mode.')
+         pose_tracker = setup_pose_tracker(ModelClass, det_frequency, mode, tracking_rtmlib, backend, device)
+         logging.info(f'\nPose tracking set up for "{pose_model_name}" model.')
+         logging.info(f'Mode: {mode}.\n')
          logging.info(f'Persons are detected every {det_frequency} frames and tracked inbetween. Multi-person is {"" if multiperson else "not "}selected.')
          logging.info(f"Parameters: {keypoint_likelihood_threshold=}, {average_likelihood_threshold=}, {keypoint_number_threshold=}")

-     Ltoe_idx = keypoints_ids[keypoints_names.index('LBigToe')]
-     LHeel_idx = keypoints_ids[keypoints_names.index('LHeel')]
-     Rtoe_idx = keypoints_ids[keypoints_names.index('RBigToe')]
-     RHeel_idx = keypoints_ids[keypoints_names.index('RHeel')]
-     L_R_direction_idx = [Ltoe_idx, LHeel_idx, Rtoe_idx, RHeel_idx]
+     if flip_left_right:
+         try:
+             Ltoe_idx = keypoints_ids[keypoints_names.index('LBigToe')]
+             LHeel_idx = keypoints_ids[keypoints_names.index('LHeel')]
+             Rtoe_idx = keypoints_ids[keypoints_names.index('RBigToe')]
+             RHeel_idx = keypoints_ids[keypoints_names.index('RHeel')]
+             L_R_direction_idx = [Ltoe_idx, LHeel_idx, Rtoe_idx, RHeel_idx]
+         except ValueError:
+             logging.warning(f"Missing 'LBigToe', 'LHeel', 'RBigToe', 'RHeel' keypoints. flip_left_right will be set to False")
+             flip_left_right = False
+
+     if calculate_angles:
+         for ang_name in angle_names:
+             ang_params = angle_dict.get(ang_name)
+             kpts = ang_params[0]
+             if any(item not in keypoints_names+['Neck', 'Hip'] for item in kpts):
+                 logging.warning(f"Skipping {ang_name} angle computation because at least one of the following keypoints is not provided by the model: {ang_params[0]}.")


      # Process video or webcam feed
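For reference, a mode dictionary of the shape this parser expects; only the keys are taken from the code above, all values are placeholders:

    mode = {
        'det_class': 'YOLOX',                    # hypothetical detector class
        'det_model': 'path/to/det_model.onnx',   # placeholder path
        'det_input_size': [640, 640],
        'pose_class': 'RTMPose',                 # hypothetical pose class
        'pose_model': 'path/to/pose_model.onnx', # placeholder path
        'pose_input_size': [192, 256],
    }
    # On the command line it would be passed in triple quotes, per the
    # warning above: sports2d --mode """{...}"""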
@@ -1569,9 +1438,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              person_X = np.full_like(person_X, np.nan)
              person_Y = np.full_like(person_Y, np.nan)
              person_scores = np.full_like(person_scores, np.nan)
-             valid_X.append(person_X)
-             valid_Y.append(person_Y)
-             valid_scores.append(person_scores)


          # Compute angles
@@ -1581,24 +1447,40 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                  person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)
              else:
                  person_X_flipped = person_X.copy()
-             valid_X_flipped.append(person_X_flipped)

              # Compute angles
              person_angles = []
+             # Add Neck and Hip if not provided
+             new_keypoints_names, new_keypoints_ids = keypoints_names.copy(), keypoints_ids.copy()
+             for kpt in ['Neck', 'Hip']:
+                 if kpt not in new_keypoints_names:
+                     person_X_flipped, person_Y, person_scores = add_neck_hip_coords(kpt, person_X_flipped, person_Y, person_scores, new_keypoints_ids, new_keypoints_names)
+                     person_X, _, _ = add_neck_hip_coords(kpt, person_X, person_Y, person_scores, new_keypoints_ids, new_keypoints_names)
+                     new_keypoints_names.append(kpt)
+                     new_keypoints_ids.append(len(person_X_flipped)-1)
+
              for ang_name in angle_names:
-                 ang = compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, keypoints_ids, keypoints_names)
+                 ang_params = angle_dict.get(ang_name)
+                 kpts = ang_params[0]
+                 if not any(item not in new_keypoints_names for item in kpts):
+                     ang = compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, new_keypoints_ids, new_keypoints_names)
+                 else:
+                     ang = np.nan
                  person_angles.append(ang)
              valid_angles.append(person_angles)
-
+             valid_X_flipped.append(person_X_flipped)
+             valid_X.append(person_X)
+             valid_Y.append(person_Y)
+             valid_scores.append(person_scores)


          # Draw keypoints and skeleton
          if show_realtime_results or save_vid or save_img:
              img = frame.copy()
              img = draw_bounding_box(img, valid_X, valid_Y, colors=colors, fontSize=fontSize, thickness=thickness)
-             img = draw_keypts(img, valid_X, valid_Y, scores, cmap_str='RdYlGn')
-             img = draw_skel(img, valid_X, valid_Y, model, colors=colors)
+             img = draw_keypts(img, valid_X, valid_Y, valid_scores, cmap_str='RdYlGn')
+             img = draw_skel(img, valid_X, valid_Y, pose_model, colors=colors)
              if calculate_angles:
-                 img = draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, keypoints_ids, keypoints_names, angle_names, display_angle_values_on=display_angle_values_on, colors=colors, fontSize=fontSize, thickness=thickness)
+                 img = draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, new_keypoints_ids, new_keypoints_names, angle_names, display_angle_values_on=display_angle_values_on, colors=colors, fontSize=fontSize, thickness=thickness)

              if show_realtime_results:
                  cv2.imshow(f'{video_file} Sports2D', img)
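add_neck_hip_coords is defined elsewhere in Sports2D; a plausible minimal sketch of the midpoint logic it stands in for (an assumption, not the shipped implementation):

    import numpy as np

    def add_midpoint_sketch(X, Y, scores, idx_a, idx_b):
        # hypothetical helper: e.g. Neck as mid-shoulders, Hip as mid-hips
        X = np.append(X, (X[idx_a] + X[idx_b]) / 2)
        Y = np.append(Y, (Y[idx_a] + Y[idx_b]) / 2)
        scores = np.append(scores, (scores[idx_a] + scores[idx_b]) / 2)
        return X, Y, scores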
@@ -1619,6 +1501,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              elapsed_time = (datetime.now() - start_time).total_seconds()
              frame_processing_times.append(elapsed_time)

+
+     # End of the video is reached
      cap.release()
      logging.info(f"Video processing completed.")
      if save_vid:
@@ -1644,7 +1528,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      all_frames_scores = make_homogeneous(all_frames_scores)

      frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
-     all_frames_time = pd.Series(np.linspace(frame_range[0]/fps/slowmo_factor, frame_range[1]/fps/slowmo_factor, frame_count+1), name='time')
+     all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1), name='time')
      if not multiperson:
          calib_on_person_id = get_personID_with_highest_scores(all_frames_scores)
          detected_persons = [calib_on_person_id]
@@ -1698,15 +1582,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          else:
              filter_type = filter_options[1]
              if filter_type == 'butterworth':
+                 cutoff = filter_options[3]
                  if video_file == 'webcam':
-                     cutoff = filter_options[3]
                      if cutoff / (fps / 2) >= 1:
                          cutoff_old = cutoff
                          cutoff = fps/(2+0.001)
                          args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
                          filter_options[3] = cutoff
-                 else:
-                     args = ''
                  args = f'Butterworth filter, {filter_options[2]}th order, {filter_options[3]} Hz.'
                  filter_options[4] = fps
              if filter_type == 'gaussian':
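The guard keeps the Butterworth cutoff strictly below the Nyquist frequency (fps/2); a normalized cutoff ≥ 1 would make the filter design invalid. A toy check with illustrative values:

    fps, cutoff = 25.0, 20.0          # hypothetical webcam rate and requested cutoff
    if cutoff / (fps / 2) >= 1:       # 20 Hz >= 12.5 Hz Nyquist limit
        cutoff = fps / (2 + 0.001)    # ~12.49 Hz, just under Nyquist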
@@ -1733,9 +1615,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          if show_plots and not to_meters:
              pose_plots(trc_data_unfiltered_i, trc_data_i, i)

+
      # Convert px to meters
      if to_meters:
          logging.info('\nConverting pose to meters:')
+         if calib_on_person_id>=len(trc_data):
+             logging.warning(f'Person #{calib_on_person_id} not detected in the video. Calibrating on person #0 instead.')
+             calib_on_person_id = 0
          if calib_file:
              logging.info(f'Using calibration file to convert coordinates in meters: {calib_file}.')
              calib_params_dict = retrieve_calib_params(calib_file)
@@ -1743,15 +1629,28 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

          else:
              # Compute calibration parameters
+             if not multiperson:
+                 selected_person_id = calib_on_person_id
+                 calib_on_person_id = 0
              height_px = compute_height(trc_data[calib_on_person_id].iloc[:,1:], keypoints_names,
                                         fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_px, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)

              if floor_angle == 'auto' or xy_origin == 'auto':
                  # estimated from the line formed by the toes when they are on the ground (where speed = 0)
-                 toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
-                 px_per_m = height_px/person_height_m
-                 toe_speed_below_px_frame = toe_speed_below * px_per_m / (fps*slowmo_factor)
-                 floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                 try:
+                     toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
+                     px_per_m = height_px/person_height_m
+                     toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
+                     try:
+                         floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                     except: # no feet points
+                         floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
+                         xy_origin_estim[0] = xy_origin_estim[0]-0.13
+                         logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
+                 except:
+                     floor_angle_estim = 0
+                     xy_origin_estim = cam_width/2, cam_height/2
+                     logging.warning(f'Could not estimate the floor angle and xy_origin. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}].')
              if not floor_angle == 'auto':
                  floor_angle_estim = floor_angle
              if xy_origin == 'auto':
@@ -1774,9 +1673,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i)

          # Write to trc file
-         pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{i:02d}.trc'))
-         make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i)
-         logging.info(f'Person {i}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')
+         idx_path = selected_person_id if not multiperson and not calib_file else i
+         pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
+         make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
+         logging.info(f'Person {idx_path}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')



@@ -1832,7 +1732,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      all_frames_angles = make_homogeneous(all_frames_angles)

      # unwrap angles
-     all_frames_angles = np.unwrap(all_frames_angles, axis=0, period=180)
+     # all_frames_angles = np.unwrap(all_frames_angles, axis=0, period=180) # This give all nan values -> need to mask nans
+     for i in range(all_frames_angles.shape[1]): # for each person
+         for j in range(all_frames_angles.shape[2]): # for each angle
+             valid_mask = ~np.isnan(all_frames_angles[:, i, j])
+             all_frames_angles[valid_mask, i, j] = np.unwrap(all_frames_angles[valid_mask, i, j], period=180)

      # Process angles for each person
      for i in detected_persons:
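A minimal illustration of the nan-masked unwrapping with synthetic angles: np.unwrap propagates NaNs, so it is applied to the valid samples only and the gaps are left in place.

    import numpy as np

    angles = np.array([170.0, np.nan, -175.0, -170.0])
    valid_mask = ~np.isnan(angles)
    angles[valid_mask] = np.unwrap(angles[valid_mask], period=180)
    # angles is now [170., nan, 185., 190.] - continuous instead of jumping at ±180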
@@ -1864,16 +1768,14 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              else:
                  filter_type = filter_options[1]
                  if filter_type == 'butterworth':
+                     cutoff = filter_options[3]
                      if video_file == 'webcam':
-                         cutoff = filter_options[3]
                          if cutoff / (fps / 2) >= 1:
                              cutoff_old = cutoff
                              cutoff = fps/(2+0.001)
                              args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
                              filter_options[3] = cutoff
-                     else:
-                         args = ''
-                     args = f'Butterworth filter, {filter_options[2]}th order, {filter_options[3]} Hz. ' + args
+                     args = f'Butterworth filter, {filter_options[2]}th order, {filter_options[3]} Hz.'
                      filter_options[4] = fps
                  if filter_type == 'gaussian':
                      args = f'Gaussian filter, Sigma kernel {filter_options[5]}.'
@@ -1884,6 +1786,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              logging.info(f'Filtering with {args}')
              all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter.filter1d, axis=0, args=filter_options)

+         # Remove columns with all nan values
+         all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
+         all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
+
+         # Add floor_angle_estim to segment angles
+         if correct_segment_angles_with_floor_angle and to_meters:
+             logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
+             for ang_name in all_frames_angles_person_filt.columns:
+                 if 'horizontal' in angle_dict[ang_name][1]:
+                     all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
+
          # Build mot file
          angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
          logging.info(f'Angles saved to {angles_path_person.resolve()}.')
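Since every segment angle in angle_dict is referenced to the image horizontal, subtracting the estimated floor inclination re-references it to the ground. A toy example with illustrative numbers:

    import numpy as np

    floor_angle_estim = np.radians(2.0)           # hypothetical 2° floor slope estimate
    trunk_angle = 85.0                            # degrees, against the image horizontal
    trunk_angle -= np.degrees(floor_angle_estim)  # 83.0, now against the floor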