sports2d 0.6.2__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Sports2D/process.py CHANGED
@@ -56,10 +56,13 @@ import sys
  import logging
  import json
  import ast
+ import shutil
+ import os
  from functools import partial
  from datetime import datetime
  import itertools as it
  from tqdm import tqdm
+ from collections import defaultdict
  from anytree import RenderTree
 
  import numpy as np
@@ -69,11 +72,14 @@ import matplotlib as mpl
  import matplotlib.pyplot as plt
  from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom
  from deep_sort_realtime.deepsort_tracker import DeepSort
+ import opensim as osim
 
  from Sports2D.Utilities import filter
  from Sports2D.Utilities.common import *
  from Sports2D.Utilities.skeletons import *
 
+ DEFAULT_MASS = 70
+ DEFAULT_HEIGHT = 1.7
 
  ## AUTHORSHIP INFORMATION
  __author__ = "David Pagnon, HunMin Kim"
@@ -592,8 +598,8 @@ def load_pose_file(Q_coords):
      - scores_all: np.array. The scores in the format (Nframes, 1, Nmarkers)
      '''
 
-     Z_cols = [3*i+2 for i in range(len(Q_coords.columns)//3)]
-     Q_coords_xy = Q_coords.drop(Q_coords.columns[Z_cols], axis=1)
+     Z_cols = np.array([[3*i,3*i+1] for i in range(len(Q_coords.columns)//3)]).ravel()
+     Q_coords_xy = Q_coords.iloc[:,Z_cols]
      kpt_number = len(Q_coords_xy.columns)//2
 
      # shape (Nframes, 2*Nmarkers) --> (Nframes, 1, Nmarkers, 2)
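
The rewritten selection keeps the X and Y columns of the interleaved X/Y/Z layout by position, instead of dropping every third (Z) column. A quick standalone sketch of that indexing, with a made-up two-frame table:

    import numpy as np
    import pandas as pd

    # Columns interleaved as X0, Y0, Z0, X1, Y1, Z1, ...
    Q_coords = pd.DataFrame(np.arange(12).reshape(2, 6))
    xy_cols = np.array([[3*i, 3*i+1] for i in range(len(Q_coords.columns)//3)]).ravel()
    print(Q_coords.iloc[:, xy_cols])  # keeps columns 0, 1, 3, 4 (X and Y), drops 2 and 5 (Z)
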
@@ -787,9 +793,9 @@ def get_personID_with_highest_scores(all_frames_scores):
      return person_id
 
 
- def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_speed_below = 1.0, tot_speed_above=2.0):
+ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_speed_below = 7, tot_speed_above=2.0):
      '''
-     Compute the floor line equation and angle
+     Compute the floor line equation, angle, and direction
      from the feet keypoints when they have zero speed.
 
      N.B.: Y coordinates point downwards
@@ -802,6 +808,7 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
      OUTPUT:
      - angle: float. The angle of the floor line in radians
      - xy_origin: list. The origin of the floor line
+     - gait_direction: float. Left if < 0, right otherwise
      '''
 
      # Remove frames where the person is mostly not moving (outlier)
@@ -810,36 +817,45 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
 
      # Retrieve zero-speed coordinates for the foot
      low_speeds_X, low_speeds_Y = [], []
+     gait_direction_val = []
      for kpt in keypoint_names:
          speeds = np.linalg.norm(trc_data[kpt].diff(), axis=1)
-
+
          low_speed_frames = trc_data[speeds<toe_speed_below].index
          low_speeds_coords = trc_data[kpt].loc[low_speed_frames]
          low_speeds_coords = low_speeds_coords[low_speeds_coords!=0]
 
-         low_speeds_X += low_speeds_coords.iloc[:,0].tolist()
+         low_speeds_X_kpt = low_speeds_coords.iloc[:,0].tolist()
+         low_speeds_X += low_speeds_X_kpt
          low_speeds_Y += low_speeds_coords.iloc[:,1].tolist()
 
+         # gait direction (between [-1,1])
+         X_trend_val = np.polyfit(range(len(low_speeds_X_kpt)), low_speeds_X_kpt, 1)[0]
+         gait_direction_kpt = X_trend_val * len(low_speeds_X_kpt) / (np.max(low_speeds_X_kpt) - np.min(low_speeds_X_kpt))
+         gait_direction_val.append(gait_direction_kpt)
+
      # Fit a line to the zero-speed coordinates
      floor_line = np.polyfit(low_speeds_X, low_speeds_Y, 1) # (slope, intercept)
-     xy_origin = [0, floor_line[1]]
+     angle = -np.arctan(floor_line[0]) # angle of the floor line in radians
+     xy_origin = [0, floor_line[1]] # origin of the floor line
 
-     # Compute the angle of the floor line in degrees
-     angle = -np.arctan(floor_line[0])
-
-     return angle, xy_origin
+     # Gait direction
+     gait_direction = np.mean(gait_direction_val)
+
+     return angle, xy_origin, gait_direction
 
 
- def convert_px_to_meters(Q_coords_kpt, person_height_m, height_px, cx, cy, floor_angle):
+ def convert_px_to_meters(Q_coords_kpt, px_to_m_person_height_m, height_px, cx, cy, floor_angle, visible_side='none'):
      '''
      Convert pixel coordinates to meters.
 
      INPUTS:
      - Q_coords_kpt: pd.DataFrame. The xyz coordinates of a keypoint in pixels, with z filled with zeros
-     - person_height_m: float. The height of the person in meters
+     - px_to_m_person_height_m: float. The height of the person in meters
      - height_px: float. The height of the person in pixels
      - cx, cy: float. The origin of the image in pixels
      - floor_angle: float. The angle of the floor in radians
+     - visible_side: str. The side of the person that is visible ('right', 'left', 'front', 'back', 'none')
 
      OUTPUT:
      - Q_coords_kpt_m: pd.DataFrame. The XYZ coordinates of a keypoint in meters
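
Condensed, the floor-line estimation added in the hunk above fits a straight line through the stationary toe positions and flips the sign of the angle because image Y points downwards. A sketch with made-up pixel coordinates:

    import numpy as np

    # Stationary-toe positions in px (image Y points downwards)
    low_speeds_X = [150, 320, 480, 640]
    low_speeds_Y = [702, 698, 705, 701]

    slope, intercept = np.polyfit(low_speeds_X, low_speeds_Y, 1)
    angle = -np.arctan(slope)     # floor angle in radians, sign flipped for the downward Y axis
    xy_origin = [0, intercept]    # where the floor line crosses x = 0
    print(np.degrees(angle), xy_origin)
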
@@ -848,10 +864,17 @@ def convert_px_to_meters(Q_coords_kpt, person_height_m, height_px, cx, cy, floor
      u = Q_coords_kpt.iloc[:,0]
      v = Q_coords_kpt.iloc[:,1]
 
-     X = person_height_m / height_px * ((u-cx) + (v-cy)*np.sin(floor_angle))
-     Y = - person_height_m / height_px * np.cos(floor_angle) * (v-cy - np.tan(floor_angle)*(u-cx))
+     X = px_to_m_person_height_m / height_px * ((u-cx) + (v-cy)*np.sin(floor_angle))
+     Y = - px_to_m_person_height_m / height_px * np.cos(floor_angle) * (v-cy - np.tan(floor_angle)*(u-cx))
 
-     Q_coords_kpt_m = pd.DataFrame(np.array([X, Y, np.zeros_like(X)]).T, columns=Q_coords_kpt.columns)
+     if 'marker_Z_positions' in globals() and visible_side!='none':
+         marker_name = Q_coords_kpt.columns[0]
+         Z = X.copy()
+         Z[:] = marker_Z_positions[visible_side][marker_name]
+     else:
+         Z = np.zeros_like(X)
+
+     Q_coords_kpt_m = pd.DataFrame(np.array([X, Y, Z]).T, columns=Q_coords_kpt.columns)
 
      return Q_coords_kpt_m
 
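
The conversion above scales pixel offsets from the origin (cx, cy) by the metres-per-pixel ratio derived from the person's known height, and rotates out the floor tilt. A minimal sketch of the same mapping with hypothetical numbers (in the real pipeline, `height_px` comes from `compute_height`):

    import numpy as np

    def px_to_m(u, v, person_height_m, height_px, cx, cy, floor_angle):
        scale = person_height_m / height_px  # metres per pixel
        # X runs along the (tilted) floor; Y is negated because image Y points downwards
        X = scale * ((u - cx) + (v - cy) * np.sin(floor_angle))
        Y = -scale * np.cos(floor_angle) * (v - cy - np.tan(floor_angle) * (u - cx))
        return X, Y

    # A 1.74 m person spanning 580 px, image origin at (320, 600), 2-degree floor tilt
    print(px_to_m(500, 550, 1.74, 580, 320, 600, np.radians(2)))
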
@@ -902,11 +925,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
      # Base parameters
      video_dir = Path(config_dict.get('project').get('video_dir'))
-     person_height_m = config_dict.get('project').get('person_height')
+     px_to_m_from_person_id = int(config_dict.get('project').get('px_to_m_from_person_id'))
+     px_to_m_person_height_m = config_dict.get('project').get('px_to_m_person_height')
+     visible_side = config_dict.get('project').get('visible_side')
      # Pose from file
-     load_trc = config_dict.get('project').get('load_trc')
-     if load_trc == '': load_trc = None
-     else: load_trc = Path(load_trc).resolve()
+     load_trc_px = config_dict.get('project').get('load_trc_px')
+     if load_trc_px == '': load_trc_px = None
+     else: load_trc_px = Path(load_trc_px).resolve()
      compare = config_dict.get('project').get('compare')
      # Webcam settings
      webcam_id = config_dict.get('project').get('webcam_id')
@@ -942,23 +967,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
      # Pixel to meters conversion
      to_meters = config_dict.get('px_to_meters_conversion').get('to_meters')
+     make_c3d = config_dict.get('px_to_meters_conversion').get('make_c3d')
      save_calib = config_dict.get('px_to_meters_conversion').get('save_calib')
      # Calibration from file
      calib_file = config_dict.get('px_to_meters_conversion').get('calib_file')
      if calib_file == '': calib_file = None
      else: calib_file = Path(calib_file).resolve()
      # Calibration from person height
-     calib_on_person_id = int(config_dict.get('px_to_meters_conversion').get('calib_on_person_id'))
      floor_angle = config_dict.get('px_to_meters_conversion').get('floor_angle') # 'auto' or float
      floor_angle = np.radians(float(floor_angle)) if floor_angle != 'auto' else floor_angle
      xy_origin = config_dict.get('px_to_meters_conversion').get('xy_origin') # ['auto'] or [x, y]
      xy_origin = [float(o) for o in xy_origin] if xy_origin != ['auto'] else 'auto'
 
-     fastest_frames_to_remove_percent = config_dict.get('px_to_meters_conversion').get('fastest_frames_to_remove_percent')
-     large_hip_knee_angles = config_dict.get('px_to_meters_conversion').get('large_hip_knee_angles')
-     trimmed_extrema_percent = config_dict.get('px_to_meters_conversion').get('trimmed_extrema_percent')
-     close_to_zero_speed_px = config_dict.get('px_to_meters_conversion').get('close_to_zero_speed_px')
-
      keypoint_likelihood_threshold = config_dict.get('pose').get('keypoint_likelihood_threshold')
      average_likelihood_threshold = config_dict.get('pose').get('average_likelihood_threshold')
      keypoint_number_threshold = config_dict.get('pose').get('keypoint_number_threshold')
@@ -991,11 +1011,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                        butterworth_filter_order, butterworth_filter_cutoff, frame_rate,
                        gaussian_filter_kernel, loess_filter_kernel, median_filter_kernel]
 
-     # Inverse kinematics settings
-     do_ik = config_dict.get('inverse-kinematics').get('do_ik')
-     osim_setup_path = config_dict.get('inverse-kinematics').get('osim_setup_path')
-     person_orientations = config_dict.get('inverse-kinematics').get('person_orientation')
-
 
      # Create output directories
      if video_file == "webcam":
@@ -1014,6 +1029,32 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      if save_img:
          img_output_dir.mkdir(parents=True, exist_ok=True)
 
+     # Inverse kinematics settings
+     do_ik = config_dict.get('kinematics').get('do_ik')
+     use_augmentation = config_dict.get('kinematics').get('use_augmentation')
+     participant_masses = config_dict.get('kinematics').get('participant_mass')
+     participant_masses = participant_masses if isinstance(participant_masses, list) else [participant_masses]
+     fastest_frames_to_remove_percent = config_dict.get('kinematics').get('fastest_frames_to_remove_percent')
+     large_hip_knee_angles = config_dict.get('kinematics').get('large_hip_knee_angles')
+     trimmed_extrema_percent = config_dict.get('kinematics').get('trimmed_extrema_percent')
+     close_to_zero_speed_px = config_dict.get('kinematics').get('close_to_zero_speed_px')
+     close_to_zero_speed_m = config_dict.get('kinematics').get('close_to_zero_speed_m')
+     if do_ik:
+         from Pose2Sim.markerAugmentation import augment_markers_all
+         from Pose2Sim.kinematics import kinematics_all
+         # Create a Pose2Sim dictionary and fill in missing keys
+         recursivedict = lambda: defaultdict(recursivedict)
+         Pose2Sim_config_dict = recursivedict()
+         # Fill Pose2Sim dictionary (height and mass will be filled later)
+         Pose2Sim_config_dict['project']['project_dir'] = str(output_dir)
+         Pose2Sim_config_dict['markerAugmentation']['make_c3d'] = make_c3d
+         Pose2Sim_config_dict['kinematics'] = config_dict.get('kinematics')
+         # Temporarily recreate Pose2Sim file hierarchy
+         pose3d_dir = Path(output_dir) / 'pose-3d'
+         pose3d_dir.mkdir(parents=True, exist_ok=True)
+         kinematics_dir = Path(output_dir) / 'kinematics'
+         kinematics_dir.mkdir(parents=True, exist_ok=True)
+
 
      # Set up video capture
      if video_file == "webcam":
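
The `recursivedict` one-liner above builds an arbitrarily nested dictionary: every missing key materializes as another defaultdict of the same kind, so deep assignments need no intermediate setup. A small sketch of the pattern, with a hypothetical `to_dict` helper mirroring the conversion the diff performs further down:

    from collections import defaultdict

    recursivedict = lambda: defaultdict(recursivedict)

    cfg = recursivedict()
    cfg['project']['participant_height'] = [1.74]  # no KeyError at any level

    def to_dict(d):
        # Hypothetical helper: collapse nested defaultdicts back to plain dicts
        return {k: to_dict(v) if isinstance(v, defaultdict) else v for k, v in d.items()}

    print(to_dict(cfg))  # {'project': {'participant_height': [1.74]}}
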
@@ -1036,7 +1077,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          model_name = 'HALPE_26'
          ModelClass = BodyWithFeet # 26 keypoints(halpe26)
          logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
-     elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY', 'WHOLE_BODY_WRIST'):
+     elif pose_model.upper() == 'WHOLE_BODY_WRIST':
+         model_name = 'COCO_133_WRIST'
+         ModelClass = Wholebody
+         logging.info(f"Using COCO_133 model (body, feet, 2 hand points) for pose estimation.")
+     elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY'):
          model_name = 'COCO_133'
          ModelClass = Wholebody
          logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
@@ -1077,15 +1122,15 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
 
      # Skip pose estimation or set it up:
-     if load_trc:
-         if not '_px' in str(load_trc):
-             logging.error(f'\n{load_trc} file needs to be in px, not in meters.')
-         logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc}.')
+     if load_trc_px:
+         if not '_px' in str(load_trc_px):
+             logging.error(f'\n{load_trc_px} file needs to be in px, not in meters.')
+         logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc_px}.')
          # Load pose file in px
-         Q_coords, _, _, keypoints_names, _ = read_trc(load_trc)
+         Q_coords, _, _, keypoints_names, _ = read_trc(load_trc_px)
          keypoints_ids = [i for i in range(len(keypoints_names))]
          keypoints_all, scores_all = load_pose_file(Q_coords)
-         for pre, _, node in RenderTree(model_name):
+         for pre, _, node in RenderTree(pose_model):
              if node.name in keypoints_names:
                  node.id = keypoints_names.index(node.name)
 
@@ -1137,7 +1182,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      frame_count = 0
      while cap.isOpened():
          # Skip to the starting frame
-         if frame_count < frame_range[0] and not load_trc:
+         if frame_count < frame_range[0] and not load_trc_px:
              cap.read()
              frame_count += 1
              continue
@@ -1163,7 +1208,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
 
          # Retrieve pose or Estimate pose and track people
-         if load_trc:
+         if load_trc_px:
              if frame_nb >= len(keypoints_all):
                  break
              keypoints = keypoints_all[frame_nb]
@@ -1184,7 +1229,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          valid_X, valid_Y, valid_scores = [], [], []
          valid_X_flipped, valid_angles = [], []
          for person_idx in range(len(keypoints)):
-             if load_trc:
+             if load_trc_px:
                  person_X = keypoints[person_idx][:,0]
                  person_Y = keypoints[person_idx][:,1]
                  person_scores = scores[person_idx]
@@ -1210,7 +1255,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                  person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)
              else:
                  person_X_flipped = person_X.copy()
-
+
              # Compute angles
              person_angles = []
              # Add Neck and Hip if not provided
@@ -1293,8 +1338,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
      all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1), name='time')
      if not multiperson:
-         calib_on_person_id = get_personID_with_highest_scores(all_frames_scores)
-         detected_persons = [calib_on_person_id]
+         px_to_m_from_person_id = get_personID_with_highest_scores(all_frames_scores)
+         detected_persons = [px_to_m_from_person_id]
      else:
          detected_persons = range(all_frames_X_homog.shape[1])
 
@@ -1312,7 +1357,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
          # Delete person if less than 4 valid frames
          pose_nan_count = len(np.where(all_frames_X_person.sum(axis=1)==0)[0])
-         if frame_count - pose_nan_count <= 4:
+         if frame_count - frame_range[0] - pose_nan_count <= 4:
              trc_data_i = pd.DataFrame(0, index=all_frames_X_person.index, columns=np.array([[c]*3 for c in all_frames_X_person.columns]).flatten())
              trc_data_i.insert(0, 't', all_frames_time)
              trc_data.append(trc_data_i)
@@ -1367,7 +1412,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              # Build TRC file
              trc_data_i = trc_data_from_XYZtime(all_frames_X_person_filt, all_frames_Y_person_filt, all_frames_Z_homog, all_frames_time)
              trc_data.append(trc_data_i)
-             if not load_trc:
+             if not load_trc_px:
                  make_trc_with_trc_data(trc_data_i, str(pose_path_person), fps=fps)
                  logging.info(f'Pose in pixels saved to {pose_path_person.resolve()}.')
 
@@ -1380,11 +1425,12 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
 
      # Convert px to meters
+     trc_data_m = []
      if to_meters:
          logging.info('\nConverting pose to meters:')
-         if calib_on_person_id>=len(trc_data):
-             logging.warning(f'Person #{calib_on_person_id} not detected in the video. Calibrating on person #0 instead.')
-             calib_on_person_id = 0
+         if px_to_m_from_person_id>=len(trc_data):
+             logging.warning(f'Person #{px_to_m_from_person_id} not detected in the video. Calibrating on person #0 instead.')
+             px_to_m_from_person_id = 0
          if calib_file:
              logging.info(f'Using calibration file to convert coordinates in meters: {calib_file}.')
              calib_params_dict = retrieve_calib_params(calib_file)
@@ -1393,53 +1439,81 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          else:
              # Compute calibration parameters
              if not multiperson:
-                 selected_person_id = calib_on_person_id
-                 calib_on_person_id = 0
-             height_px = compute_height(trc_data[calib_on_person_id].iloc[:,1:], keypoints_names,
+                 selected_person_id = px_to_m_from_person_id
+                 px_to_m_from_person_id = 0
+             height_px = compute_height(trc_data[px_to_m_from_person_id].iloc[:,1:], keypoints_names,
                                          fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_px, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
 
+             toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
+             px_per_m = height_px/px_to_m_person_height_m
+             toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
              if floor_angle == 'auto' or xy_origin == 'auto':
                  # estimated from the line formed by the toes when they are on the ground (where speed = 0)
                  try:
-                     toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
-                     px_per_m = height_px/person_height_m
-                     toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
-                     try:
-                         floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
-                     except: # no feet points
-                         floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
+                     if all(key in trc_data[px_to_m_from_person_id] for key in ['LBigToe', 'RBigToe']):
+                         floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[px_to_m_from_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                     else:
+                         floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[px_to_m_from_person_id], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
                          xy_origin_estim[0] = xy_origin_estim[0]-0.13
                          logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
                  except:
                      floor_angle_estim = 0
                      xy_origin_estim = cam_width/2, cam_height/2
-                     logging.warning(f'Could not estimate the floor angle and xy_origin. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}].')
+                     logging.warning(f'Could not estimate the floor angle and xy_origin for person {px_to_m_from_person_id}. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}] px.')
              if not floor_angle == 'auto':
                  floor_angle_estim = floor_angle
              if xy_origin == 'auto':
                  cx, cy = xy_origin_estim
              else:
                  cx, cy = xy_origin
-             logging.info(f'Using height of person #{calib_on_person_id} ({person_height_m}m) to convert coordinates in meters. '
+             logging.info(f'Using height of person #{px_to_m_from_person_id} ({px_to_m_person_height_m}m) to convert coordinates in meters. '
                           f'Floor angle: {np.degrees(floor_angle_estim) if not floor_angle=="auto" else f"auto (estimation: {round(np.degrees(floor_angle_estim),2)}°)"}, '
-                          f'xy_origin: {xy_origin if not xy_origin=="auto" else f"auto (estimation: {[round(c) for c in xy_origin_estim]})"}.')
+                          f'xy_origin: {xy_origin if not xy_origin=="auto" else f"auto (estimation: {[round(c) for c in xy_origin_estim]})"} px.')
 
          # Coordinates in m
          for i in range(len(trc_data)):
              if not np.array(trc_data[i].iloc[:,1:] ==0).all():
-                 trc_data_m_i = pd.concat([convert_px_to_meters(trc_data[i][kpt_name], person_height_m, height_px, cx, cy, -floor_angle_estim) for kpt_name in keypoints_names], axis=1)
+                 # Automatically determine visible side
+                 visible_side_i = visible_side[i] if len(visible_side)>i else 'auto' # set to 'auto' if list too short
+
+                 # Set to 'front' if the normalized gait direction lies between [-0.6, 0.6]
+                 if visible_side_i == 'auto':
+                     try:
+                         if all(key in trc_data[i] for key in ['LBigToe', 'RBigToe']):
+                             _, _, gait_direction = compute_floor_line(trc_data[i], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                         else:
+                             _, _, gait_direction = compute_floor_line(trc_data[i], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
+                             logging.warning(f'The RBigToe and LBigToe are missing from your model. Gait direction will be determined from the ankle points.')
+                         visible_side_i = 'right' if gait_direction > 0.6 \
+                                     else 'left' if gait_direction < -0.6 \
+                                     else 'front'
+                         logging.info(f'- Person {i}: Seen from the {visible_side_i}.')
+                     except:
+                         visible_side_i = 'none'
+                         logging.warning(f'- Person {i}: Could not automatically find gait direction. Please set visible_side to "front", "back", "left", or "right" for this person. Setting to "none".')
+                 # skip if none
+                 elif visible_side_i == 'none':
+                     logging.info(f'- Person {i}: Keeping output in 2D because "visible_side" is set to "none" for person {i}.')
+                 else:
+                     logging.info(f'- Person {i}: Seen from the {visible_side_i}.')
+
+                 # Convert to meters
+                 trc_data_m_i = pd.concat([convert_px_to_meters(trc_data[i][kpt_name], px_to_m_person_height_m, height_px, cx, cy, -floor_angle_estim, visible_side=visible_side_i) for kpt_name in keypoints_names], axis=1)
                  trc_data_m_i.insert(0, 't', all_frames_time)
-                 trc_data_unfiltered_m_i = pd.concat([convert_px_to_meters(trc_data_unfiltered[i][kpt_name], person_height_m, height_px, cx, cy, -floor_angle_estim) for kpt_name in keypoints_names], axis=1)
+                 trc_data_unfiltered_m_i = pd.concat([convert_px_to_meters(trc_data_unfiltered[i][kpt_name], px_to_m_person_height_m, height_px, cx, cy, -floor_angle_estim) for kpt_name in keypoints_names], axis=1)
                  trc_data_unfiltered_m_i.insert(0, 't', all_frames_time)
 
                  if to_meters and show_plots:
                      pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i)
 
                  # Write to trc file
+                 trc_data_m.append(trc_data_m_i)
                  idx_path = selected_person_id if not multiperson and not calib_file else i
                  pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
                  make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
-                 logging.info(f'Person {idx_path}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')
+                 if make_c3d:
+                     c3d_path = convert_to_c3d(pose_path_person_m_i)
+                 logging.info(f'Pose in meters saved to {pose_path_person_m_i.resolve()}. {"Also saved in c3d format." if make_c3d else ""}')
 
 
 
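
The automatic visible-side choice above boils gait direction down to one number: the slope of the stationary-toe X positions over time, normalized by the range they cover, so values near +1 or -1 mean steady rightward or leftward travel. A toy sketch of that decision rule with synthetic positions and the thresholds used in the hunk:

    import numpy as np

    def side_from_toe_x(toe_x):
        trend = np.polyfit(range(len(toe_x)), toe_x, 1)[0]       # px per frame
        gait_direction = trend * len(toe_x) / (np.max(toe_x) - np.min(toe_x))
        return 'right' if gait_direction > 0.6 else 'left' if gait_direction < -0.6 else 'front'

    print(side_from_toe_x([100, 140, 185, 230, 270]))  # steady increase in X -> 'right'
    print(side_from_toe_x([200, 195, 205, 198, 202]))  # no trend -> 'front'
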
@@ -1447,9 +1521,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
 
 
-
-
-
      # # plt.plot(trc_data_m.iloc[:,0], trc_data_m.iloc[:,1])
      # # plt.ylim([0,2])
      # # plt.show()
@@ -1457,7 +1528,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
 
      # z = 3.0 # distance between the camera and the person. Required in the calibration file but simplified in the equations
-     # f = height_px / person_height_m * z
+     # f = height_px / px_to_m_person_height_m * z
 
 
      # # Name
@@ -1484,14 +1555,14 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
 
 
-
+
 
 
 
 
      # Post-processing angles
      if save_angles and calculate_angles:
-         logging.info('\nPost-processing angles:')
+         logging.info('\nPost-processing angles (without inverse kinematics):')
          all_frames_angles = make_homogeneous(all_frames_angles)
 
          # unwrap angles
@@ -1508,7 +1579,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
              # Delete person if less than 4 valid frames
              angle_nan_count = len(np.where(all_frames_angles_person.sum(axis=1)==0)[0])
-             if frame_count - angle_nan_count <= 4:
+             if frame_count - frame_range[0] - angle_nan_count <= 4:
                  logging.info(f'- Person {i}: Less than 4 valid frames. Deleting person.')
 
              else:
@@ -1568,3 +1639,81 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          if show_plots:
              all_frames_angles_person.insert(0, 't', all_frames_time)
              angle_plots(all_frames_angles_person, angle_data, i) # i = current person
+
+
+     # OpenSim inverse kinematics (and optional marker augmentation)
+     if do_ik or use_augmentation:
+         logging.info('\nPost-processing angles (with inverse kinematics):')
+         if not to_meters:
+             logging.warning('Skipping marker augmentation and inverse kinematics as to_meters was set to False.')
+         else:
+             # move all trc files containing _m_ string to pose3d_dir
+             for trc_file in output_dir.glob('*_m_*.trc'):
+                 if (pose3d_dir/trc_file.name).exists():
+                     os.remove(pose3d_dir/trc_file.name)
+                 shutil.move(trc_file, pose3d_dir)
+
+             heights_m, masses = [], []
+             for i in range(len(trc_data_m)):
+                 if do_ik and not use_augmentation:
+                     logging.info(f'- Person {i}: Running scaling and inverse kinematics without marker augmentation. Set use_augmentation to True if you need it.')
+                 elif not do_ik and use_augmentation:
+                     logging.info(f'- Person {i}: Running marker augmentation without inverse kinematics. Set do_ik to True if you need it.')
+                 else:
+                     logging.info(f'- Person {i}: Running marker augmentation and inverse kinematics.')
+
+                 # Delete person if less than 4 valid frames
+                 pose_path_person = pose_output_path.parent / (pose_output_path.stem + f'_person{i:02d}.trc')
+                 all_frames_X_person = pd.DataFrame(all_frames_X_homog[:,i,:], columns=keypoints_names)
+                 pose_nan_count = len(np.where(all_frames_X_person.sum(axis=1)==0)[0])
+                 if frame_count - frame_range[0] - pose_nan_count <= 4:
+                     # heights_m.append(DEFAULT_HEIGHT)
+                     # masses.append(DEFAULT_MASS)
+                     logging.info(f'Less than 4 valid frames. Deleting person.')
+                 else:
+                     if visible_side[i] == 'none':
+                         logging.info(f'Skipping marker augmentation and inverse kinematics because visible_side is "none".')
+                         # heights_m.append(DEFAULT_HEIGHT)
+                         # masses.append(DEFAULT_MASS)
+                     else:
+                         # Provide missing data to Pose2Sim_config_dict
+                         height_m_i = compute_height(trc_data_m_i.iloc[:,1:], keypoints_names,
+                                                     fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_m, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
+                         mass_i = participant_masses[i] if len(participant_masses)>i else 70
+                         if len(participant_masses)<=i:
+                             logging.warning(f'No mass provided. Using 70 kg as default.')
+                         heights_m.append(height_m_i)
+                         masses.append(mass_i)
+
+             Pose2Sim_config_dict['project']['participant_height'] = heights_m
+             Pose2Sim_config_dict['project']['participant_mass'] = masses
+             Pose2Sim_config_dict['pose']['pose_model'] = pose_model_name.upper()
+             Pose2Sim_config_dict = to_dict(Pose2Sim_config_dict)
+
+             # Marker augmentation
+             if use_augmentation:
+                 logging.info('Running marker augmentation...')
+                 augment_markers_all(Pose2Sim_config_dict)
+                 logging.info(f'Augmented trc results saved to {pose3d_dir.resolve()}.\n')
+
+             if do_ik:
+                 if not save_angles or not calculate_angles:
+                     logging.warning(f'Skipping inverse kinematics because save_angles or calculate_angles is set to False.')
+                 else:
+                     logging.info('Running inverse kinematics...')
+                     kinematics_all(Pose2Sim_config_dict)
+                     for mot_file in kinematics_dir.glob('*.mot'):
+                         if (mot_file.parent/(mot_file.stem+'_ik.mot')).exists():
+                             os.remove(mot_file.parent/(mot_file.stem+'_ik.mot'))
+                         os.rename(mot_file, mot_file.parent/(mot_file.stem+'_ik.mot'))
+                     logging.info(f'.osim model and .mot motion file results saved to {kinematics_dir.resolve()}.\n')
+
+             # Move all files in pose-3d and kinematics to the output_dir
+             osim.Logger.removeFileSink()
+             for directory in [pose3d_dir, kinematics_dir]:
+                 for file in directory.glob('*'):
+                     if (output_dir/file.name).exists():
+                         os.remove(output_dir/file.name)
+                     shutil.move(file, output_dir)
+             pose3d_dir.rmdir()
+             kinematics_dir.rmdir()
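
The cleanup above repeatedly uses the same remove-then-move idiom, since shutil.move will not overwrite an existing destination file on every platform. Distilled into a hypothetical helper:

    import os
    import shutil
    from pathlib import Path

    def move_overwrite(src: Path, dest_dir: Path):
        # Remove any same-named file first, then move (pattern used in the diff above)
        target = dest_dir / src.name
        if target.exists():
            os.remove(target)
        shutil.move(str(src), str(dest_dir))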