sports2d 0.8.21.tar.gz → 0.8.23.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {sports2d-0.8.21 → sports2d-0.8.23}/PKG-INFO +2 -2
  2. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Demo/Config_demo.toml +9 -6
  3. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Sports2D.py +5 -1
  4. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/process.py +384 -288
  5. {sports2d-0.8.21 → sports2d-0.8.23}/pyproject.toml +1 -1
  6. {sports2d-0.8.21 → sports2d-0.8.23}/sports2d.egg-info/PKG-INFO +2 -2
  7. {sports2d-0.8.21 → sports2d-0.8.23}/sports2d.egg-info/requires.txt +1 -1
  8. {sports2d-0.8.21 → sports2d-0.8.23}/.github/workflows/continuous-integration.yml +0 -0
  9. {sports2d-0.8.21 → sports2d-0.8.23}/.github/workflows/joss_pdf.yml +0 -0
  10. {sports2d-0.8.21 → sports2d-0.8.23}/.github/workflows/publish-on-release.yml +0 -0
  11. {sports2d-0.8.21 → sports2d-0.8.23}/.gitignore +0 -0
  12. {sports2d-0.8.21 → sports2d-0.8.23}/CITATION.cff +0 -0
  13. {sports2d-0.8.21 → sports2d-0.8.23}/Content/Demo_plots.png +0 -0
  14. {sports2d-0.8.21 → sports2d-0.8.23}/Content/Demo_results.png +0 -0
  15. {sports2d-0.8.21 → sports2d-0.8.23}/Content/Demo_terminal.png +0 -0
  16. {sports2d-0.8.21 → sports2d-0.8.23}/Content/Person_selection.png +0 -0
  17. {sports2d-0.8.21 → sports2d-0.8.23}/Content/Video_tuto_Sports2D_Colab.png +0 -0
  18. {sports2d-0.8.21 → sports2d-0.8.23}/Content/joint_convention.png +0 -0
  19. {sports2d-0.8.21 → sports2d-0.8.23}/Content/paper.bib +0 -0
  20. {sports2d-0.8.21 → sports2d-0.8.23}/Content/paper.md +0 -0
  21. {sports2d-0.8.21 → sports2d-0.8.23}/Content/sports2d_blender.gif +0 -0
  22. {sports2d-0.8.21 → sports2d-0.8.23}/Content/sports2d_opensim.gif +0 -0
  23. {sports2d-0.8.21 → sports2d-0.8.23}/LICENSE +0 -0
  24. {sports2d-0.8.21 → sports2d-0.8.23}/README.md +0 -0
  25. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Demo/Calib_demo.toml +0 -0
  26. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Demo/demo.mp4 +0 -0
  27. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Sports2D.ipynb +0 -0
  28. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Utilities/__init__.py +0 -0
  29. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Utilities/common.py +0 -0
  30. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/Utilities/tests.py +0 -0
  31. {sports2d-0.8.21 → sports2d-0.8.23}/Sports2D/__init__.py +0 -0
  32. {sports2d-0.8.21 → sports2d-0.8.23}/setup.cfg +0 -0
  33. {sports2d-0.8.21 → sports2d-0.8.23}/sports2d.egg-info/SOURCES.txt +0 -0
  34. {sports2d-0.8.21 → sports2d-0.8.23}/sports2d.egg-info/dependency_links.txt +0 -0
  35. {sports2d-0.8.21 → sports2d-0.8.23}/sports2d.egg-info/entry_points.txt +0 -0
  36. {sports2d-0.8.21 → sports2d-0.8.23}/sports2d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sports2d
- Version: 0.8.21
+ Version: 0.8.23
  Summary: Compute 2D human pose and angles from a video or a webcam.
  Author-email: David Pagnon <contact@david-pagnon.com>
  Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -38,7 +38,7 @@ Requires-Dist: openvino
  Requires-Dist: opencv-python<4.12
  Requires-Dist: imageio_ffmpeg
  Requires-Dist: deep-sort-realtime
- Requires-Dist: Pose2Sim>=0.10.36
+ Requires-Dist: Pose2Sim>=0.10.38
  Dynamic: license-file


@@ -97,13 +97,13 @@ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, har
  keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
  average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
  keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
-
+ max_distance = 100 # in px or None # If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one

  [px_to_meters_conversion]
  # Pixel to meters conversion
  to_meters = true
  make_c3d = true
- save_calib = false
+ save_calib = true

  # If conversion from first_person_height
  floor_angle = 'auto' # 'auto' or a value in degrees, eg 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)
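
The new max_distance setting caps how far a tracked person may move between two consecutive frames before the tracker re-labels the detection as a new person. The actual matching is done by Pose2Sim's sort_people_sports2d (called with max_dist further down in process.py); the snippet below is only a minimal sketch of the gating idea, with an illustrative gate_by_distance helper that is not part of the package:

    import numpy as np

    def gate_by_distance(prev_centers, centers, max_distance=100):
        '''Greedy nearest-neighbour association between two frames.
        A match further than max_distance px is rejected, so the
        detection starts a new track. Illustrative sketch only.'''
        pairs, used = [], set()
        for j, c in enumerate(centers):
            dists = np.linalg.norm(prev_centers - c, axis=1)
            if used:
                dists[list(used)] = np.inf      # each previous person matched at most once
            i = int(np.argmin(dists)) if dists.size else -1
            if i >= 0 and dists[i] <= max_distance:
                used.add(i)
                pairs.append((i, j))            # same person as in the previous frame
            else:
                pairs.append((None, j))         # too far: considered a new person
        return pairs

    # e.g. gate_by_distance(np.array([[100., 200.]]), np.array([[105., 202.], [400., 50.]]))
    # -> [(0, 0), (None, 1)]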
@@ -111,7 +111,9 @@ xy_origin = ['auto'] # ['auto'] or [px_x,px_y]. N.B.: px_y points downwards.

  # If conversion from a calibration file
  calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
-
+ # subject_distance
+ # focal_distance
+ # recalculate_extrinsics

  [angles]
  display_angle_values_on = ['body', 'list'] # 'body', 'list', ['body', 'list'], 'none'. Display angle values on the body, as a list in the upper left of the image, both, or do not display them.
@@ -131,11 +133,12 @@ correct_segment_angles_with_floor_angle = true # If the camera is tilted, correc

  [post-processing]
  interpolate = true
- interp_gap_smaller_than = 10 # do not interpolate bigger gaps
+ interp_gap_smaller_than = 10 # Do not interpolate larger gaps
  fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
  sections_to_keep = 'all' # 'all', 'largest', 'first', 'last'
- # keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one
- reject_outliers = true # Hampel filter for outlier rejection before other filtering methods. Rejects outliers that are outside of a 95% confidence interval from the median in a sliding window of size 7.
+ # Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one
+ min_chunk_size = 10 # Minimum number of valid frames in a row to keep a chunk of data for a person
+ reject_outliers = true # Hampel filter for outlier rejection before other filtering methods. Rejects outliers that are outside of a 95% confidence interval from the median in a sliding window of size 7.

  filter = true
  show_graphs = true # Show plots of raw and processed results
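
The reject_outliers comment above pins down the method: a Hampel filter over a sliding window of 7 samples that rejects points outside roughly a 95% band around the window median. The real hampel_filter ships with Pose2Sim; the following is only a minimal pandas sketch of that behaviour (window size and band per the comment, the exact constants are an assumption):

    import pandas as pd

    def hampel_sketch(series, window=7, n_sigmas=2.0):
        '''Replace samples lying further than n_sigmas robust standard
        deviations from the rolling median with the median itself.
        1.4826 scales the median absolute deviation to a Gaussian sigma;
        n_sigmas=2 approximates a 95% band. Sketch only.'''
        med = series.rolling(window, center=True, min_periods=1).median()
        mad = (series - med).abs().rolling(window, center=True, min_periods=1).median()
        return series.where((series - med).abs() <= n_sigmas * 1.4826 * mad, med)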
@@ -152,6 +152,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
  'keypoint_likelihood_threshold': 0.3,
  'average_likelihood_threshold': 0.5,
  'keypoint_number_threshold': 0.3,
+ 'max_distance': 100,
  'CUSTOM': { 'name': 'Hip',
  'id': 19,
  'children': [{'name': 'RHip',
@@ -196,7 +197,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
  'calib_file': '',
  'floor_angle': 'auto',
  'xy_origin': ['auto'],
- 'save_calib': False
+ 'save_calib': True
  },
  'angles': {'display_angle_values_on': ['body', 'list'],
  'fontSize': 0.3,
@@ -233,6 +234,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
  'interp_gap_smaller_than': 10,
  'fill_large_gaps_with': 'last_value',
  'sections_to_keep':'all',
+ 'min_chunk_size': 10,
  'reject_outliers': True,
  'filter': True,
  'show_graphs': True,
@@ -315,6 +317,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
  'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
  'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
+ 'max_distance': ["", "If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one. In px or None, 100 by default."],
  'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
  'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
  'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
@@ -326,6 +329,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
  'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
  'sections_to_keep': ["", "all, largest, first, or last. Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one"],
+ 'min_chunk_size': ["", "Minimum number of valid frames in a row to keep a chunk of data for a person. 10 if not specified"],
  'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
  'filter': ["", "filter results. true if not specified"],
  'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],
@@ -78,21 +78,24 @@ from matplotlib import patheffects

  from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Hand, Custom
  from rtmlib.tools.object_detection.post_processings import nms
- from deep_sort_realtime.deepsort_tracker import DeepSort

  from Sports2D.Utilities.common import *
  from Pose2Sim.common import *
  from Pose2Sim.skeletons import *
+ from Pose2Sim.calibration import toml_write
  from Pose2Sim.triangulation import indices_of_first_last_non_nan_chunks
  from Pose2Sim.personAssociation import *
  from Pose2Sim.filtering import *

+ # Silence numpy "RuntimeWarning: Mean of empty slice"
+ import warnings
+ warnings.filterwarnings("ignore", category=RuntimeWarning, message="Mean of empty slice")
+
  # Not safe, but to be used until OpenMMLab/RTMlib's SSL certificates are updated
  import ssl
  ssl._create_default_https_context = ssl._create_unverified_context


-
  DEFAULT_MASS = 70
  DEFAULT_HEIGHT = 1.7

@@ -798,6 +801,8 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
      INPUTS:
      - trc_data_unfiltered: pd.DataFrame. The unfiltered trc data
      - trc_data: pd.DataFrame. The filtered trc data
+     - person_id: int. The ID of the person
+     - show: bool. Whether to show the plots

      OUTPUT:
      - matplotlib window with tabbed figures for each keypoint
@@ -806,7 +811,6 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
      os_name = platform.system()
      if os_name == 'Windows':
          mpl.use('qt5agg') # windows
-
      mpl.rc('figure', max_open_warning=0)

      keypoints_names = trc_data.columns[1::3]
@@ -1142,6 +1146,8 @@ def select_persons_on_vid(video_file_path, frame_range, all_pose_coords):

          # Change color on hover
          for person_idx, bbox in enumerate(all_bboxes[frame_idx]):
+             if person_idx >= len(rects): # Skip if rect doesn't exist
+                 continue
              if ~np.isnan(bbox).any():
                  x_min, y_min, x_max, y_max = bbox.astype(int)
                  if x_min <= x <= x_max and y_min <= y <= y_max:
@@ -1269,7 +1275,7 @@ def select_persons_on_vid(video_file_path, frame_range, all_pose_coords):
      return selected_persons


- def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_speed_below = 7, tot_speed_above=2.0):
+ def compute_floor_line(trc_data, score_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_speed_below = 7, score_threshold=0.5):
      '''
      Compute the floor line equation, angle, and direction
      from the feet keypoints when they have zero speed.
@@ -1287,20 +1293,25 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
      - gait_direction: float. Left if < 0, 'right' otherwise
      '''

-     # Remove frames where the person is mostly not moving (outlier)
-     speeds_kpts = np.array([np.insert(np.linalg.norm(trc_data[kpt].diff(), axis=1)[1:],0,0)
-                             for kpt in trc_data.columns.unique()[1:]]).T
-     av_speeds = np.array([np.nanmean(speed_kpt) if not np.isnan(speed_kpt).all() else 0 for speed_kpt in speeds_kpts])
-     trc_data = trc_data[av_speeds>tot_speed_above]
-
      # Retrieve zero-speed coordinates for the foot
      low_speeds_X, low_speeds_Y = [], []
      gait_direction_val = []
      for kpt in keypoint_names:
-         speeds = np.linalg.norm(trc_data[kpt].diff(), axis=1)
+         # Remove frames without data
+         trc_data_kpt = trc_data[kpt].iloc[:,:2]
+         score_data_kpt = score_data[kpt]
+         start, end = indices_of_first_last_non_nan_chunks(score_data_kpt, chunk_choice_method='all')
+         trc_data_kpt_trim = trc_data_kpt.iloc[start:end].reset_index(drop=True)
+         score_data_kpt_trim = score_data_kpt.iloc[start:end].reset_index(drop=True)
+
+         # Compute speeds
+         speeds = np.linalg.norm(trc_data_kpt_trim.diff(), axis=1)

-         low_speed_frames = trc_data[speeds<toe_speed_below].index
-         low_speeds_coords = trc_data[kpt].loc[low_speed_frames]
+         # Remove speeds with low confidence
+         speeds = np.where(score_data_kpt_trim>score_threshold, speeds, np.nan)
+
+         # Get coordinates with low speeds and high confidence
+         low_speeds_coords = trc_data_kpt_trim[speeds<toe_speed_below]
          low_speeds_coords = low_speeds_coords[low_speeds_coords!=0]

          low_speeds_X_kpt = low_speeds_coords.iloc[:,0].tolist()
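
For orientation, the idea behind compute_floor_line is: keep only toe samples that are both confident (score above score_threshold) and nearly still (speed below toe_speed_below), then fit a line through those ground-contact points; its slope gives the floor angle and a point on it serves as the pixel origin. A stripped-down sketch of that final fitting step (an illustration of the idea, not the function's actual implementation):

    import numpy as np

    def fit_floor_line(x, y):
        '''x, y: pixel coordinates of stationary, high-confidence toe points.
        Returns the floor angle (rad) and a point on the line usable as origin.'''
        slope, intercept = np.polyfit(x, y, 1)    # least-squares line y = slope*x + intercept
        floor_angle = np.arctan(slope)            # camera tilt relative to the floor
        return floor_angle, (0.0, intercept)      # line crosses x = 0 at y = intercept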
@@ -1445,7 +1456,9 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      mode = config_dict.get('pose').get('mode')
      det_frequency = config_dict.get('pose').get('det_frequency')
      tracking_mode = config_dict.get('pose').get('tracking_mode')
+     max_distance = config_dict.get('pose').get('max_distance', None)
      if tracking_mode == 'deepsort':
+         from deep_sort_realtime.deepsort_tracker import DeepSort
          deepsort_params = config_dict.get('pose').get('deepsort_params')
          try:
              deepsort_params = ast.literal_eval(deepsort_params)
@@ -1464,8 +1477,12 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      save_calib = config_dict.get('px_to_meters_conversion').get('save_calib')
      # Calibration from file
      calib_file = config_dict.get('px_to_meters_conversion').get('calib_file')
-     if calib_file == '': calib_file = None
-     else: calib_file = Path(calib_file).resolve()
+     if calib_file == '':
+         calib_file = None
+     else:
+         calib_file = video_dir / calib_file
+         if not calib_file.is_file():
+             raise FileNotFoundError(f'Error: Could not find calibration file {calib_file}. Check that the file exists.')
      # Calibration from person height
      floor_angle = config_dict.get('px_to_meters_conversion').get('floor_angle') # 'auto' or float
      floor_angle = np.radians(float(floor_angle)) if floor_angle != 'auto' else floor_angle
@@ -1492,7 +1509,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      interp_gap_smaller_than = config_dict.get('post-processing').get('interp_gap_smaller_than')
      fill_large_gaps_with = config_dict.get('post-processing').get('fill_large_gaps_with')
      sections_to_keep = config_dict.get('post-processing').get('sections_to_keep')
-
+     min_chunk_size = config_dict.get('post-processing').get('min_chunk_size')
      do_filter = config_dict.get('post-processing').get('filter')
      handle_LR_swap = config_dict.get('post-processing').get('handle_LR_swap', False)
      reject_outliers = config_dict.get('post-processing').get('reject_outliers', False)
@@ -1514,7 +1531,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      # Create output directories
      if video_file == "webcam":
          current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
-         output_dir_name = f'webcam_{current_date}_Sports2D'
+         video_file_stem = f'webcam_{current_date}'
+         output_dir_name = f'{video_file_stem}_Sports2D'
          video_file_path = result_dir / output_dir_name / f'webcam_{current_date}_raw.mp4'
      else:
          video_file_stem = video_file.stem
@@ -1623,6 +1641,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          # Load pose file in px
          Q_coords, _, time_col, keypoints_names, _ = read_trc(load_trc_px)
          t0 = time_col[0]
+         tf = time_col.iloc[-1]
          keypoints_ids = [i for i in range(len(keypoints_names))]
          keypoints_all, scores_all = load_pose_file(Q_coords)

@@ -1640,6 +1659,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          keypoints_ids = [node.id for _, _, node in RenderTree(pose_model) if node.id!=None]
          keypoints_names = [node.name for _, _, node in RenderTree(pose_model) if node.id!=None]
          t0 = 0
+         tf = (cap.get(cv2.CAP_PROP_FRAME_COUNT)-1) / fps if cap.get(cv2.CAP_PROP_FRAME_COUNT)>0 else float('inf')

          # Set up pose tracker
          try:
@@ -1652,14 +1672,12 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          except:
              logging.error('Error: Pose estimation failed. Check in Config.toml that pose_model and mode are valid.')
              raise ValueError('Error: Pose estimation failed. Check in Config.toml that pose_model and mode are valid.')
-
-         # if tracking_mode not in ['deepsort', 'sports2d']:
-         #     logging.warning(f"Tracking mode {tracking_mode} not recognized. Using sports2d method.")
-         #     tracking_mode = 'sports2d'
-         # logging.info(f'Pose tracking set up for "{pose_model_name}" model.')
-         # logging.info(f'Mode: {mode}.\n')
          logging.info(f'Persons are detected every {det_frequency} frames and tracked inbetween. Tracking is done with {tracking_mode}.')
-         if tracking_mode == 'deepsort': logging.info(f'Deepsort parameters: {deepsort_params}.')
+
+         if tracking_mode == 'deepsort':
+             logging.info(f'Deepsort parameters: {deepsort_params}.')
+         if tracking_mode not in ['deepsort', 'sports2d']:
+             logging.warning(f"Tracking mode {tracking_mode} is not implemented. 'sports2d' is recommended.")
          logging.info(f'{"All persons are" if nb_persons_to_detect=="all" else f"{nb_persons_to_detect} persons are" if nb_persons_to_detect>1 else "1 person is"} analyzed. Person ordering method is {person_ordering_method}.')
          logging.info(f"{keypoint_likelihood_threshold=}, {average_likelihood_threshold=}, {keypoint_number_threshold=}")

@@ -1691,7 +1709,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      frame_processing_times = []
      frame_count = 0
      first_frame = max(int(t0 * fps), frame_range[0])
-     # frames = []
+     last_frame = min(int(tf * fps), frame_range[1]-1)
+     if first_frame >= last_frame:
+         logging.error('Error: No frames to process. Check that your time_range is coherent with the video duration.')
+         raise ValueError('Error: No frames to process. Check that your time_range is coherent with the video duration.')
+
      while cap.isOpened():
          # Skip to the starting frame
          if frame_count < first_frame:
@@ -1714,9 +1736,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              if save_angles:
                  all_frames_angles.append([])
              continue
-         # else: # does not store all frames in memory if they are not saved or used for ordering
-         #     if save_img or save_vid or person_ordering_method == 'on_click':
-         #         frames.append(frame.copy())

          # Retrieve pose or Estimate pose and track people
          if load_trc_px:
@@ -1732,22 +1751,57 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              # Detect poses
              keypoints, scores = pose_tracker(frame)

-             # Non maximum suppression (at pose level, not detection)
+             # Non maximum suppression (at pose level, not detection, and only using likely keypoints)
              frame_shape = frame.shape
-             bboxes = bbox_xyxy_compute(frame_shape, keypoints, padding=0)
-             score_bboxes = np.array([np.mean(s) for s in scores])
-             keep = nms(bboxes, score_bboxes, nms_thr=0.45)
-             keypoints, scores = keypoints[keep], scores[keep]
-
+             mask_scores = np.mean(scores, axis=1) > 0.2
+
+             likely_keypoints = np.where(mask_scores[:, np.newaxis, np.newaxis], keypoints, np.nan)
+             likely_scores = np.where(mask_scores[:, np.newaxis], scores, np.nan)
+             likely_bboxes = bbox_xyxy_compute(frame_shape, likely_keypoints, padding=0)
+             score_likely_bboxes = np.nanmean(likely_scores, axis=1)
+
+             valid_indices = np.where(~np.isnan(score_likely_bboxes))[0]
+             if len(valid_indices) > 0:
+                 valid_bboxes = likely_bboxes[valid_indices]
+                 valid_scores = score_likely_bboxes[valid_indices]
+                 keep_valid = nms(valid_bboxes, valid_scores, nms_thr=0.45)
+                 keep = valid_indices[keep_valid]
+             else:
+                 keep = []
+             keypoints, scores = likely_keypoints[keep], likely_scores[keep]
+
+             # # Debugging: display detected keypoints on the frame
+             # colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0), (0,0,128), (128,128,0), (128,0,128), (0,128,128)]
+             # bboxes = likely_bboxes[keep]
+             # for person_idx in range(len(keypoints)):
+             #     for kpt_idx, kpt in enumerate(keypoints[person_idx]):
+             #         if not np.isnan(kpt).any():
+             #             cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[person_idx%len(colors)], -1)
+             #     if not np.isnan(bboxes[person_idx]).any():
+             #         cv2.rectangle(frame, (int(bboxes[person_idx][0]), int(bboxes[person_idx][1])), (int(bboxes[person_idx][2]), int(bboxes[person_idx][3])), colors[person_idx%len(colors)], 1)
+             # cv2.imshow(f'{video_file} Sports2D', frame)
+
              # Track poses across frames
              if tracking_mode == 'deepsort':
                  keypoints, scores = sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count)
              if tracking_mode == 'sports2d':
                  if 'prev_keypoints' not in locals(): prev_keypoints = keypoints
-                 prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores=scores)
+                 prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores=scores, max_dist=max_distance)
              else:
                  pass
-
+
+             # # Debugging: display detected keypoints on the frame
+             # colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0), (0,0,128), (128,128,0), (128,0,128), (0,128,128)]
+             # for person_idx in range(len(keypoints)):
+             #     for kpt_idx, kpt in enumerate(keypoints[person_idx]):
+             #         if not np.isnan(kpt).any():
+             #             cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[person_idx%len(colors)], -1)
+             #     # if not np.isnan(bboxes[person_idx]).any():
+             #     #     cv2.rectangle(frame, (int(bboxes[person_idx][0]), int(bboxes[person_idx][1])), (int(bboxes[person_idx][2]), int(bboxes[person_idx][3])), colors[person_idx%len(colors)], 1)
+             # cv2.imshow(f'{video_file} Sports2D', frame)
+             # # if (cv2.waitKey(1) & 0xFF) == ord('q') or (cv2.waitKey(1) & 0xFF) == 27:
+             # #     break
+             # # input()

              # Process coordinates and compute angles
              valid_X, valid_Y, valid_scores = [], [], []
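
The reworked suppression only lets likely keypoints vote: persons whose mean keypoint score is at or below 0.2 are masked to NaN, boxes and box scores are computed with nan-aware reductions, NMS runs on the valid subset only, and the kept indices are mapped back through valid_indices. A self-contained sketch of that flow, with a toy IoU-based nms standing in for rtmlib's:

    import numpy as np

    def nms(bboxes, scores, nms_thr=0.45):
        '''Toy greedy IoU non-maximum suppression (rtmlib ships the real one).'''
        areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        order, keep = np.argsort(scores)[::-1], []
        while order.size:
            i = order[0]
            keep.append(int(i))
            xx1 = np.maximum(bboxes[i, 0], bboxes[order[1:], 0])
            yy1 = np.maximum(bboxes[i, 1], bboxes[order[1:], 1])
            xx2 = np.minimum(bboxes[i, 2], bboxes[order[1:], 2])
            yy2 = np.minimum(bboxes[i, 3], bboxes[order[1:], 3])
            inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
            iou = inter / (areas[i] + areas[order[1:]] - inter)
            order = order[1:][iou <= nms_thr]
        return keep

    def nms_on_likely(keypoints, scores, min_mean_score=0.2, nms_thr=0.45):
        '''keypoints: (P, K, 2); scores: (P, K). Suppress duplicates among
        sufficiently likely persons only; unlikely persons are dropped.'''
        valid = np.where(np.mean(scores, axis=1) > min_mean_score)[0]
        if valid.size == 0:
            return keypoints[:0], scores[:0]
        kpts, scrs = keypoints[valid], scores[valid]
        bboxes = np.stack([kpts[:, :, 0].min(1), kpts[:, :, 1].min(1),
                           kpts[:, :, 0].max(1), kpts[:, :, 1].max(1)], axis=1)
        keep = nms(bboxes, scrs.mean(1), nms_thr)
        return kpts[keep], scrs[keep]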
@@ -1771,6 +1825,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                      person_Y = np.full_like(person_Y, np.nan)
                      person_scores = np.full_like(person_scores, np.nan)

+
+
+             ## RECREATE KEYPOINTS, SCORES
+
+
+
+
+
+
+
+
+
              # Check whether the person is looking to the left or right
              if flip_left_right:
                  person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)
@@ -1915,16 +1981,38 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  if save_pose:
      logging.info('\nPost-processing pose:')
      # Process pose for each person
-     trc_data, trc_data_unfiltered = [], []
+     trc_data, trc_data_unfiltered, score_data = [], [], []
+     first_run_starts_everyone, last_run_ends_everyone = [], []
      for i, idx_person in enumerate(selected_persons):
          pose_path_person = pose_output_path.parent / (pose_output_path.stem + f'_person{i:02d}.trc')
          all_frames_X_person = pd.DataFrame(all_frames_X_processed[:,idx_person,:], columns=new_keypoints_names)
          all_frames_Y_person = pd.DataFrame(all_frames_Y_processed[:,idx_person,:], columns=new_keypoints_names)
+         score_data.append(pd.DataFrame(all_frames_scores_processed[:,idx_person,:], columns=new_keypoints_names))
          if calculate_angles or save_angles:
              all_frames_X_flipped_person = pd.DataFrame(all_frames_X_flipped_processed[:,idx_person,:], columns=new_keypoints_names)
-         # Delete person if less than 10 valid frames
-         pose_nan_count = len(np.where(all_frames_X_person.sum(axis=1)==0)[0])
-         if frame_count - frame_range[0] - pose_nan_count <= 10:
+
+         # Interpolate
+         if not interpolate:
+             logging.info(f'- Person {i}: No interpolation.')
+             all_frames_X_person_interp = all_frames_X_person
+             all_frames_Y_person_interp = all_frames_Y_person
+         else:
+             logging.info(f'- Person {i}: Interpolating missing sequences if they are smaller than {interp_gap_smaller_than} frames. Large gaps filled with {fill_large_gaps_with}.')
+             all_frames_X_person_interp = all_frames_X_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
+             all_frames_Y_person_interp = all_frames_Y_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
+
+         # Find the first and last valid chunks of data
+         first_run_starts, last_run_ends = [], []
+         for col in all_frames_X_person.columns:
+             first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(all_frames_X_person_interp[col], min_chunk_size=min_chunk_size, chunk_choice_method=sections_to_keep)
+             first_run_starts += [first_run_start]
+             last_run_ends += [last_run_end]
+         first_run_start_min, last_run_end_max = min(first_run_starts), max(last_run_ends)
+         first_run_starts_everyone += [first_run_starts]
+         last_run_ends_everyone += [last_run_ends]
+
+         # Do not process person if no section of min_chunk_size valid frames in a row
+         if (first_run_start_min, last_run_end_max) == (0,0):
              all_frames_X_processed[:,idx_person,:], all_frames_X_flipped_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = np.nan, np.nan, np.nan
              columns=np.array([[c]*3 for c in all_frames_X_person.columns]).flatten()
              trc_data_i = pd.DataFrame(0, index=all_frames_X_person.index, columns=['time']+list(columns))
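
indices_of_first_last_non_nan_chunks is imported from Pose2Sim.triangulation; the diff relies on the following contract: find runs of at least min_chunk_size consecutive non-NaN frames and reduce them to a (start, end) pair according to chunk_choice_method, with (0, 0) signalling that no run qualifies (which is what triggers "Deleting person" below). A minimal sketch of such a function, written from that assumed contract rather than from Pose2Sim's source:

    import numpy as np
    import pandas as pd

    def non_nan_chunk_bounds(series, min_chunk_size=10, chunk_choice_method='all'):
        '''Sketch only; mirrors the assumed contract, not Pose2Sim's code.'''
        valid = series.notna().to_numpy().astype(np.int8)
        edges = np.flatnonzero(np.diff(np.concatenate(([0], valid, [0]))))
        runs = [(s, e) for s, e in zip(edges[::2], edges[1::2]) if e - s >= min_chunk_size]
        if not runs:
            return 0, 0                                  # no valid section
        if chunk_choice_method == 'largest':
            return max(runs, key=lambda r: r[1] - r[0])
        if chunk_choice_method == 'first':
            return runs[0]
        if chunk_choice_method == 'last':
            return runs[-1]
        return runs[0][0], runs[-1][1]                   # 'all': span every valid section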
@@ -1932,121 +2020,139 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              trc_data.append(trc_data_i)
              trc_data_unfiltered_i = trc_data_i.copy()
              trc_data_unfiltered.append(trc_data_unfiltered_i)
-
-             logging.info(f'- Person {i}: Less than 10 valid frames. Deleting person.')
+             logging.info(f' Person {i}: Less than {min_chunk_size} valid frames in a row. Deleting person.')
+             continue

+         # Fill remaining gaps
+         if fill_large_gaps_with.lower() == 'last_value':
+             for col_id, col in enumerate(all_frames_X_person_interp.columns):
+                 first_run_start, last_run_end = first_run_starts[col_id], last_run_ends[col_id]
+                 for coord_df in [all_frames_X_person_interp, all_frames_Y_person_interp, all_frames_Z_homog]:
+                     coord_df.loc[:first_run_start, col] = np.nan
+                     coord_df.loc[last_run_end:, col] = np.nan
+                     coord_df.loc[first_run_start:last_run_end, col] = coord_df.loc[first_run_start:last_run_end, col].ffill().bfill()
+         elif fill_large_gaps_with.lower() == 'zeros':
+             all_frames_X_person_interp.replace(np.nan, 0, inplace=True)
+             all_frames_Y_person_interp.replace(np.nan, 0, inplace=True)
+
+         # if handle_LR_swap:
+         #     logging.info(f'Handling left-right swaps.')
+         #     all_frames_X_person_interp = all_frames_X_person_interp.apply(LR_unswap, axis=0)
+         #     all_frames_Y_person_interp = all_frames_Y_person_interp.apply(LR_unswap, axis=0)
+
+         if reject_outliers:
+             logging.info('Rejecting outliers with a Hampel filter.')
+             all_frames_X_person_interp = all_frames_X_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
+             all_frames_Y_person_interp = all_frames_Y_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
+
+         if not do_filter:
+             logging.info(f'No filtering.')
+             all_frames_X_person_filt = all_frames_X_person_interp
+             all_frames_Y_person_filt = all_frames_Y_person_interp
          else:
-             # Interpolate
-             if not interpolate:
-                 logging.info(f'- Person {i}: No interpolation.')
-                 all_frames_X_person_interp = all_frames_X_person
-                 all_frames_Y_person_interp = all_frames_Y_person
+             if filter_type == ('butterworth' or 'butterworth_on_speed'):
+                 cutoff = butterworth_filter_cutoff
+                 if video_file == 'webcam':
+                     if cutoff / (fps / 2) >= 1:
+                         cutoff_old = cutoff
+                         cutoff = fps/(2+0.001)
+                         args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
+                         butterworth_filter_cutoff = cutoff
+                 filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
+                 args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
+                 frame_rate = fps
+             elif filter_type == 'gcv_spline':
+                 args = f'GCV Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
+             elif filter_type == 'kalman':
+                 args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
+             elif filter_type == 'gaussian':
+                 args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
+             elif filter_type == 'loess':
+                 args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
+             elif filter_type == 'median':
+                 args = f'Median filter, kernel of {median_filter_kernel}.'
              else:
-                 logging.info(f'- Person {i}: Interpolating missing sequences if they are smaller than {interp_gap_smaller_than} frames. Large gaps filled with {fill_large_gaps_with}.')
-                 all_frames_X_person_interp = all_frames_X_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
-                 all_frames_Y_person_interp = all_frames_Y_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
-
-                 if fill_large_gaps_with.lower() == 'last_value':
-                     for col in all_frames_X_person_interp.columns:
-                         first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(all_frames_Y_person_interp[col], min_chunk_size=interp_gap_smaller_than, chunk_choice_method=sections_to_keep)
-                         for coord_df in [all_frames_X_person_interp, all_frames_Y_person_interp, all_frames_Z_homog]:
-                             coord_df.loc[:first_run_start, col] = np.nan
-                             coord_df.loc[last_run_end:, col] = np.nan
-                             coord_df.loc[first_run_start:last_run_end, col] = coord_df.loc[first_run_start:last_run_end, col].ffill().bfill()
-
-                 elif fill_large_gaps_with.lower() == 'zeros':
-                     all_frames_X_person_interp.replace(np.nan, 0, inplace=True)
-                     all_frames_Y_person_interp.replace(np.nan, 0, inplace=True)
-
-             # Filter
-             # if handle_LR_swap:
-             #     logging.info(f'Handling left-right swaps.')
-             #     all_frames_X_person_interp = all_frames_X_person_interp.apply(LR_unswap, axis=0)
-             #     all_frames_Y_person_interp = all_frames_Y_person_interp.apply(LR_unswap, axis=0)
-
-             if reject_outliers:
-                 logging.info('Rejecting outliers with a Hampel filter.')
-                 all_frames_X_person_interp = all_frames_X_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
-                 all_frames_Y_person_interp = all_frames_Y_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
-
-             if not do_filter:
-                 logging.info(f'No filtering.')
-                 all_frames_X_person_filt = all_frames_X_person_interp
-                 all_frames_Y_person_filt = all_frames_Y_person_interp
-             else:
-                 if filter_type == ('butterworth' or 'butterworth_on_speed'):
-                     cutoff = butterworth_filter_cutoff
-                     if video_file == 'webcam':
-                         if cutoff / (fps / 2) >= 1:
-                             cutoff_old = cutoff
-                             cutoff = fps/(2+0.001)
-                             args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
-                             butterworth_filter_cutoff = cutoff
-                     filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
-                     args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
-                     frame_rate = fps
-                 elif filter_type == 'gcv_spline':
-                     args = f'GCV Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
-                 elif filter_type == 'kalman':
-                     args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
-                 elif filter_type == 'gaussian':
-                     args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
-                 elif filter_type == 'loess':
-                     args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
-                 elif filter_type == 'median':
-                     args = f'Median filter, kernel of {median_filter_kernel}.'
-                 else:
-                     logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
-                     raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
-
-                 logging.info(f'Filtering with {args}')
-                 all_frames_X_person_filt = all_frames_X_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
-                 all_frames_Y_person_filt = all_frames_Y_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
-
-
-             # Build TRC file
-             trc_data_i = trc_data_from_XYZtime(all_frames_X_person_filt, all_frames_Y_person_filt, all_frames_Z_homog, all_frames_time)
-             trc_data.append(trc_data_i)
-             if not load_trc_px:
-                 make_trc_with_trc_data(trc_data_i, str(pose_path_person), fps=fps)
-                 logging.info(f'Pose in pixels saved to {pose_path_person.resolve()}.')
-
-             # Plotting coordinates before and after interpolation and filtering
-             columns_to_concat = []
-             for kpt in range(len(all_frames_X_person.columns)):
-                 columns_to_concat.extend([all_frames_X_person.iloc[:,kpt], all_frames_Y_person.iloc[:,kpt], all_frames_Z_homog.iloc[:,kpt]])
-             trc_data_unfiltered_i = pd.concat([all_frames_time] + columns_to_concat, axis=1)
-             trc_data_unfiltered.append(trc_data_unfiltered_i)
-             if not to_meters and (show_plots or save_plots):
-                 pw = pose_plots(trc_data_unfiltered_i, trc_data_i, i, show=show_plots)
-                 if save_plots:
-                     for n, f in enumerate(pw.figure_handles):
-                         dpi = pw.canvases[i].figure.dpi
-                         f.set_size_inches(1280/dpi, 720/dpi)
-                         title = pw.tabs.tabText(n)
-                         plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
-                         f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
-                     logging.info(f'Pose plots (px) saved in {plots_output_dir}.')
-
-             all_frames_X_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_Y_person_filt
-             if calculate_angles or save_angles:
-                 all_frames_X_flipped_processed[:,idx_person,:] = all_frames_X_flipped_person
+                 logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+                 raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+
+             logging.info(f'Filtering with {args}')
+             all_frames_X_person_filt = all_frames_X_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
+             all_frames_Y_person_filt = all_frames_Y_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
+
+         # Build TRC file
+         trc_data_i = trc_data_from_XYZtime(all_frames_X_person_filt, all_frames_Y_person_filt, all_frames_Z_homog, all_frames_time)
+         trc_data.append(trc_data_i)
+         if not load_trc_px:
+             make_trc_with_trc_data(trc_data_i, str(pose_path_person), fps=fps)
+             logging.info(f'Pose in pixels saved to {pose_path_person.resolve()}.')
+
+         # Plotting coordinates before and after interpolation and filtering
+         columns_to_concat = []
+         for kpt in range(len(all_frames_X_person.columns)):
+             columns_to_concat.extend([all_frames_X_person.iloc[:,kpt], all_frames_Y_person.iloc[:,kpt], all_frames_Z_homog.iloc[:,kpt]])
+         trc_data_unfiltered_i = pd.concat([all_frames_time] + columns_to_concat, axis=1)
+         trc_data_unfiltered.append(trc_data_unfiltered_i)
+         if not to_meters and (show_plots or save_plots):
+             pw = pose_plots(trc_data_unfiltered_i, trc_data_i, i, show=show_plots)
+             if save_plots:
+                 for n, f in enumerate(pw.figure_handles):
+                     dpi = pw.canvases[i].figure.dpi
+                     f.set_size_inches(1280/dpi, 720/dpi)
+                     title = pw.tabs.tabText(n)
+                     plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
+                     f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                 logging.info(f'Pose plots (px) saved in {plots_output_dir}.')
+
+         all_frames_X_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_Y_person_filt
+         if calculate_angles or save_angles:
+             all_frames_X_flipped_processed[:,idx_person,:] = all_frames_X_flipped_person


  #%% Convert px to meters
  trc_data_m = []
  if to_meters and save_pose:
      logging.info('\nConverting pose to meters:')
+
+     # Compute height in px of the first person
+     height_px = compute_height(trc_data[0].iloc[:,1:], new_keypoints_names,
+         fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_px, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
+
+     if calib_file or save_calib:
+         dist_to_cam = 10.0 # arbitrary distance between the camera and the person (m)
+         R90z = np.array([[0.0, -1.0, 0.0],
+                          [1.0, 0.0, 0.0],
+                          [0.0, 0.0, 1.0]])
+         R270x = np.array([[1.0, 0.0, 0.0],
+                           [0.0, 0.0, 1.0],
+                           [0.0, -1.0, 0.0]])
+
+     # Compute px to meter parameters from calibration file
      if calib_file:
-         logging.info(f'Using calibration file to convert coordinates in meters: {calib_file}.')
          calib_params_dict = retrieve_calib_params(calib_file)
-         # TODO

-     else:
-         # Compute calibration parameters
-         height_px = compute_height(trc_data[0].iloc[:,1:], new_keypoints_names,
-             fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_px, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
+         f = calib_params_dict['K'][0][0][0]
+         first_person_height = height_px / f * dist_to_cam

+         R_cam = cv2.Rodrigues(calib_params_dict['R'][0])[0]
+         T_cam = np.array(calib_params_dict['T'][0])
+         R_world, T_world = world_to_camera_persp(R_cam, T_cam)
+         Rfloory = R90z.T @ R_world @ R270x.T
+         T_world = R90z.T @ T_world
+         floor_angle_estim = np.arctan2(Rfloory[0,2], Rfloory[0,0])
+
+         cu = calib_params_dict['K'][0][0][2]
+         cv = calib_params_dict['K'][0][1][2]
+         cx = 0.0
+         cy = cv + T_world[2]*f/dist_to_cam
+         xy_origin_estim = [cx, cy]
+
+         logging.info(f'Using calibration file to convert coordinates in meters: {calib_file}.\n'
+                      f'Floor angle: {np.degrees(floor_angle_estim):.2f}°, '
+                      f'xy_origin: [{cx:.2f}, {cy:.2f}] px.')
+
+
+     # Compute px to meter parameters from scene
+     else:
          toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
          px_per_m = height_px/first_person_height
         toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
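
In the webcam branch above, the Butterworth cut-off is clamped just below the Nyquist frequency, since a digital low-pass filter cannot be designed with a cut-off at or above fps/2. The same guard with SciPy, as a generic zero-phase Butterworth sketch (not necessarily what Pose2Sim's filter1d does internally):

    from scipy import signal

    def butterworth_zero_phase(x, fps, cutoff=6.0, order=4):
        '''Low-pass one coordinate series, clamping the cut-off below Nyquist.'''
        nyquist = fps / 2
        if cutoff / nyquist >= 1:          # same test as in the diff
            cutoff = fps / (2 + 0.001)     # just under Nyquist
        b, a = signal.butter(order, cutoff / nyquist, btype='low')
        return signal.filtfilt(b, a, x)    # forward-backward pass: no phase lag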
@@ -2054,11 +2160,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          # estimated from the line formed by the toes when they are on the ground (where speed = 0)
          try:
              if all(key in trc_data[0] for key in ['LBigToe', 'RBigToe']):
-                 floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                 floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], score_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
              else:
-                 floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
-                 xy_origin_estim[0] = xy_origin_estim[0]-0.13
-                 logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
+                 floor_angle_estim, xy_origin_estim, _ = compute_floor_line(trc_data[0], score_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
+                 xy_origin_estim[1] = xy_origin_estim[1] + 0.13*px_per_m # approx. height of the ankle above the floor
+                 logging.warning(f'The RBigToe and LBigToe are missing from your pose estimation model. Using ankles - 13 cm to compute the floor line.')
          except:
              floor_angle_estim = 0
              xy_origin_estim = cam_width/2, cam_height/2
@@ -2069,9 +2175,40 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              cx, cy = xy_origin_estim
          else:
              cx, cy = xy_origin
-         logging.info(f'Using height of person #0 ({first_person_height}m) to convert coordinates in meters. '
+         logging.info(f'Using height of person #0 ({first_person_height}m) to convert coordinates in meters.\n'
                       f'Floor angle: {np.degrees(floor_angle_estim) if not floor_angle=="auto" else f"auto (estimation: {round(np.degrees(floor_angle_estim),2)}°)"}, '
                       f'xy_origin: {xy_origin if not xy_origin=="auto" else f"auto (estimation: {[round(c) for c in xy_origin_estim]})"} px.')
+
+     # Save calibration file
+     if save_calib:
+         calib_file_path = output_dir / f'{video_file_stem}_Sports2D_calib.toml'
+
+         # name, size, distortions
+         N = [video_file_stem]
+         S = [[cam_width, cam_height]]
+         D = [[0.0, 0.0, 0.0, 0.0]]
+
+         # Intrinsics
+         f = height_px / first_person_height * dist_to_cam
+         cu = cam_width/2
+         cv = cam_height/2
+         K = np.array([[[f, 0.0, cu], [0.0, f, cv], [0.0, 0.0, 1.0]]])
+
+         # Extrinsics
+         Rfloory = np.array([[np.cos(floor_angle_estim), 0.0, np.sin(floor_angle_estim)],
+                             [0.0, 1.0, 0.0],
+                             [-np.sin(floor_angle_estim), 0.0, np.cos(floor_angle_estim)]])
+         R_world = R90z @ Rfloory @ R270x
+         T_world = R90z @ np.array([-(cx-cu)/f*dist_to_cam, -dist_to_cam, (cy-cv)/f*dist_to_cam])
+
+         R_cam, T_cam = world_to_camera_persp(R_world, T_world)
+         Tvec_cam = T_cam.reshape(1,3).tolist()
+         Rvec_cam = cv2.Rodrigues(R_cam)[0].reshape(1,3).tolist()
+
+         # Write calibration file
+         toml_write(calib_file_path, N, S, D, K, Rvec_cam, Tvec_cam)
+         logging.info(f'Calibration saved to {calib_file_path}.')
+

      # Coordinates in m
      new_visible_side = []
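
The saved calibration encodes a virtual pinhole camera consistent with the pixel-to-meter scaling: with the person placed at the arbitrary dist_to_cam = 10 m, similar triangles give the focal length f = height_px / first_person_height * dist_to_cam, and loading a calib_file inverts the same relation to recover the person's height. A worked sketch of that arithmetic (the numbers are illustrative, not from the demo video):

    # Pinhole relation: height_px = f * height_m / dist_to_cam
    height_px = 400.0            # person height measured in the image, px (illustrative)
    first_person_height = 1.70   # m
    dist_to_cam = 10.0           # arbitrary, as in the diff

    f = height_px / first_person_height * dist_to_cam   # ≈ 2352.9 px, goes into K[0][0]
    recovered = height_px / f * dist_to_cam             # inverse, used when a calib_file is given
    assert abs(recovered - first_person_height) < 1e-9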
@@ -2083,9 +2220,9 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          if visible_side_i == 'auto':
              try:
                  if all(key in trc_data[i] for key in ['LBigToe', 'RBigToe']):
-                     _, _, gait_direction = compute_floor_line(trc_data[i], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                     _, _, gait_direction = compute_floor_line(trc_data[i], score_data[0], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
                  else:
-                     _, _, gait_direction = compute_floor_line(trc_data[i], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
+                     _, _, gait_direction = compute_floor_line(trc_data[i], score_data[0], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame, score_threshold=average_likelihood_threshold)
                      logging.warning(f'The RBigToe and LBigToe are missing from your model. Gait direction will be determined from the ankle points.')
                  visible_side_i = 'right' if gait_direction > 0.3 \
                      else 'left' if gait_direction < -0.3 \
@@ -2103,8 +2240,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              # Convert to meters
              px_to_m_i = [convert_px_to_meters(trc_data[i][kpt_name], first_person_height, height_px, cx, cy, -floor_angle_estim, visible_side=visible_side_i) for kpt_name in new_keypoints_names]
              trc_data_m_i = pd.concat([all_frames_time.rename('time')]+px_to_m_i, axis=1)
-             for c in 3*np.arange(len(trc_data_m_i.columns[3::3]))+1: # only X coordinates
-                 first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(trc_data_m_i.iloc[:,c], min_chunk_size=interp_gap_smaller_than, chunk_choice_method=sections_to_keep)
+             for c_id, c in enumerate(3*np.arange(len(trc_data_m_i.columns[3::3]))+1): # only X coordinates
+                 first_run_start, last_run_end = first_run_starts_everyone[i][c_id], last_run_ends_everyone[i][c_id]
                  trc_data_m_i.iloc[:first_run_start,c+2] = np.nan
                  trc_data_m_i.iloc[last_run_end:,c+2] = np.nan
                  trc_data_m_i.iloc[first_run_start:last_run_end,c+2] = trc_data_m_i.iloc[first_run_start:last_run_end,c+2].ffill().bfill()
@@ -2136,48 +2273,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
              new_visible_side += [visible_side_i]
          else:
              new_visible_side = visible_side.copy()
-
-
-
-
-
-
-     # # plt.plot(trc_data_m.iloc[:,0], trc_data_m.iloc[:,1])
-     # # plt.ylim([0,2])
-     # # plt.show()
-
-
-
-     # z = 3.0 # distance between the camera and the person. Required in the calibration file but simplified in the equations
-     # f = height_px / first_person_height * z
-
-
-     # # Name
-     # N = [video_file]
-
-     # # Size
-     # S = [[cam_width, cam_height]]
-
-     # # Distortions
-     # D = [[0.0, 0.0, 0.0, 0.0]]
-
-     # # Camera matrix
-     # K = [[[f, 0.0, cx], [0.0, f, cy], [0.0, 0.0, 1.0]]] # f and Z do not matter in 2D
-
-     # # Rot, Trans
-     # R =
-     # T =
-
-     # # Save calibration file
-
-     # # Convert to meters
-     # trc_data =
-
-
-
-
-
-


  #%% ==================================================
@@ -2205,86 +2300,95 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
2205
2300
  if new_visible_side[i] == 'left' and not flip_left_right:
2206
2301
  all_frames_angles_homog[:, idx_person, :] = -all_frames_angles_homog[:, idx_person, :]
2207
2302
 
2208
- # Delete person if less than 4 valid frames
2209
- angle_nan_count = len(np.where(all_frames_angles_person.sum(axis=1)==0)[0])
2210
- if frame_count - frame_range[0] - angle_nan_count <= 4:
2211
- all_frames_angles_processed[:,idx_person,:] = np.nan
2212
- logging.info(f'- Person {i}: Less than 4 valid frames. Deleting person.')
2213
-
2303
+ if not interpolate:
2304
+ logging.info(f'- Person {i}: No interpolation.')
2305
+ all_frames_angles_person_interp = all_frames_angles_person
2214
2306
  else:
2215
- # Interpolate
2216
- if not interpolate:
2217
- logging.info(f'- Person {i}: No interpolation.')
2218
- all_frames_angles_person_interp = all_frames_angles_person
2219
- else:
2220
- logging.info(f'- Person {i}: Interpolating missing sequences if they are smaller than {interp_gap_smaller_than} frames. Large gaps filled with {fill_large_gaps_with}.')
2221
- all_frames_angles_person_interp = all_frames_angles_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
2222
- if fill_large_gaps_with == 'last_value':
2223
- for col in all_frames_angles_person_interp.columns:
2224
- first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(all_frames_angles_person_interp[col], min_chunk_size=interp_gap_smaller_than, chunk_choice_method=sections_to_keep)
2225
- all_frames_angles_person_interp.loc[:first_run_start, col] = np.nan
2226
- all_frames_angles_person_interp.loc[last_run_end:, col] = np.nan
2227
- all_frames_angles_person_interp.loc[first_run_start:last_run_end, col] = all_frames_angles_person_interp.loc[first_run_start:last_run_end, col].ffill().bfill()
2228
- elif fill_large_gaps_with == 'zeros':
2229
- all_frames_angles_person_interp.replace(np.nan, 0, inplace=True)
2307
+ logging.info(f'- Person {i}: Interpolating missing sequences if they are smaller than {interp_gap_smaller_than} frames. Large gaps filled with {fill_large_gaps_with}.')
2308
+ all_frames_angles_person_interp = all_frames_angles_person.apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, 'linear'])
2309
+
2310
+ # Find the first and last valid chunks of data
2311
+ first_run_starts, last_run_ends = [], []
2312
+ for col in all_frames_angles_person.columns:
2313
+ first_run_start, last_run_end = indices_of_first_last_non_nan_chunks(all_frames_angles_person_interp[col], min_chunk_size=min_chunk_size, chunk_choice_method=sections_to_keep)
2314
+ first_run_starts += [first_run_start]
2315
+ last_run_ends += [last_run_end]
2316
+ first_run_start_min, last_run_end_max = min(first_run_starts), max(last_run_ends)
2317
+
2318
+ # Do not process person if no section of min_chunk_size valid frames in a row
2319
+ if (first_run_start_min, last_run_end_max) == (0,0):
2320
+ all_frames_angles_processed[:,idx_person,:]= np.nan
2321
+ logging.info(f' Person {i}: Less than {min_chunk_size} valid frames in a row. Deleting person.')
2322
+ continue
2323
+
2324
+ # Fill remaining gaps
2325
+ if fill_large_gaps_with == 'last_value':
2326
+ for col_id, col in enumerate(all_frames_angles_person_interp.columns):
2327
+ first_run_start, last_run_end = first_run_starts[col_id], last_run_ends[col_id]
2328
+ all_frames_angles_person_interp.loc[:first_run_start, col] = np.nan
2329
+ all_frames_angles_person_interp.loc[last_run_end:, col] = np.nan
2330
+ all_frames_angles_person_interp.loc[first_run_start:last_run_end, col] = all_frames_angles_person_interp.loc[first_run_start:last_run_end, col].ffill().bfill()
2331
+ elif fill_large_gaps_with == 'zeros':
2332
+ all_frames_angles_person_interp.replace(np.nan, 0, inplace=True)
2230
2333
 
2231
- # Filter
2232
- if reject_outliers:
2233
- logging.info(f'Rejecting outliers with a Hampel filter.')
2234
- all_frames_angles_person_interp = all_frames_angles_person_interp.apply(hampel_filter, axis=0)
2235
-
2236
- if not do_filter:
2237
- logging.info(f'No filtering.')
2238
- all_frames_angles_person_filt = all_frames_angles_person_interp
2334
+ # Filter
2335
+ if reject_outliers:
2336
+ logging.info(f'Rejecting outliers with a Hampel filter.')
2337
+ all_frames_angles_person_interp = all_frames_angles_person_interp.apply(hampel_filter, axis=0)
2338
+
2339
+ if not do_filter:
2340
+ logging.info(f'No filtering.')
2341
+ all_frames_angles_person_filt = all_frames_angles_person_interp
2342
+ else:
2343
+ if filter_type == ('butterworth' or 'butterworth_on_speed'):
2344
+ cutoff = butterworth_filter_cutoff
2345
+ if video_file == 'webcam':
2346
+ if cutoff / (fps / 2) >= 1:
2347
+ cutoff_old = cutoff
2348
+ cutoff = fps/(2+0.001)
2349
+ args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
2350
+ butterworth_filter_cutoff = cutoff
2351
+ filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
2352
+ args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
2353
+ frame_rate = fps
2354
+ elif filter_type == 'gcv_spline':
2355
+ args = f'GVC Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
2356
+ elif filter_type == 'kalman':
2357
+ args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
2358
+ elif filter_type == 'gaussian':
2359
+ args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
2360
+ elif filter_type == 'loess':
2361
+ args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
2362
+ elif filter_type == 'median':
2363
+ args = f'Median filter, kernel of {median_filter_kernel}.'
2239
2364
  else:
- if filter_type == ('butterworth' or 'butterworth_on_speed'):
- cutoff = butterworth_filter_cutoff
- if video_file == 'webcam':
- if cutoff / (fps / 2) >= 1:
- cutoff_old = cutoff
- cutoff = fps/(2+0.001)
- args = f'\n{cutoff_old:.1f} Hz cut-off framerate too large for a real-time framerate of {fps:.1f} Hz. Using a cut-off framerate of {cutoff:.1f} Hz instead.'
- butterworth_filter_cutoff = cutoff
- filt_type = 'Butterworth' if filter_type == 'butterworth' else 'Butterworth on speed'
- args = f'{filt_type} filter, {butterworth_filter_order}th order, {butterworth_filter_cutoff} Hz.'
- frame_rate = fps
- elif filter_type == 'gcv_spline':
- args = f'GVC Spline filter, which automatically evaluates the best trade-off between smoothness and fidelity to data.'
- elif filter_type == 'kalman':
- args = f'Kalman filter, trusting measurement {kalman_filter_trust_ratio} times more than the process matrix.'
- elif filter_type == 'gaussian':
- args = f'Gaussian filter, Sigma kernel {gaussian_filter_kernel}.'
- elif filter_type == 'loess':
- args = f'LOESS filter, window size of {loess_filter_kernel} frames.'
- elif filter_type == 'median':
- args = f'Median filter, kernel of {median_filter_kernel}.'
- else:
- logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
- raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
-
- logging.info(f'Filtering with {args}')
- all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
-
- # Add floor_angle_estim to segment angles
- if correct_segment_angles_with_floor_angle and to_meters:
- logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
- for ang_name in all_frames_angles_person_filt.columns:
- if 'horizontal' in angle_dict[ang_name][1]:
- all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
-
- # Remove columns with all nan values
- all_frames_angles_processed[:,idx_person,:] = all_frames_angles_person_filt
- all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
- all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
-
- # Build mot file
- angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
- logging.info(f'Angles saved to {angles_path_person.resolve()}.')
-
- # Plotting angles before and after interpolation and filtering
- all_frames_angles_person.insert(0, 'time', all_frames_time)
- if save_plots and (show_plots or save_plots):
- pw = angle_plots(all_frames_angles_person, angle_data, i, show=show_plots) # i = current person
+ logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+ raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
+
+ logging.info(f'Filtering with {args}')
+ all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
+
+ # Add floor_angle_estim to segment angles
+ if correct_segment_angles_with_floor_angle and to_meters:
+ logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
+ for ang_name in all_frames_angles_person_filt.columns:
+ if 'horizontal' in angle_dict[ang_name][1]:
+ all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
+
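The floor-angle correction above only touches angles defined against the horizontal (segment angles); joint angles are relative and stay as computed. A hypothetical mini-example of the same idea, with a made-up is_segment lookup standing in for the 'horizontal' check on angle_dict:

    import numpy as np
    import pandas as pd

    floor_angle_rad = 0.035  # ~2.0 deg of estimated floor slope (made-up value)
    is_segment = {'Trunk': True, 'Right knee': False}  # hypothetical lookup

    angles = pd.DataFrame({'Trunk': [88.0, 87.5], 'Right knee': [10.0, 11.0]})
    for name in angles.columns:
        if is_segment[name]:  # Sports2D checks 'horizontal' in angle_dict[name][1]
            angles[name] -= np.degrees(floor_angle_rad)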
+ # Remove columns with all nan values
+ all_frames_angles_processed[:,idx_person,:] = all_frames_angles_person_filt
+ all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
+ all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
+
+ # Build mot file
+ angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
+ logging.info(f'Angles saved to {angles_path_person.resolve()}.')
+
+ # Plotting angles before and after interpolation and filtering
+ all_frames_angles_person.insert(0, 'time', all_frames_time)
+ if show_plots or save_plots:
+ pw = angle_plots(all_frames_angles_person, angle_data, i, show=show_plots) # i = current person
+ if save_plots:
  for n, f in enumerate(pw.figure_handles):
  dpi = pw.canvases[i].figure.dpi
  f.set_size_inches(1280/dpi, 720/dpi)
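On the webcam branch above, a requested Butterworth cut-off at or beyond the Nyquist frequency (fps / 2) is clamped just below it, since a normalized cut-off >= 1 is invalid for filter design. A minimal sketch of that guard with SciPy's zero-phase filtering (function name and defaults are illustrative; Sports2D delegates the actual filtering to filter1d):

    import numpy as np
    from scipy import signal

    def butterworth_smooth(x, fps, cutoff=6.0, order=4):
        """Low-pass Butterworth with a Nyquist guard; filtfilt gives zero phase
        lag and doubles the effective order, hence order // 2 in the design."""
        nyquist = fps / 2
        if cutoff / nyquist >= 1:
            cutoff = fps / (2 + 0.001)  # same clamp as above: just below Nyquist
        b, a = signal.butter(order // 2, cutoff / nyquist, btype='low')
        return signal.filtfilt(b, a, x)

    smoothed = butterworth_smooth(np.cumsum(np.random.randn(200)), fps=30.0)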
@@ -2392,25 +2496,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  # Delete person if less than 4 valid frames
  pose_path_person = pose_output_path.parent / (pose_output_path.stem + f'_person{i:02d}.trc')
  all_frames_X_person = pd.DataFrame(all_frames_X_homog[:,i,:], columns=new_keypoints_names)
- pose_nan_count = len(np.where(all_frames_X_person.sum(axis=1)==0)[0])
- if frame_count - frame_range[0] - pose_nan_count <= 4:
- # heights_m.append(DEFAULT_HEIGHT)
- # masses.append(DEFAULT_MASS)
- logging.info(f'Less than 4 valid frames. Deleting person.')
+ if new_visible_side[i] == 'none':
+ logging.info(f'Skipping marker augmentation and inverse kinematics because visible_side is "none".')
  else:
- if new_visible_side[i] == 'none':
- logging.info(f'Skipping marker augmentation and inverse kinematics because visible_side is "none".')
- # heights_m.append(DEFAULT_HEIGHT)
- # masses.append(DEFAULT_MASS)
- else:
- # Provide missing data to Pose2Sim_config_dict
- height_m_i = compute_height(trc_data_m_i.iloc[:,1:], keypoints_names,
- fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_m, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
- mass_i = participant_masses[i] if len(participant_masses)>i else 70
- if len(participant_masses)<=i:
- logging.warning(f'No mass provided. Using 70 kg as default.')
- heights_m.append(height_m_i)
- masses.append(mass_i)
+ # Provide missing data to Pose2Sim_config_dict
+ height_m_i = compute_height(trc_data_m_i.iloc[:,1:], keypoints_names,
+ fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_m, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)
+ mass_i = participant_masses[i] if len(participant_masses)>i else DEFAULT_MASS
+ if len(participant_masses)<=i:
+ logging.warning(f'No mass provided. Using {DEFAULT_MASS} kg as default.')
+ heights_m.append(height_m_i)
+ masses.append(mass_i)

  Pose2Sim_config_dict['project']['participant_height'] = heights_m
  Pose2Sim_config_dict['project']['participant_mass'] = masses
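The new branch above falls back to a shared DEFAULT_MASS constant instead of a hard-coded 70 when fewer masses are configured than persons detected. A small sketch of that per-person lookup (the helper name is illustrative; 70.0 kg mirrors the old hard-coded fallback):

    import logging

    DEFAULT_MASS = 70.0  # kg, matching the previous hard-coded default

    def mass_for_person(participant_masses, i):
        """Return the i-th configured mass, warning and falling back to the
        default when the config lists fewer masses than detected persons."""
        if len(participant_masses) > i:
            return participant_masses[i]
        logging.warning(f'No mass provided for person {i:02d}. Using {DEFAULT_MASS} kg as default.')
        return DEFAULT_MASS

    masses = [mass_for_person([62.0], i) for i in range(3)]  # -> [62.0, 70.0, 70.0]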
@@ -49,7 +49,7 @@ dependencies = [
  "opencv-python<4.12", # otherwise forces numpy>=2.0, which is incompatible with some opensim/python combinations
  "imageio_ffmpeg",
  "deep-sort-realtime",
- "Pose2Sim>=0.10.36"
+ "Pose2Sim>=0.10.38"
  ]

  [tool.setuptools_scm]
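To apply the same raised Pose2Sim constraint (>=0.10.38) in an existing environment, standard pip syntax suffices: pip install --upgrade "Pose2Sim>=0.10.38". The metadata diffs below mirror this pyproject.toml change.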
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sports2d
- Version: 0.8.21
+ Version: 0.8.23
  Summary: Compute 2D human pose and angles from a video or a webcam.
  Author-email: David Pagnon <contact@david-pagnon.com>
  Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -38,7 +38,7 @@ Requires-Dist: openvino
  Requires-Dist: opencv-python<4.12
  Requires-Dist: imageio_ffmpeg
  Requires-Dist: deep-sort-realtime
- Requires-Dist: Pose2Sim>=0.10.36
+ Requires-Dist: Pose2Sim>=0.10.38
  Dynamic: license-file

@@ -14,4 +14,4 @@ openvino
  opencv-python<4.12
  imageio_ffmpeg
  deep-sort-realtime
- Pose2Sim>=0.10.36
+ Pose2Sim>=0.10.38