sports2d-0.7.0-py3-none-any.whl → sports2d-0.7.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Sports2D/Demo/Config_demo.toml CHANGED
@@ -89,7 +89,7 @@ det_frequency = 4 # Run person detection only every N frames, and inbetwee
  # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
  device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
  backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
- tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
+ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned
  # deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
  # More robust in crowded scenes but tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
  # Requires `pip install torch torchvision torchreid gdown tensorboard`
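Note: `deepsort_params` is a TOML string (wrapped in triple double quotes) containing a Python dict literal. A minimal sketch of how such a value can be turned into a tracker, assuming it is parsed with `ast.literal_eval` (process.py imports both `ast` and `DeepSort`; the exact call site is not shown in this diff):

```python
import ast
from deep_sort_realtime.deepsort_tracker import DeepSort

# The TOML value holds a Python dict literal, so it can be parsed safely
# with ast.literal_eval and splatted into the DeepSort constructor.
deepsort_params = "{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}"
tracker = DeepSort(**ast.literal_eval(deepsort_params))
```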
@@ -151,8 +151,8 @@ filter_type = 'butterworth' # butterworth, gaussian, LOESS, median
 
 
  [kinematics]
- do_ik = true # Do scaling and inverse kinematics?
- use_augmentation = true # true or false (lowercase) # Set to true if you want to use the model with augmented markers
+ do_ik = false # Do scaling and inverse kinematics?
+ use_augmentation = false # true or false (lowercase) # Set to true if you want to use the model with augmented markers
  use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
  participant_mass = [67.0, 55.0] # kg # defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)
  right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
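Note that 0.7.3 flips `do_ik` and `use_augmentation` from true to false, so OpenSim scaling/inverse kinematics and marker augmentation become opt-in. To restore the 0.7.0 behaviour, set them back explicitly:

```toml
[kinematics]
do_ik = true             # re-enable OpenSim scaling and inverse kinematics
use_augmentation = true  # re-enable marker augmentation
```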
Sports2D/Sports2D.py CHANGED
@@ -109,6 +109,7 @@
 
 
  ## INIT
+ from importlib.metadata import version
  import argparse
  import toml
  from datetime import datetime
@@ -261,7 +262,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
- 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+ 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
  More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
@@ -301,7 +302,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
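This release replaces every hard-coded `__version__ = "0.4.0"` with a lookup of the installed distribution's metadata, so the version is defined in one place (the packaging config). A sketch of the usual defensive form of this pattern — the fallback branch is an illustration, the diff calls `version()` directly:

```python
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("sports2d")  # read from the installed package metadata
except PackageNotFoundError:
    __version__ = "unknown"  # e.g. running from an uninstalled source checkout
```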
Sports2D/Utilities/__init__.py CHANGED
@@ -2,6 +2,7 @@
  # -*- coding: utf-8 -*-
 
  import sys
+ from importlib.metadata import version
 
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  VERSION = __version__
Sports2D/Utilities/common.py CHANGED
@@ -15,27 +15,14 @@
 
 
  ## INIT
- import re
  import sys
- import toml
+ from importlib.metadata import version
  import subprocess
  from pathlib import Path
- import itertools as it
  import logging
  from collections import defaultdict
- from anytree import PreOrderIter
-
  import numpy as np
- import pandas as pd
- from scipy import interpolate
  import imageio_ffmpeg as ffmpeg
- import cv2
- import c3d
-
- import matplotlib.pyplot as plt
- from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
- from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
- from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QVBoxLayout
 
 
  ## AUTHORSHIP INFORMATION
@@ -43,7 +30,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
@@ -118,58 +105,6 @@ colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255),
  (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
  thickness = 1
 
- ## CLASSES
- class plotWindow():
-     '''
-     Display several figures in tabs
-     Taken from https://github.com/superjax/plotWindow/blob/master/plotWindow.py
-
-     USAGE:
-     pw = plotWindow()
-     f = plt.figure()
-     plt.plot(x1, y1)
-     pw.addPlot("1", f)
-     f = plt.figure()
-     plt.plot(x2, y2)
-     pw.addPlot("2", f)
-     '''
-     def __init__(self, parent=None):
-         self.app = QApplication.instance()
-         if not self.app:
-             self.app = QApplication(sys.argv)
-         self.MainWindow = QMainWindow()
-         self.MainWindow.setWindowTitle("Multitabs figure")
-         self.canvases = []
-         self.figure_handles = []
-         self.toolbar_handles = []
-         self.tab_handles = []
-         self.current_window = -1
-         self.tabs = QTabWidget()
-         self.MainWindow.setCentralWidget(self.tabs)
-         self.MainWindow.resize(1280, 720)
-         self.MainWindow.show()
-
-     def addPlot(self, title, figure):
-         new_tab = QWidget()
-         layout = QVBoxLayout()
-         new_tab.setLayout(layout)
-
-         figure.subplots_adjust(left=0.1, right=0.99, bottom=0.1, top=0.91, wspace=0.2, hspace=0.2)
-         new_canvas = FigureCanvas(figure)
-         new_toolbar = NavigationToolbar(new_canvas, new_tab)
-
-         layout.addWidget(new_canvas)
-         layout.addWidget(new_toolbar)
-         self.tabs.addTab(new_tab, title)
-
-         self.toolbar_handles.append(new_toolbar)
-         self.canvases.append(new_canvas)
-         self.figure_handles.append(figure)
-         self.tab_handles.append(new_tab)
-
-     def show(self):
-         self.app.exec_()
-
  ## FUNCTIONS
  def to_dict(d):
      '''
@@ -180,164 +115,6 @@ def to_dict(d):
      return d
 
 
- def read_trc(trc_path):
-     '''
-     Read a TRC file and extract its contents.
-
-     INPUTS:
-     - trc_path (str): The path to the TRC file.
-
-     OUTPUTS:
-     - tuple: A tuple containing the Q coordinates, frames column, time column, marker names, and header.
-     '''
-
-     try:
-         with open(trc_path, 'r') as trc_file:
-             header = [next(trc_file) for _ in range(5)]
-         markers = header[3].split('\t')[2::3]
-         markers = [m.strip() for m in markers if m.strip()] # remove last \n character
-
-         trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4, encoding='utf-8')
-         frames_col, time_col = trc_df.iloc[:, 0], trc_df.iloc[:, 1]
-         Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
-         Q_coords = Q_coords.loc[:, ~Q_coords.columns.str.startswith('Unnamed')] # remove unnamed columns
-         Q_coords.columns = np.array([[m,m,m] for m in markers]).ravel().tolist()
-
-         return Q_coords, frames_col, time_col, markers, header
-
-     except Exception as e:
-         raise ValueError(f"Error reading TRC file at {trc_path}: {e}")
-
-
- def extract_trc_data(trc_path):
-     '''
-     Extract marker names and coordinates from a trc file.
-
-     INPUTS:
-     - trc_path: Path to the trc file
-
-     OUTPUTS:
-     - marker_names: List of marker names
-     - marker_coords: Array of marker coordinates (n_frames, t+3*n_markers)
-     '''
-
-     # marker names
-     with open(trc_path, 'r') as file:
-         lines = file.readlines()
-         marker_names_line = lines[3]
-         marker_names = marker_names_line.strip().split('\t')[2::3]
-
-     # time and marker coordinates
-     trc_data_np = np.genfromtxt(trc_path, skip_header=5, delimiter = '\t')[:,1:]
-
-     return marker_names, trc_data_np
-
-
- def create_c3d_file(c3d_path, marker_names, trc_data_np):
-     '''
-     Create a c3d file from the data extracted from a trc file.
-
-     INPUTS:
-     - c3d_path: Path to the c3d file
-     - marker_names: List of marker names
-     - trc_data_np: Array of marker coordinates (n_frames, t+3*n_markers)
-
-     OUTPUTS:
-     - c3d file
-     '''
-
-     # retrieve frame rate
-     times = trc_data_np[:,0]
-     frame_rate = round((len(times)-1) / (times[-1] - times[0]))
-
-     # write c3d file
-     writer = c3d.Writer(point_rate=frame_rate, analog_rate=0, point_scale=1.0, point_units='mm', gen_scale=-1.0)
-     writer.set_point_labels(marker_names)
-     writer.set_screen_axis(X='+Z', Y='+Y')
-
-     for frame in trc_data_np:
-         residuals = np.full((len(marker_names), 1), 0.0)
-         cameras = np.zeros((len(marker_names), 1))
-         coords = frame[1:].reshape(-1,3)*1000
-         points = np.hstack((coords, residuals, cameras))
-         writer.add_frames([(points, np.array([]))])
-
-     writer.set_start_frame(0)
-     writer._set_last_frame(len(trc_data_np)-1)
-
-     with open(c3d_path, 'wb') as handle:
-         writer.write(handle)
-
-
- def convert_to_c3d(trc_path):
-     '''
-     Make Visual3D compatible c3d files from a trc path
-
-     INPUT:
-     - trc_path: string, trc file to convert
-
-     OUTPUT:
-     - c3d file
-     '''
-
-     trc_path = str(trc_path)
-     c3d_path = trc_path.replace('.trc', '.c3d')
-     marker_names, trc_data_np = extract_trc_data(trc_path)
-     create_c3d_file(c3d_path, marker_names, trc_data_np)
-
-     return c3d_path
-
-
- def interpolate_zeros_nans(col, *args):
-     '''
-     Interpolate missing points (of value zero),
-     unless more than N contiguous values are missing.
-
-     INPUTS:
-     - col: pandas column of coordinates
-     - args[0] = N: max number of contiguous bad values, above which they won't be interpolated
-     - args[1] = kind: 'linear', 'slinear', 'quadratic', 'cubic'. Default: 'cubic'
-
-     OUTPUT:
-     - col_interp: interpolated pandas column
-     '''
-
-     if len(args)==2:
-         N, kind = args
-     if len(args)==1:
-         N = np.inf
-         kind = args[0]
-     if not args:
-         N = np.inf
-
-     # Interpolate nans
-     mask = ~(np.isnan(col) | col.eq(0)) # True where the value is neither nan nor zero
-     idx_good = np.where(mask)[0]
-     if len(idx_good) <= 4:
-         return col
-
-     if 'kind' not in locals(): # 'linear', 'slinear', 'quadratic', 'cubic'
-         f_interp = interpolate.interp1d(idx_good, col[idx_good], kind="linear", bounds_error=False)
-     else:
-         f_interp = interpolate.interp1d(idx_good, col[idx_good], kind=kind, fill_value='extrapolate', bounds_error=False)
-     col_interp = np.where(mask, col, f_interp(col.index)) # replace values at False indices with interpolated ones
-
-     # Reintroduce nans if length of sequence > N
-     idx_notgood = np.where(~mask)[0]
-     gaps = np.where(np.diff(idx_notgood) > 1)[0] + 1 # where the bad indices are not contiguous
-     sequences = np.split(idx_notgood, gaps)
-     if sequences[0].size>0:
-         for seq in sequences:
-             if len(seq) > N: # values to exclude from interpolation are set to false when they are too long
-                 col_interp[seq] = np.nan
-
-     return col_interp
-
-
- def natural_sort_key(s):
-     return [int(c) if c.isdigit() else c.lower() for c in re.split(r'(\d+)', s)]
-
-
  def make_homogeneous(list_of_arrays):
      '''
      Make a list of arrays (or a list of lists) homogeneous by padding with nans
@@ -428,374 +205,6 @@ def resample_video(vid_output_path, fps, desired_framerate):
      new_vid_path.rename(vid_output_path)
 
 
- def points_to_angles(points_list):
-     '''
-     If len(points_list)==2, computes clockwise angle of ab vector w.r.t. horizontal (e.g. RBigToe, RHeel)
-     If len(points_list)==3, computes clockwise angle from a to c around b (e.g. Neck, Hip, Knee)
-     If len(points_list)==4, computes clockwise angle between vectors ab and cd (e.g. Neck Hip, RKnee RHip)
-
-     Points can be 2D or 3D.
-     If parameters are float, returns a float between 0.0 and 360.0
-     If parameters are arrays, returns an array of floats between 0.0 and 360.0
-
-     INPUTS:
-     - points_list: list of arrays of points
-
-     OUTPUTS:
-     - ang_deg: float or array of floats. The angle(s) in degrees.
-     '''
-
-     if len(points_list) < 2: # if not enough points, return nan
-         return np.nan
-
-     points_array = np.array(points_list)
-     dimensions = points_array.shape[-1]
-
-     if len(points_list) == 2:
-         vector_u = points_array[0] - points_array[1]
-         if len(points_array.shape)==2:
-             vector_v = np.array([1, 0, 0]) # Here vector X, could be any horizontal vector
-         else:
-             vector_v = np.array([[1, 0, 0],] * points_array.shape[1])
-
-     elif len(points_list) == 3:
-         vector_u = points_array[0] - points_array[1]
-         vector_v = points_array[2] - points_array[1]
-
-     elif len(points_list) == 4:
-         vector_u = points_array[1] - points_array[0]
-         vector_v = points_array[3] - points_array[2]
-
-     else:
-         return np.nan
-
-     if dimensions == 2:
-         vector_u = vector_u[:2]
-         vector_v = vector_v[:2]
-         ang = np.arctan2(vector_u[1], vector_u[0]) - np.arctan2(vector_v[1], vector_v[0])
-     else:
-         cross_product = np.cross(vector_u, vector_v)
-         dot_product = np.einsum('ij,ij->i', vector_u, vector_v) # np.dot(vector_u, vector_v) # does not work with time series
-         ang = np.arctan2(np.linalg.norm(cross_product, axis=1), dot_product)
-
-     ang_deg = np.degrees(ang)
-     # ang_deg = np.array(np.degrees(np.unwrap(ang*2)/2))
-
-     return ang_deg
-
-
- def fixed_angles(points_list, ang_name):
-     '''
-     Add offset and multiplying factor to angles
-
-     INPUTS:
-     - points_list: list of arrays of points
-     - ang_name: str. The name of the angle to consider.
-
-     OUTPUTS:
-     - ang: float. The angle in degrees.
-     '''
-
-     ang_params = angle_dict[ang_name]
-     ang = points_to_angles(points_list)
-     ang += ang_params[2]
-     ang *= ang_params[3]
-     if ang_name in ['pelvis', 'shoulders']:
-         ang = np.where(ang>90, ang-180, ang)
-         ang = np.where(ang<-90, ang+180, ang)
-     else:
-         ang = np.where(ang>180, ang-360, ang)
-         ang = np.where(ang<-180, ang+360, ang)
-
-     return ang
-
-
- def mean_angles(trc_data, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
-     '''
-     Compute the mean angle time series from 3D points for a given list of angles.
-
-     INPUTS:
-     - trc_data (DataFrame): The triangulated coordinates of the markers.
-     - ang_to_consider (list): The list of angles to consider (requires angle_dict).
-
-     OUTPUTS:
-     - ang_mean: The mean angle time series.
-     '''
-
-     ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']
-
-     angs = []
-     for ang_name in ang_to_consider:
-         ang_params = angle_dict[ang_name]
-         ang_mk = ang_params[0]
-         if 'Neck' not in trc_data.columns:
-             df_MidShoulder = pd.DataFrame((trc_data['RShoulder'].values + trc_data['LShoulder'].values) /2)
-             df_MidShoulder.columns = ['Neck']*3
-             trc_data = pd.concat((trc_data.reset_index(drop=True), df_MidShoulder), axis=1)
-
-         pts_for_angles = []
-         for pt in ang_mk:
-             # pts_for_angles.append(trc_data.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
-             pts_for_angles.append(trc_data[pt])
-
-         ang = fixed_angles(pts_for_angles, ang_name)
-         ang = np.abs(ang)
-         angs.append(ang)
-
-     ang_mean = np.mean(angs, axis=0)
-
-     return ang_mean
-
-
- def add_neck_hip_coords(kpt_name, p_X, p_Y, p_scores, kpt_ids, kpt_names):
-     '''
-     Add neck (midshoulder) and hip (midhip) coordinates if neck and hip are not available
-
-     INPUTS:
-     - kpt_name: name of the keypoint to add (neck, hip)
-     - p_X: list of x coordinates after flipping if needed
-     - p_Y: list of y coordinates
-     - p_scores: list of confidence scores
-     - kpt_ids: list of keypoint ids (see skeletons.py)
-     - kpt_names: list of keypoint names (see skeletons.py)
-
-     OUTPUTS:
-     - p_X: list of x coordinates with added missing coordinate
-     - p_Y: list of y coordinates with added missing coordinate
-     - p_scores: list of confidence scores with added missing score
-     '''
-
-     names, ids = kpt_names.copy(), kpt_ids.copy()
-     names.append(kpt_name)
-     ids.append(len(p_X))
-     if kpt_name == 'Neck':
-         mid_X = (np.abs(p_X[ids[names.index('LShoulder')]]) + np.abs(p_X[ids[names.index('RShoulder')]])) /2
-         mid_Y = (p_Y[ids[names.index('LShoulder')]] + p_Y[ids[names.index('RShoulder')]])/2
-         mid_score = (p_scores[ids[names.index('LShoulder')]] + p_scores[ids[names.index('RShoulder')]])/2
-     elif kpt_name == 'Hip':
-         mid_X = (np.abs(p_X[ids[names.index('LHip')]]) + np.abs(p_X[ids[names.index('RHip')]]) ) /2
-         mid_Y = (p_Y[ids[names.index('LHip')]] + p_Y[ids[names.index('RHip')]])/2
-         mid_score = (p_scores[ids[names.index('LHip')]] + p_scores[ids[names.index('RHip')]])/2
-     else:
-         raise ValueError("kpt_name must be 'Neck' or 'Hip'")
-     p_X = np.append(p_X, mid_X)
-     p_Y = np.append(p_Y, mid_Y)
-     p_scores = np.append(p_scores, mid_score)
-
-     return p_X, p_Y, p_scores
-
-
- def best_coords_for_measurements(Q_coords, keypoints_names, beginning_frames_to_remove_percent=0.2, end_frames_to_remove_percent=0.2, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
-     '''
-     Compute the best coordinates for measurements, after removing:
-     - 20% fastest frames (may be outliers)
-     - frames when speed is close to zero (person is out of frame): 0.2 m/frame, or 50 px/frame
-     - frames when hip and knee angle below 45° (imprecise coordinates when person is crouching)
-
-     INPUTS:
-     - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-     - keypoints_names: list. The list of marker names
-     - beginning_frames_to_remove_percent: float
-     - end_frames_to_remove_percent: float
-     - fastest_frames_to_remove_percent: float
-     - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
-     - large_hip_knee_angles: int
-     - trimmed_extrema_percent
-
-     OUTPUT:
-     - Q_coords_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
-     '''
-
-     # Add MidShoulder column
-     df_MidShoulder = pd.DataFrame((Q_coords['RShoulder'].values + Q_coords['LShoulder'].values) /2)
-     df_MidShoulder.columns = ['MidShoulder']*3
-     Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_MidShoulder), axis=1)
-
-     # Add Hip column if not present
-     n_markers_init = len(keypoints_names)
-     if 'Hip' not in keypoints_names:
-         df_Hip = pd.DataFrame((Q_coords['RHip'].values + Q_coords['LHip'].values) /2)
-         df_Hip.columns = ['Hip']*3
-         Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_Hip), axis=1)
-     n_markers = len(keypoints_names)
-
-     # Removing first and last frames
-     # Q_coords = Q_coords.iloc[int(len(Q_coords) * beginning_frames_to_remove_percent):int(len(Q_coords) * (1-end_frames_to_remove_percent))]
-
-     # Using 80% slowest frames
-     sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
-     sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
-     if len(sum_speeds)==0:
-         logging.warning('All frames have speed close to zero. Make sure the person is moving and correctly detected, or change close_to_zero_speed to a lower value. Not restricting the speeds to be above any threshold.')
-         Q_coords_low_speeds = Q_coords
-     else:
-         min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
-         Q_coords_low_speeds = Q_coords.iloc[min_speed_indices].reset_index(drop=True)
-
-     # Only keep frames with hip and knee flexion angles below 45°
-     # (if more than 50 of them, else take 50 smallest values)
-     try:
-         ang_mean = mean_angles(Q_coords_low_speeds, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
-         Q_coords_low_speeds_low_angles = Q_coords_low_speeds[ang_mean < large_hip_knee_angles]
-         if len(Q_coords_low_speeds_low_angles) < 50:
-             Q_coords_low_speeds_low_angles = Q_coords_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
-     except:
-         logging.warning(f"At least one among the RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder markers is missing for computing the knee and hip angles. Not restricting these angles to be below {large_hip_knee_angles}°.")
-
-     if n_markers_init < n_markers:
-         Q_coords_low_speeds_low_angles = Q_coords_low_speeds_low_angles.iloc[:,:-3]
-
-     return Q_coords_low_speeds_low_angles
-
-
- def compute_height(trc_data, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
-     '''
-     Compute the height of the person from the trc data.
-
-     INPUTS:
-     - trc_data: pd.DataFrame. The XYZ coordinates of each marker
-     - keypoints_names: list. The list of marker names
-     - fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
-     - close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
-     - large_hip_knee_angles: float. Hip and knee angles below this value are considered as imprecise
-     - trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean
-
-     OUTPUT:
-     - height: float. The estimated height of the person
-     '''
-
-     # Retrieve most reliable coordinates, adding MidShoulder and Hip columns if not present
-     trc_data_low_speeds_low_angles = best_coords_for_measurements(trc_data, keypoints_names,
-         fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
-
-     # Automatically compute the height of the person
-     feet_pairs = [['RHeel', 'RAnkle'], ['LHeel', 'LAnkle']]
-     try:
-         rfoot, lfoot = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in feet_pairs]
-     except:
-         rfoot, lfoot = 0.10, 0.10
-         logging.warning('The Heel marker is missing from your model. Considering Foot to Heel size as 10 cm.')
-
-     ankle_to_shoulder_pairs = [['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
-         ['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
-     try:
-         rshank, rfemur, rback, lshank, lfemur, lback = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in ankle_to_shoulder_pairs]
-     except:
-         logging.error('At least one of the following markers is missing for computing the height of the person:\
-             RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\n\
-             Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-         raise ValueError('At least one of the following markers is missing for computing the height of the person:\
-             RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\
-             Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-
-     try:
-         head_pair = [['MidShoulder', 'Head']]
-         head = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in head_pair][0]
-     except:
-         head_pair = [['MidShoulder', 'Nose']]
-         head = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in head_pair][0]\
-             *1.33
-         logging.warning('The Head marker is missing from your model. Considering Neck to Head size as 1.33 times Neck to MidShoulder size.')
-
-     heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
-
-     # Remove the most extreme values before averaging (trimmed mean)
-     height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
-
-     return height
-
-
- def euclidean_distance(q1, q2):
-     '''
-     Euclidean distance between 2 points (N-dim).
-
-     INPUTS:
-     - q1: list of N_dimensional coordinates of point
-          or list of N points of N_dimensional coordinates
-     - q2: idem
-
-     OUTPUTS:
-     - euc_dist: float. Euclidean distance between q1 and q2
-     '''
-
-     q1 = np.array(q1)
-     q2 = np.array(q2)
-     dist = q2 - q1
-     if np.isnan(dist).all():
-         dist = np.empty_like(dist)
-         dist[...] = np.inf
-
-     if len(dist.shape)==1:
-         euc_dist = np.sqrt(np.nansum( [d**2 for d in dist]))
-     else:
-         euc_dist = np.sqrt(np.nansum( [d**2 for d in dist], axis=1))
-
-     return euc_dist
-
-
- def trimmed_mean(arr, trimmed_extrema_percent=0.5):
-     '''
-     Trimmed mean calculation for an array.
-
-     INPUTS:
-     - arr (np.array): The input array.
-     - trimmed_extrema_percent (float): The percentage of values to be trimmed from both ends.
-
-     OUTPUTS:
-     - float: The trimmed mean of the array.
-     '''
-
-     # Sort the array
-     sorted_arr = np.sort(arr)
-
-     # Determine the indices for the 25th and 75th percentiles (if trimmed_extrema_percent = 0.5)
-     lower_idx = int(len(sorted_arr) * (trimmed_extrema_percent/2))
-     upper_idx = int(len(sorted_arr) * (1 - trimmed_extrema_percent/2))
-
-     # Slice the array to exclude the 25% lowest and highest values
-     trimmed_arr = sorted_arr[lower_idx:upper_idx]
-
-     # Return the mean of the remaining values
-     return np.mean(trimmed_arr)
-
-
- def retrieve_calib_params(calib_file):
-     '''
-     Compute projection matrices from toml calibration file.
-
-     INPUT:
-     - calib_file: calibration .toml file.
-
-     OUTPUT:
-     - S: (h,w) vectors as list of 2x1 arrays
-     - K: intrinsic matrices as list of 3x3 arrays
-     - dist: distortion vectors as list of 4x1 arrays
-     - inv_K: inverse intrinsic matrices as list of 3x3 arrays
-     - optim_K: intrinsic matrices for undistorting points as list of 3x3 arrays
-     - R: rotation Rodrigues vectors as list of 3x1 arrays
-     - T: translation vectors as list of 3x1 arrays
-     '''
-
-     calib = toml.load(calib_file)
-
-     cal_keys = [c for c in calib.keys()
-                 if c not in ['metadata', 'capture_volume', 'charuco', 'checkerboard']
-                 and isinstance(calib[c],dict)]
-     S, K, dist, optim_K, inv_K, R, R_mat, T = [], [], [], [], [], [], [], []
-     for c, cam in enumerate(cal_keys):
-         S.append(np.array(calib[cam]['size']))
-         K.append(np.array(calib[cam]['matrix']))
-         dist.append(np.array(calib[cam]['distortions']))
-         optim_K.append(cv2.getOptimalNewCameraMatrix(K[c], dist[c], [int(s) for s in S[c]], 1, [int(s) for s in S[c]])[0])
-         inv_K.append(np.linalg.inv(K[c]))
-         R.append(np.array(calib[cam]['rotation']))
-         R_mat.append(cv2.Rodrigues(R[c])[0])
-         T.append(np.array(calib[cam]['translation']))
-     calib_params_dict = {'S': S, 'K': K, 'dist': dist, 'inv_K': inv_K, 'optim_K': optim_K, 'R': R, 'R_mat': R_mat, 'T': T}
-
-     return calib_params_dict
-
-
  def write_calibration(calib_params, toml_path):
      '''
      Write calibration file from calibration parameters
@@ -815,349 +224,3 @@ def write_calibration(calib_params, toml_path):
          cal_f.write(cam_str + name_str + size_str + mat_str + dist_str + rot_str + tran_str + fish_str)
      meta = '[metadata]\nadjusted = false\nerror = 0.0\n'
      cal_f.write(meta)
-
-
- def pad_shape(arr, target_len, fill_value=np.nan):
-     '''
-     Pads an array to the target length with specified fill values
-
-     INPUTS:
-     - arr: Input array to be padded.
-     - target_len: The target length of the first dimension after padding.
-     - fill_value: The value to use for padding (default: np.nan).
-
-     OUTPUTS:
-     - Padded array with shape (target_len, ...) matching the input dimensions.
-     '''
-
-     if len(arr) < target_len:
-         pad_shape = (target_len - len(arr),) + arr.shape[1:]
-         padding = np.full(pad_shape, fill_value)
-         return np.concatenate((arr, padding))
-
-     return arr
-
-
- def min_with_single_indices(L, T):
-     '''
-     Let L be a list (size s) with T associated tuple indices (size s).
-     Select the smallest values of L, considering that
-     the next smallest value cannot have the same numbers
-     in the associated tuple as any of the previous ones.
-
-     Example:
-     L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ]
-     T = list(it.product(range(3),range(4)))
-       = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)]
-
-     - 1st smallest value: 3 with tuple (2,3), index 11
-     - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]:
-       20 with tuple (0,0), index 0
-     - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]:
-       23 with tuple (1,1), index 5
-
-     INPUTS:
-     - L: list (size s)
-     - T: T associated tuple indices (size s)
-
-     OUTPUTS:
-     - minL: list of smallest values of L, considering constraints on tuple indices
-     - argminL: list of indices of smallest values of L (indices of best combinations)
-     - T_minL: list of tuples associated with smallest values of L
-     '''
-
-     minL = [np.nanmin(L)]
-     argminL = [np.nanargmin(L)]
-     T_minL = [T[argminL[0]]]
-
-     mask_tokeep = np.array([True for t in T])
-     i=0
-     while mask_tokeep.any()==True:
-         mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T])
-         if mask_tokeep.any()==True:
-             indicesL_tokeep = np.where(mask_tokeep)[0]
-             minL += [np.nanmin(np.array(L)[indicesL_tokeep]) if not np.isnan(np.array(L)[indicesL_tokeep]).all() else np.nan]
-             argminL += [indicesL_tokeep[np.nanargmin(np.array(L)[indicesL_tokeep])] if not np.isnan(minL[-1]) else indicesL_tokeep[0]]
-             T_minL += (T[argminL[i+1]],)
-             i+=1
-
-     return np.array(minL), np.array(argminL), np.array(T_minL)
-
-
- def sort_people_sports2d(keyptpre, keypt, scores=None):
-     '''
-     Associate persons across frames (Sports2D method)
-     Persons' indices are sometimes swapped when changing frame
-     A person is associated to another in the next frame when they are at a small distance
-
-     N.B.: Requires the min_with_single_indices and euclidean_distance functions (see common.py)
-
-     INPUTS:
-     - keyptpre: (K, L, M) array of 2D coordinates for K persons in the previous frame, L keypoints, M 2D coordinates
-     - keypt: idem keyptpre, for current frame
-     - scores: (K, L) array of confidence scores for K persons, L keypoints (optional)
-
-     OUTPUTS:
-     - sorted_prev_keypoints: array with reordered persons with values of previous frame if current is empty
-     - sorted_keypoints: array with reordered persons --> if scores is not None
-     - sorted_scores: array with reordered scores --> if scores is not None
-     - associated_tuples: list of tuples with correspondences between persons across frames --> if scores is None (for Pose2Sim.triangulation())
-     '''
-
-     # Generate possible person correspondences across frames
-     max_len = max(len(keyptpre), len(keypt))
-     keyptpre = pad_shape(keyptpre, max_len, fill_value=np.nan)
-     keypt = pad_shape(keypt, max_len, fill_value=np.nan)
-     if scores is not None:
-         scores = pad_shape(scores, max_len, fill_value=np.nan)
-
-     # Compute distance between persons from one frame to another
-     personsIDs_comb = sorted(list(it.product(range(len(keyptpre)), range(len(keypt)))))
-     frame_by_frame_dist = [euclidean_distance(keyptpre[comb[0]],keypt[comb[1]]) for comb in personsIDs_comb]
-     frame_by_frame_dist = np.mean(frame_by_frame_dist, axis=1)
-
-     # Sort correspondences by distance
-     _, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
-
-     # Associate points to the same index across frames, nan if no correspondence
-     sorted_keypoints = []
-     for i in range(len(keyptpre)):
-         id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
-         if len(id_in_old) > 0: sorted_keypoints += [keypt[id_in_old[0]]]
-         else: sorted_keypoints += [keypt[i]]
-     sorted_keypoints = np.array(sorted_keypoints)
-
-     if scores is not None:
-         sorted_scores = []
-         for i in range(len(keyptpre)):
-             id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
-             if len(id_in_old) > 0: sorted_scores += [scores[id_in_old[0]]]
-             else: sorted_scores += [scores[i]]
-         sorted_scores = np.array(sorted_scores)
-
-     # Keep track of previous values even when missing for more than one frame
-     sorted_prev_keypoints = np.where(np.isnan(sorted_keypoints) & ~np.isnan(keyptpre), keyptpre, sorted_keypoints)
-
-     if scores is not None:
-         return sorted_prev_keypoints, sorted_keypoints, sorted_scores
-     else: # For Pose2Sim.triangulation()
-         return sorted_keypoints, associated_tuples
-
-
- def sort_people_rtmlib(pose_tracker, keypoints, scores):
-     '''
-     Associate persons across frames (RTMLib method)
-
-     INPUTS:
-     - pose_tracker: PoseTracker. The initialized RTMLib pose tracker object
-     - keypoints: array of shape K, L, M with K the number of detected persons,
-       L the number of detected keypoints, M their 2D coordinates
-     - scores: array of shape K, L with K the number of detected persons,
-       L the confidence of detected keypoints
-
-     OUTPUT:
-     - sorted_keypoints: array with reordered persons
-     - sorted_scores: array with reordered scores
-     '''
-
-     try:
-         desired_size = max(pose_tracker.track_ids_last_frame)+1
-         sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
-         sorted_keypoints[pose_tracker.track_ids_last_frame] = keypoints[:len(pose_tracker.track_ids_last_frame), :, :]
-         sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
-         sorted_scores[pose_tracker.track_ids_last_frame] = scores[:len(pose_tracker.track_ids_last_frame), :]
-     except:
-         sorted_keypoints, sorted_scores = keypoints, scores
-
-     return sorted_keypoints, sorted_scores
-
-
- def sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count):
-     '''
-     Associate persons across frames (DeepSort method)
-
-     INPUTS:
-     - keypoints: array of shape K, L, M with K the number of detected persons,
-       L the number of detected keypoints, M their 2D coordinates
-     - scores: array of shape K, L with K the number of detected persons,
-       L the confidence of detected keypoints
-     - deepsort_tracker: The initialized DeepSort tracker object
-     - frame: np.array. The current image opened with cv2.imread
-     - frame_count: int. The index of the current frame
-
-     OUTPUT:
-     - sorted_keypoints: array with reordered persons
-     - sorted_scores: array with reordered scores
-     '''
-
-     try:
-         # Compute bboxes from keypoints and create detections (bboxes, scores, class_ids)
-         bboxes_ltwh = bbox_ltwh_compute(keypoints, padding=20)
-         bbox_scores = np.mean(scores, axis=1)
-         class_ids = np.array(['person']*len(bboxes_ltwh))
-         detections = list(zip(bboxes_ltwh, bbox_scores, class_ids))
-
-         # Estimate the tracks and retrieve the indices of the original detections
-         det_ids = [i for i in range(len(detections))]
-         tracks = deepsort_tracker.update_tracks(detections, frame=frame, others=det_ids)
-         track_ids_frame, orig_det_ids = [], []
-         for track in tracks:
-             if not track.is_confirmed():
-                 continue
-             track_ids_frame.append(int(track.track_id)-1) # ID of people
-             orig_det_ids.append(track.get_det_supplementary()) # ID of detections
-
-         # Correspondence between person IDs and original detection IDs
-         desired_size = max(track_ids_frame) + 1
-         sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
-         sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
-         for i,v in enumerate(track_ids_frame):
-             if orig_det_ids[i] is not None:
-                 sorted_keypoints[v] = keypoints[orig_det_ids[i]]
-                 sorted_scores[v] = scores[orig_det_ids[i]]
-
-     except Exception as e:
-         sorted_keypoints, sorted_scores = keypoints, scores
-         if frame_count > deepsort_tracker.tracker.n_init:
-             logging.warning(f"Tracking error: {e}. Sorting persons with DeepSort method failed for this frame.")
-
-     return sorted_keypoints, sorted_scores
-
-
- def bbox_ltwh_compute(keypoints, padding=0):
-     '''
-     Compute bounding boxes in (x_min, y_min, width, height) format.
-     Optionally add padding to the bounding boxes,
-     as a percentage of the bounding box size (+padding% horizontally, +padding/2% vertically)
-
-     INPUTS:
-     - keypoints: array of shape K, L, M with K the number of detected persons,
-       L the number of detected keypoints, M their 2D coordinates
-     - padding: int. The padding to add to the bounding boxes, in percent
-
-     OUTPUT:
-     - bbox_ltwh: (K, 4) array of bounding boxes in (x_min, y_min, width, height) format
-     '''
-
-     x_coords = keypoints[:, :, 0]
-     y_coords = keypoints[:, :, 1]
-
-     x_min, x_max = np.min(x_coords, axis=1), np.max(x_coords, axis=1)
-     y_min, y_max = np.min(y_coords, axis=1), np.max(y_coords, axis=1)
-     width = x_max - x_min
-     height = y_max - y_min
-
-     if padding > 0:
-         x_min = x_min - width*padding/100
-         y_min = y_min - height/2*padding/100
-         width = width + 2*width*padding/100
-         height = height + height*padding/100
-
-     bbox_ltwh = np.stack((x_min, y_min, width, height), axis=1)
-
-     return bbox_ltwh
-
-
- def draw_bounding_box(img, X, Y, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)], fontSize=0.3, thickness=1):
-     '''
-     Draw bounding boxes and person IDs around lists of X and Y coordinates.
-     Bounding boxes have a different color for each person.
-
-     INPUTS:
-     - img: opencv image
-     - X: list of lists of x coordinates
-     - Y: list of lists of y coordinates
-     - colors: list of colors to cycle through
-
-     OUTPUT:
-     - img: image with rectangles and person IDs
-     '''
-
-     color_cycle = it.cycle(colors)
-
-     for i,(x,y) in enumerate(zip(X,Y)):
-         color = next(color_cycle)
-         if not np.isnan(x).all():
-             x_min, y_min = np.nanmin(x).astype(int), np.nanmin(y).astype(int)
-             x_max, y_max = np.nanmax(x).astype(int), np.nanmax(y).astype(int)
-             if x_min < 0: x_min = 0
-             if x_max > img.shape[1]: x_max = img.shape[1]
-             if y_min < 0: y_min = 0
-             if y_max > img.shape[0]: y_max = img.shape[0]
-
-             # Draw rectangles
-             cv2.rectangle(img, (x_min-25, y_min-25), (x_max+25, y_max+25), color, thickness)
-
-             # Write person ID
-             cv2.putText(img, str(i), (x_min-30, y_min-30), cv2.FONT_HERSHEY_SIMPLEX, fontSize, color, 2, cv2.LINE_AA)
-
-     return img
-
-
- def draw_skel(img, X, Y, model):
-     '''
-     Draws the skeleton for each person.
-     Skeletons have a different color for each person.
-
-     INPUTS:
-     - img: opencv image
-     - X: list of lists of x coordinates
-     - Y: list of lists of y coordinates
-     - model: skeleton model (from skeletons.py)
-
-     OUTPUT:
-     - img: image with keypoints and skeleton
-     '''
-
-     # Get (unique) pairs between which to draw a line
-     id_pairs, name_pairs = [], []
-     for data_i in PreOrderIter(model.root, filter_=lambda node: node.is_leaf):
-         node_branch_ids = [node_i.id for node_i in data_i.path]
-         node_branch_names = [node_i.name for node_i in data_i.path]
-         id_pairs += [[node_branch_ids[i],node_branch_ids[i+1]] for i in range(len(node_branch_ids)-1)]
-         name_pairs += [[node_branch_names[i],node_branch_names[i+1]] for i in range(len(node_branch_names)-1)]
-     node_pairs = {tuple(name_pair): id_pair for (name_pair,id_pair) in zip(name_pairs,id_pairs)}
-
-
-     # Draw lines
-     for (x,y) in zip(X,Y):
-         if not np.isnan(x).all():
-             for names, ids in node_pairs.items():
-                 if not None in ids and not (np.isnan(x[ids[0]]) or np.isnan(y[ids[0]]) or np.isnan(x[ids[1]]) or np.isnan(y[ids[1]])):
-                     if any(n.startswith('R') for n in names) and not any(n.startswith('L') for n in names):
-                         c = (255,128,0)
-                     elif any(n.startswith('L') for n in names) and not any(n.startswith('R') for n in names):
-                         c = (0,255,0)
-                     else:
-                         c = (51, 153, 255)
-                     cv2.line(img, (int(x[ids[0]]), int(y[ids[0]])), (int(x[ids[1]]), int(y[ids[1]])), c, thickness)
-
-     return img
-
-
- def draw_keypts(img, X, Y, scores, cmap_str='RdYlGn'):
-     '''
-     Draws keypoints for each person.
-     Keypoints' colors depend on their score.
-
-     INPUTS:
-     - img: opencv image
-     - X: list of lists of x coordinates
-     - Y: list of lists of y coordinates
-     - scores: list of lists of confidence scores
-     - cmap_str: colormap name
-
-     OUTPUT:
-     - img: image with colored keypoints
-     '''
-
-     scores = np.where(np.isnan(scores), 0, scores)
-     # scores = (scores - 0.4) / (1-0.4) # to get a red color for scores lower than 0.4
-     scores = np.where(scores>0.99, 0.99, scores)
-     scores = np.where(scores<0, 0, scores)
-
-     cmap = plt.get_cmap(cmap_str)
-     for (x,y,s) in zip(X,Y,scores):
-         c_k = np.array(cmap(s))[:,:-1]*255
-         [cv2.circle(img, (int(x[i]), int(y[i])), thickness+4, c_k[i][::-1], -1)
-             for i in range(len(x))
-             if not (np.isnan(x[i]) or np.isnan(y[i]))]
-
-     return img
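The helpers removed above are not gone: process.py now star-imports them from Pose2Sim (see `from Pose2Sim.common import *` below), which is already a declared dependency (`Requires-Dist: Pose2Sim`). An explicit sketch of the same import, assuming Pose2Sim.common exposes the same names as the deleted Sports2D copies:

```python
# Hypothetical explicit form of the star import used in process.py:
from Pose2Sim.common import euclidean_distance, trimmed_mean, read_trc, convert_to_c3d
```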
Sports2D/Utilities/filter.py CHANGED
@@ -19,6 +19,7 @@
 
 
  ## INIT
+ from importlib.metadata import version
  import numpy as np
  from scipy import signal
  from scipy.ndimage import gaussian_filter1d
@@ -30,7 +31,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2021, Pose2Sim"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
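For orientation, the `gaussian` option of `filter_type` boils down to scipy's 1D Gaussian filter applied to each coordinate trace. A minimal illustrative sketch (not Sports2D's exact call):

```python
import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.array([100.0, 101.0, 150.0, 103.0, 104.0])  # noisy x-coordinate of one keypoint
x_smooth = gaussian_filter1d(x, sigma=1)           # larger sigma = heavier smoothing
```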
Sports2D/Utilities/tests.py CHANGED
@@ -13,6 +13,7 @@
 
 
  ## INIT
+ from importlib.metadata import version
  import toml
  import subprocess
  from pathlib import Path
@@ -23,7 +24,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
@@ -58,12 +59,14 @@ def test_workflow():
      demo_cmd = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False"]
      subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')
 
-     # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, time range and slowmo factor
+     # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
      demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
                   "--to_meters", "False",
                   "--multiperson", "False",
                   "--mode", "lightweight", "--det_frequency", "50",
-                  "--time_range", "1.2", "2.7", "--slowmo_factor", "4"]
+                  "--slowmo_factor", "4",
+                  "--filter_type", "gaussian",
+                  "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
      subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
 
      # With a time range, inverse kinematics, marker augmentation, body pose_model and custom RTMO mode
@@ -71,9 +74,8 @@ def test_workflow():
                   "--time_range", "1.2", "2.7",
                   "--do_ik", "True", "--use_augmentation", "True",
                   "--px_to_m_from_person_id", "1", "--px_to_m_person_height", "1.65",
-                  "--visible_side", "left", "front", "--participant_mass", "55.0", "67.0",
-                  "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
-     subprocess.run(demo_cmd3, check=True, capture_output=True, text=True)
+                  "--visible_side", "front", "auto", "--participant_mass", "55.0", "67.0"]
+     subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')
 
      # From config file
      cli_config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
Sports2D/__init__.py CHANGED
@@ -2,6 +2,7 @@
  # -*- coding: utf-8 -*-
 
  import sys
+ from importlib.metadata import version
 
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  VERSION = __version__
Sports2D/process.py CHANGED
@@ -58,6 +58,7 @@ import json
  import ast
  import shutil
  import os
+ from importlib.metadata import version
  from functools import partial
  from datetime import datetime
  import itertools as it
@@ -72,11 +73,11 @@ import matplotlib as mpl
  import matplotlib.pyplot as plt
  from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom
  from deep_sort_realtime.deepsort_tracker import DeepSort
- import opensim as osim
 
  from Sports2D.Utilities import filter
  from Sports2D.Utilities.common import *
  from Sports2D.Utilities.skeletons import *
+ from Pose2Sim.common import *
 
  DEFAULT_MASS = 70
  DEFAULT_HEIGHT = 1.7
@@ -86,7 +87,7 @@ __author__ = "David Pagnon, HunMin Kim"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
@@ -625,7 +626,7 @@ def trc_data_from_XYZtime(X, Y, Z, time):
      '''
 
      trc_data = pd.concat([pd.concat([X.iloc[:,kpt], Y.iloc[:,kpt], Z.iloc[:,kpt]], axis=1) for kpt in range(len(X.columns))], axis=1)
-     trc_data.insert(0, 't', time)
+     trc_data.insert(0, 'time', time)
 
      return trc_data
 
@@ -928,6 +929,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      px_to_m_from_person_id = int(config_dict.get('project').get('px_to_m_from_person_id'))
      px_to_m_person_height_m = config_dict.get('project').get('px_to_m_person_height')
      visible_side = config_dict.get('project').get('visible_side')
+     if isinstance(visible_side, str): visible_side = [visible_side]
      # Pose from file
      load_trc_px = config_dict.get('project').get('load_trc_px')
      if load_trc_px == '': load_trc_px = None
@@ -1040,7 +1042,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      close_to_zero_speed_px = config_dict.get('kinematics').get('close_to_zero_speed_px')
      close_to_zero_speed_m = config_dict.get('kinematics').get('close_to_zero_speed_m')
      if do_ik:
-         from Pose2Sim.markerAugmentation import augment_markers_all
+         if use_augmentation:
+             from Pose2Sim.markerAugmentation import augment_markers_all
          from Pose2Sim.kinematics import kinematics_all
      # Create a Pose2Sim dictionary and fill in missing keys
      recursivedict = lambda: defaultdict(recursivedict)
@@ -1127,12 +1130,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
          logging.error(f'\n{load_trc_px} file needs to be in px, not in meters.')
      logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc_px}.')
      # Load pose file in px
-     Q_coords, _, _, keypoints_names, _ = read_trc(load_trc_px)
+     Q_coords, _, time_col, keypoints_names, _ = read_trc(load_trc_px)
+
      keypoints_ids = [i for i in range(len(keypoints_names))]
      keypoints_all, scores_all = load_pose_file(Q_coords)
      for pre, _, node in RenderTree(pose_model):
          if node.name in keypoints_names:
              node.id = keypoints_names.index(node.name)
+     if time_range:
+         frame_range = [abs(time_col - time_range[0]).idxmin(), abs(time_col - time_range[1]).idxmin()+1]
+     else:
+         frame_range = [0, len(Q_coords)]
+     frame_iterator = tqdm(range(*frame_range))
 
  else:
      # Retrieve keypoint names from model
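The added block maps a `time_range` in seconds onto frame indices of the loaded TRC file by picking the nearest timestamps. A small worked sketch with the same names:

```python
import pandas as pd

time_col = pd.Series([0.00, 0.04, 0.08, 0.12, 0.16])    # one timestamp per frame
time_range = [0.05, 0.13]
frame_range = [abs(time_col - time_range[0]).idxmin(),      # nearest frame to the start time -> 1
               abs(time_col - time_range[1]).idxmin() + 1]  # exclusive end index -> 4
assert frame_range == [1, 4]
```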
@@ -1336,7 +1345,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      all_frames_scores = make_homogeneous(all_frames_scores)
 
      frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
-     all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1), name='time')
+     if not load_trc_px:
+         all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count-frame_range[0]+1), name='time')
+     else:
+         all_frames_time = time_col
      if not multiperson:
          px_to_m_from_person_id = get_personID_with_highest_scores(all_frames_scores)
          detected_persons = [px_to_m_from_person_id]
@@ -1475,7 +1487,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      if not np.array(trc_data[i].iloc[:,1:] ==0).all():
          # Automatically determine visible side
          visible_side_i = visible_side[i] if len(visible_side)>i else 'auto' # set to 'auto' if list too short
-
          # Set to 'front' if slope of X values between [-5,5]
          if visible_side_i == 'auto':
              try:
@@ -1512,7 +1523,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
      pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
      make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
      if make_c3d:
-         c3d_path = convert_to_c3d(pose_path_person_m_i)
+         c3d_path = convert_to_c3d(str(pose_path_person_m_i))
      logging.info(f'Pose in meters saved to {pose_path_person_m_i.resolve()}. {"Also saved in c3d format." if make_c3d else ""}')
 
 
@@ -1643,12 +1654,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
      # OpenSim inverse kinematics (and optional marker augmentation)
      if do_ik or use_augmentation:
+         import opensim as osim
          logging.info('\nPost-processing angles (with inverse kinematics):')
          if not to_meters:
              logging.warning('Skipping marker augmentation and inverse kinematics as to_meters was set to False.')
          else:
              # move all trc files containing _m_ string to pose3d_dir
-             for trc_file in output_dir.glob('*_m_*.trc'):
+             if not load_trc_px:
+                 trc_list = output_dir.glob('*_m_*.trc')
+             else:
+                 trc_list = [pose_path_person_m_i]
+             for trc_file in trc_list:
                  if (pose3d_dir/trc_file.name).exists():
                      os.remove(pose3d_dir/trc_file.name)
                  shutil.move(trc_file, pose3d_dir)
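Together with the `import opensim as osim` line dropped from the module header earlier in this diff, OpenSim is now only imported on the code path that needs it, so a plain 2D analysis runs without the opensim package installed. The pattern, sketched with a hypothetical function name:

```python
def kinematics_step():
    # Deferred heavy dependency: the ImportError surfaces only if IK is requested.
    import opensim as osim
    ...
```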
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: sports2d
- Version: 0.7.0
+ Version: 0.7.3
  Summary: Detect pose and compute 2D joint angles from a video.
  Home-page: https://github.com/davidpagnon/Sports2D
  Author: David Pagnon
@@ -40,6 +40,7 @@ Requires-Dist: tqdm
  Requires-Dist: imageio_ffmpeg
  Requires-Dist: deep-sort-realtime
  Requires-Dist: Pose2Sim
+ Dynamic: license-file
 
 
  [![Continuous integration](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml/badge.svg?branch=main)](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml)
@@ -81,7 +82,10 @@ Works on any smartphone!**\
 
  </br>
 
- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e
+
+ https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
+
+ <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
 
  `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
  `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\
@@ -465,7 +469,7 @@ sports2d --help
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
- 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+ 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
  More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
@@ -0,0 +1,16 @@
+ Sports2D/Sports2D.py,sha256=AiH2D2IJ9odxqvPkhDQSv6Kyj1Bn6wNFtlmGB7YbHC0,29770
+ Sports2D/__init__.py,sha256=BuUkPEdItxlkeqz4dmoiPwZLkgAfABJK3KWQ1ujTGwE,153
+ Sports2D/process.py,sha256=4OFI2BlfMQ0Vm1g59HLOzBRAmEiXkawa-f5AVPsYXp0,88043
+ Sports2D/Demo/Config_demo.toml,sha256=TeIaqpbnPDO2jyOOH0IUAMRxiqdpoeHN_ZDE3P5Nrfo,13877
+ Sports2D/Demo/demo.mp4,sha256=2aZkFxhWR7ESMEtXCT8MGA83p2jmoU2sp1ylQfO3gDk,3968304
+ Sports2D/Utilities/__init__.py,sha256=BuUkPEdItxlkeqz4dmoiPwZLkgAfABJK3KWQ1ujTGwE,153
+ Sports2D/Utilities/common.py,sha256=eq-UR8ZJLLRwpXtN4drurmuypuTuxsiiU2ZYd-R3Zqw,11196
+ Sports2D/Utilities/filter.py,sha256=rfZcqofjllKI_5ovZTKEAmyjOZpB_PzbAJ0P874T8Ak,4973
+ Sports2D/Utilities/skeletons.py,sha256=WObRPHpCj5Q2WpspzFRy1gvAX-EZD9WyA9K-kqL4YRo,40076
+ Sports2D/Utilities/tests.py,sha256=gUIyOxujSTvyzHkOmuexIBqd3BApLcshPBcXuNfGCZ0,3624
+ sports2d-0.7.3.dist-info/licenses/LICENSE,sha256=f4qe3nE0Y7ltJho5w-xAR0jI5PUox5Xl-MsYiY7ZRM8,1521
+ sports2d-0.7.3.dist-info/METADATA,sha256=6aje5qwCEgLaFZch4gHya4-Bh-5SwsNAPp5ZBM6bW1M,38631
+ sports2d-0.7.3.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ sports2d-0.7.3.dist-info/entry_points.txt,sha256=h2CJTuydtNf8JyaLoWxWl5HTSIxx5Ra_FSiSGQsf7Sk,52
+ sports2d-0.7.3.dist-info/top_level.txt,sha256=DoURf9UDB8lQ_9lMUPQMQqhXCvWPFFjJco9NzPlHJ6I,9
+ sports2d-0.7.3.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.2)
+ Generator: setuptools (78.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
@@ -1,16 +0,0 @@
- Sports2D/Sports2D.py,sha256=eWOz-7HQiwRu7Xl0_bPTlg9meOW635x197WL9QrKfoU,29719
- Sports2D/__init__.py,sha256=TyCP7Uuuy6CNklhPf8W84MbYoO1_-1dxowSYAJyk_OI,102
- Sports2D/process.py,sha256=nuNOwNJCF8dduUOOuoOYxjDpO2J5T9dEjrLYPLuuz5Q,87391
- Sports2D/Demo/Config_demo.toml,sha256=S7cBtdob9zxA6deicPY1ZEQicTYeaByet5gSvRmkG00,13854
- Sports2D/Demo/demo.mp4,sha256=2aZkFxhWR7ESMEtXCT8MGA83p2jmoU2sp1ylQfO3gDk,3968304
- Sports2D/Utilities/__init__.py,sha256=TyCP7Uuuy6CNklhPf8W84MbYoO1_-1dxowSYAJyk_OI,102
- Sports2D/Utilities/common.py,sha256=OKyjBuXoZK0O34vuGeXzVrWpsyx6DI219L-yuS-iQTU,48254
- Sports2D/Utilities/filter.py,sha256=8mVefMjDzxmh9a30eNtIrUuK_mUKoOJ2Nr-OzcQKkKM,4922
- Sports2D/Utilities/skeletons.py,sha256=WObRPHpCj5Q2WpspzFRy1gvAX-EZD9WyA9K-kqL4YRo,40076
- Sports2D/Utilities/tests.py,sha256=mE05fRL2hGGI0VD4kb_GmLsucCvyVPfOxohUK3I96RE,3516
- sports2d-0.7.0.dist-info/LICENSE,sha256=f4qe3nE0Y7ltJho5w-xAR0jI5PUox5Xl-MsYiY7ZRM8,1521
- sports2d-0.7.0.dist-info/METADATA,sha256=ywXZgpFzd2T2LTD8TmSDtIMv9uJQfAH3RpUgRAwMY68,38518
- sports2d-0.7.0.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
- sports2d-0.7.0.dist-info/entry_points.txt,sha256=h2CJTuydtNf8JyaLoWxWl5HTSIxx5Ra_FSiSGQsf7Sk,52
- sports2d-0.7.0.dist-info/top_level.txt,sha256=DoURf9UDB8lQ_9lMUPQMQqhXCvWPFFjJco9NzPlHJ6I,9
- sports2d-0.7.0.dist-info/RECORD,,