sports2d 0.7.2__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
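Aside from wholesale removals of utility code, the substantive change below is in the package __init__: the hard-coded __version__ string is replaced by a lookup of the installed distribution's metadata (from importlib.metadata import version; __version__ = version("sports2d")). A minimal sketch of that pattern follows; the PackageNotFoundError fallback is an assumption added for illustration, not part of Sports2D:

    from importlib.metadata import PackageNotFoundError, version

    try:
        # Read the version recorded in the installed wheel's metadata,
        # as the new __init__ does with version("sports2d").
        __version__ = version("sports2d")
    except PackageNotFoundError:
        # Hypothetical fallback when running from an uninstalled source tree.
        __version__ = "unknown"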
@@ -15,27 +15,13 @@
 
 
  ## INIT
- import re
- import sys
- import toml
+ from importlib.metadata import version
  import subprocess
  from pathlib import Path
- import itertools as it
  import logging
  from collections import defaultdict
- from anytree import PreOrderIter
-
  import numpy as np
- import pandas as pd
- from scipy import interpolate
  import imageio_ffmpeg as ffmpeg
- import cv2
- import c3d
-
- import matplotlib.pyplot as plt
- from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
- from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
- from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QVBoxLayout
 
 
  ## AUTHORSHIP INFORMATION
@@ -43,7 +29,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
@@ -118,58 +104,6 @@ colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255),
            (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
  thickness = 1
 
- ## CLASSES
- class plotWindow():
-     '''
-     Display several figures in tabs
-     Taken from https://github.com/superjax/plotWindow/blob/master/plotWindow.py
-
-     USAGE:
-     pw = plotWindow()
-     f = plt.figure()
-     plt.plot(x1, y1)
-     pw.addPlot("1", f)
-     f = plt.figure()
-     plt.plot(x2, y2)
-     pw.addPlot("2", f)
-     '''
-     def __init__(self, parent=None):
-         self.app = QApplication.instance()
-         if not self.app:
-             self.app = QApplication(sys.argv)
-         self.MainWindow = QMainWindow()
-         self.MainWindow.setWindowTitle("Multitabs figure")
-         self.canvases = []
-         self.figure_handles = []
-         self.toolbar_handles = []
-         self.tab_handles = []
-         self.current_window = -1
-         self.tabs = QTabWidget()
-         self.MainWindow.setCentralWidget(self.tabs)
-         self.MainWindow.resize(1280, 720)
-         self.MainWindow.show()
-
-     def addPlot(self, title, figure):
-         new_tab = QWidget()
-         layout = QVBoxLayout()
-         new_tab.setLayout(layout)
-
-         figure.subplots_adjust(left=0.1, right=0.99, bottom=0.1, top=0.91, wspace=0.2, hspace=0.2)
-         new_canvas = FigureCanvas(figure)
-         new_toolbar = NavigationToolbar(new_canvas, new_tab)
-
-         layout.addWidget(new_canvas)
-         layout.addWidget(new_toolbar)
-         self.tabs.addTab(new_tab, title)
-
-         self.toolbar_handles.append(new_toolbar)
-         self.canvases.append(new_canvas)
-         self.figure_handles.append(figure)
-         self.tab_handles.append(new_tab)
-
-     def show(self):
-         self.app.exec_()
-
  ## FUNCTIONS
  def to_dict(d):
      '''
@@ -180,164 +114,6 @@ def to_dict(d):
      return d
 
 
- def read_trc(trc_path):
-     '''
-     Read a TRC file and extract its contents.
-
-     INPUTS:
-     - trc_path (str): The path to the TRC file.
-
-     OUTPUTS:
-     - tuple: A tuple containing the Q coordinates, frames column, time column, marker names, and header.
-     '''
-
-     try:
-         with open(trc_path, 'r') as trc_file:
-             header = [next(trc_file) for _ in range(5)]
-         markers = header[3].split('\t')[2::3]
-         markers = [m.strip() for m in markers if m.strip()] # remove last \n character
-
-         trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4, encoding='utf-8')
-         frames_col, time_col = trc_df.iloc[:, 0], trc_df.iloc[:, 1]
-         Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
-         Q_coords = Q_coords.loc[:, ~Q_coords.columns.str.startswith('Unnamed')] # remove unnamed columns
-         Q_coords.columns = np.array([[m,m,m] for m in markers]).ravel().tolist()
-
-         return Q_coords, frames_col, time_col, markers, header
-
-     except Exception as e:
-         raise ValueError(f"Error reading TRC file at {trc_path}: {e}")
-
-
- def extract_trc_data(trc_path):
-     '''
-     Extract marker names and coordinates from a trc file.
-
-     INPUTS:
-     - trc_path: Path to the trc file
-
-     OUTPUTS:
-     - marker_names: List of marker names
-     - marker_coords: Array of marker coordinates (n_frames, t+3*n_markers)
-     '''
-
-     # marker names
-     with open(trc_path, 'r') as file:
-         lines = file.readlines()
-         marker_names_line = lines[3]
-         marker_names = marker_names_line.strip().split('\t')[2::3]
-
-     # time and marker coordinates
-     trc_data_np = np.genfromtxt(trc_path, skip_header=5, delimiter = '\t')[:,1:]
-
-     return marker_names, trc_data_np
-
-
- def create_c3d_file(c3d_path, marker_names, trc_data_np):
-     '''
-     Create a c3d file from the data extracted from a trc file.
-
-     INPUTS:
-     - c3d_path: Path to the c3d file
-     - marker_names: List of marker names
-     - trc_data_np: Array of marker coordinates (n_frames, t+3*n_markers)
-
-     OUTPUTS:
-     - c3d file
-     '''
-
-     # retrieve frame rate
-     times = trc_data_np[:,0]
-     frame_rate = round((len(times)-1) / (times[-1] - times[0]))
-
-     # write c3d file
-     writer = c3d.Writer(point_rate=frame_rate, analog_rate=0, point_scale=1.0, point_units='mm', gen_scale=-1.0)
-     writer.set_point_labels(marker_names)
-     writer.set_screen_axis(X='+Z', Y='+Y')
-
-     for frame in trc_data_np:
-         residuals = np.full((len(marker_names), 1), 0.0)
-         cameras = np.zeros((len(marker_names), 1))
-         coords = frame[1:].reshape(-1,3)*1000
-         points = np.hstack((coords, residuals, cameras))
-         writer.add_frames([(points, np.array([]))])
-
-     writer.set_start_frame(0)
-     writer._set_last_frame(len(trc_data_np)-1)
-
-     with open(c3d_path, 'wb') as handle:
-         writer.write(handle)
-
-
- def convert_to_c3d(trc_path):
-     '''
-     Make Visual3D compatible c3d files from a trc path
-
-     INPUT:
-     - trc_path: string, trc file to convert
-
-     OUTPUT:
-     - c3d file
-     '''
-
-     trc_path = str(trc_path)
-     c3d_path = trc_path.replace('.trc', '.c3d')
-     marker_names, trc_data_np = extract_trc_data(trc_path)
-     create_c3d_file(c3d_path, marker_names, trc_data_np)
-
-     return c3d_path
-
-
- def interpolate_zeros_nans(col, *args):
-     '''
-     Interpolate missing points (of value zero),
-     unless more than N contiguous values are missing.
-
-     INPUTS:
-     - col: pandas column of coordinates
-     - args[0] = N: max number of contiguous bad values, above which they won't be interpolated
-     - args[1] = kind: 'linear', 'slinear', 'quadratic', 'cubic'. Default: 'cubic'
-
-     OUTPUT:
-     - col_interp: interpolated pandas column
-     '''
-
-     if len(args)==2:
-         N, kind = args
-     if len(args)==1:
-         N = np.inf
-         kind = args[0]
-     if not args:
-         N = np.inf
-
-     # Interpolate nans
-     mask = ~(np.isnan(col) | col.eq(0)) # true where nans or zeros
-     idx_good = np.where(mask)[0]
-     if len(idx_good) <= 4:
-         return col
-
-     if 'kind' not in locals(): # 'linear', 'slinear', 'quadratic', 'cubic'
-         f_interp = interpolate.interp1d(idx_good, col[idx_good], kind="linear", bounds_error=False)
-     else:
-         f_interp = interpolate.interp1d(idx_good, col[idx_good], kind=kind, fill_value='extrapolate', bounds_error=False)
-     col_interp = np.where(mask, col, f_interp(col.index)) #replace at false index with interpolated values
-
-     # Reintroduce nans if length of sequence > N
-     idx_notgood = np.where(~mask)[0]
-     gaps = np.where(np.diff(idx_notgood) > 1)[0] + 1 # where the indices of true are not contiguous
-     sequences = np.split(idx_notgood, gaps)
-     if sequences[0].size>0:
-         for seq in sequences:
-             if len(seq) > N: # values to exclude from interpolation are set to false when they are too long
-                 col_interp[seq] = np.nan
-
-     return col_interp
-
-
- def natural_sort_key(s):
-     return [int(c) if c.isdigit() else c.lower() for c in re.split(r'(\d+)', s)]
-
-
  def make_homogeneous(list_of_arrays):
      '''
      Make a list of arrays (or a list of lists) homogeneous by padding with nans
@@ -428,374 +204,6 @@ def resample_video(vid_output_path, fps, desired_framerate):
      new_vid_path.rename(vid_output_path)
 
 
- def points_to_angles(points_list):
-     '''
-     If len(points_list)==2, computes clockwise angle of ab vector w.r.t. horizontal (e.g. RBigToe, RHeel)
-     If len(points_list)==3, computes clockwise angle from a to c around b (e.g. Neck, Hip, Knee)
-     If len(points_list)==4, computes clockwise angle between vectors ab and cd (e.g. Neck Hip, RKnee RHip)
-
-     Points can be 2D or 3D.
-     If parameters are float, returns a float between 0.0 and 360.0
-     If parameters are arrays, returns an array of floats between 0.0 and 360.0
-
-     INPUTS:
-     - points_list: list of arrays of points
-
-     OUTPUTS:
-     - ang_deg: float or array of floats. The angle(s) in degrees.
-     '''
-
-     if len(points_list) < 2: # if not enough points, return None
-         return np.nan
-
-     points_array = np.array(points_list)
-     dimensions = points_array.shape[-1]
-
-     if len(points_list) == 2:
-         vector_u = points_array[0] - points_array[1]
-         if len(points_array.shape)==2:
-             vector_v = np.array([1, 0, 0]) # Here vector X, could be any horizontal vector
-         else:
-             vector_v = np.array([[1, 0, 0],] * points_array.shape[1])
-
-     elif len(points_list) == 3:
-         vector_u = points_array[0] - points_array[1]
-         vector_v = points_array[2] - points_array[1]
-
-     elif len(points_list) == 4:
-         vector_u = points_array[1] - points_array[0]
-         vector_v = points_array[3] - points_array[2]
-
-     else:
-         return np.nan
-
-     if dimensions == 2:
-         vector_u = vector_u[:2]
-         vector_v = vector_v[:2]
-         ang = np.arctan2(vector_u[1], vector_u[0]) - np.arctan2(vector_v[1], vector_v[0])
-     else:
-         cross_product = np.cross(vector_u, vector_v)
-         dot_product = np.einsum('ij,ij->i', vector_u, vector_v) # np.dot(vector_u, vector_v) # does not work with time series
-         ang = np.arctan2(np.linalg.norm(cross_product, axis=1), dot_product)
-
-     ang_deg = np.degrees(ang)
-     # ang_deg = np.array(np.degrees(np.unwrap(ang*2)/2))
-
-     return ang_deg
-
-
- def fixed_angles(points_list, ang_name):
-     '''
-     Add offset and multiplying factor to angles
-
-     INPUTS:
-     - points_list: list of arrays of points
-     - ang_name: str. The name of the angle to consider.
-
-     OUTPUTS:
-     - ang: float. The angle in degrees.
-     '''
-
-     ang_params = angle_dict[ang_name]
-     ang = points_to_angles(points_list)
-     ang += ang_params[2]
-     ang *= ang_params[3]
-     if ang_name in ['pelvis', 'shoulders']:
-         ang = np.where(ang>90, ang-180, ang)
-         ang = np.where(ang<-90, ang+180, ang)
-     else:
-         ang = np.where(ang>180, ang-360, ang)
-         ang = np.where(ang<-180, ang+360, ang)
-
-     return ang
-
-
- def mean_angles(trc_data, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
-     '''
-     Compute the mean angle time series from 3D points for a given list of angles.
-
-     INPUTS:
-     - trc_data (DataFrame): The triangulated coordinates of the markers.
-     - ang_to_consider (list): The list of angles to consider (requires angle_dict).
-
-     OUTPUTS:
-     - ang_mean: The mean angle time series.
-     '''
-
-     ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']
-
-     angs = []
-     for ang_name in ang_to_consider:
-         ang_params = angle_dict[ang_name]
-         ang_mk = ang_params[0]
-         if 'Neck' not in trc_data.columns:
-             df_MidShoulder = pd.DataFrame((trc_data['RShoulder'].values + trc_data['LShoulder'].values) /2)
-             df_MidShoulder.columns = ['Neck']*3
-             trc_data = pd.concat((trc_data.reset_index(drop=True), df_MidShoulder), axis=1)
-
-         pts_for_angles = []
-         for pt in ang_mk:
-             # pts_for_angles.append(trc_data.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
-             pts_for_angles.append(trc_data[pt])
-
-         ang = fixed_angles(pts_for_angles, ang_name)
-         ang = np.abs(ang)
-         angs.append(ang)
-
-     ang_mean = np.mean(angs, axis=0)
-
-     return ang_mean
-
-
- def add_neck_hip_coords(kpt_name, p_X, p_Y, p_scores, kpt_ids, kpt_names):
-     '''
-     Add neck (midshoulder) and hip (midhip) coordinates if neck and hip are not available
-
-     INPUTS:
-     - kpt_name: name of the keypoint to add (neck, hip)
-     - p_X: list of x coordinates after flipping if needed
-     - p_Y: list of y coordinates
-     - p_scores: list of confidence scores
-     - kpt_ids: list of keypoint ids (see skeletons.py)
-     - kpt_names: list of keypoint names (see skeletons.py)
-
-     OUTPUTS:
-     - p_X: list of x coordinates with added missing coordinate
-     - p_Y: list of y coordinates with added missing coordinate
-     - p_scores: list of confidence scores with added missing score
-     '''
-
-     names, ids = kpt_names.copy(), kpt_ids.copy()
-     names.append(kpt_name)
-     ids.append(len(p_X))
-     if kpt_name == 'Neck':
-         mid_X = (np.abs(p_X[ids[names.index('LShoulder')]]) + np.abs(p_X[ids[names.index('RShoulder')]])) /2
-         mid_Y = (p_Y[ids[names.index('LShoulder')]] + p_Y[ids[names.index('RShoulder')]])/2
-         mid_score = (p_scores[ids[names.index('LShoulder')]] + p_scores[ids[names.index('RShoulder')]])/2
-     elif kpt_name == 'Hip':
-         mid_X = (np.abs(p_X[ids[names.index('LHip')]]) + np.abs(p_X[ids[names.index('RHip')]]) ) /2
-         mid_Y = (p_Y[ids[names.index('LHip')]] + p_Y[ids[names.index('RHip')]])/2
-         mid_score = (p_scores[ids[names.index('LHip')]] + p_scores[ids[names.index('RHip')]])/2
-     else:
-         raise ValueError("kpt_name must be 'Neck' or 'Hip'")
-     p_X = np.append(p_X, mid_X)
-     p_Y = np.append(p_Y, mid_Y)
-     p_scores = np.append(p_scores, mid_score)
-
-     return p_X, p_Y, p_scores
-
-
- def best_coords_for_measurements(Q_coords, keypoints_names, beginning_frames_to_remove_percent=0.2, end_frames_to_remove_percent=0.2, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
-     '''
-     Compute the best coordinates for measurements, after removing:
-     - 20% fastest frames (may be outliers)
-     - frames when speed is close to zero (person is out of frame): 0.2 m/frame, or 50 px/frame
-     - frames when hip and knee angle below 45° (imprecise coordinates when person is crouching)
-
-     INPUTS:
-     - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-     - keypoints_names: list. The list of marker names
-     - beginning_frames_to_remove_percent: float
-     - end_frames_to_remove_percent: float
-     - fastest_frames_to_remove_percent: float
-     - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
-     - large_hip_knee_angles: int
-     - trimmed_extrema_percent
-
-     OUTPUT:
-     - Q_coords_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
-     '''
-
-     # Add MidShoulder column
-     df_MidShoulder = pd.DataFrame((Q_coords['RShoulder'].values + Q_coords['LShoulder'].values) /2)
-     df_MidShoulder.columns = ['MidShoulder']*3
-     Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_MidShoulder), axis=1)
-
-     # Add Hip column if not present
-     n_markers_init = len(keypoints_names)
-     if 'Hip' not in keypoints_names:
-         df_Hip = pd.DataFrame((Q_coords['RHip'].values + Q_coords['LHip'].values) /2)
-         df_Hip.columns = ['Hip']*3
-         Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_Hip), axis=1)
-     n_markers = len(keypoints_names)
-
-     # Removing first and last frames
-     # Q_coords = Q_coords.iloc[int(len(Q_coords) * beginning_frames_to_remove_percent):int(len(Q_coords) * (1-end_frames_to_remove_percent))]
-
-     # Using 80% slowest frames
-     sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
-     sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
-     if len(sum_speeds)==0:
-         logging.warning('All frames have speed close to zero. Make sure the person is moving and correctly detected, or change close_to_zero_speed to a lower value. Not restricting the speeds to be above any threshold.')
-         Q_coords_low_speeds = Q_coords
-     else:
-         min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
-         Q_coords_low_speeds = Q_coords.iloc[min_speed_indices].reset_index(drop=True)
-
-     # Only keep frames with hip and knee flexion angles below 45%
-     # (if more than 50 of them, else take 50 smallest values)
-     try:
-         ang_mean = mean_angles(Q_coords_low_speeds, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
-         Q_coords_low_speeds_low_angles = Q_coords_low_speeds[ang_mean < large_hip_knee_angles]
-         if len(Q_coords_low_speeds_low_angles) < 50:
-             Q_coords_low_speeds_low_angles = Q_coords_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
-     except:
-         logging.warning(f"At least one among the RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder markers is missing for computing the knee and hip angles. Not restricting these angles to be below {large_hip_knee_angles}°.")
-
-     if n_markers_init < n_markers:
-         Q_coords_low_speeds_low_angles = Q_coords_low_speeds_low_angles.iloc[:,:-3]
-
-     return Q_coords_low_speeds_low_angles
-
-
- def compute_height(trc_data, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
-     '''
-     Compute the height of the person from the trc data.
-
-     INPUTS:
-     - trc_data: pd.DataFrame. The XYZ coordinates of each marker
-     - keypoints_names: list. The list of marker names
-     - fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
-     - close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
-     - large_hip_knee_angles5: float. Hip and knee angles below this value are considered as imprecise
-     - trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean)
-
-     OUTPUT:
-     - height: float. The estimated height of the person
-     '''
-
-     # Retrieve most reliable coordinates, adding MidShoulder and Hip columns if not present
-     trc_data_low_speeds_low_angles = best_coords_for_measurements(trc_data, keypoints_names,
-                                         fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
-
-     # Automatically compute the height of the person
-     feet_pairs = [['RHeel', 'RAnkle'], ['LHeel', 'LAnkle']]
-     try:
-         rfoot, lfoot = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in feet_pairs]
-     except:
-         rfoot, lfoot = 0.10, 0.10
-         logging.warning('The Heel marker is missing from your model. Considering Foot to Heel size as 10 cm.')
-
-     ankle_to_shoulder_pairs = [['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
-                                ['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
-     try:
-         rshank, rfemur, rback, lshank, lfemur, lback = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in ankle_to_shoulder_pairs]
-     except:
-         logging.error('At least one of the following markers is missing for computing the height of the person:\
-             RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\n\
-             Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-         raise ValueError('At least one of the following markers is missing for computing the height of the person:\
-             RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\
-             Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-
-     try:
-         head_pair = [['MidShoulder', 'Head']]
-         head = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in head_pair][0]
-     except:
-         head_pair = [['MidShoulder', 'Nose']]
-         head = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]],trc_data_low_speeds_low_angles[pair[1]]) for pair in head_pair][0]\
-                 *1.33
-         logging.warning('The Head marker is missing from your model. Considering Neck to Head size as 1.33 times Neck to MidShoulder size.')
-
-     heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
-
-     # Remove the 20% most extreme values
-     height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
-
-     return height
-
-
- def euclidean_distance(q1, q2):
-     '''
-     Euclidean distance between 2 points (N-dim).
-
-     INPUTS:
-     - q1: list of N_dimensional coordinates of point
-          or list of N points of N_dimensional coordinates
-     - q2: idem
-
-     OUTPUTS:
-     - euc_dist: float. Euclidian distance between q1 and q2
-     '''
-
-     q1 = np.array(q1)
-     q2 = np.array(q2)
-     dist = q2 - q1
-     if np.isnan(dist).all():
-         dist = np.empty_like(dist)
-         dist[...] = np.inf
-
-     if len(dist.shape)==1:
-         euc_dist = np.sqrt(np.nansum( [d**2 for d in dist]))
-     else:
-         euc_dist = np.sqrt(np.nansum( [d**2 for d in dist], axis=1))
-
-     return euc_dist
-
-
- def trimmed_mean(arr, trimmed_extrema_percent=0.5):
-     '''
-     Trimmed mean calculation for an array.
-
-     INPUTS:
-     - arr (np.array): The input array.
-     - trimmed_extrema_percent (float): The percentage of values to be trimmed from both ends.
-
-     OUTPUTS:
-     - float: The trimmed mean of the array.
-     '''
-
-     # Sort the array
-     sorted_arr = np.sort(arr)
-
-     # Determine the indices for the 25th and 75th percentiles (if trimmed_percent = 0.5)
-     lower_idx = int(len(sorted_arr) * (trimmed_extrema_percent/2))
-     upper_idx = int(len(sorted_arr) * (1 - trimmed_extrema_percent/2))
-
-     # Slice the array to exclude the 25% lowest and highest values
-     trimmed_arr = sorted_arr[lower_idx:upper_idx]
-
-     # Return the mean of the remaining values
-     return np.mean(trimmed_arr)
-
-
- def retrieve_calib_params(calib_file):
-     '''
-     Compute projection matrices from toml calibration file.
-
-     INPUT:
-     - calib_file: calibration .toml file.
-
-     OUTPUT:
-     - S: (h,w) vectors as list of 2x1 arrays
-     - K: intrinsic matrices as list of 3x3 arrays
-     - dist: distortion vectors as list of 4x1 arrays
-     - inv_K: inverse intrinsic matrices as list of 3x3 arrays
-     - optim_K: intrinsic matrices for undistorting points as list of 3x3 arrays
-     - R: rotation rodrigue vectors as list of 3x1 arrays
-     - T: translation vectors as list of 3x1 arrays
-     '''
-
-     calib = toml.load(calib_file)
-
-     cal_keys = [c for c in calib.keys()
-                 if c not in ['metadata', 'capture_volume', 'charuco', 'checkerboard']
-                 and isinstance(calib[c],dict)]
-     S, K, dist, optim_K, inv_K, R, R_mat, T = [], [], [], [], [], [], [], []
-     for c, cam in enumerate(cal_keys):
-         S.append(np.array(calib[cam]['size']))
-         K.append(np.array(calib[cam]['matrix']))
-         dist.append(np.array(calib[cam]['distortions']))
-         optim_K.append(cv2.getOptimalNewCameraMatrix(K[c], dist[c], [int(s) for s in S[c]], 1, [int(s) for s in S[c]])[0])
-         inv_K.append(np.linalg.inv(K[c]))
-         R.append(np.array(calib[cam]['rotation']))
-         R_mat.append(cv2.Rodrigues(R[c])[0])
-         T.append(np.array(calib[cam]['translation']))
-     calib_params_dict = {'S': S, 'K': K, 'dist': dist, 'inv_K': inv_K, 'optim_K': optim_K, 'R': R, 'R_mat': R_mat, 'T': T}
-
-     return calib_params_dict
-
-
  def write_calibration(calib_params, toml_path):
      '''
      Write calibration file from calibration parameters
@@ -815,349 +223,3 @@ def write_calibration(calib_params, toml_path):
              cal_f.write(cam_str + name_str + size_str + mat_str + dist_str + rot_str + tran_str + fish_str)
          meta = '[metadata]\nadjusted = false\nerror = 0.0\n'
          cal_f.write(meta)
-
-
- def pad_shape(arr, target_len, fill_value=np.nan):
-     '''
-     Pads an array to the target length with specified fill values
-
-     INPUTS:
-     - arr: Input array to be padded.
-     - target_len: The target length of the first dimension after padding.
-     - fill_value: The value to use for padding (default: np.nan).
-
-     OUTPUTS:
-     - Padded array with shape (target_len, ...) matching the input dimensions.
-     '''
-
-     if len(arr) < target_len:
-         pad_shape = (target_len - len(arr),) + arr.shape[1:]
-         padding = np.full(pad_shape, fill_value)
-         return np.concatenate((arr, padding))
-
-     return arr
-
-
- def min_with_single_indices(L, T):
-     '''
-     Let L be a list (size s) with T associated tuple indices (size s).
-     Select the smallest values of L, considering that
-     the next smallest value cannot have the same numbers
-     in the associated tuple as any of the previous ones.
-
-     Example:
-     L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ]
-     T = list(it.product(range(2),range(3)))
-       = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)]
-
-     - 1st smallest value: 3 with tuple (2,3), index 11
-     - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]:
-       20 with tuple (0,0), index 0
-     - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]:
-       23 with tuple (1,1), index 5
-
-     INPUTS:
-     - L: list (size s)
-     - T: T associated tuple indices (size s)
-
-     OUTPUTS:
-     - minL: list of smallest values of L, considering constraints on tuple indices
-     - argminL: list of indices of smallest values of L (indices of best combinations)
-     - T_minL: list of tuples associated with smallest values of L
-     '''
-
-     minL = [np.nanmin(L)]
-     argminL = [np.nanargmin(L)]
-     T_minL = [T[argminL[0]]]
-
-     mask_tokeep = np.array([True for t in T])
-     i=0
-     while mask_tokeep.any()==True:
-         mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T])
-         if mask_tokeep.any()==True:
-             indicesL_tokeep = np.where(mask_tokeep)[0]
-             minL += [np.nanmin(np.array(L)[indicesL_tokeep]) if not np.isnan(np.array(L)[indicesL_tokeep]).all() else np.nan]
-             argminL += [indicesL_tokeep[np.nanargmin(np.array(L)[indicesL_tokeep])] if not np.isnan(minL[-1]) else indicesL_tokeep[0]]
-             T_minL += (T[argminL[i+1]],)
-             i+=1
-
-     return np.array(minL), np.array(argminL), np.array(T_minL)
-
-
- def sort_people_sports2d(keyptpre, keypt, scores=None):
-     '''
-     Associate persons across frames (Sports2D method)
-     Persons' indices are sometimes swapped when changing frame
-     A person is associated to another in the next frame when they are at a small distance
-
-     N.B.: Requires min_with_single_indices and euclidian_distance function (see common.py)
-
-     INPUTS:
-     - keyptpre: (K, L, M) array of 2D coordinates for K persons in the previous frame, L keypoints, M 2D coordinates
-     - keypt: idem keyptpre, for current frame
-     - score: (K, L) array of confidence scores for K persons, L keypoints (optional)
-
-     OUTPUTS:
-     - sorted_prev_keypoints: array with reordered persons with values of previous frame if current is empty
-     - sorted_keypoints: array with reordered persons --> if scores is not None
-     - sorted_scores: array with reordered scores --> if scores is not None
-     - associated_tuples: list of tuples with correspondences between persons across frames --> if scores is None (for Pose2Sim.triangulation())
-     '''
-
-     # Generate possible person correspondences across frames
-     max_len = max(len(keyptpre), len(keypt))
-     keyptpre = pad_shape(keyptpre, max_len, fill_value=np.nan)
-     keypt = pad_shape(keypt, max_len, fill_value=np.nan)
-     if scores is not None:
-         scores = pad_shape(scores, max_len, fill_value=np.nan)
-
-     # Compute distance between persons from one frame to another
-     personsIDs_comb = sorted(list(it.product(range(len(keyptpre)), range(len(keypt)))))
-     frame_by_frame_dist = [euclidean_distance(keyptpre[comb[0]],keypt[comb[1]]) for comb in personsIDs_comb]
-     frame_by_frame_dist = np.mean(frame_by_frame_dist, axis=1)
-
-     # Sort correspondences by distance
-     _, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
-
-     # Associate points to same index across frames, nan if no correspondence
-     sorted_keypoints = []
-     for i in range(len(keyptpre)):
-         id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
-         if len(id_in_old) > 0: sorted_keypoints += [keypt[id_in_old[0]]]
-         else: sorted_keypoints += [keypt[i]]
-     sorted_keypoints = np.array(sorted_keypoints)
-
-     if scores is not None:
-         sorted_scores = []
-         for i in range(len(keyptpre)):
-             id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
-             if len(id_in_old) > 0: sorted_scores += [scores[id_in_old[0]]]
-             else: sorted_scores += [scores[i]]
-         sorted_scores = np.array(sorted_scores)
-
-     # Keep track of previous values even when missing for more than one frame
-     sorted_prev_keypoints = np.where(np.isnan(sorted_keypoints) & ~np.isnan(keyptpre), keyptpre, sorted_keypoints)
-
-     if scores is not None:
-         return sorted_prev_keypoints, sorted_keypoints, sorted_scores
-     else: # For Pose2Sim.triangulation()
-         return sorted_keypoints, associated_tuples
-
-
- def sort_people_rtmlib(pose_tracker, keypoints, scores):
-     '''
-     Associate persons across frames (RTMLib method)
-
-     INPUTS:
-     - pose_tracker: PoseTracker. The initialized RTMLib pose tracker object
-     - keypoints: array of shape K, L, M with K the number of detected persons,
-                  L the number of detected keypoints, M their 2D coordinates
-     - scores: array of shape K, L with K the number of detected persons,
-               L the confidence of detected keypoints
-
-     OUTPUT:
-     - sorted_keypoints: array with reordered persons
-     - sorted_scores: array with reordered scores
-     '''
-
-     try:
-         desired_size = max(pose_tracker.track_ids_last_frame)+1
-         sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
-         sorted_keypoints[pose_tracker.track_ids_last_frame] = keypoints[:len(pose_tracker.track_ids_last_frame), :, :]
-         sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
-         sorted_scores[pose_tracker.track_ids_last_frame] = scores[:len(pose_tracker.track_ids_last_frame), :]
-     except:
-         sorted_keypoints, sorted_scores = keypoints, scores
-
-     return sorted_keypoints, sorted_scores
-
-
- def sort_people_deepsort(keypoints, scores, deepsort_tracker, frame,frame_count):
-     '''
-     Associate persons across frames (DeepSort method)
-
-     INPUTS:
-     - keypoints: array of shape K, L, M with K the number of detected persons,
-                  L the number of detected keypoints, M their 2D coordinates
-     - scores: array of shape K, L with K the number of detected persons,
-               L the confidence of detected keypoints
-     - deepsort_tracker: The initialized DeepSort tracker object
-     - frame: np.array. The current image opened with cv2.imread
-
-     OUTPUT:
-     - sorted_keypoints: array with reordered persons
-     - sorted_scores: array with reordered scores
-     '''
-
-     try:
-         # Compute bboxes from keypoints and create detections (bboxes, scores, class_ids)
-         bboxes_ltwh = bbox_ltwh_compute(keypoints, padding=20)
-         bbox_scores = np.mean(scores, axis=1)
-         class_ids = np.array(['person']*len(bboxes_ltwh))
-         detections = list(zip(bboxes_ltwh, bbox_scores, class_ids))
-
-         # Estimates the tracks and retrieve indexes of the original detections
-         det_ids = [i for i in range(len(detections))]
-         tracks = deepsort_tracker.update_tracks(detections, frame=frame, others=det_ids)
-         track_ids_frame, orig_det_ids = [], []
-         for track in tracks:
-             if not track.is_confirmed():
-                 continue
-             track_ids_frame.append(int(track.track_id)-1) # ID of people
-             orig_det_ids.append(track.get_det_supplementary()) # ID of detections
-
-         # Correspondence between person IDs and original detection IDs
-         desired_size = max(track_ids_frame) + 1
-         sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
-         sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
-         for i,v in enumerate(track_ids_frame):
-             if orig_det_ids[i] is not None:
-                 sorted_keypoints[v] = keypoints[orig_det_ids[i]]
-                 sorted_scores[v] = scores[orig_det_ids[i]]
-
-     except Exception as e:
-         sorted_keypoints, sorted_scores = keypoints, scores
-         if frame_count > deepsort_tracker.tracker.n_init:
-             logging.warning(f"Tracking error: {e}. Sorting persons with DeepSort method failed for this frame.")
-
-     return sorted_keypoints, sorted_scores
-
-
- def bbox_ltwh_compute(keypoints, padding=0):
-     '''
-     Compute bounding boxes in (x_min, y_min, width, height) format
-     Optionally add padding to the bounding boxes
-     as a percentage of the bounding box size (+padding% horizontally, +padding/2% vertically)
-
-     INPUTS:
-     - keypoints: array of shape K, L, M with K the number of detected persons,
-                  L the number of detected keypoints, M their 2D coordinates
-     - padding: int. The padding to add to the bounding boxes, in perceptage
-     '''
-
-     x_coords = keypoints[:, :, 0]
-     y_coords = keypoints[:, :, 1]
-
-     x_min, x_max = np.min(x_coords, axis=1), np.max(x_coords, axis=1)
-     y_min, y_max = np.min(y_coords, axis=1), np.max(y_coords, axis=1)
-     width = x_max - x_min
-     height = y_max - y_min
-
-     if padding > 0:
-         x_min = x_min - width*padding/100
-         y_min = y_min - height/2*padding/100
-         width = width + 2*width*padding/100
-         height = height + height*padding/100
-
-     bbox_ltwh = np.stack((x_min, y_min, width, height), axis=1)
-
-     return bbox_ltwh
-
-
- def draw_bounding_box(img, X, Y, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)], fontSize=0.3, thickness=1):
-     '''
-     Draw bounding boxes and person ID around list of lists of X and Y coordinates.
-     Bounding boxes have a different color for each person.
-
-     INPUTS:
-     - img: opencv image
-     - X: list of list of x coordinates
-     - Y: list of list of y coordinates
-     - colors: list of colors to cycle through
-
-     OUTPUT:
-     - img: image with rectangles and person IDs
-     '''
-
-     color_cycle = it.cycle(colors)
-
-     for i,(x,y) in enumerate(zip(X,Y)):
-         color = next(color_cycle)
-         if not np.isnan(x).all():
-             x_min, y_min = np.nanmin(x).astype(int), np.nanmin(y).astype(int)
-             x_max, y_max = np.nanmax(x).astype(int), np.nanmax(y).astype(int)
-             if x_min < 0: x_min = 0
-             if x_max > img.shape[1]: x_max = img.shape[1]
-             if y_min < 0: y_min = 0
-             if y_max > img.shape[0]: y_max = img.shape[0]
-
-             # Draw rectangles
-             cv2.rectangle(img, (x_min-25, y_min-25), (x_max+25, y_max+25), color, thickness)
-
-             # Write person ID
-             cv2.putText(img, str(i), (x_min-30, y_min-30), cv2.FONT_HERSHEY_SIMPLEX, fontSize, color, 2, cv2.LINE_AA)
-
-     return img
-
-
- def draw_skel(img, X, Y, model):
-     '''
-     Draws keypoints and skeleton for each person.
-     Skeletons have a different color for each person.
-
-     INPUTS:
-     - img: opencv image
-     - X: list of list of x coordinates
-     - Y: list of list of y coordinates
-     - model: skeleton model (from skeletons.py)
-     - colors: list of colors to cycle through
-
-     OUTPUT:
-     - img: image with keypoints and skeleton
-     '''
-
-     # Get (unique) pairs between which to draw a line
-     id_pairs, name_pairs = [], []
-     for data_i in PreOrderIter(model.root, filter_=lambda node: node.is_leaf):
-         node_branch_ids = [node_i.id for node_i in data_i.path]
-         node_branch_names = [node_i.name for node_i in data_i.path]
-         id_pairs += [[node_branch_ids[i],node_branch_ids[i+1]] for i in range(len(node_branch_ids)-1)]
-         name_pairs += [[node_branch_names[i],node_branch_names[i+1]] for i in range(len(node_branch_names)-1)]
-     node_pairs = {tuple(name_pair): id_pair for (name_pair,id_pair) in zip(name_pairs,id_pairs)}
-
-
-     # Draw lines
-     for (x,y) in zip(X,Y):
-         if not np.isnan(x).all():
-             for names, ids in node_pairs.items():
-                 if not None in ids and not (np.isnan(x[ids[0]]) or np.isnan(y[ids[0]]) or np.isnan(x[ids[1]]) or np.isnan(y[ids[1]])):
-                     if any(n.startswith('R') for n in names) and not any(n.startswith('L') for n in names):
-                         c = (255,128,0)
-                     elif any(n.startswith('L') for n in names) and not any(n.startswith('R') for n in names):
-                         c = (0,255,0)
-                     else:
-                         c = (51, 153, 255)
-                     cv2.line(img, (int(x[ids[0]]), int(y[ids[0]])), (int(x[ids[1]]), int(y[ids[1]])), c, thickness)
-
-     return img
-
-
- def draw_keypts(img, X, Y, scores, cmap_str='RdYlGn'):
-     '''
-     Draws keypoints and skeleton for each person.
-     Keypoints' colors depend on their score.
-
-     INPUTS:
-     - img: opencv image
-     - X: list of list of x coordinates
-     - Y: list of list of y coordinates
-     - scores: list of list of scores
-     - cmap_str: colormap name
-
-     OUTPUT:
-     - img: image with keypoints and skeleton
-     '''
-
-     scores = np.where(np.isnan(scores), 0, scores)
-     # scores = (scores - 0.4) / (1-0.4) # to get a red color for scores lower than 0.4
-     scores = np.where(scores>0.99, 0.99, scores)
-     scores = np.where(scores<0, 0, scores)
-
-     cmap = plt.get_cmap(cmap_str)
-     for (x,y,s) in zip(X,Y,scores):
-         c_k = np.array(cmap(s))[:,:-1]*255
-         [cv2.circle(img, (int(x[i]), int(y[i])), thickness+4, c_k[i][::-1], -1)
-          for i in range(len(x))
-          if not (np.isnan(x[i]) or np.isnan(y[i]))]
-
-     return img