sports2d-0.5.6-py3-none-any.whl → sports2d-0.6.2-py3-none-any.whl

@@ -20,13 +20,17 @@ import sys
  import toml
  import subprocess
  from pathlib import Path
+ import itertools as it
  import logging
+ from anytree import PreOrderIter

  import numpy as np
+ import pandas as pd
  from scipy import interpolate
  import imageio_ffmpeg as ffmpeg
  import cv2

+ import matplotlib.pyplot as plt
  from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QVBoxLayout
  from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
  from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
@@ -43,6 +47,49 @@ __email__ = "contact@david-pagnon.com"
  __status__ = "Development"


+ ## CONSTANTS
+ angle_dict = { # lowercase!
+     # joint angles
+     'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
+     'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
+     'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
+     'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
+     'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
+     'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
+     # 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
+     # 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
+     'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
+     'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
+     'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
+     'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
+     'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
+     'left wrist': [['LElbow', 'LWrist', 'LIndex'], 'flexion', -180, 1], # vertex at LWrist, mirroring the right wrist
+
+     # segment angles
+     'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
+     'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
+     'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
+     'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
+     'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
+     'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
+     'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
+     'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
+     'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
+     'head': [['Head', 'Neck'], 'horizontal', 0, -1],
+     'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
+     'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
+     'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
+     'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
+     'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
+     'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
+     }
+
+ colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
+           (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
+           (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
+           (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
+ thickness = 1
+
  ## CLASSES
  class plotWindow():
      '''
@@ -96,6 +143,35 @@ class plotWindow():
          self.app.exec_()

  ## FUNCTIONS
+ def read_trc(trc_path):
+     '''
+     Read a TRC file and extract its contents.
+
+     INPUTS:
+     - trc_path (str): The path to the TRC file.
+
+     OUTPUTS:
+     - tuple: A tuple containing the Q coordinates, frames column, time column, marker names, and header.
+     '''
+
+     try:
+         with open(trc_path, 'r') as trc_file:
+             header = [next(trc_file) for _ in range(5)]
+         markers = header[3].split('\t')[2::3]
+         markers = [m.strip() for m in markers if m.strip()] # drop empty entries and the trailing newline
+
+         trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4, encoding='utf-8')
+         frames_col, time_col = trc_df.iloc[:, 0], trc_df.iloc[:, 1]
+         Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
+         Q_coords = Q_coords.loc[:, ~Q_coords.columns.str.startswith('Unnamed')] # remove unnamed columns
+         Q_coords.columns = np.array([[m, m, m] for m in markers]).ravel().tolist()
+
+         return Q_coords, frames_col, time_col, markers, header
+
+     except Exception as e:
+         raise ValueError(f"Error reading TRC file at {trc_path}: {e}")
+
+
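A minimal usage sketch for the new read_trc helper (illustrative only, not part of the released diff; the import path and TRC file name are assumptions):

    from Sports2D.Utilities.common import read_trc  # import path assumed

    Q_coords, frames, times, markers, header = read_trc('walking_trial.trc')
    print(markers[:3], Q_coords.shape)  # first marker names, (n_frames, 3*n_markers)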
  def interpolate_zeros_nans(col, *args):
      '''
      Interpolate missing points (of value zero),
@@ -247,6 +323,10 @@ def points_to_angles(points_list):
      If parameters are arrays, returns an array of floats between 0.0 and 360.0

      INPUTS:
+     - points_list: list of arrays of points
+
+     OUTPUTS:
+     - ang_deg: float or array of floats. The angle(s) in degrees.
      '''

      if len(points_list) < 2: # if not enough points, return None
@@ -288,6 +368,222 @@ def points_to_angles(points_list):
      return ang_deg


+ def fixed_angles(points_list, ang_name):
+     '''
+     Add the offset and multiplying factor to angles
+
+     INPUTS:
+     - points_list: list of arrays of points
+     - ang_name: str. The name of the angle to consider.
+
+     OUTPUTS:
+     - ang: float or array of floats. The angle(s) in degrees.
+     '''
+
+     ang_params = angle_dict[ang_name]
+     ang = points_to_angles(points_list)
+     ang += ang_params[2]
+     ang *= ang_params[3]
+     if ang_name in ['pelvis', 'shoulders']:
+         ang = np.where(ang>90, ang-180, ang)
+         ang = np.where(ang<-90, ang+180, ang)
+     else:
+         ang = np.where(ang>180, ang-360, ang)
+         ang = np.where(ang<-180, ang+360, ang)
+
+     return ang
+
+
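A quick sketch of fixed_angles on made-up coordinates (illustrative only; one frame of RAnkle, RKnee, RHip positions, in the order required by angle_dict['right knee']):

    import numpy as np

    ankle, knee, hip = np.array([[0., 1.]]), np.array([[0.05, 0.5]]), np.array([[0., 0.]])
    knee_flexion = fixed_angles([ankle, knee, hip], 'right knee')  # applies the -180 offset and factor 1 from angle_dict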
+ def mean_angles(trc_data, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
+     '''
+     Compute the mean angle time series from 3D points for a given list of angles.
+
+     INPUTS:
+     - trc_data (DataFrame): The triangulated coordinates of the markers.
+     - ang_to_consider (list): The list of angles to consider (requires angle_dict).
+
+     OUTPUTS:
+     - ang_mean: The mean angle time series.
+     '''
+
+     # Add a Neck (mid-shoulder) column if not present
+     if 'Neck' not in trc_data.columns:
+         df_MidShoulder = pd.DataFrame((trc_data['RShoulder'].values + trc_data['LShoulder'].values) /2)
+         df_MidShoulder.columns = ['Neck']*3
+         trc_data = pd.concat((trc_data.reset_index(drop=True), df_MidShoulder), axis=1)
+
+     angs = []
+     for ang_name in ang_to_consider:
+         ang_params = angle_dict[ang_name]
+         ang_mk = ang_params[0]
+
+         pts_for_angles = []
+         for pt in ang_mk:
+             # pts_for_angles.append(trc_data.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
+             pts_for_angles.append(trc_data[pt])
+
+         ang = fixed_angles(pts_for_angles, ang_name)
+         ang = np.abs(ang)
+         angs.append(ang)
+
+     ang_mean = np.mean(angs, axis=0)
+
+     return ang_mean
+
+
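A sketch of mean_angles on TRC data (illustrative only; the path is hypothetical, and shoulder markers are assumed present so a Neck column can be derived):

    Q_coords, _, _, markers, _ = read_trc('walking_trial.trc')
    ang_mean = mean_angles(Q_coords, ang_to_consider=['right knee', 'left knee'])
    print(ang_mean.shape)  # one mean angle per frame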
+ def add_neck_hip_coords(kpt_name, p_X, p_Y, p_scores, kpt_ids, kpt_names):
+     '''
+     Add neck (midshoulder) and hip (midhip) coordinates if they are not available
+
+     INPUTS:
+     - kpt_name: name of the keypoint to add ('Neck' or 'Hip')
+     - p_X: list of x coordinates after flipping if needed
+     - p_Y: list of y coordinates
+     - p_scores: list of confidence scores
+     - kpt_ids: list of keypoint ids (see skeletons.py)
+     - kpt_names: list of keypoint names (see skeletons.py)
+
+     OUTPUTS:
+     - p_X: list of x coordinates with added missing coordinate
+     - p_Y: list of y coordinates with added missing coordinate
+     - p_scores: list of confidence scores with added missing score
+     '''
+
+     names, ids = kpt_names.copy(), kpt_ids.copy()
+     names.append(kpt_name)
+     ids.append(len(p_X))
+     if kpt_name == 'Neck':
+         mid_X = (np.abs(p_X[ids[names.index('LShoulder')]]) + np.abs(p_X[ids[names.index('RShoulder')]])) /2
+         mid_Y = (p_Y[ids[names.index('LShoulder')]] + p_Y[ids[names.index('RShoulder')]])/2
+         mid_score = (p_scores[ids[names.index('LShoulder')]] + p_scores[ids[names.index('RShoulder')]])/2
+     elif kpt_name == 'Hip':
+         mid_X = (np.abs(p_X[ids[names.index('LHip')]]) + np.abs(p_X[ids[names.index('RHip')]])) /2
+         mid_Y = (p_Y[ids[names.index('LHip')]] + p_Y[ids[names.index('RHip')]])/2
+         mid_score = (p_scores[ids[names.index('LHip')]] + p_scores[ids[names.index('RHip')]])/2
+     else:
+         raise ValueError("kpt_name must be 'Neck' or 'Hip'")
+     p_X = np.append(p_X, mid_X)
+     p_Y = np.append(p_Y, mid_Y)
+     p_scores = np.append(p_scores, mid_score)
+
+     return p_X, p_Y, p_scores
+
+
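A toy call to add_neck_hip_coords (illustrative only; the two-keypoint pose is made up):

    import numpy as np

    kpt_names, kpt_ids = ['RShoulder', 'LShoulder'], [0, 1]
    p_X, p_Y, p_scores = np.array([100., 140.]), np.array([50., 52.]), np.array([0.9, 0.8])
    p_X, p_Y, p_scores = add_neck_hip_coords('Neck', p_X, p_Y, p_scores, kpt_ids, kpt_names)
    # the appended entry is the mid-shoulder point: x=120.0, y=51.0, score=0.85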
+ def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
+     '''
+     Compute the best coordinates for measurements, after removing:
+     - the fastest frames (20% by default; they may be outliers)
+     - frames where the speed is close to zero (person out of frame): about 0.2 m/frame, or 50 px/frame
+     - frames where the hip and knee angles are above 45° (coordinates are imprecise when the person is crouching)
+
+     INPUTS:
+     - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
+     - keypoints_names: list. The list of marker names
+     - fastest_frames_to_remove_percent: float
+     - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
+     - large_hip_knee_angles: int
+
+     OUTPUT:
+     - Q_coords_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
+     '''
+
+     # Add MidShoulder column
+     df_MidShoulder = pd.DataFrame((Q_coords['RShoulder'].values + Q_coords['LShoulder'].values) /2)
+     df_MidShoulder.columns = ['MidShoulder']*3
+     Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_MidShoulder), axis=1)
+
+     # Add Hip column if not present
+     n_markers_init = len(keypoints_names)
+     if 'Hip' not in keypoints_names:
+         keypoints_names = keypoints_names + ['Hip'] # work on a copy so the added marker is counted below
+         df_Hip = pd.DataFrame((Q_coords['RHip'].values + Q_coords['LHip'].values) /2)
+         df_Hip.columns = ['Hip']*3
+         Q_coords = pd.concat((Q_coords.reset_index(drop=True), df_Hip), axis=1)
+     n_markers = len(keypoints_names)
+
+     # Keep only the slowest frames (the fastest ones may be outliers)
+     sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt*3:kpt*3+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
+     sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # remove frames where the speed is close to zero (person out of frame)
+     if len(sum_speeds)==0:
+         logging.warning('All frames have speed close to zero. Make sure the person is moving and correctly detected, or change close_to_zero_speed to a lower value. Not restricting the speeds to be above any threshold.')
+         Q_coords_low_speeds = Q_coords
+     else:
+         min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
+         Q_coords_low_speeds = Q_coords.iloc[min_speed_indices].reset_index(drop=True)
+
+     # Only keep frames with hip and knee flexion angles below 45°
+     # (if fewer than 50 of them, take the frames with the 50 smallest angles instead)
+     try:
+         ang_mean = mean_angles(Q_coords_low_speeds, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
+         Q_coords_low_speeds_low_angles = Q_coords_low_speeds[ang_mean < large_hip_knee_angles]
+         if len(Q_coords_low_speeds_low_angles) < 50:
+             Q_coords_low_speeds_low_angles = Q_coords_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
+     except Exception:
+         Q_coords_low_speeds_low_angles = Q_coords_low_speeds # fall back to the speed-filtered frames
+         logging.warning(f"At least one of the RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder markers is missing for computing the knee and hip angles. Not restricting these angles to be below {large_hip_knee_angles}°.")
+
+     if n_markers_init < n_markers:
+         Q_coords_low_speeds_low_angles = Q_coords_low_speeds_low_angles.iloc[:,:-3] # drop the added Hip columns
+
+     return Q_coords_low_speeds_low_angles
+
+
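Usage sketch (illustrative only; the TRC path is hypothetical):

    Q_coords, _, _, markers, _ = read_trc('walking_trial.trc')
    best_coords = best_coords_for_measurements(Q_coords, markers)
    # best_coords keeps the slow, non-crouching frames, with a MidShoulder column appended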
+ def compute_height(trc_data, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
+     '''
+     Compute the height of the person from the trc data.
+
+     INPUTS:
+     - trc_data: pd.DataFrame. The XYZ coordinates of each marker
+     - keypoints_names: list. The list of marker names
+     - fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
+     - close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
+     - large_hip_knee_angles: float. Hip and knee angles above this value are considered imprecise
+     - trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean
+
+     OUTPUT:
+     - height: float. The estimated height of the person
+     '''
+
+     # Retrieve the most reliable coordinates, adding MidShoulder and Hip columns if not present
+     trc_data_low_speeds_low_angles = best_coords_for_measurements(trc_data, keypoints_names,
+                                         fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
+
+     # Automatically compute the height of the person from segment lengths
+     feet_pairs = [['RHeel', 'RAnkle'], ['LHeel', 'LAnkle']]
+     try:
+         rfoot, lfoot = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]], trc_data_low_speeds_low_angles[pair[1]]) for pair in feet_pairs]
+     except Exception:
+         rfoot, lfoot = 0.10, 0.10
+         logging.warning('The Heel marker is missing from your model. Considering the heel-to-ankle distance to be 10 cm.')
+
+     ankle_to_shoulder_pairs = [['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
+                                ['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
+     try:
+         rshank, rfemur, rback, lshank, lfemur, lback = [euclidean_distance(trc_data_low_speeds_low_angles[pair[0]], trc_data_low_speeds_low_angles[pair[1]]) for pair in ankle_to_shoulder_pairs]
+     except Exception:
+         logging.error('At least one of the following markers is missing for computing the height of the person:\
+             RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\n\
+             Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
+         raise ValueError('At least one of the following markers is missing for computing the height of the person:\
+             RAnkle, RKnee, RHip, RShoulder, LAnkle, LKnee, LHip, LShoulder.\
+             Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
+
+     try:
+         head = euclidean_distance(trc_data_low_speeds_low_angles['MidShoulder'], trc_data_low_speeds_low_angles['Head'])
+     except Exception:
+         head = euclidean_distance(trc_data_low_speeds_low_angles['MidShoulder'], trc_data_low_speeds_low_angles['Nose']) * 1.33
+         logging.warning('The Head marker is missing from your model. Considering the MidShoulder-to-Head distance to be 1.33 times the MidShoulder-to-Nose distance.')
+
+     heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
+
+     # Trim the most extreme values before averaging
+     height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
+
+     return height
+
+
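Usage sketch for compute_height (illustrative only; the path is hypothetical, and trimmed_mean is assumed to be defined elsewhere in this module):

    Q_coords, _, _, markers, _ = read_trc('walking_trial.trc')
    height_m = compute_height(Q_coords, markers, close_to_zero_speed=0.2)  # 0.2 m/frame for coordinates in meters
    print(f'Estimated height: {height_m:.2f} m')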
  def euclidean_distance(q1, q2):
      '''
      Euclidean distance between 2 points (N-dim).
@@ -397,4 +693,350 @@ def write_calibration(calib_params, toml_path):
          fish_str = f'fisheye = false\n\n'
          cal_f.write(cam_str + name_str + size_str + mat_str + dist_str + rot_str + tran_str + fish_str)
      meta = '[metadata]\nadjusted = false\nerror = 0.0\n'
-     cal_f.write(meta)
+     cal_f.write(meta)
+
+
+ def pad_shape(arr, target_len, fill_value=np.nan):
+     '''
+     Pad an array to the target length with the specified fill value.
+
+     INPUTS:
+     - arr: Input array to be padded.
+     - target_len: The target length of the first dimension after padding.
+     - fill_value: The value to use for padding (default: np.nan).
+
+     OUTPUTS:
+     - Padded array with shape (target_len, ...) matching the input dimensions.
+     '''
+
+     if len(arr) < target_len:
+         pad_dims = (target_len - len(arr),) + arr.shape[1:] # renamed so it does not shadow the function name
+         padding = np.full(pad_dims, fill_value)
+         return np.concatenate((arr, padding))
+
+     return arr
+
+
+ def min_with_single_indices(L, T):
721
+ '''
722
+ Let L be a list (size s) with T associated tuple indices (size s).
723
+ Select the smallest values of L, considering that
724
+ the next smallest value cannot have the same numbers
725
+ in the associated tuple as any of the previous ones.
726
+
727
+ Example:
728
+ L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ]
729
+ T = list(it.product(range(2),range(3)))
730
+ = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)]
731
+
732
+ - 1st smallest value: 3 with tuple (2,3), index 11
733
+ - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]:
734
+ 20 with tuple (0,0), index 0
735
+ - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]:
736
+ 23 with tuple (1,1), index 5
737
+
738
+ INPUTS:
739
+ - L: list (size s)
740
+ - T: T associated tuple indices (size s)
741
+
742
+ OUTPUTS:
743
+ - minL: list of smallest values of L, considering constraints on tuple indices
744
+ - argminL: list of indices of smallest values of L (indices of best combinations)
745
+ - T_minL: list of tuples associated with smallest values of L
746
+ '''
747
+
748
+ minL = [np.nanmin(L)]
749
+ argminL = [np.nanargmin(L)]
750
+ T_minL = [T[argminL[0]]]
751
+
752
+ mask_tokeep = np.array([True for t in T])
753
+ i=0
754
+ while mask_tokeep.any()==True:
755
+ mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T])
756
+ if mask_tokeep.any()==True:
757
+ indicesL_tokeep = np.where(mask_tokeep)[0]
758
+ minL += [np.nanmin(np.array(L)[indicesL_tokeep]) if not np.isnan(np.array(L)[indicesL_tokeep]).all() else np.nan]
759
+ argminL += [indicesL_tokeep[np.nanargmin(np.array(L)[indicesL_tokeep])] if not np.isnan(minL[-1]) else indicesL_tokeep[0]]
760
+ T_minL += (T[argminL[i+1]],)
761
+ i+=1
762
+
763
+ return np.array(minL), np.array(argminL), np.array(T_minL)
764
+
765
+
766
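The docstring example, run directly (illustrative only):

    import itertools as it

    L = [20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3]
    T = list(it.product(range(3), range(4)))
    minL, argminL, T_minL = min_with_single_indices(L, T)
    # minL -> [3, 20, 23], argminL -> [11, 0, 5], as in the walkthrough above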
+ def sort_people_sports2d(keyptpre, keypt, scores=None):
+     '''
+     Associate persons across frames (Sports2D method)
+     Persons' indices are sometimes swapped from one frame to the next
+     A person is associated to another in the next frame when they are at a small distance
+
+     N.B.: Requires the min_with_single_indices and euclidean_distance functions (see common.py)
+
+     INPUTS:
+     - keyptpre: (K, L, M) array of 2D coordinates for K persons in the previous frame, L keypoints, M 2D coordinates
+     - keypt: idem keyptpre, for current frame
+     - scores: (K, L) array of confidence scores for K persons, L keypoints (optional)
+
+     OUTPUTS:
+     - sorted_prev_keypoints: array with reordered persons, keeping the values of the previous frame if the current one is empty
+     - sorted_keypoints: array with reordered persons --> if scores is not None
+     - sorted_scores: array with reordered scores --> if scores is not None
+     - associated_tuples: list of tuples with correspondences between persons across frames --> if scores is None (for Pose2Sim.triangulation())
+     '''
+
+     # Generate possible person correspondences across frames
+     max_len = max(len(keyptpre), len(keypt))
+     keyptpre = pad_shape(keyptpre, max_len, fill_value=np.nan)
+     keypt = pad_shape(keypt, max_len, fill_value=np.nan)
+     if scores is not None:
+         scores = pad_shape(scores, max_len, fill_value=np.nan)
+
+     # Compute the distance between persons from one frame to the next
+     personsIDs_comb = sorted(list(it.product(range(len(keyptpre)), range(len(keypt)))))
+     frame_by_frame_dist = [euclidean_distance(keyptpre[comb[0]], keypt[comb[1]]) for comb in personsIDs_comb]
+     frame_by_frame_dist = np.mean(frame_by_frame_dist, axis=1)
+
+     # Sort correspondences by distance
+     _, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
+
+     # Associate points to the same index across frames, nan if no correspondence
+     sorted_keypoints = []
+     for i in range(len(keyptpre)):
+         id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
+         if len(id_in_old) > 0: sorted_keypoints += [keypt[id_in_old[0]]]
+         else: sorted_keypoints += [keypt[i]]
+     sorted_keypoints = np.array(sorted_keypoints)
+
+     if scores is not None:
+         sorted_scores = []
+         for i in range(len(keyptpre)):
+             id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
+             if len(id_in_old) > 0: sorted_scores += [scores[id_in_old[0]]]
+             else: sorted_scores += [scores[i]]
+         sorted_scores = np.array(sorted_scores)
+
+     # Keep track of previous values even when missing for more than one frame
+     sorted_prev_keypoints = np.where(np.isnan(sorted_keypoints) & ~np.isnan(keyptpre), keyptpre, sorted_keypoints)
+
+     if scores is not None:
+         return sorted_prev_keypoints, sorted_keypoints, sorted_scores
+     else: # For Pose2Sim.triangulation()
+         return sorted_keypoints, associated_tuples
+
+
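A toy example where two persons swap detection order between frames (illustrative only; one keypoint per person):

    import numpy as np

    prev = np.array([[[0., 0.]], [[10., 10.]]])      # persons A and B in the previous frame
    curr = np.array([[[10.2, 10.1]], [[0.1, 0.2]]])  # same persons, detection order swapped
    scores = np.array([[0.9], [0.8]])
    _, sorted_kpts, sorted_scores = sort_people_sports2d(prev, curr, scores=scores)
    # sorted_kpts[0] is again the person near (0, 0)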
+ def sort_people_rtmlib(pose_tracker, keypoints, scores):
+     '''
+     Associate persons across frames (RTMLib method)
+
+     INPUTS:
+     - pose_tracker: PoseTracker. The initialized RTMLib pose tracker object
+     - keypoints: array of shape K, L, M with K the number of detected persons,
+       L the number of detected keypoints, M their 2D coordinates
+     - scores: array of shape K, L with K the number of detected persons,
+       L the confidence of detected keypoints
+
+     OUTPUTS:
+     - sorted_keypoints: array with reordered persons
+     - sorted_scores: array with reordered scores
+     '''
+
+     try:
+         desired_size = max(pose_tracker.track_ids_last_frame)+1
+         sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
+         sorted_keypoints[pose_tracker.track_ids_last_frame] = keypoints[:len(pose_tracker.track_ids_last_frame), :, :]
+         sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
+         sorted_scores[pose_tracker.track_ids_last_frame] = scores[:len(pose_tracker.track_ids_last_frame), :]
+     except Exception:
+         sorted_keypoints, sorted_scores = keypoints, scores
+
+     return sorted_keypoints, sorted_scores
+
+
+ def sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count):
+     '''
+     Associate persons across frames (DeepSort method)
+
+     INPUTS:
+     - keypoints: array of shape K, L, M with K the number of detected persons,
+       L the number of detected keypoints, M their 2D coordinates
+     - scores: array of shape K, L with K the number of detected persons,
+       L the confidence of detected keypoints
+     - deepsort_tracker: The initialized DeepSort tracker object
+     - frame: np.array. The current image opened with cv2.imread
+     - frame_count: int. The index of the current frame
+
+     OUTPUTS:
+     - sorted_keypoints: array with reordered persons
+     - sorted_scores: array with reordered scores
+     '''
+
+     try:
+         # Compute bboxes from keypoints and create detections (bboxes, scores, class_ids)
+         bboxes_ltwh = bbox_ltwh_compute(keypoints, padding=20)
+         bbox_scores = np.mean(scores, axis=1)
+         class_ids = np.array(['person']*len(bboxes_ltwh))
+         detections = list(zip(bboxes_ltwh, bbox_scores, class_ids))
+
+         # Estimate the tracks and retrieve the indices of the original detections
+         det_ids = [i for i in range(len(detections))]
+         tracks = deepsort_tracker.update_tracks(detections, frame=frame, others=det_ids)
+         track_ids_frame, orig_det_ids = [], []
+         for track in tracks:
+             if not track.is_confirmed():
+                 continue
+             track_ids_frame.append(int(track.track_id)-1) # ID of people
+             orig_det_ids.append(track.get_det_supplementary()) # ID of detections
+
+         # Correspondence between person IDs and original detection IDs
+         desired_size = max(track_ids_frame) + 1
+         sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
+         sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
+         for i,v in enumerate(track_ids_frame):
+             if orig_det_ids[i] is not None:
+                 sorted_keypoints[v] = keypoints[orig_det_ids[i]]
+                 sorted_scores[v] = scores[orig_det_ids[i]]
+
+     except Exception as e:
+         sorted_keypoints, sorted_scores = keypoints, scores
+         if frame_count > deepsort_tracker.tracker.n_init:
+             logging.warning(f"Tracking error: {e}. Sorting persons with the DeepSort method failed for this frame.")
+
+     return sorted_keypoints, sorted_scores
+
+
+ def bbox_ltwh_compute(keypoints, padding=0):
+     '''
+     Compute bounding boxes in (x_min, y_min, width, height) format.
+     Optionally add padding to the bounding boxes
+     as a percentage of the bounding box size (+padding% horizontally, +padding/2% vertically).
+
+     INPUTS:
+     - keypoints: array of shape K, L, M with K the number of detected persons,
+       L the number of detected keypoints, M their 2D coordinates
+     - padding: int. The padding to add to the bounding boxes, in percent
+
+     OUTPUT:
+     - bbox_ltwh: array of shape K, 4 with the (left, top, width, height) of each box
+     '''
+
+     x_coords = keypoints[:, :, 0]
+     y_coords = keypoints[:, :, 1]
+
+     x_min, x_max = np.min(x_coords, axis=1), np.max(x_coords, axis=1)
+     y_min, y_max = np.min(y_coords, axis=1), np.max(y_coords, axis=1)
+     width = x_max - x_min
+     height = y_max - y_min
+
+     if padding > 0:
+         x_min = x_min - width*padding/100
+         y_min = y_min - height/2*padding/100
+         width = width + 2*width*padding/100
+         height = height + height*padding/100
+
+     bbox_ltwh = np.stack((x_min, y_min, width, height), axis=1)
+
+     return bbox_ltwh
+
+
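A quick check of bbox_ltwh_compute (illustrative only):

    import numpy as np

    kpts = np.array([[[10., 20.], [30., 60.]]])  # 1 person, 2 keypoints
    print(bbox_ltwh_compute(kpts))               # [[10. 20. 20. 40.]]
    print(bbox_ltwh_compute(kpts, padding=20))   # +20% horizontally and +10% vertically on each side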
+ def draw_bounding_box(img, X, Y, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)], fontSize=0.3, thickness=1):
+     '''
+     Draw bounding boxes and person IDs around the lists of X and Y coordinates.
+     Bounding boxes have a different color for each person.
+
+     INPUTS:
+     - img: opencv image
+     - X: list of lists of x coordinates
+     - Y: list of lists of y coordinates
+     - colors: list of colors to cycle through
+     - fontSize: float. The size of the ID labels
+     - thickness: int. The thickness of the rectangles
+
+     OUTPUT:
+     - img: image with rectangles and person IDs
+     '''
+
+     color_cycle = it.cycle(colors)
+
+     for i,(x,y) in enumerate(zip(X,Y)):
+         color = next(color_cycle)
+         if not np.isnan(x).all():
+             x_min, y_min = np.nanmin(x).astype(int), np.nanmin(y).astype(int)
+             x_max, y_max = np.nanmax(x).astype(int), np.nanmax(y).astype(int)
+             if x_min < 0: x_min = 0
+             if x_max > img.shape[1]: x_max = img.shape[1]
+             if y_min < 0: y_min = 0
+             if y_max > img.shape[0]: y_max = img.shape[0]
+
+             # Draw rectangles
+             cv2.rectangle(img, (x_min-25, y_min-25), (x_max+25, y_max+25), color, thickness)
+
+             # Write person ID
+             cv2.putText(img, str(i), (x_min-30, y_min-30), cv2.FONT_HERSHEY_SIMPLEX, fontSize, color, 2, cv2.LINE_AA)
+
+     return img
+
+
+ def draw_skel(img, X, Y, model):
+     '''
+     Draw the skeleton lines for each person.
+     Lines are colored according to the body side of the keypoint pair (right, left, or middle).
+
+     INPUTS:
+     - img: opencv image
+     - X: list of lists of x coordinates
+     - Y: list of lists of y coordinates
+     - model: skeleton model (from skeletons.py)
+
+     OUTPUT:
+     - img: image with the skeleton lines
+     '''
+
+     # Get the (unique) pairs of keypoints between which to draw a line
+     id_pairs, name_pairs = [], []
+     for data_i in PreOrderIter(model.root, filter_=lambda node: node.is_leaf):
+         node_branch_ids = [node_i.id for node_i in data_i.path]
+         node_branch_names = [node_i.name for node_i in data_i.path]
+         id_pairs += [[node_branch_ids[i], node_branch_ids[i+1]] for i in range(len(node_branch_ids)-1)]
+         name_pairs += [[node_branch_names[i], node_branch_names[i+1]] for i in range(len(node_branch_names)-1)]
+     node_pairs = {tuple(name_pair): id_pair for (name_pair, id_pair) in zip(name_pairs, id_pairs)}
+
+     # Draw lines
+     for (x,y) in zip(X,Y):
+         if not np.isnan(x).all():
+             for names, ids in node_pairs.items():
+                 if None not in ids and not (np.isnan(x[ids[0]]) or np.isnan(y[ids[0]]) or np.isnan(x[ids[1]]) or np.isnan(y[ids[1]])):
+                     if any(n.startswith('R') for n in names) and not any(n.startswith('L') for n in names):
+                         c = (255,128,0) # right-side pair
+                     elif any(n.startswith('L') for n in names) and not any(n.startswith('R') for n in names):
+                         c = (0,255,0) # left-side pair
+                     else:
+                         c = (51, 153, 255) # middle pair
+                     cv2.line(img, (int(x[ids[0]]), int(y[ids[0]])), (int(x[ids[1]]), int(y[ids[1]])), c, thickness)
+
+     return img
+
+
+ def draw_keypts(img, X, Y, scores, cmap_str='RdYlGn'):
+     '''
+     Draw the keypoints for each person.
+     Keypoints' colors depend on their score.
+
+     INPUTS:
+     - img: opencv image
+     - X: list of lists of x coordinates
+     - Y: list of lists of y coordinates
+     - scores: list of lists of scores
+     - cmap_str: colormap name
+
+     OUTPUT:
+     - img: image with the keypoints
+     '''
+
+     scores = np.where(np.isnan(scores), 0, scores)
+     # scores = (scores - 0.4) / (1-0.4) # to get a red color for scores lower than 0.4
+     scores = np.where(scores>0.99, 0.99, scores)
+     scores = np.where(scores<0, 0, scores)
+
+     cmap = plt.get_cmap(cmap_str)
+     for (x,y,s) in zip(X,Y,scores):
+         c_k = np.array(cmap(s))[:,:-1]*255 # map scores to RGB, drop the alpha channel
+         for i in range(len(x)):
+             if not (np.isnan(x[i]) or np.isnan(y[i])):
+                 cv2.circle(img, (int(x[i]), int(y[i])), thickness+4, c_k[i][::-1], -1)
+
+     return img
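An end-to-end drawing sketch combining the helpers above (illustrative only; the frame and coordinates are made up, and draw_skel would additionally need a skeleton model from skeletons.py):

    import cv2
    import numpy as np

    img = np.zeros((480, 640, 3), dtype=np.uint8)              # blank frame
    X, Y = [np.array([320., 320.])], [np.array([100., 200.])]  # one person, two keypoints
    scores = [np.array([0.95, 0.6])]

    img = draw_bounding_box(img, X, Y)
    img = draw_keypts(img, X, Y, scores)
    cv2.imwrite('annotated_frame.png', img)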