sports2d-0.5.6-py3-none-any.whl → sports2d-0.6.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Sports2D/Demo/Config_demo.toml +116 -8
- Sports2D/Sports2D.py +23 -10
- Sports2D/Utilities/common.py +643 -1
- Sports2D/Utilities/skeletons.py +654 -143
- Sports2D/Utilities/tests.py +12 -5
- Sports2D/process.py +238 -565
- {sports2d-0.5.6.dist-info → sports2d-0.6.2.dist-info}/METADATA +108 -9
- sports2d-0.6.2.dist-info/RECORD +16 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.2.dist-info}/WHEEL +1 -1
- sports2d-0.5.6.dist-info/RECORD +0 -16
- {sports2d-0.5.6.dist-info → sports2d-0.6.2.dist-info}/LICENSE +0 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.2.dist-info}/entry_points.txt +0 -0
- {sports2d-0.5.6.dist-info → sports2d-0.6.2.dist-info}/top_level.txt +0 -0
Sports2D/process.py
CHANGED
@@ -54,67 +54,27 @@
 from pathlib import Path
 import sys
 import logging
+import json
+import ast
+from functools import partial
 from datetime import datetime
 import itertools as it
 from tqdm import tqdm
-from anytree import RenderTree
+from anytree import RenderTree
 
 import numpy as np
 import pandas as pd
 import cv2
 import matplotlib as mpl
 import matplotlib.pyplot as plt
-from rtmlib import PoseTracker, BodyWithFeet
+from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom
+from deep_sort_realtime.deepsort_tracker import DeepSort
 
 from Sports2D.Utilities import filter
 from Sports2D.Utilities.common import *
 from Sports2D.Utilities.skeletons import *
 
 
-## CONSTANTS
-angle_dict = { # lowercase!
-    # joint angles
-    'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
-    'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
-    'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
-    'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
-    'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
-    'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
-    # 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
-    # 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
-    'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
-    'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
-    'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
-    'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
-    'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
-    'left wrist': [['LElbow', 'LIndex', 'LWrist'], 'flexion', -180, 1],
-
-    # segment angles
-    'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
-    'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
-    'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
-    'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
-    'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
-    'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
-    'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
-    'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
-    'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
-    'head': [['Head', 'Neck'], 'horizontal', 0, -1],
-    'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
-    'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
-    'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
-    'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
-    'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
-    'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
-    }
-
-colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
-          (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
-          (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
-          (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
-thickness = 1
-
-
 ## AUTHORSHIP INFORMATION
 __author__ = "David Pagnon, HunMin Kim"
 __copyright__ = "Copyright 2023, Sports2D"
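Each angle_dict value above follows the pattern [keypoints, angle_type, offset_deg, scale]: the keypoints involved, either a joint-angle kind ('flexion', 'dorsiflexion') or a segment angle measured from 'horizontal', a constant offset in degrees, and a sign/scale factor (the removed mean_angles function further down applies them as ang += offset followed by ang *= scale). A minimal sketch of evaluating such an entry from 2D points; angle_from_entry is a hypothetical helper for illustration, not the package's code (the real computation is delegated to fixed_angles in a later hunk):

# --- illustration, not part of the diff ---
import numpy as np

def angle_from_entry(points_2d, offset_deg, scale):
    # 2 points: segment angle vs horizontal; 3 points: angle at the middle point
    p = [np.asarray(pt, dtype=float) for pt in points_2d]
    if len(p) == 2:
        seg = p[1] - p[0]
        ang = np.degrees(np.arctan2(seg[1], seg[0]))
    else:
        u, v = p[0] - p[1], p[2] - p[1]
        ang = np.degrees(np.arctan2(u[1], u[0]) - np.arctan2(v[1], v[0]))
    return (ang + offset_deg) * scale

# 'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1], with pixel coordinates
knee_flexion = angle_from_entry([(450, 880), (460, 700), (430, 520)], -180, 1)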
@@ -153,7 +113,7 @@ def setup_webcam(webcam_id, save_vid, vid_output_path, input_size):
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, input_size[1])
     cam_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = cap.get(cv2.CAP_PROP_FPS)
+    fps = round(cap.get(cv2.CAP_PROP_FPS))
     if fps == 0: fps = 30
 
     if cam_width != input_size[0] or cam_height != input_size[1]:
@@ -205,7 +165,7 @@ def setup_video(video_file_path, save_vid, vid_output_path):
     cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
     out_vid = None
-    fps = cap.get(cv2.CAP_PROP_FPS)
+    fps = round(cap.get(cv2.CAP_PROP_FPS))
     if fps == 0: fps = 30
     if save_vid:
         # try:
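Both setup functions now round the frame rate: OpenCV reports CAP_PROP_FPS as a float (29.97... for NTSC-style footage), and an integer is simpler for the frame-based timing and file headers downstream. A minimal sketch with a hypothetical input file:

# --- illustration, not part of the diff ---
import cv2

cap = cv2.VideoCapture('demo.mp4')      # hypothetical file
fps = round(cap.get(cv2.CAP_PROP_FPS))  # e.g. 29.97002997 -> 30
if fps == 0: fps = 30                   # fallback kept from the previous version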
@@ -221,67 +181,78 @@ def setup_video(video_file_path, save_vid, vid_output_path):
     return cap, out_vid, cam_width, cam_height, fps
 
 
-def setup_backend_device():
+def setup_backend_device(backend='auto', device='auto'):
     '''
     Set up the backend and device for the pose tracker based on the availability of hardware acceleration.
     TensorRT is not supported by RTMLib yet: https://github.com/Tau-J/rtmlib/issues/12
 
-
+    If device and backend are not specified, they are automatically set up in the following order of priority:
     1. GPU with CUDA and ONNXRuntime backend (if CUDAExecutionProvider is available)
     2. GPU with ROCm and ONNXRuntime backend (if ROCMExecutionProvider is available, for AMD GPUs)
     3. GPU with MPS or CoreML and ONNXRuntime backend (for macOS systems)
     4. CPU with OpenVINO backend (default fallback)
     '''
 
-
-
-
-
-
-
-        logging.
-
-        device = 'rocm'
-        backend = 'onnxruntime'
-        logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
-    else:
-        raise
-    except:
+    if device!='auto' and backend!='auto':
+        device = device.lower()
+        backend = backend.lower()
+
+    if device=='auto' or backend=='auto':
+        if device=='auto' and backend!='auto' or device!='auto' and backend=='auto':
+            logging.warning(f"If you set device or backend to 'auto', you must set the other to 'auto' as well. Both device and backend will be determined automatically.")
+
         try:
+            import torch
             import onnxruntime as ort
-        if
-            device = '
+            if torch.cuda.is_available() == True and 'CUDAExecutionProvider' in ort.get_available_providers():
+                device = 'cuda'
+                backend = 'onnxruntime'
+                logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
+            elif torch.cuda.is_available() == True and 'ROCMExecutionProvider' in ort.get_available_providers():
+                device = 'rocm'
                 backend = 'onnxruntime'
-            logging.info(f"\nValid
+                logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
             else:
-            raise
+                raise
         except:
-
-
-
-
+            try:
+                import onnxruntime as ort
+                if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
+                    device = 'mps'
+                    backend = 'onnxruntime'
+                    logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
+                else:
+                    raise
+            except:
+                device = 'cpu'
+                backend = 'openvino'
+                logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")
+
     return backend, device
 
 
-def setup_pose_tracker(det_frequency, mode, tracking):
+def setup_pose_tracker(ModelClass, det_frequency, mode, tracking, backend, device):
     '''
     Set up the RTMLib pose tracker with the appropriate model and backend.
     If CUDA is available, use it with ONNXRuntime backend; else use CPU with openvino
 
     INPUTS:
+    - ModelClass: class. The RTMlib model class to use for pose detection (Body, BodyWithFeet, Wholebody)
     - det_frequency: int. The frequency of pose detection (every N frames)
     - mode: str. The mode of the pose tracker ('lightweight', 'balanced', 'performance')
     - tracking: bool. Whether to track persons across frames with RTMlib tracker
+    - backend: str. The backend to use for pose detection (onnxruntime, openvino, opencv)
+    - device: str. The device to use for pose detection (cpu, cuda, rocm, mps)
 
     OUTPUTS:
     - pose_tracker: PoseTracker. The initialized pose tracker object
     '''
 
-    backend, device = setup_backend_device()
+    backend, device = setup_backend_device(backend=backend, device=device)
 
     # Initialize the pose tracker with Halpe26 model
     pose_tracker = PoseTracker(
-
+        ModelClass,
         det_frequency=det_frequency,
         mode=mode,
         backend=backend,
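The two setup functions now compose explicitly: setup_backend_device(backend, device) honors user-supplied values (lowercased) and only probes torch/onnxruntime when either argument is 'auto', and setup_pose_tracker receives the RTMLib model class instead of assuming BodyWithFeet. A usage sketch, assuming both functions are imported from Sports2D.process and with illustrative argument values:

# --- illustration, not part of the diff ---
from rtmlib import BodyWithFeet
from Sports2D.process import setup_backend_device, setup_pose_tracker

backend, device = setup_backend_device(backend='auto', device='auto')
# e.g. ('onnxruntime', 'cuda') with a working CUDA stack, ('openvino', 'cpu') otherwise
pose_tracker = setup_pose_tracker(BodyWithFeet, det_frequency=4, mode='balanced',
                                  tracking=False, backend=backend, device=device)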
@@ -353,178 +324,18 @@ def compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, keypoints_id
 
     ang_params = angle_dict.get(ang_name)
     if ang_params is not None:
-
-
-
-
-
-
-
-
-            ang = ang-180 if ang>90 else ang
-            ang = ang+180 if ang<-90 else ang
-        else:
-            ang = ang-360 if ang>180 else ang
-            ang = ang+360 if ang<-180 else ang
+        try:
+            if ang_name in ['pelvis', 'trunk', 'shoulders']:
+                angle_coords = [[np.abs(person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]]), person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
+            else:
+                angle_coords = [[person_X_flipped[keypoints_ids[keypoints_names.index(kpt)]], person_Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0]]
+            ang = fixed_angles(angle_coords, ang_name)
+        except:
+            ang = np.nan
     else:
         ang = np.nan
-
-    return ang
-
-
-def min_with_single_indices(L, T):
-    '''
-    Let L be a list (size s) with T associated tuple indices (size s).
-    Select the smallest values of L, considering that
-    the next smallest value cannot have the same numbers
-    in the associated tuple as any of the previous ones.
-
-    Example:
-    L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ]
-    T = list(it.product(range(2),range(3)))
-      = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)]
-
-    - 1st smallest value: 3 with tuple (2,3), index 11
-    - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]:
-      20 with tuple (0,0), index 0
-    - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]:
-      23 with tuple (1,1), index 5
-
-    INPUTS:
-    - L: list (size s)
-    - T: T associated tuple indices (size s)
-
-    OUTPUTS:
-    - minL: list of smallest values of L, considering constraints on tuple indices
-    - argminL: list of indices of smallest values of L (indices of best combinations)
-    - T_minL: list of tuples associated with smallest values of L
-    '''
-
-    minL = [np.nanmin(L)]
-    argminL = [np.nanargmin(L)]
-    T_minL = [T[argminL[0]]]
-
-    mask_tokeep = np.array([True for t in T])
-    i=0
-    while mask_tokeep.any()==True:
-        mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T])
-        if mask_tokeep.any()==True:
-            indicesL_tokeep = np.where(mask_tokeep)[0]
-            minL += [np.nanmin(np.array(L)[indicesL_tokeep]) if not np.isnan(np.array(L)[indicesL_tokeep]).all() else np.nan]
-            argminL += [indicesL_tokeep[np.nanargmin(np.array(L)[indicesL_tokeep])] if not np.isnan(minL[-1]) else indicesL_tokeep[0]]
-            T_minL += (T[argminL[i+1]],)
-            i+=1
-
-    return np.array(minL), np.array(argminL), np.array(T_minL)
 
-
-def pad_shape(arr, target_len, fill_value=np.nan):
-    '''
-    Pads an array to the target length with specified fill values
-
-    INPUTS:
-    - arr: Input array to be padded.
-    - target_len: The target length of the first dimension after padding.
-    - fill_value: The value to use for padding (default: np.nan).
-
-    OUTPUTS:
-    - Padded array with shape (target_len, ...) matching the input dimensions.
-    '''
-
-    if len(arr) < target_len:
-        pad_shape = (target_len - len(arr),) + arr.shape[1:]
-        padding = np.full(pad_shape, fill_value)
-        return np.concatenate((arr, padding))
-
-    return arr
-
-
-def sort_people_sports2d(keyptpre, keypt, scores=None):
-    '''
-    Associate persons across frames (Sports2D method)
-    Persons' indices are sometimes swapped when changing frame
-    A person is associated to another in the next frame when they are at a small distance
-
-    N.B.: Requires min_with_single_indices and euclidian_distance function (see common.py)
-
-    INPUTS:
-    - keyptpre: (K, L, M) array of 2D coordinates for K persons in the previous frame, L keypoints, M 2D coordinates
-    - keypt: idem keyptpre, for current frame
-    - score: (K, L) array of confidence scores for K persons, L keypoints (optional)
-
-    OUTPUTS:
-    - sorted_prev_keypoints: array with reordered persons with values of previous frame if current is empty
-    - sorted_keypoints: array with reordered persons --> if scores is not None
-    - sorted_scores: array with reordered scores --> if scores is not None
-    - associated_tuples: list of tuples with correspondences between persons across frames --> if scores is None (for Pose2Sim.triangulation())
-    '''
-
-    # Generate possible person correspondences across frames
-    max_len = max(len(keyptpre), len(keypt))
-    keyptpre = pad_shape(keyptpre, max_len, fill_value=np.nan)
-    keypt = pad_shape(keypt, max_len, fill_value=np.nan)
-    if scores is not None:
-        scores = pad_shape(scores, max_len, fill_value=np.nan)
-
-    # Compute distance between persons from one frame to another
-    personsIDs_comb = sorted(list(it.product(range(len(keyptpre)), range(len(keypt)))))
-    frame_by_frame_dist = [euclidean_distance(keyptpre[comb[0]],keypt[comb[1]]) for comb in personsIDs_comb]
-    frame_by_frame_dist = np.mean(frame_by_frame_dist, axis=1)
-
-    # Sort correspondences by distance
-    _, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
-
-    # Associate points to same index across frames, nan if no correspondence
-    sorted_keypoints = []
-    for i in range(len(keyptpre)):
-        id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
-        if len(id_in_old) > 0: sorted_keypoints += [keypt[id_in_old[0]]]
-        else: sorted_keypoints += [keypt[i]]
-    sorted_keypoints = np.array(sorted_keypoints)
-
-    if scores is not None:
-        sorted_scores = []
-        for i in range(len(keyptpre)):
-            id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
-            if len(id_in_old) > 0: sorted_scores += [scores[id_in_old[0]]]
-            else: sorted_scores += [scores[i]]
-        sorted_scores = np.array(sorted_scores)
-
-    # Keep track of previous values even when missing for more than one frame
-    sorted_prev_keypoints = np.where(np.isnan(sorted_keypoints) & ~np.isnan(keyptpre), keyptpre, sorted_keypoints)
-
-    if scores is not None:
-        return sorted_prev_keypoints, sorted_keypoints, sorted_scores
-    else: # For Pose2Sim.triangulation()
-        return sorted_keypoints, associated_tuples
-
-
-def sort_people_rtmlib(pose_tracker, keypoints, scores):
-    '''
-    Associate persons across frames (RTMLib method)
-
-    INPUTS:
-    - pose_tracker: PoseTracker. The initialized RTMLib pose tracker object
-    - keypoints: array of shape K, L, M with K the number of detected persons,
-    L the number of detected keypoints, M their 2D coordinates
-    - scores: array of shape K, L with K the number of detected persons,
-    L the confidence of detected keypoints
-
-    OUTPUT:
-    - sorted_keypoints: array with reordered persons
-    - sorted_scores: array with reordered scores
-    '''
-
-    try:
-        desired_size = max(pose_tracker.track_ids_last_frame)+1
-        sorted_keypoints = np.full((desired_size, keypoints.shape[1], 2), np.nan)
-        sorted_keypoints[pose_tracker.track_ids_last_frame] = keypoints[:len(pose_tracker.track_ids_last_frame), :, :]
-        sorted_scores = np.full((desired_size, scores.shape[1]), np.nan)
-        sorted_scores[pose_tracker.track_ids_last_frame] = scores[:len(pose_tracker.track_ids_last_frame), :]
-    except:
-        sorted_keypoints, sorted_scores = keypoints, scores
-
-    return sorted_keypoints, sorted_scores
+    return ang
 
 
 def draw_dotted_line(img, start, direction, length, color=(0, 255, 0), gap=7, dot_length=3, thickness=thickness):
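The helpers removed here (min_with_single_indices, pad_shape, sort_people_sports2d, sort_people_rtmlib) are still referenced later in this file (process_fun calls sort_people_sports2d), so they presumably now come in through the wildcard import from Sports2D.Utilities.common, which grows by 643 lines in this release per the file list above. Under that assumption, the removed docstring's example runs as written, except for an apparent typo: it.product(range(2),range(3)) yields 6 tuples, whereas the 12 tuples listed correspond to it.product(range(3),range(4)):

# --- illustration, not part of the diff ---
import itertools as it

L = [20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3]
T = list(it.product(range(3), range(4)))
minL, argminL, T_minL = min_with_single_indices(L, T)
# minL    -> [ 3. 20. 23.]  (greedy minima over disjoint row/column indices)
# argminL -> [11  0  5]
# T_minL  -> [[2 3] [0 0] [1 1]]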
@@ -551,109 +362,6 @@ def draw_dotted_line(img, start, direction, length, color=(0, 255, 0), gap=7, do
         cv2.line(img, tuple(line_start.astype(int)), tuple(line_end.astype(int)), color, thickness)
 
 
-def draw_bounding_box(img, X, Y, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)], fontSize=0.3, thickness=1):
-    '''
-    Draw bounding boxes and person ID around list of lists of X and Y coordinates.
-    Bounding boxes have a different color for each person.
-
-    INPUTS:
-    - img: opencv image
-    - X: list of list of x coordinates
-    - Y: list of list of y coordinates
-    - colors: list of colors to cycle through
-
-    OUTPUT:
-    - img: image with rectangles and person IDs
-    '''
-
-    color_cycle = it.cycle(colors)
-
-    for i,(x,y) in enumerate(zip(X,Y)):
-        color = next(color_cycle)
-        if not np.isnan(x).all():
-            x_min, y_min = np.nanmin(x).astype(int), np.nanmin(y).astype(int)
-            x_max, y_max = np.nanmax(x).astype(int), np.nanmax(y).astype(int)
-            if x_min < 0: x_min = 0
-            if x_max > img.shape[1]: x_max = img.shape[1]
-            if y_min < 0: y_min = 0
-            if y_max > img.shape[0]: y_max = img.shape[0]
-
-            # Draw rectangles
-            cv2.rectangle(img, (x_min-25, y_min-25), (x_max+25, y_max+25), color, thickness)
-
-            # Write person ID
-            cv2.putText(img, str(i), (x_min-30, y_min-30), cv2.FONT_HERSHEY_SIMPLEX, fontSize+1, color, 2, cv2.LINE_AA)
-
-    return img
-
-
-def draw_skel(img, X, Y, model, colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)]):
-    '''
-    Draws keypoints and skeleton for each person.
-    Skeletons have a different color for each person.
-
-    INPUTS:
-    - img: opencv image
-    - X: list of list of x coordinates
-    - Y: list of list of y coordinates
-    - model: skeleton model (from skeletons.py)
-    - colors: list of colors to cycle through
-
-    OUTPUT:
-    - img: image with keypoints and skeleton
-    '''
-
-    # Get (unique) pairs between which to draw a line
-    node_pairs = []
-    for data_i in PreOrderIter(model.root, filter_=lambda node: node.is_leaf):
-        node_branches = [node_i.id for node_i in data_i.path]
-        node_pairs += [[node_branches[i],node_branches[i+1]] for i in range(len(node_branches)-1)]
-    node_pairs = [list(x) for x in set(tuple(x) for x in node_pairs)]
-
-    # Draw lines
-    color_cycle = it.cycle(colors)
-    for (x,y) in zip(X,Y):
-        c = next(color_cycle)
-        if not np.isnan(x).all():
-            [cv2.line(img,
-                (int(x[n[0]]), int(y[n[0]])), (int(x[n[1]]), int(y[n[1]])), c, thickness)
-                for n in node_pairs
-                if not (np.isnan(x[n[0]]) or np.isnan(y[n[0]]) or np.isnan(x[n[1]]) or np.isnan(y[n[1]]))]
-
-    return img
-
-
-def draw_keypts(img, X, Y, scores, cmap_str='RdYlGn'):
-    '''
-    Draws keypoints and skeleton for each person.
-    Keypoints' colors depend on their score.
-
-    INPUTS:
-    - img: opencv image
-    - X: list of list of x coordinates
-    - Y: list of list of y coordinates
-    - scores: list of list of scores
-    - cmap_str: colormap name
-
-    OUTPUT:
-    - img: image with keypoints and skeleton
-    '''
-
-    scores = np.where(np.isnan(scores), 0, scores)
-    # scores = (scores - 0.4) / (1-0.4) # to get a red color for scores lower than 0.4
-    scores = np.where(scores>0.99, 0.99, scores)
-    scores = np.where(scores<0, 0, scores)
-
-    cmap = plt.get_cmap(cmap_str)
-    for (x,y,s) in zip(X,Y,scores):
-        c_k = np.array(cmap(s))[:,:-1]*255
-        [cv2.circle(img, (int(x[i]), int(y[i])), thickness+4, c_k[i][::-1], -1)
-            for i in range(len(x))
-            if not (np.isnan(x[i]) or np.isnan(y[i]))]
-
-    return img
-
-
 def draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, keypoints_ids, keypoints_names, angle_names, display_angle_values_on= ['body', 'list'], colors=[(255, 0, 0), (0, 255, 0), (0, 0, 255)], fontSize=0.3, thickness=1):
     '''
     Draw angles on the image.
@@ -692,31 +400,34 @@ def draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, keypoints_
             ang_name = angle_names[k]
             ang_params = angle_dict.get(ang_name)
             if ang_params is not None:
-
-
-
-
-
-
-
-
-
-
-                app_point, vec1, vec2 = draw_joint_angle(img, ang_coords, flip, right_angle)
-
-                # Write angle on body
-                if 'body' in display_angle_values_on:
+                kpts = ang_params[0]
+                if not any(item not in keypoints_names+['Neck', 'Hip'] for item in kpts):
+                    ang_coords = np.array([[X[keypoints_ids[keypoints_names.index(kpt)]], Y[keypoints_ids[keypoints_names.index(kpt)]]] for kpt in ang_params[0] if kpt in keypoints_names])
+                    X_flipped = np.append(X_flipped, X[len(X_flipped):])
+                    X_flipped_coords = [X_flipped[keypoints_ids[keypoints_names.index(kpt)]] for kpt in ang_params[0] if kpt in keypoints_names]
+                    flip = -1 if any(x_flipped < 0 for x_flipped in X_flipped_coords) else 1
+                    flip = 1 if ang_name in ['pelvis', 'trunk', 'shoulders'] else flip
+                    right_angle = True if ang_params[2]==90 else False
+
+                    # Draw angle
                     if len(ang_coords) == 2: # segment angle
-
+                        app_point, vec = draw_segment_angle(img, ang_coords, flip)
                     else: # joint angle
-
-
-
-
-
-
-
-
+                        app_point, vec1, vec2 = draw_joint_angle(img, ang_coords, flip, right_angle)
+
+                    # Write angle on body
+                    if 'body' in display_angle_values_on:
+                        if len(ang_coords) == 2: # segment angle
+                            write_angle_on_body(img, ang, app_point, vec, np.array([1,0]), dist=20, color=(255,255,255), fontSize=fontSize, thickness=thickness)
+                        else: # joint angle
+                            write_angle_on_body(img, ang, app_point, vec1, vec2, dist=40, color=(0,255,0), fontSize=fontSize, thickness=thickness)
+
+                    # Write angle as a list on image with progress bar
+                    if 'list' in display_angle_values_on:
+                        if len(ang_coords) == 2: # segment angle
+                            ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (255,255,255), fontSize=fontSize, thickness=thickness)
+                        else:
+                            ang_label_line = write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_line, color = (0,255,0), fontSize=fontSize, thickness=thickness)
 
     return img
 
@@ -869,32 +580,6 @@ def write_angle_as_list(img, ang, ang_name, person_label_position, ang_label_lin
     return ang_label_line
 
 
-def read_trc(trc_path):
-    '''
-    Read trc file
-
-    INPUTS:
-    - trc_path: path to the trc file
-
-    OUTPUTS:
-    - Q_coords: dataframe of coordinates
-    - frames_col: series of frames
-    - time_col: series of time
-    - markers: list of marker names
-    - header: list of header lines
-    '''
-
-    with open(trc_path, 'r') as trc_file:
-        header = [next(trc_file) for line in range(5)]
-    markers = header[3].split('\t')[2::3]
-
-    trc_df = pd.read_csv(trc_path, sep="\t", skiprows=4)
-    frames_col, time_col = pd.Series(trc_df.iloc[:,0], name='frames'), pd.Series(trc_df.iloc[:,1], name='time')
-    Q_coords = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
-
-    return Q_coords, frames_col, time_col, markers, header
-
-
 def load_pose_file(Q_coords):
     '''
     Load 2D keypoints from a dataframe of XYZ coordinates
@@ -1075,6 +760,7 @@ def angle_plots(angle_data_unfiltered, angle_data, person_id):
         ax = plt.subplot(111)
         plt.plot(angle_data_unfiltered.iloc[:,0], angle_data_unfiltered.iloc[:,id+1], label='unfiltered')
         plt.plot(angle_data.iloc[:,0], angle_data.iloc[:,id+1], label='filtered')
+
         ax.set_xlabel('Time (seconds)')
         ax.set_ylabel(angle+' (°)')
         plt.legend()
@@ -1144,138 +830,6 @@ def compute_floor_line(trc_data, keypoint_names = ['LBigToe', 'RBigToe'], toe_sp
     return angle, xy_origin
 
 
-def mean_angles(Q_coords, markers, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']):
-    '''
-    Compute the mean angle time series from 3D points for a given list of angles.
-
-    INPUTS:
-    - Q_coords (DataFrame): The triangulated coordinates of the markers.
-    - markers (list): The list of marker names.
-    - ang_to_consider (list): The list of angles to consider (requires angle_dict).
-
-    OUTPUTS:
-    - ang_mean: The mean angle time series.
-    '''
-
-    ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip']
-
-    angs = []
-    for ang_name in ang_to_consider:
-        ang_params = angle_dict[ang_name]
-        ang_mk = ang_params[0]
-
-        pts_for_angles = []
-        for pt in ang_mk:
-            pts_for_angles.append(Q_coords.iloc[:,markers.index(pt)*3:markers.index(pt)*3+3])
-        ang = points_to_angles(pts_for_angles)
-
-        ang += ang_params[2]
-        ang *= ang_params[3]
-        ang = np.abs(ang)
-
-        angs.append(ang)
-
-    ang_mean = np.mean(angs, axis=0)
-
-    return ang_mean
-
-
-def best_coords_for_measurements(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.2, close_to_zero_speed=0.2, large_hip_knee_angles=45):
-    '''
-    Compute the best coordinates for measurements, after removing:
-    - 20% fastest frames (may be outliers)
-    - frames when speed is close to zero (person is out of frame): 0.2 m/frame, or 50 px/frame
-    - frames when hip and knee angle below 45° (imprecise coordinates when person is crouching)
-
-    INPUTS:
-    - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-    - keypoints_names: list. The list of marker names
-    - fastest_frames_to_remove_percent: float
-    - close_to_zero_speed: float (sum for all keypoints: about 50 px/frame or 0.2 m/frame)
-    - large_hip_knee_angles: int
-    - trimmed_extrema_percent
-
-    OUTPUT:
-    - Q_coords_low_speeds_low_angles: pd.DataFrame. The best coordinates for measurements
-    '''
-
-    # Add Hip column if not present
-    n_markers_init = len(keypoints_names)
-    if 'Hip' not in keypoints_names:
-        RHip_df = Q_coords.iloc[:,keypoints_names.index('RHip')*3:keypoints_names.index('RHip')*3+3]
-        LHip_df = Q_coords.iloc[:,keypoints_names.index('LHip')*3:keypoints_names.index('RHip')*3+3]
-        Hip_df = RHip_df.add(LHip_df, fill_value=0) /2
-        Hip_df.columns = [col+ str(int(Q_coords.columns[-1][1:])+1) for col in ['X','Y','Z']]
-        keypoints_names += ['Hip']
-        Q_coords = pd.concat([Q_coords, Hip_df], axis=1)
-    n_markers = len(keypoints_names)
-
-    # Using 80% slowest frames
-    sum_speeds = pd.Series(np.nansum([np.linalg.norm(Q_coords.iloc[:,kpt:kpt+3].diff(), axis=1) for kpt in range(n_markers)], axis=0))
-    sum_speeds = sum_speeds[sum_speeds>close_to_zero_speed] # Removing when speeds close to zero (out of frame)
-    min_speed_indices = sum_speeds.abs().nsmallest(int(len(sum_speeds) * (1-fastest_frames_to_remove_percent))).index
-    Q_coords_low_speeds = Q_coords.iloc[min_speed_indices].reset_index(drop=True)
-
-    # Only keep frames with hip and knee flexion angles below 45%
-    # (if more than 50 of them, else take 50 smallest values)
-    ang_mean = mean_angles(Q_coords_low_speeds, keypoints_names, ang_to_consider = ['right knee', 'left knee', 'right hip', 'left hip'])
-    Q_coords_low_speeds_low_angles = Q_coords_low_speeds[ang_mean < large_hip_knee_angles]
-    if len(Q_coords_low_speeds_low_angles) < 50:
-        Q_coords_low_speeds_low_angles = Q_coords_low_speeds.iloc[pd.Series(ang_mean).nsmallest(50).index]
-
-    if n_markers_init < n_markers:
-        Q_coords_low_speeds_low_angles = Q_coords_low_speeds_low_angles.iloc[:,:-3]
-
-    return Q_coords_low_speeds_low_angles
-
-
-def compute_height(Q_coords, keypoints_names, fastest_frames_to_remove_percent=0.1, close_to_zero_speed=50, large_hip_knee_angles=45, trimmed_extrema_percent=0.5):
-    '''
-    Compute the height of the person from the trc data.
-
-    INPUTS:
-    - Q_coords: pd.DataFrame. The XYZ coordinates of each marker
-    - keypoints_names: list. The list of marker names
-    - fastest_frames_to_remove_percent: float. Frames with high speed are considered as outliers
-    - close_to_zero_speed: float. Sum for all keypoints: about 50 px/frame or 0.2 m/frame
-    - large_hip_knee_angles5: float. Hip and knee angles below this value are considered as imprecise
-    - trimmed_extrema_percent: float. Proportion of the most extreme segment values to remove before calculating their mean)
-
-    OUTPUT:
-    - height: float. The estimated height of the person
-    '''
-
-    # Retrieve most reliable coordinates
-    Q_coords_low_speeds_low_angles = best_coords_for_measurements(Q_coords, keypoints_names,
-        fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed, large_hip_knee_angles=large_hip_knee_angles)
-    Q_coords_low_speeds_low_angles.columns = np.array([[m]*3 for m in keypoints_names]).flatten()
-
-    # Add MidShoulder column
-    df_MidShoulder = pd.DataFrame((Q_coords_low_speeds_low_angles['RShoulder'].values + Q_coords_low_speeds_low_angles['LShoulder'].values) /2)
-    df_MidShoulder.columns = ['MidShoulder']*3
-    Q_coords_low_speeds_low_angles = pd.concat((Q_coords_low_speeds_low_angles.reset_index(drop=True), df_MidShoulder), axis=1)
-
-    # Automatically compute the height of the person
-    pairs_up_to_shoulders = [['RHeel', 'RAnkle'], ['RAnkle', 'RKnee'], ['RKnee', 'RHip'], ['RHip', 'RShoulder'],
-                             ['LHeel', 'LAnkle'], ['LAnkle', 'LKnee'], ['LKnee', 'LHip'], ['LHip', 'LShoulder']]
-    try:
-        rfoot, rshank, rfemur, rback, lfoot, lshank, lfemur, lback = [euclidean_distance(Q_coords_low_speeds_low_angles[pair[0]],Q_coords_low_speeds_low_angles[pair[1]]) for pair in pairs_up_to_shoulders]
-    except:
-        raise ValueError('At least one of the following markers is missing for computing the height of the person:\
-            RHeel, RAnkle, RKnee, RHip, RShoulder, LHeel, LAnkle, LKnee, LHip, LShoulder.\
-            Make sure that the person is entirely visible, or use a calibration file instead, or set "to_meters=false".')
-    if 'Head' in keypoints_names:
-        head = euclidean_distance(Q_coords_low_speeds_low_angles['MidShoulder'], Q_coords_low_speeds_low_angles['Head'])
-    else:
-        head = euclidean_distance(Q_coords_low_speeds_low_angles['MidShoulder'], Q_coords_low_speeds_low_angles['Nose'])*1.33
-    heights = (rfoot + lfoot)/2 + (rshank + lshank)/2 + (rfemur + lfemur)/2 + (rback + lback)/2 + head
-
-    # Remove the 20% most extreme values
-    height = trimmed_mean(heights, trimmed_extrema_percent=trimmed_extrema_percent)
-
-    return height
-
-
 def convert_px_to_meters(Q_coords_kpt, person_height_m, height_px, cx, cy, floor_angle):
     '''
     Convert pixel coordinates to meters.
@@ -1373,6 +927,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     mode = config_dict.get('pose').get('mode')
     det_frequency = config_dict.get('pose').get('det_frequency')
     tracking_mode = config_dict.get('pose').get('tracking_mode')
+    if tracking_mode == 'deepsort':
+        deepsort_params = config_dict.get('pose').get('deepsort_params')
+        try:
+            deepsort_params = ast.literal_eval(deepsort_params)
+        except: # if within single quotes instead of double quotes when run with sports2d --mode """{dictionary}"""
+            deepsort_params = deepsort_params.strip("'").replace('\n', '').replace(" ", "").replace(",", '", "').replace(":", '":"').replace("{", '{"').replace("}", '"}').replace('":"/',':/').replace('":"\\',':\\')
+            deepsort_params = re.sub(r'"\[([^"]+)",\s?"([^"]+)\]"', r'[\1,\2]', deepsort_params) # changes "[640", "640]" to [640,640]
+            deepsort_params = json.loads(deepsort_params)
+        deepsort_tracker = DeepSort(**deepsort_params)
+        deepsort_tracker.tracker.tracks.clear()
+    backend = config_dict.get('pose').get('backend')
+    device = config_dict.get('pose').get('device')
 
     # Pixel to meters conversion
     to_meters = config_dict.get('px_to_meters_conversion').get('to_meters')
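When tracking_mode is 'deepsort', the configured parameter string is parsed as a Python literal first; the quote-rewriting plus json.loads path is only a fallback for the single-quoted form the CLI passes in triple quotes. The keys below are standard deep_sort_realtime constructor arguments, shown as an illustrative (not prescribed) choice:

# --- illustration, not part of the diff ---
import ast
from deep_sort_realtime.deepsort_tracker import DeepSort

raw = "{'max_age': 30, 'n_init': 3, 'embedder_gpu': True}"   # e.g. from Config.toml
deepsort_params = ast.literal_eval(raw)   # happy path; malformed strings hit the except branch
deepsort_tracker = DeepSort(**deepsort_params)
deepsort_tracker.tracker.tracks.clear()   # start without stale tracks, as in the hunk above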
@@ -1406,6 +972,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     fontSize = config_dict.get('angles').get('fontSize')
     thickness = 1 if fontSize < 0.8 else 2
     flip_left_right = config_dict.get('angles').get('flip_left_right')
+    correct_segment_angles_with_floor_angle = config_dict.get('angles').get('correct_segment_angles_with_floor_angle')
 
     # Post-processing settings
     interpolate = config_dict.get('post-processing').get('interpolate')
@@ -1464,9 +1031,52 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         cv2.namedWindow(f'{video_file} Sports2D', cv2.WINDOW_NORMAL + cv2.WINDOW_KEEPRATIO)
         cv2.setWindowProperty(f'{video_file} Sports2D', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_FULLSCREEN)
 
+    # Select the appropriate model based on the model_type
+    if pose_model.upper() in ('HALPE_26', 'BODY_WITH_FEET'):
+        model_name = 'HALPE_26'
+        ModelClass = BodyWithFeet # 26 keypoints(halpe26)
+        logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
+    elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY', 'WHOLE_BODY_WRIST'):
+        model_name = 'COCO_133'
+        ModelClass = Wholebody
+        logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
+    elif pose_model.upper() in ('COCO_17', 'BODY'):
+        model_name = 'COCO_17'
+        ModelClass = Body
+        logging.info(f"Using COCO_17 model (body) for pose estimation.")
+    else:
+        raise ValueError(f"Invalid model_type: {model_name}. Must be 'HALPE_26', 'COCO_133', or 'COCO_17'. Use another network (MMPose, DeepLabCut, OpenPose, AlphaPose, BlazePose...) and convert the output files if you need another model. See documentation.")
+    pose_model_name = pose_model
+    pose_model = eval(model_name)
+
+    # Manually select the models if mode is a dictionary rather than 'lightweight', 'balanced', or 'performance'
+    if not mode in ['lightweight', 'balanced', 'performance']:
+        try:
+            try:
+                mode = ast.literal_eval(mode)
+            except: # if within single quotes instead of double quotes when run with sports2d --mode """{dictionary}"""
+                mode = mode.strip("'").replace('\n', '').replace(" ", "").replace(",", '", "').replace(":", '":"').replace("{", '{"').replace("}", '"}').replace('":"/',':/').replace('":"\\',':\\')
+                mode = re.sub(r'"\[([^"]+)",\s?"([^"]+)\]"', r'[\1,\2]', mode) # changes "[640", "640]" to [640,640]
+                mode = json.loads(mode)
+            det_class = mode.get('det_class')
+            det = mode.get('det_model')
+            det_input_size = mode.get('det_input_size')
+            pose_class = mode.get('pose_class')
+            pose = mode.get('pose_model')
+            pose_input_size = mode.get('pose_input_size')
+
+            ModelClass = partial(Custom,
+                                 det_class=det_class, det=det, det_input_size=det_input_size,
+                                 pose_class=pose_class, pose=pose, pose_input_size=pose_input_size,
+                                 backend=backend, device=device)
+
+        except (json.JSONDecodeError, TypeError):
+            logging.warning("\nInvalid mode. Must be 'lightweight', 'balanced', 'performance', or '''{dictionary}''' of parameters within triple quotes. Make sure input_sizes are within square brackets.")
+            logging.warning('Using the default "balanced" mode.')
+            mode = 'balanced'
+
 
     # Skip pose estimation or set it up:
-    model = eval(pose_model)
     if load_trc:
         if not '_px' in str(load_trc):
            logging.error(f'\n{load_trc} file needs to be in px, not in meters.')
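The same literal-then-JSON parsing is applied to mode, so a fully custom detector/pose pair can be passed as a dictionary string instead of 'lightweight', 'balanced', or 'performance'. A sketch of the expected shape, using exactly the keys read above; class names and URLs are placeholders, not endorsed checkpoints:

# --- illustration, not part of the diff ---
mode = """{'det_class':'YOLOX',
           'det_model':'https://example.com/yolox_m.zip',
           'det_input_size':[640, 640],
           'pose_class':'RTMPose',
           'pose_model':'https://example.com/rtmpose_m.zip',
           'pose_input_size':[192, 256]}"""
# ast.literal_eval(mode) succeeds on this form; rtmlib's Custom is then built via
# functools.partial with these det_*/pose_* arguments plus backend and device.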
@@ -1475,26 +1085,48 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         Q_coords, _, _, keypoints_names, _ = read_trc(load_trc)
         keypoints_ids = [i for i in range(len(keypoints_names))]
         keypoints_all, scores_all = load_pose_file(Q_coords)
-        for pre, _, node in RenderTree(
+        for pre, _, node in RenderTree(model_name):
             if node.name in keypoints_names:
                 node.id = keypoints_names.index(node.name)
 
     else:
         # Retrieve keypoint names from model
-        keypoints_ids = [node.id for _, _, node in RenderTree(
-        keypoints_names = [node.name for _, _, node in RenderTree(
-
-        tracking_rtmlib = True if (tracking_mode == 'rtmlib' and multiperson) else False
-        pose_tracker = setup_pose_tracker(det_frequency, mode, tracking_rtmlib)
-        logging.info(f'\nPose tracking set up for BodyWithFeet model in {mode} mode.')
-        logging.info(f'Persons are detected every {det_frequency} frames and tracked inbetween. Multi-person is {"" if multiperson else "not "}selected.')
-        logging.info(f"Parameters: {keypoint_likelihood_threshold=}, {average_likelihood_threshold=}, {keypoint_number_threshold=}")
+        keypoints_ids = [node.id for _, _, node in RenderTree(pose_model) if node.id!=None]
+        keypoints_names = [node.name for _, _, node in RenderTree(pose_model) if node.id!=None]
 
-
-
-
-
-
+    # Set up pose tracker
+    try:
+        pose_tracker = setup_pose_tracker(ModelClass, det_frequency, mode, False, backend, device)
+    except:
+        logging.error('Error: Pose estimation failed. Check in Config.toml that pose_model and mode are valid.')
+        raise ValueError('Error: Pose estimation failed. Check in Config.toml that pose_model and mode are valid.')
+
+    if tracking_mode not in ['deepsort', 'sports2d']:
+        logging.warning(f"Tracking mode {tracking_mode} not recognized. Using sports2d method.")
+        tracking_mode = 'sports2d'
+    logging.info(f'\nPose tracking set up for "{pose_model_name}" model.')
+    logging.info(f'Mode: {mode}.\n')
+    logging.info(f'Persons are detected every {det_frequency} frames and tracked inbetween. Multi-person is {"" if multiperson else "not "}selected. Tracking is done with {tracking_mode}.')
+    if tracking_mode == 'deepsort': logging.info(f'Deepsort parameters: {deepsort_params}.')
+    logging.info(f"{keypoint_likelihood_threshold=}, {average_likelihood_threshold=}, {keypoint_number_threshold=}")
+
+    if flip_left_right:
+        try:
+            Ltoe_idx = keypoints_ids[keypoints_names.index('LBigToe')]
+            LHeel_idx = keypoints_ids[keypoints_names.index('LHeel')]
+            Rtoe_idx = keypoints_ids[keypoints_names.index('RBigToe')]
+            RHeel_idx = keypoints_ids[keypoints_names.index('RHeel')]
+            L_R_direction_idx = [Ltoe_idx, LHeel_idx, Rtoe_idx, RHeel_idx]
+        except ValueError:
+            logging.warning(f"Missing 'LBigToe', 'LHeel', 'RBigToe', 'RHeel' keypoints. flip_left_right will be set to False")
+            flip_left_right = False
+
+    if calculate_angles:
+        for ang_name in angle_names:
+            ang_params = angle_dict.get(ang_name)
+            kpts = ang_params[0]
+            if any(item not in keypoints_names+['Neck', 'Hip'] for item in kpts):
+                logging.warning(f"Skipping {ang_name} angle computation because at least one of the following keypoints is not provided by the model: {ang_params[0]}.")
 
 
     # Process video or webcam feed
@@ -1513,22 +1145,22 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     for frame_nb in frame_iterator:
         start_time = datetime.now()
         success, frame = cap.read()
+        frame_count += 1
 
         # If frame not grabbed
         if not success:
-            logging.warning(f"Failed to grab frame {frame_count}.")
+            logging.warning(f"Failed to grab frame {frame_count-1}.")
             if save_pose:
                 all_frames_X.append([])
                 all_frames_Y.append([])
                 all_frames_scores.append([])
             if save_angles:
                 all_frames_angles.append([])
-            frame_count += 1
             continue
         else:
             cv2.putText(frame, f"Press 'q' to quit", (cam_width-int(400*fontSize), cam_height-20), cv2.FONT_HERSHEY_SIMPLEX, fontSize+0.2, (255,255,255), thickness+1, cv2.LINE_AA)
             cv2.putText(frame, f"Press 'q' to quit", (cam_width-int(400*fontSize), cam_height-20), cv2.FONT_HERSHEY_SIMPLEX, fontSize+0.2, (0,0,255), thickness, cv2.LINE_AA)
-
+
 
         # Retrieve pose or Estimate pose and track people
         if load_trc:
@@ -1539,13 +1171,14 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         else:
             # Detect poses
             keypoints, scores = pose_tracker(frame)
-
-
-
-
+
+            # Track poses across frames
+            if tracking_mode == 'deepsort':
+                keypoints, scores = sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count)
+            if tracking_mode == 'sports2d':
                 if 'prev_keypoints' not in locals(): prev_keypoints = keypoints
                 prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores=scores)
-
+
 
         # Process coordinates and compute angles
         valid_X, valid_Y, valid_scores = [], [], []
@@ -1568,9 +1201,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                 person_X = np.full_like(person_X, np.nan)
                 person_Y = np.full_like(person_Y, np.nan)
                 person_scores = np.full_like(person_scores, np.nan)
-            valid_X.append(person_X)
-            valid_Y.append(person_Y)
-            valid_scores.append(person_scores)
 
 
             # Compute angles
@@ -1580,24 +1210,40 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                    person_X_flipped = flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids)
                 else:
                     person_X_flipped = person_X.copy()
-                valid_X_flipped.append(person_X_flipped)
 
                 # Compute angles
                 person_angles = []
+                # Add Neck and Hip if not provided
+                new_keypoints_names, new_keypoints_ids = keypoints_names.copy(), keypoints_ids.copy()
+                for kpt in ['Neck', 'Hip']:
+                    if kpt not in new_keypoints_names:
+                        person_X_flipped, person_Y, person_scores = add_neck_hip_coords(kpt, person_X_flipped, person_Y, person_scores, new_keypoints_ids, new_keypoints_names)
+                        person_X, _, _ = add_neck_hip_coords(kpt, person_X, person_Y, person_scores, new_keypoints_ids, new_keypoints_names)
+                        new_keypoints_names.append(kpt)
+                        new_keypoints_ids.append(len(person_X_flipped)-1)
+
                 for ang_name in angle_names:
-
+                    ang_params = angle_dict.get(ang_name)
+                    kpts = ang_params[0]
+                    if not any(item not in new_keypoints_names for item in kpts):
+                        ang = compute_angle(ang_name, person_X_flipped, person_Y, angle_dict, new_keypoints_ids, new_keypoints_names)
+                    else:
+                        ang = np.nan
                     person_angles.append(ang)
                 valid_angles.append(person_angles)
-
+                valid_X_flipped.append(person_X_flipped)
+                valid_X.append(person_X)
+                valid_Y.append(person_Y)
+                valid_scores.append(person_scores)
 
             # Draw keypoints and skeleton
             if show_realtime_results or save_vid or save_img:
                 img = frame.copy()
                 img = draw_bounding_box(img, valid_X, valid_Y, colors=colors, fontSize=fontSize, thickness=thickness)
-                img = draw_keypts(img, valid_X, valid_Y,
-                img = draw_skel(img, valid_X, valid_Y,
+                img = draw_keypts(img, valid_X, valid_Y, valid_scores, cmap_str='RdYlGn')
+                img = draw_skel(img, valid_X, valid_Y, pose_model)
                 if calculate_angles:
-                    img = draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped,
+                    img = draw_angles(img, valid_X, valid_Y, valid_angles, valid_X_flipped, new_keypoints_ids, new_keypoints_names, angle_names, display_angle_values_on=display_angle_values_on, colors=colors, fontSize=fontSize, thickness=thickness)
 
                 if show_realtime_results:
                     cv2.imshow(f'{video_file} Sports2D', img)
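add_neck_hip_coords itself is not shown in this diff (it plausibly lives in Sports2D.Utilities.common). Elsewhere in this file the same mid-points are built as plain averages (the removed compute_height used (RShoulder + LShoulder)/2, and best_coords_for_measurements averaged RHip and LHip), so a sketch of that convention, under the assumption that the helper does the equivalent:

# --- illustration, not part of the diff ---
import numpy as np

def mid_point_keypoint(x, y, scores, idx_a, idx_b):
    # Hypothetical stand-in for add_neck_hip_coords: append the average of two
    # keypoints (e.g. shoulders -> Neck, hips -> Hip) as a new keypoint.
    return (np.append(x, (x[idx_a] + x[idx_b]) / 2),
            np.append(y, (y[idx_a] + y[idx_b]) / 2),
            np.append(scores, (scores[idx_a] + scores[idx_b]) / 2))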
@@ -1618,6 +1264,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             elapsed_time = (datetime.now() - start_time).total_seconds()
             frame_processing_times.append(elapsed_time)
 
+
+    # End of the video is reached
     cap.release()
     logging.info(f"Video processing completed.")
     if save_vid:
@@ -1730,9 +1378,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         if show_plots and not to_meters:
             pose_plots(trc_data_unfiltered_i, trc_data_i, i)
 
+
     # Convert px to meters
     if to_meters:
         logging.info('\nConverting pose to meters:')
+        if calib_on_person_id>=len(trc_data):
+            logging.warning(f'Person #{calib_on_person_id} not detected in the video. Calibrating on person #0 instead.')
+            calib_on_person_id = 0
         if calib_file:
             logging.info(f'Using calibration file to convert coordinates in meters: {calib_file}.')
             calib_params_dict = retrieve_calib_params(calib_file)
@@ -1748,10 +1400,20 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
             if floor_angle == 'auto' or xy_origin == 'auto':
                 # estimated from the line formed by the toes when they are on the ground (where speed = 0)
-
-
-
-
+                try:
+                    toe_speed_below = 1 # m/s (below which the foot is considered to be stationary)
+                    px_per_m = height_px/person_height_m
+                    toe_speed_below_px_frame = toe_speed_below * px_per_m / fps
+                    try:
+                        floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LBigToe', 'RBigToe'], toe_speed_below=toe_speed_below_px_frame)
+                    except: # no feet points
+                        floor_angle_estim, xy_origin_estim = compute_floor_line(trc_data[calib_on_person_id], keypoint_names=['LAnkle', 'RAnkle'], toe_speed_below=toe_speed_below_px_frame)
+                        xy_origin_estim[0] = xy_origin_estim[0]-0.13
+                        logging.warning(f'The RBigToe and LBigToe are missing from your model. Using ankles - 13 cm to compute the floor line.')
+                    except:
+                        floor_angle_estim = 0
+                        xy_origin_estim = cam_width/2, cam_height/2
+                        logging.warning(f'Could not estimate the floor angle and xy_origin. Make sure that the full body is visible. Using floor angle = 0° and xy_origin = [{cam_width/2}, {cam_height/2}].')
             if not floor_angle == 'auto':
                 floor_angle_estim = floor_angle
             if xy_origin == 'auto':
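The stationary-toe threshold is converted from meters per second to pixels per frame before compute_floor_line is called. Worked through with illustrative numbers: a 1.76 m person spanning 880 px gives px_per_m = 880/1.76 = 500, so at 30 fps the 1 m/s threshold becomes 500/30, about 16.7 px per frame:

# --- illustration, not part of the diff ---
toe_speed_below = 1                        # m/s
person_height_m, height_px = 1.76, 880     # illustrative values
fps = 30
px_per_m = height_px / person_height_m                        # 500.0
toe_speed_below_px_frame = toe_speed_below * px_per_m / fps   # ~16.67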
@@ -1776,7 +1438,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             # Write to trc file
             idx_path = selected_person_id if not multiperson and not calib_file else i
             pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
-            make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i)
+            make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
             logging.info(f'Person {idx_path}: Pose in meters saved to {pose_path_person_m_i.resolve()}.')
 
 
@@ -1887,6 +1549,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             logging.info(f'Filtering with {args}')
             all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter.filter1d, axis=0, args=filter_options)
 
+        # Remove columns with all nan values
+        all_frames_angles_person_filt.dropna(axis=1, how='all', inplace=True)
+        all_frames_angles_person = all_frames_angles_person[all_frames_angles_person_filt.columns]
+
+        # Add floor_angle_estim to segment angles
+        if correct_segment_angles_with_floor_angle and to_meters:
+            logging.info(f'Correcting segment angles by removing the {round(np.degrees(floor_angle_estim),2)}° floor angle.')
+            for ang_name in all_frames_angles_person_filt.columns:
+                if 'horizontal' in angle_dict[ang_name][1]:
+                    all_frames_angles_person_filt[ang_name] -= np.degrees(floor_angle_estim)
+
         # Build mot file
         angle_data = make_mot_with_angles(all_frames_angles_person_filt, all_frames_time, str(angles_path_person))
         logging.info(f'Angles saved to {angles_path_person.resolve()}.')
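The new correction only touches angles whose reference (angle_dict[ang_name][1]) is 'horizontal', i.e. segment angles, and leaves joint angles alone. With illustrative numbers: if compute_floor_line estimates a 0.035 rad (about 2°) slope, a trunk angle recorded as 85° is written out as roughly 83°:

# --- illustration, not part of the diff ---
import numpy as np

floor_angle_estim = 0.035                     # rad, as returned by compute_floor_line
trunk_angle = 85.0                            # deg, a 'horizontal'-referenced segment angle
trunk_angle -= np.degrees(floor_angle_estim)  # ~82.99 deg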