sports2d 0.8.25__py3-none-any.whl → 0.8.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Content/huggingface_demo.png +0 -0
- Sports2D/Demo/Config_demo.toml +9 -6
- Sports2D/Sports2D.py +2 -0
- Sports2D/process.py +192 -303
- {sports2d-0.8.25.dist-info → sports2d-0.8.27.dist-info}/METADATA +34 -21
- {sports2d-0.8.25.dist-info → sports2d-0.8.27.dist-info}/RECORD +10 -10
- {sports2d-0.8.25.dist-info → sports2d-0.8.27.dist-info}/WHEEL +1 -1
- Sports2D/Sports2D.ipynb +0 -3114
- {sports2d-0.8.25.dist-info → sports2d-0.8.27.dist-info}/entry_points.txt +0 -0
- {sports2d-0.8.25.dist-info → sports2d-0.8.27.dist-info}/licenses/LICENSE +0 -0
- {sports2d-0.8.25.dist-info → sports2d-0.8.27.dist-info}/top_level.txt +0 -0
Sports2D/process.py
CHANGED
@@ -75,29 +75,32 @@ import matplotlib as mpl
 import matplotlib.pyplot as plt
 from matplotlib.widgets import Slider, Button
 from matplotlib import patheffects
-
-from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Hand, Custom
 from rtmlib.tools.object_detection.post_processings import nms

 from Sports2D.Utilities.common import *
 from Pose2Sim.common import *
 from Pose2Sim.skeletons import *
 from Pose2Sim.calibration import toml_write
+from Pose2Sim.poseEstimation import setup_model_class_mode, setup_backend_device, setup_pose_tracker
 from Pose2Sim.triangulation import indices_of_first_last_non_nan_chunks
 from Pose2Sim.personAssociation import *
 from Pose2Sim.filtering import *

-
-
+os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
+np.set_printoptions(legacy='1.21') # otherwise prints np.float64(3.0) rather than 3.0
+import warnings # Silence numpy and CoreML warnings
 warnings.filterwarnings("ignore", category=RuntimeWarning, message="Mean of empty slice")
 warnings.filterwarnings("ignore", category=RuntimeWarning, message="All-NaN slice encountered")
 warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")
+warnings.filterwarnings("ignore", message=".*Input.*has a dynamic shape.*but the runtime shape.*has zero elements.*")
+

 # Not safe, but to be used until OpenMMLab/RTMlib's SSL certificates are updated
 import ssl
 ssl._create_default_https_context = ssl._create_unverified_context


+CORRECTION_2D_TO_3D = 1.063 # Corrective factor for height calculation: segments do not perfectly lie in the 2D plane and look shorter than in 3D
 DEFAULT_MASS = 70
 DEFAULT_HEIGHT = 1.7

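Note on the two new module-level tweaks: `KMP_DUPLICATE_LIB_OK` is the usual workaround for the duplicate-OpenMP-runtime abort ("OMP: Error #15") that mixed PyTorch/OpenVINO stacks can trigger, and the legacy print option keeps NumPy 2.x scalar reprs out of logs, as the inline comment says. A minimal standalone illustration of the latter (not Sports2D code):

import numpy as np

print([np.float64(3.0)])             # NumPy >= 2.0 default repr: [np.float64(3.0)]
np.set_printoptions(legacy='1.21')
print([np.float64(3.0)])             # [3.0]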
@@ -206,165 +209,6 @@ def setup_video(video_file_path, vid_output_path, save_vid):
     return cap, out_vid, cam_width, cam_height, fps


-def setup_model_class_mode(pose_model, mode, config_dict={}):
-    '''
-    Set up the pose model class and mode for the pose tracker.
-    '''
-
-    if pose_model.upper() in ('HALPE_26', 'BODY_WITH_FEET'):
-        model_name = 'HALPE_26'
-        ModelClass = BodyWithFeet # 26 keypoints(halpe26)
-        logging.info(f"Using HALPE_26 model (body and feet) for pose estimation in {mode} mode.")
-    elif pose_model.upper() in ('COCO_133', 'WHOLE_BODY', 'WHOLE_BODY_WRIST'):
-        model_name = 'COCO_133'
-        ModelClass = Wholebody
-        logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation in {mode} mode.")
-    elif pose_model.upper() in ('COCO_17', 'BODY'):
-        model_name = 'COCO_17'
-        ModelClass = Body
-        logging.info(f"Using COCO_17 model (body) for pose estimation in {mode} mode.")
-    elif pose_model.upper() =='HAND':
-        model_name = 'HAND_21'
-        ModelClass = Hand
-        logging.info(f"Using HAND_21 model for pose estimation in {mode} mode.")
-    elif pose_model.upper() =='FACE':
-        model_name = 'FACE_106'
-        logging.info(f"Using FACE_106 model for pose estimation in {mode} mode.")
-    elif pose_model.upper() =='ANIMAL':
-        model_name = 'ANIMAL2D_17'
-        logging.info(f"Using ANIMAL2D_17 model for pose estimation in {mode} mode.")
-    else:
-        model_name = pose_model.upper()
-        logging.info(f"Using model {model_name} for pose estimation in {mode} mode.")
-    try:
-        pose_model = eval(model_name)
-    except:
-        try: # from Config.toml
-            from anytree.importer import DictImporter
-            model_name = pose_model.upper()
-            pose_model = DictImporter().import_(config_dict.get('pose').get(pose_model))
-            if pose_model.id == 'None':
-                pose_model.id = None
-            logging.info(f"Using model {model_name} for pose estimation.")
-        except:
-            raise NameError(f'{pose_model} not found in skeletons.py nor in Config.toml')
-
-    # Manually select the models if mode is a dictionary rather than 'lightweight', 'balanced', or 'performance'
-    if not mode in ['lightweight', 'balanced', 'performance'] or 'ModelClass' not in locals():
-        try:
-            from functools import partial
-            try:
-                mode = ast.literal_eval(mode)
-            except: # if within single quotes instead of double quotes when run with sports2d --mode """{dictionary}"""
-                mode = mode.strip("'").replace('\n', '').replace(" ", "").replace(",", '", "').replace(":", '":"').replace("{", '{"').replace("}", '"}').replace('":"/',':/').replace('":"\\',':\\')
-                mode = re.sub(r'"\[([^"]+)",\s?"([^"]+)\]"', r'[\1,\2]', mode) # changes "[640", "640]" to [640,640]
-                mode = json.loads(mode)
-            det_class = mode.get('det_class')
-            det = mode.get('det_model')
-            det_input_size = mode.get('det_input_size')
-            pose_class = mode.get('pose_class')
-            pose = mode.get('pose_model')
-            pose_input_size = mode.get('pose_input_size')
-
-            ModelClass = partial(Custom,
-                        det_class=det_class, det=det, det_input_size=det_input_size,
-                        pose_class=pose_class, pose=pose, pose_input_size=pose_input_size)
-            logging.info(f"Using model {model_name} with the following custom parameters: {mode}.")
-
-            if pose_class == 'RTMO' and model_name != 'COCO_17':
-                logging.warning("RTMO currently only supports 'Body' pose_model. Switching to 'Body'.")
-                pose_model = eval('COCO_17')
-
-        except (json.JSONDecodeError, TypeError):
-            logging.warning("Invalid mode. Must be 'lightweight', 'balanced', 'performance', or '''{dictionary}''' of parameters within triple quotes. Make sure input_sizes are within square brackets.")
-            logging.warning('Using the default "balanced" mode.')
-            mode = 'balanced'
-
-    return pose_model, ModelClass, mode
-
-
-def setup_backend_device(backend='auto', device='auto'):
-    '''
-    Set up the backend and device for the pose tracker based on the availability of hardware acceleration.
-    TensorRT is not supported by RTMLib yet: https://github.com/Tau-J/rtmlib/issues/12
-
-    If device and backend are not specified, they are automatically set up in the following order of priority:
-    1. GPU with CUDA and ONNXRuntime backend (if CUDAExecutionProvider is available)
-    2. GPU with ROCm and ONNXRuntime backend (if ROCMExecutionProvider is available, for AMD GPUs)
-    3. GPU with MPS or CoreML and ONNXRuntime backend (for macOS systems)
-    4. CPU with OpenVINO backend (default fallback)
-    '''
-
-    if device!='auto' and backend!='auto':
-        device = device.lower()
-        backend = backend.lower()
-
-    if device=='auto' or backend=='auto':
-        if device=='auto' and backend!='auto' or device!='auto' and backend=='auto':
-            logging.warning(f"If you set device or backend to 'auto', you must set the other to 'auto' as well. Both device and backend will be determined automatically.")
-
-        try:
-            import torch
-            import onnxruntime as ort
-            if torch.cuda.is_available() == True and 'CUDAExecutionProvider' in ort.get_available_providers():
-                device = 'cuda'
-                backend = 'onnxruntime'
-                logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
-            elif torch.cuda.is_available() == True and 'ROCMExecutionProvider' in ort.get_available_providers():
-                device = 'rocm'
-                backend = 'onnxruntime'
-                logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
-            else:
-                raise
-        except:
-            try:
-                import onnxruntime as ort
-                if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
-                    device = 'mps'
-                    backend = 'onnxruntime'
-                    logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
-                else:
-                    raise
-            except:
-                device = 'cpu'
-                backend = 'openvino'
-                logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")
-
-    return backend, device
-
-
-def setup_pose_tracker(ModelClass, det_frequency, mode, tracking, backend, device):
-    '''
-    Set up the RTMLib pose tracker with the appropriate model and backend.
-    If CUDA is available, use it with ONNXRuntime backend; else use CPU with openvino
-
-    INPUTS:
-    - ModelClass: class. The RTMlib model class to use for pose detection (Body, BodyWithFeet, Wholebody)
-    - det_frequency: int. The frequency of pose detection (every N frames)
-    - mode: str. The mode of the pose tracker ('lightweight', 'balanced', 'performance')
-    - tracking: bool. Whether to track persons across frames with RTMlib tracker
-    - backend: str. The backend to use for pose detection (onnxruntime, openvino, opencv)
-    - device: str. The device to use for pose detection (cpu, cuda, rocm, mps)
-
-    OUTPUTS:
-    - pose_tracker: PoseTracker. The initialized pose tracker object
-    '''
-
-    backend, device = setup_backend_device(backend=backend, device=device)
-
-    # Initialize the pose tracker with Halpe26 model
-    pose_tracker = PoseTracker(
-        ModelClass,
-        det_frequency=det_frequency,
-        mode=mode,
-        backend=backend,
-        device=device,
-        tracking=tracking,
-        to_openpose=False)
-
-    return pose_tracker
-
-
 def flip_left_right_direction(person_X, L_R_direction_idx, keypoints_names, keypoints_ids):
     '''
     Flip the points to the right or left for more consistent angle calculation
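These three helpers were not dropped: the import hunk above now pulls setup_model_class_mode, setup_backend_device, and setup_pose_tracker from Pose2Sim.poseEstimation, so the logic lives upstream in Pose2Sim instead of being duplicated here. Assuming the moved functions keep the signatures shown in the deleted code, the call pattern is unchanged — a minimal sketch with hypothetical argument values:

from Pose2Sim.poseEstimation import setup_model_class_mode, setup_backend_device, setup_pose_tracker

# Hypothetical values; arguments follow the removed definitions above
pose_model, ModelClass, mode = setup_model_class_mode('HALPE_26', 'balanced', config_dict={})
backend, device = setup_backend_device(backend='auto', device='auto')
pose_tracker = setup_pose_tracker(ModelClass, det_frequency=4, mode=mode,
                                  tracking=False, backend=backend, device=device)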
@@ -798,7 +642,6 @@ def make_mot_with_angles(angles, time, mot_path):
 def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
     '''
     Displays trc filtered and unfiltered data for comparison
-    ⚠ Often crashes on the third window...

     INPUTS:
     - trc_data_unfiltered: pd.DataFrame. The unfiltered trc data
@@ -809,23 +652,26 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
     OUTPUT:
     - matplotlib window with tabbed figures for each keypoint
     '''
-
+
     os_name = platform.system()
-    if os_name == 'Windows':
-        mpl.use('qt5agg') # windows
     mpl.rc('figure', max_open_warning=0)
+    if show:
+        if os_name == 'Windows':
+            mpl.use('qt5agg') # windows
+        pw = plotWindow()
+        pw.MainWindow.setWindowTitle('Person'+ str(person_id) + ' coordinates')
+    else:
+        mpl.use('Agg') # Otherwise fails on Hugging-face
+        figures_list = []

     keypoints_names = trc_data.columns[1::3]
-
-    pw = plotWindow()
-    pw.MainWindow.setWindowTitle('Person'+ str(person_id) + ' coordinates') # Main title
-
     for id, keypoint in enumerate(keypoints_names):
         f = plt.figure()
-        if os_name == 'Windows':
-            f.canvas.manager.window.setWindowTitle(keypoint + ' Plot')
-        elif os_name == 'Darwin':
-            f.canvas.manager.set_window_title(keypoint + ' Plot')
+        if show:
+            if os_name == 'Windows':
+                f.canvas.manager.window.setWindowTitle(keypoint + ' Plot')
+            elif os_name == 'Darwin':
+                f.canvas.manager.set_window_title(keypoint + ' Plot')

         axX = plt.subplot(211)
         plt.plot(trc_data_unfiltered.iloc[:,0], trc_data_unfiltered.iloc[:,id*3+1], label='unfiltered')
@@ -840,18 +686,21 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
         axY.set_xlabel('Time (seconds)')
         axY.set_ylabel(keypoint+' Y')

-        pw.addPlot(keypoint, f)
+        if show:
+            pw.addPlot(keypoint, f)
+        else:
+            figures_list.append((keypoint, f))

     if show:
         pw.show()
-
-
-
+        return pw
+    else:
+        return figures_list
+

 def angle_plots(angle_data_unfiltered, angle_data, person_id, show=True):
     '''
     Displays angle filtered and unfiltered data for comparison
-    ⚠ Often crashes on the third window...

     INPUTS:
     - angle_data_unfiltered: pd.DataFrame. The unfiltered angle data
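With this refactor, pose_plots only builds a tabbed Qt plotWindow when show=True; with show=False it switches to the Agg backend (headless-safe, per the Hugging Face comment) and returns a plain list of (keypoint, figure) tuples instead. A minimal headless caller, assuming hypothetical trc_data_unfiltered/trc_data DataFrames shaped as the docstring describes:

import matplotlib.pyplot as plt

figures_list = pose_plots(trc_data_unfiltered, trc_data, person_id=0, show=False)
for keypoint, fig in figures_list:
    fig.savefig(f'person00_{keypoint}.png', bbox_inches='tight')  # hypothetical output names
    plt.close(fig)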
@@ -862,21 +711,24 @@ def angle_plots(angle_data_unfiltered, angle_data, person_id, show=True):
     '''

     os_name = platform.system()
-    if os_name == 'Windows':
-        mpl.use('qt5agg') # windows
     mpl.rc('figure', max_open_warning=0)
+    if show:
+        if os_name == 'Windows':
+            mpl.use('qt5agg') # windows
+        pw = plotWindow()
+        pw.MainWindow.setWindowTitle('Person'+ str(person_id) + ' angles')
+    else:
+        mpl.use('Agg') # Otherwise fails on Hugging-face
+        figures_list = []

     angles_names = angle_data.columns[1:]
-
-    pw = plotWindow()
-    pw.MainWindow.setWindowTitle('Person'+ str(person_id) + ' angles') # Main title
-
     for id, angle in enumerate(angles_names):
         f = plt.figure()
-        if os_name == 'Windows':
-            f.canvas.manager.window.setWindowTitle(angle + ' Plot') # windows
-        elif os_name == 'Darwin': # macOS
-            f.canvas.manager.set_window_title(angle + ' Plot') # mac
+        if show:
+            if os_name == 'Windows':
+                f.canvas.manager.window.setWindowTitle(angle + ' Plot') # windows
+            elif os_name == 'Darwin': # macOS
+                f.canvas.manager.set_window_title(angle + ' Plot') # mac

         ax = plt.subplot(111)
         plt.plot(angle_data_unfiltered.iloc[:,0], angle_data_unfiltered.iloc[:,id+1], label='unfiltered')
@@ -886,12 +738,16 @@ def angle_plots(angle_data_unfiltered, angle_data, person_id, show=True):
         ax.set_ylabel(angle+' (°)')
         plt.legend()

-        pw.addPlot(angle, f)
-
+        if show:
+            pw.addPlot(angle, f)
+        else:
+            figures_list.append((angle, f))
+
     if show:
         pw.show()
-
-
+        return pw
+    else:
+        return figures_list


 def get_personIDs_with_highest_scores(all_frames_scores, nb_persons_to_detect):
@@ -1306,7 +1162,7 @@ def compute_floor_line(trc_data, score_data, keypoint_names = ['LBigToe', 'RBigT
         trc_data_kpt_trim = trc_data_kpt.iloc[start:end].reset_index(drop=True)
         score_data_kpt_trim = score_data_kpt.iloc[start:end].reset_index(drop=True)

-        # Compute
+        # Compute euclidean speed
         speeds = np.linalg.norm(trc_data_kpt_trim.diff(), axis=1)

         # Remove speeds with low confidence
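The renamed comment now states what the line computes: trc_data_kpt_trim.diff() yields per-frame (dX, dY) displacements, and the row-wise norm is the Euclidean speed in pixels per frame. A standalone illustration with hypothetical values:

import numpy as np
import pandas as pd

# Hypothetical toe trajectory: X and Y in pixels, one row per frame
trc_data_kpt_trim = pd.DataFrame({'X': [0.0, 3.0, 6.0], 'Y': [0.0, 4.0, 8.0]})
speeds = np.linalg.norm(trc_data_kpt_trim.diff(), axis=1)
print(speeds)  # [nan  5.  5.] -> px/frame; the first frame has no predecessor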
@@ -1450,7 +1306,8 @@ def get_floor_params(floor_angle='auto', xy_origin=['auto'],
     except:
         floor_angle_kin = 0
         xy_origin_kin = cam_width/2, cam_height/2
-
+        gait_direction = 1
+        logging.warning(f'Could not estimate the floor angle, xy_origin, and visible from person {0}. Make sure that the full body is visible. Using floor angle = 0°, xy_origin = [{cam_width/2}, {cam_height/2}] px, and visible_side = right.')

     # Determine final floor angle estimation
     if floor_angle == 'from_calib':
@@ -1578,7 +1435,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

     # Base parameters
     video_dir = Path(config_dict.get('base').get('video_dir'))
-
+
     nb_persons_to_detect = config_dict.get('base').get('nb_persons_to_detect')
     if nb_persons_to_detect != 'all':
         try:
@@ -1825,6 +1682,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     keypoints_names = [node.name for _, _, node in RenderTree(pose_model) if node.id!=None]
     t0 = 0
     tf = (cap.get(cv2.CAP_PROP_FRAME_COUNT)-1) / fps if cap.get(cv2.CAP_PROP_FRAME_COUNT)>0 else float('inf')
+    kpt_id_max = max(keypoints_ids)+1

     # Set up pose tracker
     try:
@@ -1913,60 +1771,64 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             if video_file == "webcam":
                 out_vid.write(frame)

-            [... 54 lines truncated in the diff view: previous pose detection and tracking block ...]
+            try: # Frames with no detection cause errors on MacOS CoreMLExecutionProvider
+                # Detect poses
+                keypoints, scores = pose_tracker(frame)
+
+                # Non maximum suppression (at pose level, not detection, and only using likely keypoints)
+                frame_shape = frame.shape
+                mask_scores = np.mean(scores, axis=1) > 0.2
+
+                likely_keypoints = np.where(mask_scores[:, np.newaxis, np.newaxis], keypoints, np.nan)
+                likely_scores = np.where(mask_scores[:, np.newaxis], scores, np.nan)
+                likely_bboxes = bbox_xyxy_compute(frame_shape, likely_keypoints, padding=0)
+                score_likely_bboxes = np.nanmean(likely_scores, axis=1)
+
+                valid_indices = np.where(~np.isnan(score_likely_bboxes))[0]
+                if len(valid_indices) > 0:
+                    valid_bboxes = likely_bboxes[valid_indices]
+                    valid_scores = score_likely_bboxes[valid_indices]
+                    keep_valid = nms(valid_bboxes, valid_scores, nms_thr=0.45)
+                    keep = valid_indices[keep_valid]
+                else:
+                    keep = []
+                keypoints, scores = likely_keypoints[keep], likely_scores[keep]
+
+                # # Debugging: display detected keypoints on the frame
+                # colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0), (0,0,128), (128,128,0), (128,0,128), (0,128,128)]
+                # bboxes = likely_bboxes[keep]
+                # for person_idx in range(len(keypoints)):
+                # for kpt_idx, kpt in enumerate(keypoints[person_idx]):
+                # if not np.isnan(kpt).any():
+                # cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[person_idx%len(colors)], -1)
+                # if not np.isnan(bboxes[person_idx]).any():
+                # cv2.rectangle(frame, (int(bboxes[person_idx][0]), int(bboxes[person_idx][1])), (int(bboxes[person_idx][2]), int(bboxes[person_idx][3])), colors[person_idx%len(colors)], 1)
+                # cv2.imshow(f'{video_file} Sports2D', frame)
+
+                # Track poses across frames
+                if tracking_mode == 'deepsort':
+                    keypoints, scores = sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count)
+                if tracking_mode == 'sports2d':
+                    if 'prev_keypoints' not in locals(): prev_keypoints = keypoints
+                    prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores=scores, max_dist=max_distance)
+                else:
+                    pass
+
+                # # Debugging: display detected keypoints on the frame
+                # colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (128,0,0), (0,128,0), (0,0,128), (128,128,0), (128,0,128), (0,128,128)]
+                # for person_idx in range(len(keypoints)):
+                # for kpt_idx, kpt in enumerate(keypoints[person_idx]):
+                # if not np.isnan(kpt).any():
+                # cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[person_idx%len(colors)], -1)
+                # # if not np.isnan(bboxes[person_idx]).any():
+                # # cv2.rectangle(frame, (int(bboxes[person_idx][0]), int(bboxes[person_idx][1])), (int(bboxes[person_idx][2]), int(bboxes[person_idx][3])), colors[person_idx%len(colors)], 1)
+                # cv2.imshow(f'{video_file} Sports2D', frame)
+                # # if (cv2.waitKey(1) & 0xFF) == ord('q') or (cv2.waitKey(1) & 0xFF) == 27:
+                # # break
+                # # input()
+            except:
+                keypoints = np.full((1,kpt_id_max,2), fill_value=np.nan)
+                scores = np.full((1,kpt_id_max), fill_value=np.nan)

             # Process coordinates and compute angles
             valid_X, valid_Y, valid_scores = [], [], []
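The new block suppresses duplicate detections at the pose level using rtmlib's nms (imported in the first hunk), applied to bounding boxes recomputed from the likely keypoints. For reference, a minimal sketch of the greedy IoU-based non-maximum suppression that such a function performs — an illustration, not rtmlib's actual implementation:

import numpy as np

def nms_sketch(bboxes, scores, nms_thr=0.45):
    # Greedy IoU NMS: repeatedly keep the highest-scoring box and drop boxes
    # that overlap it too much. bboxes: (N, 4) [x1, y1, x2, y2]; scores: (N,).
    x1, y1, x2, y2 = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]          # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the current best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= nms_thr]   # discard overlapping boxes
    return keep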
@@ -2058,6 +1920,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
                 if (cv2.waitKey(1) & 0xFF) == ord('q') or (cv2.waitKey(1) & 0xFF) == 27:
                     break

+            # # Debugging
+            # img_output_path = img_output_dir / f'{video_file_stem}_frame{frame_nb:06d}.png'
+            # cv2.imwrite(str(img_output_path), img)
+
             all_frames_X.append(np.array(valid_X))
             all_frames_X_flipped.append(np.array(valid_X_flipped))
             all_frames_Y.append(np.array(valid_Y))
@@ -2260,12 +2126,20 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             if not to_meters and (show_plots or save_plots):
                 pw = pose_plots(trc_data_unfiltered_i, trc_data_i, i, show=show_plots)
                 if save_plots:
-                    [... 6 lines truncated in the diff view ...]
+                    if show_plots:
+                        for n, f in enumerate(pw.figure_handles):
+                            dpi = pw.canvases[n].figure.dpi
+                            f.set_size_inches(1280/dpi, 720/dpi)
+                            title = pw.tabs.tabText(n)
+                            plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
+                            f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                    else: # Tabbed plots not used
+                        for title, f in pw:
+                            dpi = f.dpi
+                            f.set_size_inches(1280/dpi, 720/dpi)
+                            plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
+                            f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                            plt.close(f)
                     logging.info(f'Pose plots (px) saved in {plots_output_dir}.')

             all_frames_X_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_Y_person_filt
@@ -2278,8 +2152,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     if to_meters and save_pose:
         logging.info('\nConverting pose to meters:')

-        # Compute height
-        height_px = compute_height(trc_data[0].iloc[:,1:], new_keypoints_names,
+        # Compute height of the first person in pixels
+        height_px = CORRECTION_2D_TO_3D * compute_height(trc_data[0].iloc[:,1:], new_keypoints_names,
                         fastest_frames_to_remove_percent=fastest_frames_to_remove_percent, close_to_zero_speed=close_to_zero_speed_px, large_hip_knee_angles=large_hip_knee_angles, trimmed_extrema_percent=trimmed_extrema_percent)

         # Compute distance from camera to compensate for perspective effects
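To make the new corrective factor concrete: the raw output of compute_height is scaled up by 6.3% because body segments that do not lie parallel to the image plane project shorter than they really are. A hypothetical worked example, including the pinhole focal-length estimate that consumes height_px in the calibration hunk below:

CORRECTION_2D_TO_3D = 1.063

raw_height_px = 600.0                                  # hypothetical compute_height output
height_px = CORRECTION_2D_TO_3D * raw_height_px        # 637.8 px

first_person_height = 1.75                             # m, hypothetical
distance_m = 5.0                                       # m, hypothetical camera distance
f = height_px / first_person_height * distance_m       # ~1822.3 px (pinhole: f = h_px / h_m * Z)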
@@ -2334,41 +2208,40 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         message = get_correction_message(xy_origin)
         logging.info(f'Floor level: {cy:.2f} px (from the top of the image), gait starting at {cx:.2f} px in the {direction_person0} direction for the first person. Corrected using {message}\n')

+        # Prepare calibration data
+        R90z = np.array([[0.0, -1.0, 0.0],
+                         [1.0, 0.0, 0.0],
+                         [0.0, 0.0, 1.0]])
+        R270x = np.array([[1.0, 0.0, 0.0],
+                          [0.0, 0.0, 1.0],
+                          [0.0, -1.0, 0.0]])
+
+        calib_file_path = output_dir / f'{video_file_stem}_Sports2D_calib.toml'
+
+        # name, size, distortions
+        N = [video_file_stem]
+        S = [[cam_width, cam_height]]
+        D = [[0.0, 0.0, 0.0, 0.0]]
+
+        # Intrinsics
+        f = height_px / first_person_height * distance_m
+        cu = cam_width/2
+        cv = cam_height/2
+        K = np.array([[[f, 0.0, cu], [0.0, f, cv], [0.0, 0.0, 1.0]]])
+
+        # Extrinsics
+        Rfloory = np.array([[np.cos(floor_angle_estim), 0.0, np.sin(floor_angle_estim)],
+                            [0.0, 1.0, 0.0],
+                            [-np.sin(floor_angle_estim), 0.0, np.cos(floor_angle_estim)]])
+        R_world = R90z @ Rfloory @ R270x
+        T_world = R90z @ np.array([-(cx-cu)/f*distance_m, -distance_m, (cy-cv)/f*distance_m])
+
+        R_cam, T_cam = world_to_camera_persp(R_world, T_world)
+        Tvec_cam = T_cam.reshape(1,3).tolist()
+        Rvec_cam = cv2.Rodrigues(R_cam)[0].reshape(1,3).tolist()

         # Save calibration file
         if save_calib and not calib_file:
-            R90z = np.array([[0.0, -1.0, 0.0],
-                             [1.0, 0.0, 0.0],
-                             [0.0, 0.0, 1.0]])
-            R270x = np.array([[1.0, 0.0, 0.0],
-                              [0.0, 0.0, 1.0],
-                              [0.0, -1.0, 0.0]])
-
-            calib_file_path = output_dir / f'{video_file_stem}_Sports2D_calib.toml'
-
-            # name, size, distortions
-            N = [video_file_stem]
-            S = [[cam_width, cam_height]]
-            D = [[0.0, 0.0, 0.0, 0.0]]
-
-            # Intrinsics
-            f = height_px / first_person_height * distance_m
-            cu = cam_width/2
-            cv = cam_height/2
-            K = np.array([[[f, 0.0, cu], [0.0, f, cv], [0.0, 0.0, 1.0]]])
-
-            # Extrinsics
-            Rfloory = np.array([[np.cos(floor_angle_estim), 0.0, np.sin(floor_angle_estim)],
-                                [0.0, 1.0, 0.0],
-                                [-np.sin(floor_angle_estim), 0.0, np.cos(floor_angle_estim)]])
-            R_world = R90z @ Rfloory @ R270x
-            T_world = R90z @ np.array([-(cx-cu)/f*distance_m, -distance_m, (cy-cv)/f*distance_m])
-
-            R_cam, T_cam = world_to_camera_persp(R_world, T_world)
-            Tvec_cam = T_cam.reshape(1,3).tolist()
-            Rvec_cam = cv2.Rodrigues(R_cam)[0].reshape(1,3).tolist()
-
-            # Write calibration file
             toml_write(calib_file_path, N, S, D, K, Rvec_cam, Tvec_cam)
             logging.info(f'Calibration saved to {calib_file_path}.')
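world_to_camera_persp comes from Pose2Sim.common and its body is not part of this diff; under the usual convention it inverts the world-frame rigid transform, which is what a minimal stand-in would look like (an assumption, shown for orientation only):

import numpy as np

def world_to_camera_persp_sketch(R_world, T_world):
    # Assumed behavior: inverse rigid transform, so that
    # x_cam = R_cam @ x_world + T_cam with R_cam = R.T, T_cam = -R.T @ T
    R_cam = R_world.T
    T_cam = -R_world.T @ T_world
    return R_cam, T_cam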
@@ -2417,12 +2290,20 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             if to_meters and (show_plots or save_plots):
                 pw = pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i, show=show_plots)
                 if save_plots:
-                    [... 6 lines truncated in the diff view ...]
+                    if show_plots:
+                        for n, f in enumerate(pw.figure_handles):
+                            dpi = pw.canvases[n].figure.dpi
+                            f.set_size_inches(1280/dpi, 720/dpi)
+                            title = pw.tabs.tabText(n)
+                            plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_m_{title.replace(" ","_").replace("/","_")}.png')
+                            f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                    else: # Tabbed plots not used
+                        for title, f in pw:
+                            dpi = f.dpi
+                            f.set_size_inches(1280/dpi, 720/dpi)
+                            plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_m_{title.replace(" ","_").replace("/","_")}.png')
+                            f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                            plt.close(f)
                     logging.info(f'Pose plots (m) saved in {plots_output_dir}.')

             # Write to trc file
@@ -2553,12 +2434,20 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         if show_plots or save_plots:
             pw = angle_plots(all_frames_angles_person, angle_data, i, show=show_plots) # i = current person
             if save_plots:
-                [... 6 lines truncated in the diff view ...]
+                if show_plots:
+                    for n, f in enumerate(pw.figure_handles):
+                        dpi = pw.canvases[n].figure.dpi
+                        f.set_size_inches(1280/dpi, 720/dpi)
+                        title = pw.tabs.tabText(n)
+                        plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_ang_{title.replace(" ","_").replace("/","_")}.png')
+                        f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                else: # Tabbed plots not used
+                    for title, f in pw:
+                        dpi = f.dpi
+                        f.set_size_inches(1280/dpi, 720/dpi)
+                        plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_ang_{title.replace(" ","_").replace("/","_")}.png')
+                        f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
+                        plt.close(f)
                 logging.info(f'Pose plots (m) saved in {plots_output_dir}.')
