sports2d 0.7.0.tar.gz → 0.7.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {sports2d-0.7.0 → sports2d-0.7.3}/PKG-INFO +8 -4
  2. {sports2d-0.7.0 → sports2d-0.7.3}/README.md +5 -2
  3. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Demo/Config_demo.toml +3 -3
  4. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Sports2D.py +3 -2
  5. sports2d-0.7.3/Sports2D/Utilities/__init__.py +8 -0
  6. sports2d-0.7.3/Sports2D/Utilities/common.py +226 -0
  7. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Utilities/filter.py +2 -1
  8. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Utilities/tests.py +8 -6
  9. sports2d-0.7.3/Sports2D/__init__.py +8 -0
  10. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/process.py +25 -9
  11. {sports2d-0.7.0 → sports2d-0.7.3}/setup.cfg +1 -1
  12. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/PKG-INFO +8 -4
  13. sports2d-0.7.0/Sports2D/Utilities/__init__.py +0 -7
  14. sports2d-0.7.0/Sports2D/Utilities/common.py +0 -1163
  15. sports2d-0.7.0/Sports2D/__init__.py +0 -7
  16. {sports2d-0.7.0 → sports2d-0.7.3}/LICENSE +0 -0
  17. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Demo/demo.mp4 +0 -0
  18. {sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Utilities/skeletons.py +0 -0
  19. {sports2d-0.7.0 → sports2d-0.7.3}/pyproject.toml +0 -0
  20. {sports2d-0.7.0 → sports2d-0.7.3}/setup.py +0 -0
  21. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/SOURCES.txt +0 -0
  22. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/dependency_links.txt +0 -0
  23. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/entry_points.txt +0 -0
  24. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/not-zip-safe +0 -0
  25. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/requires.txt +0 -0
  26. {sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/top_level.txt +0 -0
{sports2d-0.7.0 → sports2d-0.7.3}/PKG-INFO

@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: sports2d
- Version: 0.7.0
+ Version: 0.7.3
  Summary: Detect pose and compute 2D joint angles from a video.
  Home-page: https://github.com/davidpagnon/Sports2D
  Author: David Pagnon

@@ -40,6 +40,7 @@ Requires-Dist: tqdm
  Requires-Dist: imageio_ffmpeg
  Requires-Dist: deep-sort-realtime
  Requires-Dist: Pose2Sim
+ Dynamic: license-file


  [![Continuous integration](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml/badge.svg?branch=main)](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml)

@@ -81,7 +82,10 @@ Works on any smartphone!**\

  </br>

- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e
+
+ https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
+
+ <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->

  `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
  `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\

@@ -465,7 +469,7 @@ sports2d --help
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
- 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+ 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
  More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
{sports2d-0.7.0 → sports2d-0.7.3}/README.md

@@ -38,7 +38,10 @@ Works on any smartphone!**\

  </br>

- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e
+
+ https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
+
+ <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->

  `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
  `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\

@@ -422,7 +425,7 @@ sports2d --help
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
- 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+ 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
  More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
{sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Demo/Config_demo.toml

@@ -89,7 +89,7 @@ det_frequency = 4 # Run person detection only every N frames, and inbetwee
  # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
  device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
  backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
- tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
+ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned
  # deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
  # More robust in crowded scenes but tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
  # Requires `pip install torch torchvision torchreid gdown tensorboard`
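Aside (not part of the released files): the commented deepsort_params value above is a Python dict literal wrapped in triple double quotes, and process.py (further down in this diff) imports both `ast` and `DeepSort`, so a plausible sketch of how such a string becomes tracker arguments is given below; the variable names and the exact parsing step are illustrative only.

    # Illustrative sketch: turn a triple-quoted dict string into DeepSort keyword arguments
    import ast
    from deep_sort_realtime.deepsort_tracker import DeepSort

    deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True}"""
    tracker = DeepSort(**ast.literal_eval(deepsort_params))  # keyword names documented in deep_sort_realtime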
@@ -151,8 +151,8 @@ filter_type = 'butterworth' # butterworth, gaussian, LOESS, median


  [kinematics]
- do_ik = true # Do scaling and inverse kinematics?
- use_augmentation = true # true or false (lowercase) # Set to true if you want to use the model with augmented markers
+ do_ik = false # Do scaling and inverse kinematics?
+ use_augmentation = false # true or false (lowercase) # Set to true if you want to use the model with augmented markers
  use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
  participant_mass = [67.0, 55.0] # kg # defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)
  right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
{sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Sports2D.py

@@ -109,6 +109,7 @@


  ## INIT
+ from importlib.metadata import version
  import argparse
  import toml
  from datetime import datetime

@@ -261,7 +262,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
- 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+ 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
  More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],

@@ -301,7 +302,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
sports2d-0.7.3/Sports2D/Utilities/__init__.py

@@ -0,0 +1,8 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ import sys
+ from importlib.metadata import version
+
+ __version__ = version("sports2d")
+ VERSION = __version__
sports2d-0.7.3/Sports2D/Utilities/common.py

@@ -0,0 +1,226 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+
+ '''
+ ##################################################
+ ## Common classes and functions ##
+ ##################################################
+
+ - A class for displaying several matplotlib figures in tabs.
+ - A function for interpolating sequences with missing data.
+   It does not interpolate sequences of more than N contiguous missing data.
+
+ '''
+
+
+ ## INIT
+ import sys
+ from importlib.metadata import version
+ import subprocess
+ from pathlib import Path
+ import logging
+ from collections import defaultdict
+ import numpy as np
+ import imageio_ffmpeg as ffmpeg
+
+
+ ## AUTHORSHIP INFORMATION
+ __author__ = "David Pagnon"
+ __copyright__ = "Copyright 2023, Sports2D"
+ __credits__ = ["David Pagnon"]
+ __license__ = "BSD 3-Clause License"
+ __version__ = version("sports2d")
+ __maintainer__ = "David Pagnon"
+ __email__ = "contact@david-pagnon.com"
+ __status__ = "Development"
+
+
+ ## CONSTANTS
+ angle_dict = { # lowercase!
+     # joint angles
+     'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
+     'left ankle': [['LKnee', 'LAnkle', 'LBigToe', 'LHeel'], 'dorsiflexion', 90, 1],
+     'right knee': [['RAnkle', 'RKnee', 'RHip'], 'flexion', -180, 1],
+     'left knee': [['LAnkle', 'LKnee', 'LHip'], 'flexion', -180, 1],
+     'right hip': [['RKnee', 'RHip', 'Hip', 'Neck'], 'flexion', 0, -1],
+     'left hip': [['LKnee', 'LHip', 'Hip', 'Neck'], 'flexion', 0, -1],
+     # 'lumbar': [['Neck', 'Hip', 'RHip', 'LHip'], 'flexion', -180, -1],
+     # 'neck': [['Head', 'Neck', 'RShoulder', 'LShoulder'], 'flexion', -180, -1],
+     'right shoulder': [['RElbow', 'RShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
+     'left shoulder': [['LElbow', 'LShoulder', 'Hip', 'Neck'], 'flexion', 0, -1],
+     'right elbow': [['RWrist', 'RElbow', 'RShoulder'], 'flexion', 180, -1],
+     'left elbow': [['LWrist', 'LElbow', 'LShoulder'], 'flexion', 180, -1],
+     'right wrist': [['RElbow', 'RWrist', 'RIndex'], 'flexion', -180, 1],
+     'left wrist': [['LElbow', 'LWrist', 'LIndex'], 'flexion', -180, 1],
+
+     # segment angles
+     'right foot': [['RBigToe', 'RHeel'], 'horizontal', 0, -1],
+     'left foot': [['LBigToe', 'LHeel'], 'horizontal', 0, -1],
+     'right shank': [['RAnkle', 'RKnee'], 'horizontal', 0, -1],
+     'left shank': [['LAnkle', 'LKnee'], 'horizontal', 0, -1],
+     'right thigh': [['RKnee', 'RHip'], 'horizontal', 0, -1],
+     'left thigh': [['LKnee', 'LHip'], 'horizontal', 0, -1],
+     'pelvis': [['LHip', 'RHip'], 'horizontal', 0, -1],
+     'trunk': [['Neck', 'Hip'], 'horizontal', 0, -1],
+     'shoulders': [['LShoulder', 'RShoulder'], 'horizontal', 0, -1],
+     'head': [['Head', 'Neck'], 'horizontal', 0, -1],
+     'right arm': [['RElbow', 'RShoulder'], 'horizontal', 0, -1],
+     'left arm': [['LElbow', 'LShoulder'], 'horizontal', 0, -1],
+     'right forearm': [['RWrist', 'RElbow'], 'horizontal', 0, -1],
+     'left forearm': [['LWrist', 'LElbow'], 'horizontal', 0, -1],
+     'right hand': [['RIndex', 'RWrist'], 'horizontal', 0, -1],
+     'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
+     }
+
+ marker_Z_positions = {'right':
+         {"RHip": 0.105, "RKnee": 0.0886, "RAnkle": 0.0972, "RBigToe":0.0766, "RHeel":0.0883, "RSmallToe": 0.1200,
+          "RShoulder": 0.2016, "RElbow": 0.1613, "RWrist": 0.120, "RThumb": 0.1625, "RIndex": 0.1735, "RPinky": 0.1740, "REye": 0.0311,
+          "LHip": -0.105, "LKnee": -0.0886, "LAnkle": -0.0972, "LBigToe": -0.0766, "LHeel": -0.0883, "LSmallToe": -0.1200,
+          "LShoulder": -0.2016, "LElbow": -0.1613, "LWrist": -0.120, "LThumb": -0.1625, "LIndex": -0.1735, "LPinky": -0.1740, "LEye": -0.0311,
+          "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
+     'left':
+         {"RHip": -0.105, "RKnee": -0.0886, "RAnkle": -0.0972, "RBigToe": -0.0766, "RHeel": -0.0883, "RSmallToe": -0.1200,
+          "RShoulder": -0.2016, "RElbow": -0.1613, "RWrist": -0.120, "RThumb": -0.1625, "RIndex": -0.1735, "RPinky": -0.1740, "REye": -0.0311,
+          "LHip": 0.105, "LKnee": 0.0886, "LAnkle": 0.0972, "LBigToe":0.0766, "LHeel":0.0883, "LSmallToe": 0.1200,
+          "LShoulder": 0.2016, "LElbow": 0.1613, "LWrist": 0.120, "LThumb": 0.1625, "LIndex": 0.1735, "LPinky": 0.1740, "LEye": 0.0311,
+          "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
+     'front': # original knee:0.0179
+         {"RHip": 0.0301, "RKnee": 0.129, "RAnkle": 0.0230, "RBigToe": 0.2179, "RHeel": -0.0119, "RSmallToe": 0.1804,
+          "RShoulder": -0.01275, "RElbow": 0.0702, "RWrist": 0.1076, "RThumb": 0.0106, "RIndex": -0.0004, "RPinky": -0.0009, "REye": 0.0702,
+          "LHip": 0.0301, "LKnee": 0.129, "LAnkle": 0.0230, "LBigToe": 0.2179, "LHeel": -0.0119, "LSmallToe": 0.1804,
+          "LShoulder": -0.01275, "LElbow": 0.0702, "LWrist": 0.1076, "LThumb": 0.0106, "LIndex": -0.0004, "LPinky": -0.0009, "LEye": 0.0702,
+          "Hip": 0.0301, "Neck": 0.0008, "Head": 0.0655, "Nose": 0.1076},
+     'back':
+         {"RHip": -0.0301, "RKnee": -0.129, "RAnkle": -0.0230, "RBigToe": -0.2179, "RHeel": 0.0119, "RSmallToe": -0.1804,
+          "RShoulder": 0.01275, "RElbow": 0.0702, "RWrist": -1076.0002, "RThumb": -0.0106, "RIndex": 0.0004, "RPinky": 0.0009, "REye": -0.0702,
+          "LHip": -0.0301, "LKnee": -0.129, "LAnkle": -0.0230, "LBigToe": -0.2179, "LHeel": 0.0119, "LSmallToe": -0.1804,
+          "LShoulder": 0.01275, "LElbow": 0.0702, "LWrist": -0.1076, "LThumb": -0.0106, "LIndex": 0.0004, "LPinky": 0.0009, "LEye": -0.0702,
+          "Hip": -0.0301, "Neck": -0.0008, "Head": -0.0655, "Nose": -0.1076},
+     }
+
+ colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
+     (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
+     (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
+     (255, 0, 125), (255, 125, 0), (0, 125, 255), (0, 255, 125), (125, 0, 255), (125, 255, 0), (0, 255, 0)]
+ thickness = 1
+
+ ## FUNCTIONS
+ def to_dict(d):
+     '''
+     Convert a defaultdict to a dict.
+     '''
+     if isinstance(d, defaultdict):
+         return {k: to_dict(v) for k, v in d.items()}
+     return d
+
+
+ def make_homogeneous(list_of_arrays):
+     '''
+     Make a list of arrays (or a list of lists) homogeneous by padding with nans
+
+     Example: foo = [[array([nan, 656.02643776]), array([nan, nan])],
+                     [array([1, 2, 3]), array([1, 2])]]
+     becomes foo_updated = array([[[nan, 656.02643776, nan], [nan, nan, nan]],
+                                  [[1., 2., 3.], [1., 2., nan]]])
+     Or foo = [[1, 2, 3], [1, 2], [3, 4, 5]]
+     becomes foo_updated = array([[1., 2., 3.], [1., 2., nan], [3., 4., 5.]])
+
+     INPUTS:
+     - list_of_arrays: list of arrays or list of lists
+
+     OUTPUT:
+     - np.array(list_of_arrays): numpy array of padded arrays
+     '''
+
+     def get_max_shape(list_of_arrays):
+         '''
+         Recursively determine the maximum shape of a list of arrays.
+         '''
+         if isinstance(list_of_arrays[0], list):
+             # Maximum length at the current level plus the max shape at the next level
+             return [max(len(arr) for arr in list_of_arrays)] + get_max_shape(
+                 [item for sublist in list_of_arrays for item in sublist])
+         else:
+             # Determine the maximum shape across all list_of_arrays at this level
+             return [len(list_of_arrays)] + [max(arr.shape[i] for arr in list_of_arrays if arr.size > 0) for i in range(list_of_arrays[0].ndim)]
+
+     def pad_with_nans(list_of_arrays, target_shape):
+         '''
+         Recursively pad list_of_arrays with nans to match the target shape.
+         '''
+         if isinstance(list_of_arrays, np.ndarray):
+             # Pad the current array to the target shape
+             pad_width = []
+             for dim_index in range(0, len(target_shape)):
+                 if dim_index == len(list_of_arrays.shape) or dim_index > len(list_of_arrays.shape):
+                     list_of_arrays = np.expand_dims(list_of_arrays, 0)
+             for dim_index in range(0, len(target_shape)):
+                 max_dim = target_shape[dim_index]
+                 curr_dim = list_of_arrays.shape[dim_index]
+                 pad_width.append((0, max_dim - curr_dim))
+             return np.pad(list_of_arrays.astype(float), pad_width, constant_values=np.nan)
+         # Recursively pad each array in the list
+         return [pad_with_nans(array, target_shape[1:]) for array in list_of_arrays]
+
+     # Pad all missing dimensions of arrays with nans
+     list_of_arrays = [np.array(arr, dtype=float) if not isinstance(arr, np.ndarray) else arr for arr in list_of_arrays]
+     max_shape = get_max_shape(list_of_arrays)
+     list_of_arrays = pad_with_nans(list_of_arrays, max_shape)
+
+     return np.array(list_of_arrays)
+
+
+ def get_start_time_ffmpeg(video_path):
+     '''
+     Get the start time of a video using FFmpeg.
+     '''
+
+     try:
+         ffmpeg_path = ffmpeg.get_ffmpeg_exe()
+     except Exception as e:
+         logging.warning(f"No ffmpeg exe could be found. Starting time set to 0.0. Error: {e}")
+         return 0.0
+
+     cmd = [ffmpeg_path, "-i", video_path]
+     result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL, text=True)
+     for line in result.stderr.splitlines():
+         if "start:" in line:
+             parts = line.split("start:")
+             if len(parts) > 1:
+                 start_time = parts[1].split(",")[0].strip()
+                 return float(start_time)
+     return 0.0  # Default to 0 if not found
+
+
+ def resample_video(vid_output_path, fps, desired_framerate):
+     '''
+     Resample video to the desired fps using ffmpeg.
+     '''
+
+     ffmpeg_path = ffmpeg.get_ffmpeg_exe()
+     new_vid_path = vid_output_path.parent / Path(vid_output_path.stem+'_2'+vid_output_path.suffix)
+     subprocess.run([ffmpeg_path, '-i', vid_output_path, '-filter:v', f'setpts={fps/desired_framerate}*PTS', '-r', str(desired_framerate), new_vid_path])
+     vid_output_path.unlink()
+     new_vid_path.rename(vid_output_path)
+
+
+ def write_calibration(calib_params, toml_path):
+     '''
+     Write calibration file from calibration parameters
+     '''
+
+     S, D, N, K, R, T, P = calib_params
+     with open(toml_path, 'w+') as cal_f:
+         for c in range(len(S)):
+             cam_str = f'[{N[c]}]\n'
+             name_str = f'name = "{N[c]}"\n'
+             size_str = f'size = {S[c]} \n'
+             mat_str = f'matrix = {K[c]} \n'
+             dist_str = f'distortions = {D[c]} \n'
+             rot_str = f'rotation = {R[c]} \n'
+             tran_str = f'translation = {T[c]} \n'
+             fish_str = f'fisheye = false\n\n'
+             cal_f.write(cam_str + name_str + size_str + mat_str + dist_str + rot_str + tran_str + fish_str)
+         meta = '[metadata]\nadjusted = false\nerror = 0.0\n'
+         cal_f.write(meta)
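A possible call to write_calibration from the new module above, with purely illustrative single-camera values (the argument shapes are inferred from the f-strings in the function; none of these numbers come from the package):

    # Hypothetical single-camera example for write_calibration (values are illustrative only)
    S = [[1920, 1080]]                                                   # image sizes
    D = [[0.0, 0.0, 0.0, 0.0]]                                           # distortion coefficients
    N = ['cam_01']                                                       # camera names
    K = [[[1500.0, 0.0, 960.0], [0.0, 1500.0, 540.0], [0.0, 0.0, 1.0]]]  # intrinsic matrices
    R = [[0.0, 0.0, 0.0]]                                                # rotation vectors
    T = [[0.0, 0.0, 2.0]]                                                # translation vectors
    P = [None]                                                           # not used by write_calibration
    write_calibration((S, D, N, K, R, T, P), 'Calib_demo.toml')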
{sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Utilities/filter.py

@@ -19,6 +19,7 @@


  ## INIT
+ from importlib.metadata import version
  import numpy as np
  from scipy import signal
  from scipy.ndimage import gaussian_filter1d

@@ -30,7 +31,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2021, Pose2Sim"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"
{sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/Utilities/tests.py

@@ -13,6 +13,7 @@


  ## INIT
+ from importlib.metadata import version
  import toml
  import subprocess
  from pathlib import Path

@@ -23,7 +24,7 @@ __author__ = "David Pagnon"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"

@@ -58,12 +59,14 @@ def test_workflow():
  demo_cmd = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False"]
  subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')

- # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, time range and slowmo factor
+ # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
  demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
  "--to_meters", "False",
  "--multiperson", "False",
  "--mode", "lightweight", "--det_frequency", "50",
- "--time_range", "1.2", "2.7", "--slowmo_factor", "4"]
+ "--slowmo_factor", "4",
+ "--filter_type", "gaussian",
+ "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
  subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')

  # With a time range, inverse kinematics, marker augmentation, body pose_model and custom RTMO mode
@@ -71,9 +74,8 @@ def test_workflow():
  "--time_range", "1.2", "2.7",
  "--do_ik", "True", "--use_augmentation", "True",
  "--px_to_m_from_person_id", "1", "--px_to_m_person_height", "1.65",
- "--visible_side", "left", "front", "--participant_mass", "55.0", "67.0",
- "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
- subprocess.run(demo_cmd3, check=True, capture_output=True, text=True)
+ "--visible_side", "front", "auto", "--participant_mass", "55.0", "67.0"]
+ subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')

  # From config file
  cli_config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
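The last context line above sets up the config-file test. For orientation only, a hypothetical companion call might look like the sketch below; the --config flag name is assumed from the 'config' entry of CONFIG_HELP and is not shown in this diff, while the rest mirrors the demo commands above.

    # Hypothetical sketch: run Sports2D from a TOML config file, mirroring the test commands above
    config_cmd = ["sports2d", "--config", str(cli_config_path),
                  "--show_realtime_results", "False", "--show_graphs", "False"]
    subprocess.run(config_cmd, check=True, capture_output=True, text=True, encoding='utf-8')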
sports2d-0.7.3/Sports2D/__init__.py

@@ -0,0 +1,8 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ import sys
+ from importlib.metadata import version
+
+ __version__ = version("sports2d")
+ VERSION = __version__
{sports2d-0.7.0 → sports2d-0.7.3}/Sports2D/process.py

@@ -58,6 +58,7 @@ import json
  import ast
  import shutil
  import os
+ from importlib.metadata import version
  from functools import partial
  from datetime import datetime
  import itertools as it

@@ -72,11 +73,11 @@ import matplotlib as mpl
  import matplotlib.pyplot as plt
  from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Custom
  from deep_sort_realtime.deepsort_tracker import DeepSort
- import opensim as osim

  from Sports2D.Utilities import filter
  from Sports2D.Utilities.common import *
  from Sports2D.Utilities.skeletons import *
+ from Pose2Sim.common import *

  DEFAULT_MASS = 70
  DEFAULT_HEIGHT = 1.7

@@ -86,7 +87,7 @@ __author__ = "David Pagnon, HunMin Kim"
  __copyright__ = "Copyright 2023, Sports2D"
  __credits__ = ["David Pagnon"]
  __license__ = "BSD 3-Clause License"
- __version__ = "0.4.0"
+ __version__ = version("sports2d")
  __maintainer__ = "David Pagnon"
  __email__ = "contact@david-pagnon.com"
  __status__ = "Development"

@@ -625,7 +626,7 @@ def trc_data_from_XYZtime(X, Y, Z, time):
  '''

  trc_data = pd.concat([pd.concat([X.iloc[:,kpt], Y.iloc[:,kpt], Z.iloc[:,kpt]], axis=1) for kpt in range(len(X.columns))], axis=1)
- trc_data.insert(0, 't', time)
+ trc_data.insert(0, 'time', time)

  return trc_data

@@ -928,6 +929,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  px_to_m_from_person_id = int(config_dict.get('project').get('px_to_m_from_person_id'))
  px_to_m_person_height_m = config_dict.get('project').get('px_to_m_person_height')
  visible_side = config_dict.get('project').get('visible_side')
+ if isinstance(visible_side, str): visible_side = [visible_side]
  # Pose from file
  load_trc_px = config_dict.get('project').get('load_trc_px')
  if load_trc_px == '': load_trc_px = None

@@ -1040,7 +1042,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  close_to_zero_speed_px = config_dict.get('kinematics').get('close_to_zero_speed_px')
  close_to_zero_speed_m = config_dict.get('kinematics').get('close_to_zero_speed_m')
  if do_ik:
- from Pose2Sim.markerAugmentation import augment_markers_all
+ if use_augmentation:
+ from Pose2Sim.markerAugmentation import augment_markers_all
  from Pose2Sim.kinematics import kinematics_all
  # Create a Pose2Sim dictionary and fill in missing keys
  recursivedict = lambda: defaultdict(recursivedict)

@@ -1127,12 +1130,18 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  logging.error(f'\n{load_trc_px} file needs to be in px, not in meters.')
  logging.info(f'\nUsing a pose file instead of running pose estimation and tracking: {load_trc_px}.')
  # Load pose file in px
- Q_coords, _, _, keypoints_names, _ = read_trc(load_trc_px)
+ Q_coords, _, time_col, keypoints_names, _ = read_trc(load_trc_px)
+
  keypoints_ids = [i for i in range(len(keypoints_names))]
  keypoints_all, scores_all = load_pose_file(Q_coords)
  for pre, _, node in RenderTree(pose_model):
  if node.name in keypoints_names:
  node.id = keypoints_names.index(node.name)
+ if time_range:
+ frame_range = [abs(time_col - time_range[0]).idxmin(), abs(time_col - time_range[1]).idxmin()+1]
+ else:
+ frame_range = [0, len(Q_coords)]
+ frame_iterator = tqdm(range(*frame_range))

  else:
  # Retrieve keypoint names from model
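To make the added frame_range logic concrete, here is a small illustration with made-up numbers (time_col stands for the time column read from the .trc file, as in the hunk above):

    import pandas as pd
    time_col = pd.Series([0.0, 0.1, 0.2, 0.3, 0.4])   # made-up .trc time column at 10 fps
    time_range = [0.1, 0.3]
    frame_range = [abs(time_col - time_range[0]).idxmin(), abs(time_col - time_range[1]).idxmin() + 1]
    # frame_range == [1, 4]; range(*frame_range) then iterates over frames 1, 2 and 3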
@@ -1336,7 +1345,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  all_frames_scores = make_homogeneous(all_frames_scores)

  frame_range = [0,frame_count] if video_file == 'webcam' else frame_range
- all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count+1), name='time')
+ if not load_trc_px:
+ all_frames_time = pd.Series(np.linspace(frame_range[0]/fps, frame_range[1]/fps, frame_count-frame_range[0]+1), name='time')
+ else:
+ all_frames_time = time_col
  if not multiperson:
  px_to_m_from_person_id = get_personID_with_highest_scores(all_frames_scores)
  detected_persons = [px_to_m_from_person_id]

@@ -1475,7 +1487,6 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  if not np.array(trc_data[i].iloc[:,1:] ==0).all():
  # Automatically determine visible side
  visible_side_i = visible_side[i] if len(visible_side)>i else 'auto' # set to 'auto' if list too short
-
  # Set to 'front' if slope of X values between [-5,5]
  if visible_side_i == 'auto':
  try:

@@ -1512,7 +1523,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
  pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{idx_path:02d}.trc'))
  make_trc_with_trc_data(trc_data_m_i, pose_path_person_m_i, fps=fps)
  if make_c3d:
- c3d_path = convert_to_c3d(pose_path_person_m_i)
+ c3d_path = convert_to_c3d(str(pose_path_person_m_i))
  logging.info(f'Pose in meters saved to {pose_path_person_m_i.resolve()}. {"Also saved in c3d format." if make_c3d else ""}')


@@ -1643,12 +1654,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):

  # OpenSim inverse kinematics (and optional marker augmentation)
  if do_ik or use_augmentation:
+ import opensim as osim
  logging.info('\nPost-processing angles (with inverse kinematics):')
  if not to_meters:
  logging.warning('Skipping marker augmentation and inverse kinematics as to_meters was set to False.')
  else:
  # move all trc files containing _m_ string to pose3d_dir
- for trc_file in output_dir.glob('*_m_*.trc'):
+ if not load_trc_px:
+ trc_list = output_dir.glob('*_m_*.trc')
+ else:
+ trc_list = [pose_path_person_m_i]
+ for trc_file in trc_list:
  if (pose3d_dir/trc_file.name).exists():
  os.remove(pose3d_dir/trc_file.name)
  shutil.move(trc_file, pose3d_dir)
{sports2d-0.7.0 → sports2d-0.7.3}/setup.cfg

@@ -1,6 +1,6 @@
  [metadata]
  name = sports2d
- version = 0.7.0
+ version = 0.7.3
  author = David Pagnon
  author_email = contact@david-pagnon.com
  description = Detect pose and compute 2D joint angles from a video.
{sports2d-0.7.0 → sports2d-0.7.3}/sports2d.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: sports2d
- Version: 0.7.0
+ Version: 0.7.3
  Summary: Detect pose and compute 2D joint angles from a video.
  Home-page: https://github.com/davidpagnon/Sports2D
  Author: David Pagnon

@@ -40,6 +40,7 @@ Requires-Dist: tqdm
  Requires-Dist: imageio_ffmpeg
  Requires-Dist: deep-sort-realtime
  Requires-Dist: Pose2Sim
+ Dynamic: license-file


  [![Continuous integration](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml/badge.svg?branch=main)](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml)

@@ -81,7 +82,10 @@ Works on any smartphone!**\

  </br>

- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e
+
+ https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
+
+ <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->

  `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
  `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\

@@ -465,7 +469,7 @@ sports2d --help
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
  'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
- 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+ 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
  More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
sports2d-0.7.0/Sports2D/Utilities/__init__.py

@@ -1,7 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
-
- import sys
-
- __version__ = "0.4.0"
- VERSION = __version__