sports2d 0.8.4__py3-none-any.whl → 0.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Content/Demo_plots.png ADDED
Binary file
Binary file
Binary file
Binary file
Binary file
Binary file
Content/paper.bib ADDED
@@ -0,0 +1,298 @@
1
+ @article{Bazarevsky_2020,
2
+ title={{BlazePose}: On-device real-time body pose tracking},
3
+ author={Bazarevsky, Valentin and Grishchenko, Ivan and Raveendran, Karthik and Zhu, Tyler and Zhang, Fan and Grundmann, Matthias},
4
+ DOI={10.48550/arXiv.2006.10204},
5
+ journal={arXiv preprint arXiv:2006.10204},
6
+ year={2020}
7
+ }
8
+
9
+ @article{Bisong_2019,
10
+ title={Google colaboratory},
11
+ author={Bisong, Ekaba},
12
+ DOI={10.1007/978-1-4842-4470-8},
13
+ journal={Building machine learning and deep learning models on google cloud platform: a comprehensive guide for beginners},
14
+ pages={59--64},
15
+ year={2019},
16
+ publisher={Springer}
17
+ }
18
+
19
+ @article{Boswell_2023,
20
+ title={Smartphone videos of the sit-to-stand test predict osteoarthritis and health outcomes in a nationwide study},
21
+ author={Boswell, Melissa A and Kidzi{\'n}ski, {\L}ukasz and Hicks, Jennifer L and Uhlrich, Scott D and Falisse, Antoine and Delp, Scott L},
22
+ DOI={10.1038/s41746-023-00775-1},
23
+ journal={npj Digital Medicine},
24
+ volume={6},
25
+ number={1},
26
+ pages={32},
27
+ year={2023},
28
+ publisher={Nature Publishing Group UK London}
29
+ }
30
+
31
+ @article{Bradski_2000,
32
+ author = {Bradski, G.},
33
+ citeulike-article-id = {2236121},
34
+ journal = {Dr. Dobb's Journal of Software Tools},
35
+ keywords = {bibtex-import},
36
+ posted-at = {2008-01-15 19:21:54},
37
+ priority = {4},
38
+ title = {The {OpenCV} Library},
39
+ year = {2000}
40
+ }
41
+
42
+ @article{Bright_2012,
43
+ title={Effect of clinical decision-support systems: a systematic review},
44
+ author={Bright, Tiffani J and Wong, Anthony and Dhurjati, Ravi and Bristow, Erin and Bastian, Lori and Coeytaux, Remy R and Samsa, Gregory and Hasselblad, Vic and Williams, John W and Musty, Michael D and others},
45
+ DOI={10.7326/0003-4819-157-1-201207030-00450},
46
+ journal={Annals of internal medicine},
47
+ volume={157},
48
+ number={1},
49
+ pages={29--43},
50
+ year={2012},
51
+ publisher={American College of Physicians}
52
+ }
53
+
54
+ @article{Butterworth_1930,
55
+ title={On the theory of filter amplifiers},
56
+ author={Butterworth, Stephen},
57
+ journal={Wireless Engineer},
58
+ volume={7},
59
+ number={6},
60
+ pages={536--541},
61
+ year={1930}
62
+ }
63
+
64
+ @article{Cao_2019,
65
+ title={{OpenPose}: realtime multi-person {2D} pose estimation using {Part Affinity Fields}},
66
+ author={Cao, Zhe and Hidalgo, Gines and Simon, Tomas and Wei, Shih-En and Sheikh, Yaser},
67
+ journal={IEEE transactions on pattern analysis and machine intelligence},
68
+ volume={43},
69
+ number={1},
70
+ pages={172--186},
71
+ year={2019},
72
+ URL = {https://arxiv.org/abs/1611.08050},
73
+ DOI = {10.1109/TPAMI.2019.2929257},
74
+ publisher={IEEE}
75
+ }
76
+
77
+ @article{Cleveland_1981,
78
+ title={{LOWESS}: A program for smoothing scatterplots by robust locally weighted regression},
79
+ author={Cleveland, William S},
80
+ DOI={10.2307/2683591},
81
+ journal={American Statistician},
82
+ volume={35},
83
+ number={1},
84
+ pages={54},
85
+ year={1981}
86
+ }
87
+
88
+ @article{Colyer_2018,
89
+ title={A review of the evolution of vision-based motion analysis and the integration of advanced computer vision methods towards developing a markerless system},
90
+ author={Colyer, Steffi L and Evans, Murray and Cosker, Darren P and Salo, Aki IT},
91
+ journal={Sports medicine-open},
92
+ DOI={10.1186/s40798-018-0139-y},
93
+ volume={4},
94
+ number={1},
95
+ pages={1--15},
96
+ year={2018},
97
+ publisher={SpringerOpen}
98
+ }
99
+
100
+ @article{Delp_2007,
101
+ title={{OpenSim}: open-source software to create and analyze dynamic simulations of movement},
102
+ author={Delp, Scott L and Anderson, Frank C and Arnold, Allison S and Loan, Peter and Habib, Ayman and John, Chand T and Guendelman, Eran and Thelen, Darryl G},
103
+ journal={IEEE transactions on biomedical engineering},
104
+ volume={54},
105
+ number={11},
106
+ pages={1940--1950},
107
+ year={2007},
108
+ URL = {https://ieeexplore.ieee.org/abstract/document/4352056},
109
+ DOI = {10.1109/TBME.2007.901024},
110
+ publisher={IEEE}
111
+ }
112
+
113
+ @article{Di_2021,
114
+ title={A 2D qualitative movement assessment of a deceleration task detects football players with high knee joint loading},
115
+ author={Di Paolo, Stefano and Zaffagnini, Stefano and Tosarelli, Filippo and Aggio, Fabrizio and Bragonzoni, Laura and Grassi, Alberto and Della Villa, Francesco},
116
+ DOI={10.1007/s00167-021-06709-2},
117
+ journal={Knee Surgery, Sports Traumatology, Arthroscopy},
118
+ volume={29},
119
+ pages={4032--4040},
120
+ year={2021},
121
+ publisher={Springer}
122
+ }
123
+
124
+ @article{Jiang_2023,
125
+ title={{RTMPose}: Real-Time Multi-Person Pose Estimation based on {MMPose}},
126
+ author={Jiang, Tao and Lu, Peng and Zhang, Li and Ma, Ningsheng and Han, Rui and Lyu, Chengqi and Li, Yining and Chen, Kai},
127
+ DOI={10.48550/arXiv.2303.07399},
128
+ journal={arXiv},
129
+ year={2023},
130
+ eprint={2303.07399},
131
+ archivePrefix={arXiv},
132
+ primaryClass={cs.CV},
133
+ url={https://arxiv.org/abs/2303.07399},
134
+ }
135
+
136
+ @article{Kidzinski_2020,
137
+ title={Deep neural networks enable quantitative movement analysis using single-camera videos},
138
+ author={Kidzi{\'n}ski, {\L}ukasz and Yang, Bryan and Hicks, Jennifer L and Rajagopal, Apoorva and Delp, Scott L and Schwartz, Michael H},
139
+ DOI={10.1038/s41467-020-17807-z},
140
+ journal={Nature communications},
141
+ volume={11},
142
+ number={1},
143
+ pages={4054},
144
+ year={2020},
145
+ publisher={Nature Publishing Group UK London}
146
+ }
147
+
148
+ @misc{Kinovea,
149
+ author = {Kinovea},
150
+ title = {Kinovea - A microscope for your videos},
151
+ year = {},
152
+ publisher = {GitHub},
153
+ url = {https://www.kinovea.org/features.html},
154
+ howpublished = {\url{https://www.kinovea.org/features.html}}
155
+ }
156
+
157
+ @article{Mathis_2018,
158
+ title={{DeepLabCut}: markerless pose estimation of user-defined body parts with deep learning},
159
+ author={Mathis, Alexander and Mamidanna, Pranav and Cury, Kevin M and Abe, Taiga and Murthy, Venkatesh N and Mathis, Mackenzie Weygandt and Bethge, Matthias},
160
+ journal={Nature neuroscience},
161
+ volume={21},
162
+ number={9},
163
+ pages={1281--1289},
164
+ year={2018},
165
+ URL = {https://www.nature.com/articles/s41593-018-0209-y},
166
+ DOI = {10.1038/s41593-018-0209-y},
167
+ publisher={Nature Publishing Group}
168
+ }
169
+
170
+ @article{Minssen_2020,
171
+ title={Clinical trial data transparency and GDPR compliance: Implications for data sharing and open innovation},
172
+ author={Minssen, Timo and Rajam, Neethu and Bogers, Marcel},
173
+ DOI={10.2139/ssrn.3413035},
174
+ journal={Science and Public Policy},
175
+ volume={47},
176
+ number={5},
177
+ pages={616--626},
178
+ year={2020},
179
+ publisher={Oxford University Press}
180
+ }
181
+
182
+ @article{ODonoghue_2008,
183
+ title={Principal components analysis in the selection of key performance indicators in sport},
184
+ author={O'Donoghue, Peter},
185
+ doi={10.1080/24748668.2008.11868456},
186
+ journal={International Journal of Performance Analysis in Sport},
187
+ volume={8},
188
+ number={3},
189
+ pages={145--155},
190
+ year={2008},
191
+ publisher={Taylor \& Francis}
192
+ }
193
+
194
+ @article{Pagnon_2022a,
195
+ author = {Pagnon, David and Domalain, Mathieu and Reveret, Lionel},
196
+ title = {{Pose2Sim}: An open-source {Python} package for multiview markerless kinematics},
197
+ journal = {Journal of Open Source Software},
198
+ publisher = {The Open Journal},
199
+ year = {2022},
200
+ doi = {10.21105/joss.04362},
201
+ url = {https://joss.theoj.org/papers/10.21105/joss.04362},
202
+ volume = {7},
203
+ number = {77},
204
+ pages = {4362}
205
+ }
206
+
207
+ @inproceedings{Pagnon_2022b,
208
+ title = {{A 3D markerless protocol with action cameras – Key performance indicators in boxing}},
209
+ author = {Pagnon, David and Domalain, Mathieu and Robert, Thomas and Lahkar, Bhrigu-Kumar and Moussa, Issa and Sauli{\`e}re, Guillaume and Goyallon, Thibault and Reveret, Lionel},
210
+ booktitle={2022 Congress of the European College of Sport Science (ECSS)},
211
+ url = {https://hal.archives-ouvertes.fr/hal-03790926},
212
+ note = {Poster},
213
+ year = {2022}
214
+ }
215
+
216
+ @article{Patrizi_2016,
217
+ title={Comparison between low-cost marker-less and high-end marker-based motion capture systems for the computer-aided assessment of working ergonomics},
218
+ author={Patrizi, Alfredo and Pennestr{\`\i}, Ettore and Valentini, Pier Paolo},
219
+ DOI={10.1080/00140139.2015.1057238},
220
+ journal={Ergonomics},
221
+ volume={59},
222
+ number={1},
223
+ pages={155--162},
224
+ year={2016},
225
+ publisher={Taylor \& Francis}
226
+ }
227
+
228
+ @article{Seth_2018,
229
+ DOI = {10.1371/journal.pcbi.1006223},
230
+ author = {Seth, Ajay AND Hicks, Jennifer L. AND Uchida, Thomas K. AND Habib, Ayman AND Dembia, Christopher L. AND Dunne, James J. AND Ong, Carmichael F. AND DeMers, Matthew S. AND Rajagopal, Apoorva AND Millard, Matthew AND Hamner, Samuel R. AND Arnold, Edith M. AND Yong, Jennifer R. AND Lakshmikanth, Shrinidhi K. AND Sherman, Michael A. AND Ku, Joy P. AND Delp, Scott L.},
231
+ journal = {PLOS Computational Biology},
232
+ publisher = {Public Library of Science},
233
+ title = {{OpenSim}: Simulating musculoskeletal dynamics and neuromuscular control to study human and animal movement},
234
+ year = {2018},
235
+ month = {07},
236
+ volume = {14},
237
+ url = {https://doi.org/10.1371/journal.pcbi.1006223},
238
+ pages = {1-20},
239
+ number = {7},
240
+ }
241
+
242
+ @article{Uhlrich_2022,
243
+ title={{OpenCap}: {3D} human movement dynamics from smartphone videos},
244
+ url={https://www.biorxiv.org/content/10.1101/2022.07.07.499061v1},
245
+ DOI={10.1101/2022.07.07.499061},
246
+ publisher={bioRxiv},
247
+ author={Uhlrich, Scott D. and Falisse, Antoine and Kidzi{\'n}ski, {\L}ukasz and Muccini, Julie and Ko, Michael and Chaudhari, Akshay S. and Hicks, Jennifer L. and Delp, Scott L.},
248
+ year={2022},
249
+ month={Jul},
250
+ pages={2022.07.07.499061}
251
+ }
252
+
253
+ @article{Venkatesh_2012,
254
+ title={Consumer acceptance and use of information technology: extending the unified theory of acceptance and use of technology},
255
+ author={Venkatesh, Viswanath and Thong, James YL and Xu, Xin},
256
+ DOI={10.2307/41410412},
257
+ journal={MIS quarterly},
258
+ pages={157--178},
259
+ year={2012},
260
+ publisher={JSTOR}
261
+ }
262
+
263
+ @article{Wade_2022,
264
+ title={Applications and limitations of current markerless motion capture methods for clinical gait biomechanics},
265
+ author={Wade, Logan and Needham, Laurie and McGuigan, Polly and Bilzon, James},
266
+ DOI={10.7717/peerj.12995},
267
+ journal={PeerJ},
268
+ volume={10},
269
+ pages={e12995},
270
+ year={2022},
271
+ publisher={PeerJ Inc.}
272
+ }
273
+
274
+ @book{Whittle_2014,
275
+ title={Gait analysis: an introduction},
276
+ author={Whittle, Michael W},
277
+ year={2014},
278
+ publisher={Butterworth-Heinemann}
279
+ }
280
+
281
+ @book{winter2009biomechanics,
282
+ title={Biomechanics and motor control of human movement},
283
+ author={Winter, David A},
284
+ year={2009},
285
+ publisher={John wiley \& sons}
286
+ }
287
+
288
+ @article{Zheng_2023,
289
+ title={Deep learning-based human pose estimation: A survey},
290
+ author={Zheng, Ce and Wu, Wenhan and Chen, Chen and Yang, Taojiannan and Zhu, Sijie and Shen, Ju and Kehtarnavaz, Nasser and Shah, Mubarak},
291
+ DOI={10.1145/3603618},
292
+ journal={ACM Computing Surveys},
293
+ volume={56},
294
+ number={1},
295
+ pages={1--37},
296
+ year={2023},
297
+ publisher={ACM New York, NY}
298
+ }
Content/paper.md ADDED
@@ -0,0 +1,141 @@
1
+ ---
2
+ title: 'Sports2D: Compute 2D human pose and angles from a video or a webcam'
3
+ tags:
4
+ - python
5
+ - markerless kinematics
6
+ - motion capture
7
+ - sports performance analysis
8
+ - rtmpose
9
+ - clinical gait analysis
10
+ authors:
11
+ - name: David Pagnon^[corresponding author]
12
+ orcid: 0000-0002-6891-8331
13
+ affiliation: 1
14
+ - name: HunMin Kim
15
+ orcid: 0009-0007-7710-8051
16
+ affiliation: 2
17
+ affiliations:
18
+ - name: Centre for the Analysis of Motion, Entertainment Research & Applications (CAMERA), University of Bath, Claverton Down, Bath, BA2 7AY, United Kingdom
19
+ index: 1
20
+ - name: Inha University, Yonghyeon Campus, 100 Inha-ro, Michuhol-gu, Incheon 22212, South Korea
21
+ index: 2
22
+ date: February 14 2024
23
+ bibliography: paper.bib
24
+ ---
25
+
26
+
27
+ # Summary
28
+ `Sports2D` provides a user-friendly solution for automatic and real-time analysis of multi-person human movement from a video or a webcam. This Python package uses 2D markerless pose estimation to detect joint coordinates from videos, and then computes 2D joint and segment angles.
29
+ <!--It can be installed either locally or on a free server, which makes it possible to run it directly from a smartphone.-->
30
+
31
+ The output incorporates annotated videos and image sequences overlaid with joint locations, joint angles, and segment angles, for each of the detected persons. For further analysis, this information is also stored in files that are editable with MS Excel® or any other spreadsheet editor (.trc for locations, .mot for angles, according to the OpenSim standard [@Delp_2007; @Seth_2018]).
32
+
33
+ `Sports2D` may be useful for clinicians as a decision supports system (CDSS) [@Bright_2012], as well as for gait analysis [@Whittle_2014] or ergonomic design [@Patrizi_2016]. Sports coaches can also use it to quantify key performance indicators (KPIs) [@ODonoghue_2008; @Pagnon_2022b], or to better understand, correct, or compare athletes' movement patterns. Finally, it can be used by researchers as a simple tool for 2D biomechanical analysis on the fly. One of the multiple use cases would be to evaluate ACL injury risks from deceleration drills [@Di_2021].
34
+
35
+
36
+ # Statement of need
37
+
38
+ Machine learning has recently accelerated the development and availability of markerless kinematics [@Zheng_2023; @Colyer_2018], which allows for the collection of kinematic data without the use of physical markers or manual annotation.
39
+
40
+ A large part of these tools focus on 2D analysis, such as `OpenPose` [@Cao_2019], `BlazePose` [@Bazarevsky_2020], or `DeepLabCut` [@Mathis_2018]. More recently, `RTMPose` [@Jiang_2023] offered a faster, more accurate, and more flexible alternative to the previous solutions. Still, although they bear the advantage of being open-source, none of these options are easily accessible to people who do not have a programming background, and the output is not directly usable for further kinematic investigation. Yet, clinical acceptance of new technologies is known to be influenced not only by their price value and their performance, but also by their perceived ease-of-use, the social influence around the customer, and other parameters described by the Unified Theory of Acceptance and Use of Technology (UTAUT2) [@Venkatesh_2012].
41
+
42
+ ![Example results from a demonstration video.\label{fig:Demo_results}](Demo_results.png)
43
+
44
+ ![Example joint angle output.\label{fig:Demo_plots}](Demo_plots.png)
45
+
46
+ In fact, there is a clear trade-off between accuracy and ease-of-use. Some open-source tools focus on the accuracy of a 3D analysis by using multiple cameras, such as `Pose2Sim` [@Pagnon_2022a] or `OpenCap` [@Uhlrich_2022]. These, however, require either a certain level of programming skills, a particular hardware setup, or to send data to a server that does not comply with the European rules of data protection (GDPR). Some other tools choose to put more emphasis on user-friendliness, and point out that 2D analysis is often sufficient when the analyzed motion mostly lies in the sagittal or frontal plane. `Sit2Stand` [@Boswell_2023] and `CP GaitLab` [@Kidzinski_2020] provide such tools, although they are focused on very specific tasks. `Kinovea` [@Kinovea], on the other hand, is a widely used software for sports performance analysis, which provides multiple additional features. However, it relies on tracking manual labels. This can be time-consuming when analyzing numerous videos, and it may also be lacking robustness when the tracked points are lost. It is also only available on Windows, and requires the user to transfer files prior to analysis.
47
+
48
+ `Sports2D` is an alternative solution that aims at filling this gap: it is free and open-source, straightforward to install and to run, can be run on any platform, can be run locally for data protection, and it automatically provides 2D joint and segment angles without the need for manual annotation. It is also robust and flexible, works in real-time, supports multi-person analysis, and can process one video, several videos simultaneously, or a webcam stream. The output is provided as .trc files for locations and .mot files for angles, which makes it compatible with OpenSim [@Delp_2007; @Seth_2018] and readable by any spreadsheet software for further statistical analysis.
49
+
50
+
51
+ # Workflow
52
+
53
+ ## Installation and usage
54
+
55
+ `Sports2D` is installed under Python via `pip install sports2d`. If a valid CUDA installation is found, Sports2D uses the GPU, otherwise it uses the CPU with OpenVINO acceleration.
56
+
57
+ <!-- `Sports2D` can be installed and run two different ways: locally, or on a Google Colab® free server [@Bisong_2019].
58
+
59
+ * *If run locally*, it is installed under Python via `pip install sports2d`. If a valid CUDA installation is found, Sports2D uses the GPU, otherwise it uses the CPU with OpenVino acceleration.
60
+
61
+ * *If run on Colab*, it can be installed in one click from any computer or smartphone device, either every time the user needs it, or once for all on Google Drive®. Results are automatically saved on Google Drive®. The arguments are the same as with the local installation. A full video tutorial can be found at this address: [https://www.youtube.com/watch?v=Er5RpcJ8o1Y](https://www.youtube.com/watch?v=Er5RpcJ8o1Y).-->
62
+
63
+ A detailed installation and usage guide can be found on the repository: https://github.com/davidpagnon/Sports2D.
64
+
65
+ ## Sports2D method details
66
+
67
+ [Sports2D]{.ul}:
68
+
69
+ 1. Reads stream from a webcam, from one video, or from a list of videos. It selects an optional specified time range to process.
70
+ 2. Sets up the RTMLib pose tracker with specified parameters. It can be run in lightweight, balanced, or performance mode, and for faster inference, keypoints can be tracked for a certain number of frames instead of detected. Any RTMPose model can be used.
71
+ 3. Tracks people so that their IDs are consistent across frames. A person is associated with another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears for a few frames. This carefully crafted `sports2d` tracker runs at a speed comparable to the RTMLib one but is much more robust. The user can still choose the RTMLib method if they need it by using the `tracking_mode` argument.
72
+ 4. Retrieves the keypoints with high enough confidence, and only keeps the persons with enough average high-confidence.
73
+ 5. Computes the selected joint and segment angles, and flips them on the left/right side if the respective foot is pointing to the left/right. The user can select which angles they want to compute, display, and save.
74
+ 6. Draws bounding boxes around each person and writes their IDs\
75
+ Draws the skeleton and the keypoints, with a green to red color scale to account for their confidence\
76
+ Draws joint and segment angles on the body, and writes the values either near the joint/segment, or on the upper-left of the image with a progress bar
77
+ 7. Interpolates missing pose and angle sequences if gaps are not too large. Filters them with the selected filter (among `Butterworth`, `Gaussian`, `LOESS`, or `Median`) and their parameters
78
+ 8. Optionally shows processed images, saves them, or saves them as a video\
79
+ Optionally plots pose and angle data before and after processing for comparison\
80
+ Optionally saves poses for each person as a TRC file, and angles as a MOT file
81
+
82
+ <br>
83
+
84
+ [The Demo video]{.ul} that Sports2D is tested on is voluntarily challenging, in order to demonstrate the robustness of the process after sorting, interpolation and filtering. It contains:
85
+
86
+ * One person walking in the sagittal plane
87
+ * One person in the frontal plane. This person then performs a flip while being backlit, both of which are challenging for the pose detection algorithm
88
+ * One tiny person flickering in the background who needs to be ignored
89
+
90
+ <br>
91
+
92
+ [Joint and segment angle estimation]{.ul}:
93
+
94
+ Specific joint and segment angles can be chosen. They are consistent regardless of the direction the participant is facing: the participant is considered to look to the left when their toes are to the left of their heels, and to the right otherwise. Resulting angles can be filtered in the same way as point coordinates, and they can also be plotted.
95
+
96
+ Joint angle conventions are as follows (\autoref{fig:joint_angle_conventions}):
97
+
98
+ * Ankle dorsiflexion: Between heel and big toe, and ankle and knee.\
99
+ *-90° when the foot is aligned with the shank.*
100
+ * Knee flexion: Between hip, knee, and ankle.\
101
+ *0° when the shank is aligned with the thigh.*
102
+ * Hip flexion: Between knee, hip, and shoulder.\
103
+ *0° when the trunk is aligned with the thigh.*
104
+ * Shoulder flexion: Between hip, shoulder, and elbow.\
105
+ *180° when the arm is aligned with the trunk.*
106
+ * Elbow flexion: Between wrist, elbow, and shoulder.\
107
+ *0° when the forearm is aligned with the arm.*
108
+
109
+ Segment angles are measured anticlockwise between the horizontal and the segment lines:
110
+
111
+ * Foot: Between heel and big toe.
112
+ * Shank: Between knee and ankle.
113
+ * Thigh: Between hip and knee.
114
+ * Pelvis: Between left and right hip.
115
+ * Trunk: Between hip midpoint and shoulder midpoint.
116
+ * Shoulders: Between left and right shoulder.
117
+ * Head: Between neck and top of the head.
118
+ * Arm: Between shoulder and elbow.
119
+ * Forearm: Between elbow and wrist.
120
+
121
+ ![Joint angle conventions\label{fig:joint_angle_conventions}](joint_convention.png)
122
+
123
+
124
+ # Limitations
125
+
126
+ The user of `Sports2D` should be aware of the following limitations:
127
+
128
+ * Results are acceptable only if the participants move in the 2D plane, either in the frontal plane or in the sagittal one. If you need research-grade markerless joint kinematics, consider using several cameras, and constraining angles to a biomechanically accurate model. See `Pose2Sim` [@Pagnon_2022a] for example.
129
+ * Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect [@Wade_2022], especially if motion blur is significant such as on some broadcast videos.
130
+ <!--* Google Colab does not follow the European GDPR requirements regarding data privacy [@Minssen_2020]. Install locally if this matters.-->
131
+
132
+
133
+ # Acknowledgements
134
+
135
+ I would like to acknowledge Rob Olivar, a sports coach who enlightened me about the need for such a tool.\
136
+ I also acknowledge the work of the dedicated people involved in the many major open-source software programs and packages used by `Sports2D`, such as `Python`, `RTMPose`, `OpenCV` [@Bradski_2000], among others.
137
+
138
+
139
+ # References
140
+
141
+
Binary file
Binary file