sports2d 0.8.19__tar.gz → 0.8.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sports2d-0.8.19 → sports2d-0.8.21}/PKG-INFO +67 -37
- {sports2d-0.8.19 → sports2d-0.8.21}/README.md +65 -35
- sports2d-0.8.21/Sports2D/Demo/Calib_demo.toml +12 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Demo/Config_demo.toml +4 -3
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Sports2D.py +16 -5
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Utilities/tests.py +30 -8
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/process.py +58 -17
- {sports2d-0.8.19 → sports2d-0.8.21}/pyproject.toml +2 -2
- {sports2d-0.8.19 → sports2d-0.8.21}/sports2d.egg-info/PKG-INFO +67 -37
- {sports2d-0.8.19 → sports2d-0.8.21}/sports2d.egg-info/SOURCES.txt +1 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/sports2d.egg-info/requires.txt +1 -1
- {sports2d-0.8.19 → sports2d-0.8.21}/.github/workflows/continuous-integration.yml +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/.github/workflows/joss_pdf.yml +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/.github/workflows/publish-on-release.yml +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/.gitignore +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/CITATION.cff +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/Demo_plots.png +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/Demo_results.png +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/Demo_terminal.png +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/Person_selection.png +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/Video_tuto_Sports2D_Colab.png +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/joint_convention.png +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/paper.bib +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/paper.md +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/sports2d_blender.gif +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Content/sports2d_opensim.gif +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/LICENSE +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Demo/demo.mp4 +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Sports2D.ipynb +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Utilities/__init__.py +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/Utilities/common.py +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/Sports2D/__init__.py +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/setup.cfg +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/sports2d.egg-info/dependency_links.txt +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/sports2d.egg-info/entry_points.txt +0 -0
- {sports2d-0.8.19 → sports2d-0.8.21}/sports2d.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: sports2d
|
|
3
|
-
Version: 0.8.
|
|
3
|
+
Version: 0.8.21
|
|
4
4
|
Summary: Compute 2D human pose and angles from a video or a webcam.
|
|
5
5
|
Author-email: David Pagnon <contact@david-pagnon.com>
|
|
6
6
|
Maintainer-email: David Pagnon <contact@david-pagnon.com>
|
|
@@ -38,7 +38,7 @@ Requires-Dist: openvino
|
|
|
38
38
|
Requires-Dist: opencv-python<4.12
|
|
39
39
|
Requires-Dist: imageio_ffmpeg
|
|
40
40
|
Requires-Dist: deep-sort-realtime
|
|
41
|
-
Requires-Dist: Pose2Sim>=0.10.
|
|
41
|
+
Requires-Dist: Pose2Sim>=0.10.36
|
|
42
42
|
Dynamic: license-file
|
|
43
43
|
|
|
44
44
|
|
|
@@ -67,6 +67,7 @@ Dynamic: license-file
|
|
|
67
67
|
</br>
|
|
68
68
|
|
|
69
69
|
> **`Announcements:`**
|
|
70
|
+
> - Generate or import a calibration file, OpenSim skeleton overlay **New in v0.9!**
|
|
70
71
|
> - Select only the persons you want to analyze **New in v0.8!**
|
|
71
72
|
> - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
|
|
72
73
|
> - Any detector and pose estimation model can be used. **New in v0.6!**
|
|
@@ -218,16 +219,19 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
|
|
|
218
219
|
|
|
219
220
|
1. **Install the Pose2Sim_Blender add-on.**\
|
|
220
221
|
Follow instructions on the [Pose2Sim_Blender](https://github.com/davidpagnon/Pose2Sim_Blender) add-on page.
|
|
222
|
+
2. **Import the camera and video.**
|
|
223
|
+
- **Cameras -> Import**: Open your `demo_calib.toml` file from your `result_dir` folder.
|
|
224
|
+
- **Images/Videos -> Show**: open your video file (e.g., `demo_Sports2D.mp4`).\
|
|
225
|
+
-> **Other tools -> See through camera**
|
|
221
226
|
2. **Open your point coordinates.**\
|
|
222
|
-
**
|
|
223
|
-
|
|
227
|
+
**OpenSim data -> Markers**: Open your trc file(e.g., `demo_Sports2D_m_person00.trc`) from your `result_dir` folder.\
|
|
224
228
|
This will optionally create **an animated rig** based on the motion of the captured person.
|
|
225
229
|
3. **Open your animated skeleton:**\
|
|
226
230
|
Make sure you first set `--do_ik True` ([full install](#full-install) required). See [inverse kinematics](#run-inverse-kinematics) section for more details.
|
|
227
|
-
- **
|
|
228
|
-
- **
|
|
231
|
+
- **OpenSim data -> Model**: Open your scaled model (e.g., `demo_Sports2D_m_person00_LSTM.osim`).
|
|
232
|
+
- **OpenSim data -> Motion**: Open your motion file (e.g., `demo_Sports2D_m_person00_LSTM_ik.mot`).
|
|
229
233
|
|
|
230
|
-
The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)**
|
|
234
|
+
The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)** [](https://discord.com/invite/4mXUdSFjmt)
|
|
231
235
|
|
|
232
236
|
<img src="Content/sports2d_blender.gif" width="760">
|
|
233
237
|
|
|
@@ -284,7 +288,7 @@ If you only want to analyze a subset of the detected persons, you can use the `-
|
|
|
284
288
|
sports2d --nb_persons_to_detect 2 --person_ordering_method highest_likelihood
|
|
285
289
|
```
|
|
286
290
|
|
|
287
|
-
We recommend
|
|
291
|
+
We recommend using the `on_click` method if you can afford a manual input. This lets the user handle both the person number and their order in the same stage. When prompted, select the persons you are interested in in the desired order. In our case, lets slide to a frame where both people are visible, and select the woman first, then the man.
|
|
288
292
|
|
|
289
293
|
Otherwise, if you want to run Sports2D automatically for example, you can choose other ordering methods such as 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'.
|
|
290
294
|
|
|
@@ -301,28 +305,32 @@ sports2d --person_ordering_method on_click
|
|
|
301
305
|
|
|
302
306
|
|
|
303
307
|
#### Get coordinates in meters:
|
|
304
|
-
> **N.B.:**
|
|
308
|
+
> **N.B.:** The Z coordinate (depth) should not be overly trusted.
|
|
305
309
|
|
|
306
|
-
|
|
307
|
-
You may need to convert pixel coordinates to meters.\
|
|
308
|
-
Just provide the height of the reference person (and their ID in case of multiple person detection).
|
|
310
|
+
You may want coordinates in meters rather than pixels. 2 options to do so:
|
|
309
311
|
|
|
310
|
-
|
|
312
|
+
1. **Just provide the height of a reference person**:
|
|
313
|
+
- Their height in meters is be compared with their height in pixels to get a pixel-to-meter conversion factor.
|
|
314
|
+
- To estimate the depth coordinates, specify which side of the person is visible: `left`, `right`, `front`, or `back`. Use `auto` if you want it to be automatically determined (only works for motions in the sagittal plane), or `none` if you want to keep 2D coordinates instead of 3D (if the person turns around, for example).
|
|
315
|
+
- The floor angle is automatically estimated from gait, as well as the origin of the xy axis. The person trajectory is corrected accordingly. You can use the `--floor_angle` and `--xy_origin` parameters to manually specify them if your subject is not travelling horizontally or if you want the origin not to be under their feet (note that the `y` axis points down).
|
|
316
|
+
|
|
317
|
+
**N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
|
|
311
318
|
|
|
312
|
-
|
|
313
|
-
|
|
319
|
+
``` cmd
|
|
320
|
+
sports2d --first_person_height 1.65 --visible_side auto front none
|
|
321
|
+
```
|
|
322
|
+
``` cmd
|
|
323
|
+
sports2d --first_person_height 1.65 --visible_side auto front none `
|
|
324
|
+
--person_ordering_method on_click `
|
|
325
|
+
--floor_angle 0 --xy_origin 0 940
|
|
326
|
+
```
|
|
314
327
|
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
``` cmd
|
|
319
|
-
sports2d --
|
|
320
|
-
```
|
|
321
|
-
``` cmd
|
|
322
|
-
sports2d --to_meters True --first_person_height 1.65 --visible_side auto front none `
|
|
323
|
-
--person_ordering_method on_click `
|
|
324
|
-
--floor_angle 0 --xy_origin 0 940
|
|
325
|
-
```
|
|
328
|
+
2. **Or use a calibration file**:\
|
|
329
|
+
It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
|
|
330
|
+
|
|
331
|
+
``` cmd
|
|
332
|
+
sports2d --calib_file Calib_demo.toml --visible_side auto front none
|
|
333
|
+
```
|
|
326
334
|
|
|
327
335
|
<br>
|
|
328
336
|
|
|
@@ -337,18 +345,22 @@ OpenSim inverse kinematics allows you to set joint constraints, joint angle limi
|
|
|
337
345
|
This is done via [Pose2Sim](https://github.com/perfanalytics/pose2sim).\
|
|
338
346
|
Model scaling is done according to the mean of the segment lengths, across a subset of frames. We remove the 10% fastest frames (potential outliers), the frames where the speed is 0 (person probably out of frame), the frames where the average knee and hip flexion angles are above 45° (pose estimation is not precise when the person is crouching) and the 20% most extreme segment values after the previous operations (potential outliers). All these parameters can be edited in your Config.toml file.
|
|
339
347
|
|
|
348
|
+
**N.B.: This will not work on sections where the person is not moving in a single plane. You can split your video into several time ranges if needed.**
|
|
349
|
+
|
|
340
350
|
```cmd
|
|
341
351
|
sports2d --time_range 1.2 2.7 `
|
|
342
352
|
--do_ik true --first_person_height 1.65 --visible_side auto front
|
|
343
353
|
```
|
|
344
354
|
|
|
345
355
|
You can optionally use the LSTM marker augmentation to improve the quality of the output motion.\
|
|
346
|
-
You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis)
|
|
356
|
+
You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis).\
|
|
357
|
+
Optionally again, you can [visualize the overlaid results in Blender](#visualize-in-blender). The automatic calibration won't be accurate with such a small time range, so you need to use the provided calibration file (or one that has been generated from the full walk).
|
|
347
358
|
|
|
348
359
|
```cmd
|
|
349
360
|
sports2d --time_range 1.2 2.7 `
|
|
350
361
|
--do_ik true --first_person_height 1.65 --visible_side left front `
|
|
351
|
-
--use_augmentation True --participant_mass 55.0 67.0
|
|
362
|
+
--use_augmentation True --participant_mass 55.0 67.0 `
|
|
363
|
+
--calib_file Calib_demo.toml
|
|
352
364
|
```
|
|
353
365
|
|
|
354
366
|
<br>
|
|
@@ -376,14 +388,31 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
|
|
|
376
388
|
``` cmd
|
|
377
389
|
sports2d --config Config_demo.toml
|
|
378
390
|
```
|
|
379
|
-
- Run within Python
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
391
|
+
- Run within Python, for example:\
|
|
392
|
+
- Edit `Demo/Config_demo.toml` and run:
|
|
393
|
+
```python
|
|
394
|
+
from Sports2D import Sports2D
|
|
395
|
+
from pathlib import Path
|
|
396
|
+
import toml
|
|
397
|
+
|
|
398
|
+
config_path = Path(Sports2D.__file__).parent / 'Demo'/'Config_demo.toml'
|
|
399
|
+
config_dict = toml.load(config_path)
|
|
400
|
+
Sports2D.process(config_dict)
|
|
401
|
+
```
|
|
402
|
+
- Or you can pass the non default values only:
|
|
403
|
+
```python
|
|
404
|
+
from Sports2D import Sports2D
|
|
405
|
+
config_dict = {
|
|
406
|
+
'base': {
|
|
407
|
+
'nb_persons_to_detect': 1,
|
|
408
|
+
'person_ordering_method': 'greatest_displacement'
|
|
409
|
+
},
|
|
410
|
+
'pose': {
|
|
411
|
+
'mode': 'lightweight',
|
|
412
|
+
'det_frequency': 50
|
|
413
|
+
}}
|
|
414
|
+
Sports2D.process(config_dict)
|
|
415
|
+
```
|
|
387
416
|
|
|
388
417
|
<br>
|
|
389
418
|
|
|
@@ -407,7 +436,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
|
|
|
407
436
|
```cmd
|
|
408
437
|
sports2d --flip_left_right true # Default
|
|
409
438
|
```
|
|
410
|
-
- Correct segment angles according to the estimated camera
|
|
439
|
+
- Correct segment angles according to the estimated camera tilt angle.\
|
|
411
440
|
**N.B.:** *The camera tilt angle is automatically estimated. Set to false if it is actually the floor which is tilted rather than the camera.*
|
|
412
441
|
```cmd
|
|
413
442
|
sports2d --correct_segment_angles_with_floor_angle true # Default
|
|
@@ -477,6 +506,7 @@ sports2d --help
|
|
|
477
506
|
'show_realtime_results': ["R", "show results in real-time. true if not specified"],
|
|
478
507
|
'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
|
|
479
508
|
'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
|
|
509
|
+
'save_graphs': ["", "save position and angle plots of raw and processed results. false if not specified"],
|
|
480
510
|
'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
|
|
481
511
|
'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
|
|
482
512
|
'save_vid': ["V", "save processed video. true if not specified"],
|
|
@@ -24,6 +24,7 @@
|
|
|
24
24
|
</br>
|
|
25
25
|
|
|
26
26
|
> **`Announcements:`**
|
|
27
|
+
> - Generate or import a calibration file, OpenSim skeleton overlay **New in v0.9!**
|
|
27
28
|
> - Select only the persons you want to analyze **New in v0.8!**
|
|
28
29
|
> - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
|
|
29
30
|
> - Any detector and pose estimation model can be used. **New in v0.6!**
|
|
@@ -175,16 +176,19 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
|
|
|
175
176
|
|
|
176
177
|
1. **Install the Pose2Sim_Blender add-on.**\
|
|
177
178
|
Follow instructions on the [Pose2Sim_Blender](https://github.com/davidpagnon/Pose2Sim_Blender) add-on page.
|
|
179
|
+
2. **Import the camera and video.**
|
|
180
|
+
- **Cameras -> Import**: Open your `demo_calib.toml` file from your `result_dir` folder.
|
|
181
|
+
- **Images/Videos -> Show**: open your video file (e.g., `demo_Sports2D.mp4`).\
|
|
182
|
+
-> **Other tools -> See through camera**
|
|
178
183
|
2. **Open your point coordinates.**\
|
|
179
|
-
**
|
|
180
|
-
|
|
184
|
+
**OpenSim data -> Markers**: Open your trc file(e.g., `demo_Sports2D_m_person00.trc`) from your `result_dir` folder.\
|
|
181
185
|
This will optionally create **an animated rig** based on the motion of the captured person.
|
|
182
186
|
3. **Open your animated skeleton:**\
|
|
183
187
|
Make sure you first set `--do_ik True` ([full install](#full-install) required). See [inverse kinematics](#run-inverse-kinematics) section for more details.
|
|
184
|
-
- **
|
|
185
|
-
- **
|
|
188
|
+
- **OpenSim data -> Model**: Open your scaled model (e.g., `demo_Sports2D_m_person00_LSTM.osim`).
|
|
189
|
+
- **OpenSim data -> Motion**: Open your motion file (e.g., `demo_Sports2D_m_person00_LSTM_ik.mot`).
|
|
186
190
|
|
|
187
|
-
The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)**
|
|
191
|
+
The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)** [](https://discord.com/invite/4mXUdSFjmt)
|
|
188
192
|
|
|
189
193
|
<img src="Content/sports2d_blender.gif" width="760">
|
|
190
194
|
|
|
@@ -241,7 +245,7 @@ If you only want to analyze a subset of the detected persons, you can use the `-
|
|
|
241
245
|
sports2d --nb_persons_to_detect 2 --person_ordering_method highest_likelihood
|
|
242
246
|
```
|
|
243
247
|
|
|
244
|
-
We recommend
|
|
248
|
+
We recommend using the `on_click` method if you can afford a manual input. This lets the user handle both the person number and their order in the same stage. When prompted, select the persons you are interested in in the desired order. In our case, lets slide to a frame where both people are visible, and select the woman first, then the man.
|
|
245
249
|
|
|
246
250
|
Otherwise, if you want to run Sports2D automatically for example, you can choose other ordering methods such as 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'.
|
|
247
251
|
|
|
@@ -258,28 +262,32 @@ sports2d --person_ordering_method on_click
|
|
|
258
262
|
|
|
259
263
|
|
|
260
264
|
#### Get coordinates in meters:
|
|
261
|
-
> **N.B.:**
|
|
265
|
+
> **N.B.:** The Z coordinate (depth) should not be overly trusted.
|
|
262
266
|
|
|
263
|
-
|
|
264
|
-
You may need to convert pixel coordinates to meters.\
|
|
265
|
-
Just provide the height of the reference person (and their ID in case of multiple person detection).
|
|
267
|
+
You may want coordinates in meters rather than pixels. 2 options to do so:
|
|
266
268
|
|
|
267
|
-
|
|
269
|
+
1. **Just provide the height of a reference person**:
|
|
270
|
+
- Their height in meters is be compared with their height in pixels to get a pixel-to-meter conversion factor.
|
|
271
|
+
- To estimate the depth coordinates, specify which side of the person is visible: `left`, `right`, `front`, or `back`. Use `auto` if you want it to be automatically determined (only works for motions in the sagittal plane), or `none` if you want to keep 2D coordinates instead of 3D (if the person turns around, for example).
|
|
272
|
+
- The floor angle is automatically estimated from gait, as well as the origin of the xy axis. The person trajectory is corrected accordingly. You can use the `--floor_angle` and `--xy_origin` parameters to manually specify them if your subject is not travelling horizontally or if you want the origin not to be under their feet (note that the `y` axis points down).
|
|
273
|
+
|
|
274
|
+
**N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
|
|
268
275
|
|
|
269
|
-
|
|
270
|
-
|
|
276
|
+
``` cmd
|
|
277
|
+
sports2d --first_person_height 1.65 --visible_side auto front none
|
|
278
|
+
```
|
|
279
|
+
``` cmd
|
|
280
|
+
sports2d --first_person_height 1.65 --visible_side auto front none `
|
|
281
|
+
--person_ordering_method on_click `
|
|
282
|
+
--floor_angle 0 --xy_origin 0 940
|
|
283
|
+
```
|
|
271
284
|
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
``` cmd
|
|
276
|
-
sports2d --
|
|
277
|
-
```
|
|
278
|
-
``` cmd
|
|
279
|
-
sports2d --to_meters True --first_person_height 1.65 --visible_side auto front none `
|
|
280
|
-
--person_ordering_method on_click `
|
|
281
|
-
--floor_angle 0 --xy_origin 0 940
|
|
282
|
-
```
|
|
285
|
+
2. **Or use a calibration file**:\
|
|
286
|
+
It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
|
|
287
|
+
|
|
288
|
+
``` cmd
|
|
289
|
+
sports2d --calib_file Calib_demo.toml --visible_side auto front none
|
|
290
|
+
```
|
|
283
291
|
|
|
284
292
|
<br>
|
|
285
293
|
|
|
@@ -294,18 +302,22 @@ OpenSim inverse kinematics allows you to set joint constraints, joint angle limi
|
|
|
294
302
|
This is done via [Pose2Sim](https://github.com/perfanalytics/pose2sim).\
|
|
295
303
|
Model scaling is done according to the mean of the segment lengths, across a subset of frames. We remove the 10% fastest frames (potential outliers), the frames where the speed is 0 (person probably out of frame), the frames where the average knee and hip flexion angles are above 45° (pose estimation is not precise when the person is crouching) and the 20% most extreme segment values after the previous operations (potential outliers). All these parameters can be edited in your Config.toml file.
|
|
296
304
|
|
|
305
|
+
**N.B.: This will not work on sections where the person is not moving in a single plane. You can split your video into several time ranges if needed.**
|
|
306
|
+
|
|
297
307
|
```cmd
|
|
298
308
|
sports2d --time_range 1.2 2.7 `
|
|
299
309
|
--do_ik true --first_person_height 1.65 --visible_side auto front
|
|
300
310
|
```
|
|
301
311
|
|
|
302
312
|
You can optionally use the LSTM marker augmentation to improve the quality of the output motion.\
|
|
303
|
-
You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis)
|
|
313
|
+
You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis).\
|
|
314
|
+
Optionally again, you can [visualize the overlaid results in Blender](#visualize-in-blender). The automatic calibration won't be accurate with such a small time range, so you need to use the provided calibration file (or one that has been generated from the full walk).
|
|
304
315
|
|
|
305
316
|
```cmd
|
|
306
317
|
sports2d --time_range 1.2 2.7 `
|
|
307
318
|
--do_ik true --first_person_height 1.65 --visible_side left front `
|
|
308
|
-
--use_augmentation True --participant_mass 55.0 67.0
|
|
319
|
+
--use_augmentation True --participant_mass 55.0 67.0 `
|
|
320
|
+
--calib_file Calib_demo.toml
|
|
309
321
|
```
|
|
310
322
|
|
|
311
323
|
<br>
|
|
@@ -333,14 +345,31 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
|
|
|
333
345
|
``` cmd
|
|
334
346
|
sports2d --config Config_demo.toml
|
|
335
347
|
```
|
|
336
|
-
- Run within Python
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
348
|
+
- Run within Python, for example:\
|
|
349
|
+
- Edit `Demo/Config_demo.toml` and run:
|
|
350
|
+
```python
|
|
351
|
+
from Sports2D import Sports2D
|
|
352
|
+
from pathlib import Path
|
|
353
|
+
import toml
|
|
354
|
+
|
|
355
|
+
config_path = Path(Sports2D.__file__).parent / 'Demo'/'Config_demo.toml'
|
|
356
|
+
config_dict = toml.load(config_path)
|
|
357
|
+
Sports2D.process(config_dict)
|
|
358
|
+
```
|
|
359
|
+
- Or you can pass the non default values only:
|
|
360
|
+
```python
|
|
361
|
+
from Sports2D import Sports2D
|
|
362
|
+
config_dict = {
|
|
363
|
+
'base': {
|
|
364
|
+
'nb_persons_to_detect': 1,
|
|
365
|
+
'person_ordering_method': 'greatest_displacement'
|
|
366
|
+
},
|
|
367
|
+
'pose': {
|
|
368
|
+
'mode': 'lightweight',
|
|
369
|
+
'det_frequency': 50
|
|
370
|
+
}}
|
|
371
|
+
Sports2D.process(config_dict)
|
|
372
|
+
```
|
|
344
373
|
|
|
345
374
|
<br>
|
|
346
375
|
|
|
@@ -364,7 +393,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
|
|
|
364
393
|
```cmd
|
|
365
394
|
sports2d --flip_left_right true # Default
|
|
366
395
|
```
|
|
367
|
-
- Correct segment angles according to the estimated camera
|
|
396
|
+
- Correct segment angles according to the estimated camera tilt angle.\
|
|
368
397
|
**N.B.:** *The camera tilt angle is automatically estimated. Set to false if it is actually the floor which is tilted rather than the camera.*
|
|
369
398
|
```cmd
|
|
370
399
|
sports2d --correct_segment_angles_with_floor_angle true # Default
|
|
@@ -434,6 +463,7 @@ sports2d --help
|
|
|
434
463
|
'show_realtime_results': ["R", "show results in real-time. true if not specified"],
|
|
435
464
|
'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
|
|
436
465
|
'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
|
|
466
|
+
'save_graphs': ["", "save position and angle plots of raw and processed results. false if not specified"],
|
|
437
467
|
'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
|
|
438
468
|
'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
|
|
439
469
|
'save_vid': ["V", "save processed video. true if not specified"],
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
[demo]
|
|
2
|
+
name = "demo"
|
|
3
|
+
size = [ 1768, 994]
|
|
4
|
+
matrix = [ [ 2520.0897058227038, 0.0, 884.0], [ 0.0, 2520.0897058227038, 497.0], [ 0.0, 0.0, 1.0]]
|
|
5
|
+
distortions = [ 0.0, 0.0, 0.0, 0.0]
|
|
6
|
+
rotation = [ 1.2082126924727719, 1.2098328575850605, -1.2082126924727719]
|
|
7
|
+
translation = [ -3.510103521992233, 1.7079310029359385, 10.0]
|
|
8
|
+
fisheye = false
|
|
9
|
+
|
|
10
|
+
[metadata]
|
|
11
|
+
adjusted = false
|
|
12
|
+
error = 0.0
|
|
@@ -60,8 +60,8 @@ pose_model = 'Body_with_feet' #With RTMLib:
|
|
|
60
60
|
# - Hand (HAND_21, only lightweight mode. Potentially better results with Whole_body),
|
|
61
61
|
# - Face (FACE_106),
|
|
62
62
|
# - Animal (ANIMAL2D_17)
|
|
63
|
-
#
|
|
64
|
-
#
|
|
63
|
+
# ⚠ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
|
|
64
|
+
# ⚠ For Face and Animal, use mode="""{dictionary}""", and find the corresponding .onnx model there https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose
|
|
65
65
|
mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
|
|
66
66
|
|
|
67
67
|
# A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
|
|
@@ -103,7 +103,7 @@ keypoint_number_threshold = 0.3 # Person will be ignored if the number of go
|
|
|
103
103
|
# Pixel to meters conversion
|
|
104
104
|
to_meters = true
|
|
105
105
|
make_c3d = true
|
|
106
|
-
save_calib =
|
|
106
|
+
save_calib = false
|
|
107
107
|
|
|
108
108
|
# If conversion from first_person_height
|
|
109
109
|
floor_angle = 'auto' # 'auto' or a value in degrees, eg 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)
|
|
@@ -139,6 +139,7 @@ reject_outliers = true # Hampel filter for outlier rejection before other f
|
|
|
139
139
|
|
|
140
140
|
filter = true
|
|
141
141
|
show_graphs = true # Show plots of raw and processed results
|
|
142
|
+
save_graphs = true # Save position and angle plots of raw and processed results
|
|
142
143
|
filter_type = 'butterworth' # butterworth, kalman, gcv_spline, gaussian, loess, median, butterworth_on_speed
|
|
143
144
|
|
|
144
145
|
# Most intuitive and standard filter in biomechanics
|
|
@@ -28,7 +28,7 @@
|
|
|
28
28
|
- Run on webcam with default parameters:
|
|
29
29
|
sports2d --video_input webcam
|
|
30
30
|
- Run with custom parameters (all non specified are set to default):
|
|
31
|
-
sports2d --
|
|
31
|
+
sports2d --show_graphs False --time_range 0 2.1 --result_dir path_to_result_dir
|
|
32
32
|
sports2d --person_detection_method highest_likelihood --mode lightweight --det_frequency 50
|
|
33
33
|
- Run with a toml configuration file:
|
|
34
34
|
sports2d --config path_to_config.toml
|
|
@@ -44,7 +44,7 @@
|
|
|
44
44
|
pip install .
|
|
45
45
|
|
|
46
46
|
-----
|
|
47
|
-
|
|
47
|
+
⚠ Warning ⚠
|
|
48
48
|
-----
|
|
49
49
|
- The angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.
|
|
50
50
|
- It will only lead to acceptable results if the persons move in the 2D plane (sagittal plane).
|
|
@@ -196,7 +196,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
|
|
|
196
196
|
'calib_file': '',
|
|
197
197
|
'floor_angle': 'auto',
|
|
198
198
|
'xy_origin': ['auto'],
|
|
199
|
-
'save_calib':
|
|
199
|
+
'save_calib': False
|
|
200
200
|
},
|
|
201
201
|
'angles': {'display_angle_values_on': ['body', 'list'],
|
|
202
202
|
'fontSize': 0.3,
|
|
@@ -236,6 +236,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
|
|
|
236
236
|
'reject_outliers': True,
|
|
237
237
|
'filter': True,
|
|
238
238
|
'show_graphs': True,
|
|
239
|
+
'save_graphs': True,
|
|
239
240
|
'filter_type': 'butterworth',
|
|
240
241
|
'butterworth': {'order': 4, 'cut_off_frequency': 6.0},
|
|
241
242
|
'kalman': {'trust_ratio': 500.0, 'smooth':True},
|
|
@@ -279,6 +280,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
|
|
|
279
280
|
'show_realtime_results': ["R", "show results in real-time. true if not specified"],
|
|
280
281
|
'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
|
|
281
282
|
'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
|
|
283
|
+
'save_graphs': ["", "save position and angle plots of raw and processed results. true if not specified"],
|
|
282
284
|
'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
|
|
283
285
|
'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
|
|
284
286
|
'save_vid': ["V", "save processed video. true if not specified"],
|
|
@@ -471,6 +473,14 @@ def set_nested_value(config, flat_key, value):
|
|
|
471
473
|
d[keys[-1]] = value
|
|
472
474
|
|
|
473
475
|
|
|
476
|
+
def merge_dicts(original, overrides):
|
|
477
|
+
for key, value in overrides.items():
|
|
478
|
+
if isinstance(value, dict) and isinstance(original.get(key), dict):
|
|
479
|
+
merge_dicts(original[key], value)
|
|
480
|
+
else:
|
|
481
|
+
original[key] = value
|
|
482
|
+
|
|
483
|
+
|
|
474
484
|
def str2bool(v):
|
|
475
485
|
'''
|
|
476
486
|
Convert a string to a boolean value.
|
|
@@ -498,7 +508,8 @@ def process(config='Config_demo.toml'):
|
|
|
498
508
|
from Sports2D.process import process_fun
|
|
499
509
|
|
|
500
510
|
if type(config) == dict:
|
|
501
|
-
config_dict =
|
|
511
|
+
config_dict = DEFAULT_CONFIG.copy()
|
|
512
|
+
merge_dicts(config_dict, config)
|
|
502
513
|
else:
|
|
503
514
|
config_dict = read_config_file(config)
|
|
504
515
|
video_dir, video_files, frame_rates, time_ranges, result_dir = base_params(config_dict)
|
|
@@ -546,7 +557,7 @@ def main():
|
|
|
546
557
|
- Run on webcam with default parameters:
|
|
547
558
|
sports2d --video_input webcam
|
|
548
559
|
- Run with custom parameters (all non specified are set to default):
|
|
549
|
-
sports2d --
|
|
560
|
+
sports2d --show_graphs False --time_range 0 2.1 --result_dir path_to_result_dir
|
|
550
561
|
sports2d --mode lightweight --det_frequency 50
|
|
551
562
|
- Run with a toml configuration file:
|
|
552
563
|
sports2d --config path_to_config.toml
|
|
@@ -45,7 +45,7 @@ def test_workflow():
|
|
|
45
45
|
## From Python ##
|
|
46
46
|
#############################
|
|
47
47
|
|
|
48
|
-
# Default
|
|
48
|
+
# Default from the demo config file
|
|
49
49
|
config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
|
|
50
50
|
config_dict = toml.load(config_path)
|
|
51
51
|
video_dir = Path(__file__).resolve().parent.parent / 'Demo'
|
|
@@ -53,6 +53,28 @@ def test_workflow():
|
|
|
53
53
|
config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
|
|
54
54
|
config_dict.get("base").update({"show_realtime_results":False})
|
|
55
55
|
config_dict.get("post-processing").update({"show_graphs":False})
|
|
56
|
+
config_dict.get("post-processing").update({"save_graphs":False})
|
|
57
|
+
|
|
58
|
+
Sports2D.process(config_dict)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
# Only passing the updated values
|
|
62
|
+
video_dir = Path(__file__).resolve().parent.parent / 'Demo'
|
|
63
|
+
config_dict = {
|
|
64
|
+
'base': {
|
|
65
|
+
'nb_persons_to_detect': 1,
|
|
66
|
+
'person_ordering_method': 'greatest_displacement',
|
|
67
|
+
"show_realtime_results":False
|
|
68
|
+
},
|
|
69
|
+
'pose': {
|
|
70
|
+
'mode': 'lightweight',
|
|
71
|
+
'det_frequency': 50
|
|
72
|
+
},
|
|
73
|
+
'post-processing': {
|
|
74
|
+
'show_graphs':False,
|
|
75
|
+
'save_graphs':False
|
|
76
|
+
}
|
|
77
|
+
}
|
|
56
78
|
|
|
57
79
|
Sports2D.process(config_dict)
|
|
58
80
|
|
|
@@ -62,28 +84,28 @@ def test_workflow():
|
|
|
62
84
|
#############################
|
|
63
85
|
|
|
64
86
|
# Default
|
|
65
|
-
demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False"]
|
|
87
|
+
demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False"]
|
|
66
88
|
subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
|
|
67
89
|
|
|
68
90
|
# With loading a trc file, visible_side 'front', first_person_height '1.76", floor_angle 0, xy_origin [0, 928]
|
|
69
|
-
demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
|
|
91
|
+
demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
|
|
70
92
|
"--load_trc_px", os.path.join(root_dir, "demo_Sports2D", "demo_Sports2D_px_person01.trc"),
|
|
71
93
|
"--visible_side", "front", "--first_person_height", "1.76", "--time_range", "1.2", "2.7",
|
|
72
94
|
"--floor_angle", "0", "--xy_origin", "0", "928"]
|
|
73
95
|
subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
|
|
74
96
|
|
|
75
97
|
# With no pixels to meters conversion, one person to select, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
|
|
76
|
-
demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
|
|
77
|
-
|
|
98
|
+
demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
|
|
99
|
+
# "--calib_file", "calib_demo.toml",
|
|
78
100
|
"--nb_persons_to_detect", "1", "--person_ordering_method", "greatest_displacement",
|
|
79
101
|
"--mode", "lightweight", "--det_frequency", "50",
|
|
80
102
|
"--slowmo_factor", "4",
|
|
81
|
-
"--filter_type", "gaussian",
|
|
103
|
+
"--filter_type", "gaussian", "--use_augmentation", "False",
|
|
82
104
|
"--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
|
|
83
105
|
subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
|
|
84
106
|
|
|
85
107
|
# With a time range, inverse kinematics, marker augmentation
|
|
86
|
-
demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False",
|
|
108
|
+
demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
|
|
87
109
|
"--time_range", "1.2", "2.7",
|
|
88
110
|
"--do_ik", "True", "--use_augmentation", "True",
|
|
89
111
|
"--nb_persons_to_detect", "all", "--first_person_height", "1.65",
|
|
@@ -97,7 +119,7 @@ def test_workflow():
|
|
|
97
119
|
config_dict.get("base").update({"video_dir": str(video_dir)})
|
|
98
120
|
config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
|
|
99
121
|
with open(config_path, 'w') as f: toml.dump(config_dict, f)
|
|
100
|
-
demo_cmd5 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
|
|
122
|
+
demo_cmd5 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",]
|
|
101
123
|
subprocess.run(demo_cmd5, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
|
|
102
124
|
|
|
103
125
|
|
|
@@ -29,7 +29,7 @@
|
|
|
29
29
|
- optionally plots pose and angle data before and after processing for comparison
|
|
30
30
|
- optionally saves poses for each person as a trc file, and angles as a mot file
|
|
31
31
|
|
|
32
|
-
|
|
32
|
+
⚠ Warning ⚠
|
|
33
33
|
- The pose detection is only as good as the pose estimation algorithm, i.e., it is not perfect.
|
|
34
34
|
- It will lead to reliable results only if the persons move in the 2D plane (sagittal or frontal plane).
|
|
35
35
|
- The persons need to be filmed as perpendicularly as possible from their side.
|
|
@@ -77,6 +77,7 @@ from matplotlib.widgets import Slider, Button
|
|
|
77
77
|
from matplotlib import patheffects
|
|
78
78
|
|
|
79
79
|
from rtmlib import PoseTracker, BodyWithFeet, Wholebody, Body, Hand, Custom
|
|
80
|
+
from rtmlib.tools.object_detection.post_processings import nms
|
|
80
81
|
from deep_sort_realtime.deepsort_tracker import DeepSort
|
|
81
82
|
|
|
82
83
|
from Sports2D.Utilities.common import *
|
|
@@ -789,10 +790,10 @@ def make_mot_with_angles(angles, time, mot_path):
|
|
|
789
790
|
return angles
|
|
790
791
|
|
|
791
792
|
|
|
792
|
-
def pose_plots(trc_data_unfiltered, trc_data, person_id):
|
|
793
|
+
def pose_plots(trc_data_unfiltered, trc_data, person_id, show=True):
|
|
793
794
|
'''
|
|
794
795
|
Displays trc filtered and unfiltered data for comparison
|
|
795
|
-
|
|
796
|
+
⚠ Often crashes on the third window...
|
|
796
797
|
|
|
797
798
|
INPUTS:
|
|
798
799
|
- trc_data_unfiltered: pd.DataFrame. The unfiltered trc data
|
|
@@ -835,13 +836,16 @@ def pose_plots(trc_data_unfiltered, trc_data, person_id):
|
|
|
835
836
|
|
|
836
837
|
pw.addPlot(keypoint, f)
|
|
837
838
|
|
|
838
|
-
|
|
839
|
+
if show:
|
|
840
|
+
pw.show()
|
|
839
841
|
|
|
842
|
+
return pw
|
|
840
843
|
|
|
841
|
-
|
|
844
|
+
|
|
845
|
+
def angle_plots(angle_data_unfiltered, angle_data, person_id, show=True):
|
|
842
846
|
'''
|
|
843
847
|
Displays angle filtered and unfiltered data for comparison
|
|
844
|
-
|
|
848
|
+
⚠ Often crashes on the third window...
|
|
845
849
|
|
|
846
850
|
INPUTS:
|
|
847
851
|
- angle_data_unfiltered: pd.DataFrame. The unfiltered angle data
|
|
@@ -878,7 +882,10 @@ def angle_plots(angle_data_unfiltered, angle_data, person_id):
|
|
|
878
882
|
|
|
879
883
|
pw.addPlot(angle, f)
|
|
880
884
|
|
|
881
|
-
|
|
885
|
+
if show:
|
|
886
|
+
pw.show()
|
|
887
|
+
|
|
888
|
+
return pw
|
|
882
889
|
|
|
883
890
|
|
|
884
891
|
def get_personIDs_with_highest_scores(all_frames_scores, nb_persons_to_detect):
|
|
@@ -1374,7 +1381,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
1374
1381
|
- optionally plots pose and angle data before and after processing for comparison
|
|
1375
1382
|
- optionally saves poses for each person as a trc file, and angles as a mot file
|
|
1376
1383
|
|
|
1377
|
-
|
|
1384
|
+
⚠ Warning ⚠
|
|
1378
1385
|
- The pose detection is only as good as the pose estimation algorithm, i.e., it is not perfect.
|
|
1379
1386
|
- It will lead to reliable results only if the persons move in the 2D plane (sagittal or frontal plane).
|
|
1380
1387
|
- The persons need to be filmed as perpendicularly as possible from their side.
|
|
@@ -1490,6 +1497,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
1490
1497
|
handle_LR_swap = config_dict.get('post-processing').get('handle_LR_swap', False)
|
|
1491
1498
|
reject_outliers = config_dict.get('post-processing').get('reject_outliers', False)
|
|
1492
1499
|
show_plots = config_dict.get('post-processing').get('show_graphs')
|
|
1500
|
+
save_plots = config_dict.get('post-processing').get('save_graphs')
|
|
1493
1501
|
filter_type = config_dict.get('post-processing').get('filter_type')
|
|
1494
1502
|
butterworth_filter_order = config_dict.get('post-processing').get('butterworth', {}).get('order')
|
|
1495
1503
|
butterworth_filter_cutoff = config_dict.get('post-processing').get('butterworth', {}).get('cut_off_frequency')
|
|
@@ -1513,6 +1521,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
1513
1521
|
output_dir_name = f'{video_file_stem}_Sports2D'
|
|
1514
1522
|
video_file_path = video_dir / video_file
|
|
1515
1523
|
output_dir = result_dir / output_dir_name
|
|
1524
|
+
plots_output_dir = output_dir / f'{output_dir_name}_graphs'
|
|
1516
1525
|
img_output_dir = output_dir / f'{output_dir_name}_img'
|
|
1517
1526
|
vid_output_path = output_dir / f'{output_dir_name}.mp4'
|
|
1518
1527
|
pose_output_path = output_dir / f'{output_dir_name}_px.trc'
|
|
@@ -1521,6 +1530,8 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
1521
1530
|
output_dir.mkdir(parents=True, exist_ok=True)
|
|
1522
1531
|
if save_img:
|
|
1523
1532
|
img_output_dir.mkdir(parents=True, exist_ok=True)
|
|
1533
|
+
if save_plots:
|
|
1534
|
+
plots_output_dir.mkdir(parents=True, exist_ok=True)
|
|
1524
1535
|
|
|
1525
1536
|
# Inverse kinematics settings
|
|
1526
1537
|
do_ik = config_dict.get('kinematics').get('do_ik')
|
|
@@ -1721,6 +1732,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
1721
1732
|
# Detect poses
|
|
1722
1733
|
keypoints, scores = pose_tracker(frame)
|
|
1723
1734
|
|
|
1735
|
+
# Non maximum suppression (at pose level, not detection)
|
|
1736
|
+
frame_shape = frame.shape
|
|
1737
|
+
bboxes = bbox_xyxy_compute(frame_shape, keypoints, padding=0)
|
|
1738
|
+
score_bboxes = np.array([np.mean(s) for s in scores])
|
|
1739
|
+
keep = nms(bboxes, score_bboxes, nms_thr=0.45)
|
|
1740
|
+
keypoints, scores = keypoints[keep], scores[keep]
|
|
1741
|
+
|
|
1724
1742
|
# Track poses across frames
|
|
1725
1743
|
if tracking_mode == 'deepsort':
|
|
1726
1744
|
keypoints, scores = sort_people_deepsort(keypoints, scores, deepsort_tracker, frame, frame_count)
|
|
@@ -1999,9 +2017,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
1999
2017
|
columns_to_concat.extend([all_frames_X_person.iloc[:,kpt], all_frames_Y_person.iloc[:,kpt], all_frames_Z_homog.iloc[:,kpt]])
|
|
2000
2018
|
trc_data_unfiltered_i = pd.concat([all_frames_time] + columns_to_concat, axis=1)
|
|
2001
2019
|
trc_data_unfiltered.append(trc_data_unfiltered_i)
|
|
2002
|
-
if
|
|
2003
|
-
pose_plots(trc_data_unfiltered_i, trc_data_i, i)
|
|
2004
|
-
|
|
2020
|
+
if not to_meters and (show_plots or save_plots):
|
|
2021
|
+
pw = pose_plots(trc_data_unfiltered_i, trc_data_i, i, show=show_plots)
|
|
2022
|
+
if save_plots:
|
|
2023
|
+
for n, f in enumerate(pw.figure_handles):
|
|
2024
|
+
dpi = pw.canvases[i].figure.dpi
|
|
2025
|
+
f.set_size_inches(1280/dpi, 720/dpi)
|
|
2026
|
+
title = pw.tabs.tabText(n)
|
|
2027
|
+
plot_path = plots_output_dir / (pose_output_path.stem + f'_person{i:02d}_px_{title.replace(" ","_").replace("/","_")}.png')
|
|
2028
|
+
f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
|
|
2029
|
+
logging.info(f'Pose plots (px) saved in {plots_output_dir}.')
|
|
2030
|
+
|
|
2005
2031
|
all_frames_X_processed[:,idx_person,:], all_frames_Y_processed[:,idx_person,:] = all_frames_X_person_filt, all_frames_Y_person_filt
|
|
2006
2032
|
if calculate_angles or save_angles:
|
|
2007
2033
|
all_frames_X_flipped_processed[:,idx_person,:] = all_frames_X_flipped_person
|
|
@@ -2087,9 +2113,17 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2087
2113
|
px_to_m_unfiltered_i = [convert_px_to_meters(trc_data_unfiltered[i][kpt_name], first_person_height, height_px, cx, cy, -floor_angle_estim) for kpt_name in new_keypoints_names]
|
|
2088
2114
|
trc_data_unfiltered_m_i = pd.concat([all_frames_time.rename('time')]+px_to_m_unfiltered_i, axis=1)
|
|
2089
2115
|
|
|
2090
|
-
if to_meters and show_plots:
|
|
2091
|
-
pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i)
|
|
2092
|
-
|
|
2116
|
+
if to_meters and (show_plots or save_plots):
|
|
2117
|
+
pw = pose_plots(trc_data_unfiltered_m_i, trc_data_m_i, i, show=show_plots)
|
|
2118
|
+
if save_plots:
|
|
2119
|
+
for n, f in enumerate(pw.figure_handles):
|
|
2120
|
+
dpi = pw.canvases[i].figure.dpi
|
|
2121
|
+
f.set_size_inches(1280/dpi, 720/dpi)
|
|
2122
|
+
title = pw.tabs.tabText(n)
|
|
2123
|
+
plot_path = plots_output_dir / (pose_output_path_m.stem + f'_person{i:02d}_m_{title.replace(" ","_").replace("/","_")}.png')
|
|
2124
|
+
f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
|
|
2125
|
+
logging.info(f'Pose plots (m) saved in {plots_output_dir}.')
|
|
2126
|
+
|
|
2093
2127
|
# Write to trc file
|
|
2094
2128
|
trc_data_m.append(trc_data_m_i)
|
|
2095
2129
|
pose_path_person_m_i = (pose_output_path.parent / (pose_output_path_m.stem + f'_person{i:02d}.trc'))
|
|
@@ -2248,9 +2282,16 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
|
|
|
2248
2282
|
logging.info(f'Angles saved to {angles_path_person.resolve()}.')
|
|
2249
2283
|
|
|
2250
2284
|
# Plotting angles before and after interpolation and filtering
|
|
2251
|
-
|
|
2252
|
-
|
|
2253
|
-
angle_plots(all_frames_angles_person, angle_data, i) # i = current person
|
|
2285
|
+
all_frames_angles_person.insert(0, 'time', all_frames_time)
|
|
2286
|
+
if save_plots and (show_plots or save_plots):
|
|
2287
|
+
pw = angle_plots(all_frames_angles_person, angle_data, i, show=show_plots) # i = current person
|
|
2288
|
+
for n, f in enumerate(pw.figure_handles):
|
|
2289
|
+
dpi = pw.canvases[i].figure.dpi
|
|
2290
|
+
f.set_size_inches(1280/dpi, 720/dpi)
|
|
2291
|
+
title = pw.tabs.tabText(n)
|
|
2292
|
+
plot_path = plots_output_dir / (pose_output_path_m.stem + f'_person{i:02d}_ang_{title.replace(" ","_").replace("/","_")}.png')
|
|
2293
|
+
f.savefig(plot_path, dpi=dpi, bbox_inches='tight')
|
|
2294
|
+
logging.info(f'Pose plots (m) saved in {plots_output_dir}.')
|
|
2254
2295
|
|
|
2255
2296
|
|
|
2256
2297
|
#%% ==================================================
|
|
@@ -46,10 +46,10 @@ dependencies = [
|
|
|
46
46
|
"c3d",
|
|
47
47
|
"rtmlib",
|
|
48
48
|
"openvino",
|
|
49
|
-
"opencv-python<4.12", #
|
|
49
|
+
"opencv-python<4.12", # otherwise forces numpy>=2.0, which is incompatible with some opensim/python combinations
|
|
50
50
|
"imageio_ffmpeg",
|
|
51
51
|
"deep-sort-realtime",
|
|
52
|
-
"Pose2Sim>=0.10.
|
|
52
|
+
"Pose2Sim>=0.10.36"
|
|
53
53
|
]
|
|
54
54
|
|
|
55
55
|
[tool.setuptools_scm]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: sports2d
|
|
3
|
-
Version: 0.8.
|
|
3
|
+
Version: 0.8.21
|
|
4
4
|
Summary: Compute 2D human pose and angles from a video or a webcam.
|
|
5
5
|
Author-email: David Pagnon <contact@david-pagnon.com>
|
|
6
6
|
Maintainer-email: David Pagnon <contact@david-pagnon.com>
|
|
@@ -38,7 +38,7 @@ Requires-Dist: openvino
|
|
|
38
38
|
Requires-Dist: opencv-python<4.12
|
|
39
39
|
Requires-Dist: imageio_ffmpeg
|
|
40
40
|
Requires-Dist: deep-sort-realtime
|
|
41
|
-
Requires-Dist: Pose2Sim>=0.10.
|
|
41
|
+
Requires-Dist: Pose2Sim>=0.10.36
|
|
42
42
|
Dynamic: license-file
|
|
43
43
|
|
|
44
44
|
|
|
@@ -67,6 +67,7 @@ Dynamic: license-file
|
|
|
67
67
|
</br>
|
|
68
68
|
|
|
69
69
|
> **`Announcements:`**
|
|
70
|
+
> - Generate or import a calibration file, OpenSim skeleton overlay **New in v0.9!**
|
|
70
71
|
> - Select only the persons you want to analyze **New in v0.8!**
|
|
71
72
|
> - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
|
|
72
73
|
> - Any detector and pose estimation model can be used. **New in v0.6!**
|
|
@@ -218,16 +219,19 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
|
|
|
218
219
|
|
|
219
220
|
1. **Install the Pose2Sim_Blender add-on.**\
|
|
220
221
|
Follow instructions on the [Pose2Sim_Blender](https://github.com/davidpagnon/Pose2Sim_Blender) add-on page.
|
|
222
|
+
2. **Import the camera and video.**
|
|
223
|
+
- **Cameras -> Import**: Open your `demo_calib.toml` file from your `result_dir` folder.
|
|
224
|
+
- **Images/Videos -> Show**: open your video file (e.g., `demo_Sports2D.mp4`).\
|
|
225
|
+
-> **Other tools -> See through camera**
|
|
221
226
|
2. **Open your point coordinates.**\
|
|
222
|
-
**
|
|
223
|
-
|
|
227
|
+
**OpenSim data -> Markers**: Open your trc file (e.g., `demo_Sports2D_m_person00.trc`) from your `result_dir` folder.\
|
|
224
228
|
This will optionally create **an animated rig** based on the motion of the captured person.
|
|
225
229
|
3. **Open your animated skeleton:**\
|
|
226
230
|
Make sure you first set `--do_ik True` ([full install](#full-install) required). See [inverse kinematics](#run-inverse-kinematics) section for more details.
|
|
227
|
-
- **
|
|
228
|
-
- **
|
|
231
|
+
- **OpenSim data -> Model**: Open your scaled model (e.g., `demo_Sports2D_m_person00_LSTM.osim`).
|
|
232
|
+
- **OpenSim data -> Motion**: Open your motion file (e.g., `demo_Sports2D_m_person00_LSTM_ik.mot`).
|
|
229
233
|
|
|
230
|
-
The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)**
|
|
234
|
+
The OpenSim skeleton is not rigged yet. **[Feel free to contribute!](https://github.com/perfanalytics/pose2sim/issues/40)** [](https://discord.com/invite/4mXUdSFjmt)
|
|
231
235
|
|
|
232
236
|
<img src="Content/sports2d_blender.gif" width="760">
|
|
233
237
|
|
|
@@ -284,7 +288,7 @@ If you only want to analyze a subset of the detected persons, you can use the `-
|
|
|
284
288
|
sports2d --nb_persons_to_detect 2 --person_ordering_method highest_likelihood
|
|
285
289
|
```
|
|
286
290
|
|
|
287
|
-
We recommend
|
|
291
|
+
We recommend using the `on_click` method if you can afford a manual input. This lets the user handle both the person number and their order in the same stage. When prompted, select the persons you are interested in in the desired order. In our case, let's slide to a frame where both people are visible, and select the woman first, then the man.
|
|
288
292
|
|
|
289
293
|
Otherwise, if you want to run Sports2D automatically for example, you can choose other ordering methods such as 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'.
|
|
290
294
|
|
|
@@ -301,28 +305,32 @@ sports2d --person_ordering_method on_click
|
|
|
301
305
|
|
|
302
306
|
|
|
303
307
|
#### Get coordinates in meters:
|
|
304
|
-
> **N.B.:**
|
|
308
|
+
> **N.B.:** The Z coordinate (depth) should not be overly trusted.
|
|
305
309
|
|
|
306
|
-
|
|
307
|
-
You may need to convert pixel coordinates to meters.\
|
|
308
|
-
Just provide the height of the reference person (and their ID in case of multiple person detection).
|
|
310
|
+
You may want coordinates in meters rather than pixels. 2 options to do so:
|
|
309
311
|
|
|
310
|
-
|
|
312
|
+
1. **Just provide the height of a reference person**:
|
|
313
|
+
- Their height in meters is compared with their height in pixels to get a pixel-to-meter conversion factor.
|
|
314
|
+
- To estimate the depth coordinates, specify which side of the person is visible: `left`, `right`, `front`, or `back`. Use `auto` if you want it to be automatically determined (only works for motions in the sagittal plane), or `none` if you want to keep 2D coordinates instead of 3D (if the person turns around, for example).
|
|
315
|
+
- The floor angle is automatically estimated from gait, as well as the origin of the xy axis. The person trajectory is corrected accordingly. You can use the `--floor_angle` and `--xy_origin` parameters to manually specify them if your subject is not travelling horizontally or if you want the origin not to be under their feet (note that the `y` axis points down).
|
|
316
|
+
|
|
317
|
+
**N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
|
|
311
318
|
|
|
312
|
-
|
|
313
|
-
|
|
319
|
+
``` cmd
|
|
320
|
+
sports2d --first_person_height 1.65 --visible_side auto front none
|
|
321
|
+
```
|
|
322
|
+
``` cmd
|
|
323
|
+
sports2d --first_person_height 1.65 --visible_side auto front none `
|
|
324
|
+
--person_ordering_method on_click `
|
|
325
|
+
--floor_angle 0 --xy_origin 0 940
|
|
326
|
+
```
|
|
314
327
|
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
``` cmd
|
|
319
|
-
sports2d --
|
|
320
|
-
```
|
|
321
|
-
``` cmd
|
|
322
|
-
sports2d --to_meters True --first_person_height 1.65 --visible_side auto front none `
|
|
323
|
-
--person_ordering_method on_click `
|
|
324
|
-
--floor_angle 0 --xy_origin 0 940
|
|
325
|
-
```
|
|
328
|
+
2. **Or use a calibration file**:\
|
|
329
|
+
It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
|
|
330
|
+
|
|
331
|
+
``` cmd
|
|
332
|
+
sports2d --calib_file Calib_demo.toml --visible_side auto front none
|
|
333
|
+
```
|
|
326
334
|
|
|
327
335
|
<br>
|
|
328
336
|
|
|
@@ -337,18 +345,22 @@ OpenSim inverse kinematics allows you to set joint constraints, joint angle limi
|
|
|
337
345
|
This is done via [Pose2Sim](https://github.com/perfanalytics/pose2sim).\
|
|
338
346
|
Model scaling is done according to the mean of the segment lengths, across a subset of frames. We remove the 10% fastest frames (potential outliers), the frames where the speed is 0 (person probably out of frame), the frames where the average knee and hip flexion angles are above 45° (pose estimation is not precise when the person is crouching) and the 20% most extreme segment values after the previous operations (potential outliers). All these parameters can be edited in your Config.toml file.
|
|
339
347
|
|
|
348
|
+
**N.B.: This will not work on sections where the person is not moving in a single plane. You can split your video into several time ranges if needed.**
|
|
349
|
+
|
|
340
350
|
```cmd
|
|
341
351
|
sports2d --time_range 1.2 2.7 `
|
|
342
352
|
--do_ik true --first_person_height 1.65 --visible_side auto front
|
|
343
353
|
```
|
|
344
354
|
|
|
345
355
|
You can optionally use the LSTM marker augmentation to improve the quality of the output motion.\
|
|
346
|
-
You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis)
|
|
356
|
+
You can also optionally give the participants proper masses. Mass has no influence on motion, only on forces (if you decide to further pursue kinetics analysis).\
|
|
357
|
+
Optionally again, you can [visualize the overlaid results in Blender](#visualize-in-blender). The automatic calibration won't be accurate with such a small time range, so you need to use the provided calibration file (or one that has been generated from the full walk).
|
|
347
358
|
|
|
348
359
|
```cmd
|
|
349
360
|
sports2d --time_range 1.2 2.7 `
|
|
350
361
|
--do_ik true --first_person_height 1.65 --visible_side left front `
|
|
351
|
-
--use_augmentation True --participant_mass 55.0 67.0
|
|
362
|
+
--use_augmentation True --participant_mass 55.0 67.0 `
|
|
363
|
+
--calib_file Calib_demo.toml
|
|
352
364
|
```
|
|
353
365
|
|
|
354
366
|
<br>
|
|
@@ -376,14 +388,31 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
|
|
|
376
388
|
``` cmd
|
|
377
389
|
sports2d --config Config_demo.toml
|
|
378
390
|
```
|
|
379
|
-
- Run within Python
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
391
|
+
- Run within Python, for example:\
|
|
392
|
+
- Edit `Demo/Config_demo.toml` and run:
|
|
393
|
+
```python
|
|
394
|
+
from Sports2D import Sports2D
|
|
395
|
+
from pathlib import Path
|
|
396
|
+
import toml
|
|
397
|
+
|
|
398
|
+
config_path = Path(Sports2D.__file__).parent / 'Demo'/'Config_demo.toml'
|
|
399
|
+
config_dict = toml.load(config_path)
|
|
400
|
+
Sports2D.process(config_dict)
|
|
401
|
+
```
|
|
402
|
+
- Or you can pass the non default values only:
|
|
403
|
+
```python
|
|
404
|
+
from Sports2D import Sports2D
|
|
405
|
+
config_dict = {
|
|
406
|
+
'base': {
|
|
407
|
+
'nb_persons_to_detect': 1,
|
|
408
|
+
'person_ordering_method': 'greatest_displacement'
|
|
409
|
+
},
|
|
410
|
+
'pose': {
|
|
411
|
+
'mode': 'lightweight',
|
|
412
|
+
'det_frequency': 50
|
|
413
|
+
}}
|
|
414
|
+
Sports2D.process(config_dict)
|
|
415
|
+
```
|
|
387
416
|
|
|
388
417
|
<br>
|
|
389
418
|
|
|
@@ -407,7 +436,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
|
|
|
407
436
|
```cmd
|
|
408
437
|
sports2d --flip_left_right true # Default
|
|
409
438
|
```
|
|
410
|
-
- Correct segment angles according to the estimated camera
|
|
439
|
+
- Correct segment angles according to the estimated camera tilt angle.\
|
|
411
440
|
**N.B.:** *The camera tilt angle is automatically estimated. Set to false if it is actually the floor which is tilted rather than the camera.*
|
|
412
441
|
```cmd
|
|
413
442
|
sports2d --correct_segment_angles_with_floor_angle true # Default
|
|
@@ -477,6 +506,7 @@ sports2d --help
|
|
|
477
506
|
'show_realtime_results': ["R", "show results in real-time. true if not specified"],
|
|
478
507
|
'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
|
|
479
508
|
'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
|
|
509
|
+
'save_graphs': ["", "save position and angle plots of raw and processed results. false if not specified"],
|
|
480
510
|
'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
|
|
481
511
|
'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
|
|
482
512
|
'save_vid': ["V", "save processed video. true if not specified"],
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|