sports2d 0.8.24.tar.gz → 0.8.26.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sports2d-0.8.26/.github/workflows/sync_to_hf.yml.bak +19 -0
- sports2d-0.8.26/Content/huggingface_demo.png +0 -0
- {sports2d-0.8.24/sports2d.egg-info → sports2d-0.8.26}/PKG-INFO +60 -38
- {sports2d-0.8.24 → sports2d-0.8.26}/README.md +59 -37
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Demo/Config_demo.toml +17 -11
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Sports2D.py +14 -8
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Utilities/common.py +3 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Utilities/tests.py +3 -2
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/process.py +436 -227
- {sports2d-0.8.24 → sports2d-0.8.26/sports2d.egg-info}/PKG-INFO +60 -38
- {sports2d-0.8.24 → sports2d-0.8.26}/sports2d.egg-info/SOURCES.txt +2 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/.github/workflows/continuous-integration.yml +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/.github/workflows/joss_pdf.yml +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/.github/workflows/publish-on-release.yml +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/.gitignore +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/CITATION.cff +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/Demo_plots.png +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/Demo_results.png +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/Demo_terminal.png +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/Person_selection.png +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/Video_tuto_Sports2D_Colab.png +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/joint_convention.png +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/paper.bib +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/paper.md +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/sports2d_blender.gif +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Content/sports2d_opensim.gif +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/LICENSE +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Demo/Calib_demo.toml +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Demo/demo.mp4 +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Sports2D.ipynb +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/Utilities/__init__.py +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/Sports2D/__init__.py +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/pyproject.toml +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/setup.cfg +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/sports2d.egg-info/dependency_links.txt +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/sports2d.egg-info/entry_points.txt +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/sports2d.egg-info/requires.txt +0 -0
- {sports2d-0.8.24 → sports2d-0.8.26}/sports2d.egg-info/top_level.txt +0 -0
.github/workflows/sync_to_hf.yml.bak (new file)
@@ -0,0 +1,19 @@
+name: Sync to Hugging Face Space
+on:
+  push:
+    branches: [ main ]
+jobs:
+  sync:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Push to Hugging Face Space
+        run: |
+          git clone https://${{ secrets.HUGGINGFACE_TOKEN }}@huggingface.co/spaces/DavidPagnon/sports2d
+          cd sports2d
+          git config --global user.name "DavidPagnon"
+          git config --global user.email "contact@david-pagnon.com"
+          cp -r ../Sports2D/* .
+          git add .
+          git commit -m "Sync from GitHub"
+          git push https://${{ secrets.HUGGINGFACE_TOKEN }}@huggingface.co/spaces/DavidPagnon/sports2d

Content/huggingface_demo.png
Binary file
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sports2d
-Version: 0.8.24
+Version: 0.8.26
 Summary: Compute 2D human pose and angles from a video or a webcam.
 Author-email: David Pagnon <contact@david-pagnon.com>
 Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -40,6 +40,8 @@ Dynamic: license-file
 [](https://opensource.org/licenses/BSD-3-Clause)
 \
 [](https://discord.com/invite/4mXUdSFjmt)
+[](https://huggingface.co/spaces/DavidPagnon/sports2d)
+
 
 <!-- [](https://bit.ly/Sports2D_Colab)-->
 
@@ -52,7 +54,7 @@ Dynamic: license-file
 </br>
 
 > **`Announcements:`**
-> -
+> - Compensate for floor angle, floor height, depth perspective effects, generate a calibration file **New in v0.8.25!**
 > - Select only the persons you want to analyze **New in v0.8!**
 > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
 > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -80,7 +82,7 @@ https://github.com/user-attachments/assets/2ce62012-f28c-4e23-b3b8-f68931bacb77
 <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
 
 `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
-`Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal
+`Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal). The persons need to be filmed as parallel as possible to the motion plane.\
 If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
 
 <!--`Warning:` Google Colab does not follow the European GDPR requirements regarding data privacy. [Install locally](#installation) if this matters.-->
@@ -90,7 +92,8 @@ If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
 
 ## Contents
 1. [Installation and Demonstration](#installation-and-demonstration)
-   1. [
+   1. [Test it on Hugging face](#test-it-on-hugging-face)
+   1. [Local installation](#local-installation)
       1. [Quick install](#quick-install)
      2. [Full install](#full-install)
    2. [Demonstration](#demonstration)
@@ -119,7 +122,16 @@ If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
 
 ## Installation and Demonstration
 
-
+
+### Test it on Hugging face
+
+Test an online, limited version [on Hugging Face](https://huggingface.co/spaces/DavidPagnon/sports2d): [](https://huggingface.co/spaces/DavidPagnon/sports2d)
+
+<img src="Content/huggingface_demo.png" width="760">
+
+
+
+### Local installation
 
 <!--- OPTION 0: **Use Colab** \
 User-friendly (but full) version, also works on a phone or a tablet.\
@@ -294,30 +306,35 @@ sports2d --person_ordering_method on_click
 #### Get coordinates in meters:
 > **N.B.:** The Z coordinate (depth) should not be overly trusted.
 
-
+To convert from pixels to meters, you need at least the height of a participant. Better results can be obtained by also providing information on depth. The camera horizon angle and the floor height are generally automatically estimated. **N.B.: A calibration file will be generated.**
 
-
-
-
-
-
-**N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
+- The pixel-to-meters scale is computed from the ratio between the height of the participant in meters and in pixels. The height in pixels is automatically calculated; use the `--first_person_height` parameter to specify the height in meters.
+- Depth perspective effects can be compensated either with the camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or from a calibration file. Use the `--perspective_unit` ('distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib') and `--perspective_value` parameters (resp. in m, px, deg, rad, or '').
+- The camera horizon angle can be estimated from kinematics (`auto`), from a calibration file (`from_calib`), or manually (float). Use the `--floor_angle` parameter.
+- Likewise for the floor level. Use the `--xy_origin` parameter.
 
-
-sports2d --first_person_height 1.65 --visible_side auto front none
-```
-``` cmd
-sports2d --first_person_height 1.65 --visible_side auto front none `
---person_ordering_method on_click `
---floor_angle 0 --xy_origin 0 940
-```
+If one of these parameters is set to `from_calib`, then use `--calib_file`.
 
-2. **Or use a calibration file**:\
-It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
 
-
-
-
+``` cmd
+sports2d --first_person_height 1.65
+```
+``` cmd
+sports2d --first_person_height 1.65 `
+--floor_angle auto `
+--xy_origin auto `
+--perspective_unit distance_m --perspective_value 10
+```
+``` cmd
+sports2d --first_person_height 1.65 `
+--floor_angle 0 `
+--xy_origin from_calib `
+--perspective_unit from_calib --calib_file Sports2D\Demo\Calib_demo.toml
+```
+``` cmd
+sports2d --first_person_height 1.65 `
+--perspective_unit f_px --perspective_value 2520
+```
 
 <br>
 
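The `distance_m`, `f_px`, and `fov_*` options for `--perspective_unit` are tied together by the standard pinhole model: an image width and a field of view give a focal length in pixels, and a focal length together with the person's height in meters and in pixels gives an approximate camera-to-person distance. A minimal sketch of those conversions, assuming an ideal pinhole camera with no distortion (the numbers are illustrative, not Sports2D defaults):

```python
import math

def focal_px_from_fov(fov_deg: float, image_width_px: int) -> float:
    """Focal length in pixels for a given horizontal field of view."""
    return (image_width_px / 2) / math.tan(math.radians(fov_deg) / 2)

def distance_m_from_focal(focal_px: float, height_m: float, height_px: float) -> float:
    """Approximate camera-to-person distance from similar triangles."""
    return focal_px * height_m / height_px

# Illustrative numbers: a 1920 px wide image, ~42 deg horizontal FOV,
# and a 1.65 m tall person imaged 540 px tall.
f_px = focal_px_from_fov(42, 1920)      # ~2500 px, close to the f_px example above
print(round(f_px), round(distance_m_from_focal(f_px, 1.65, 540), 1))
```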
@@ -419,7 +436,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
 sports2d --calculate_angles false
 ```
 - Flip angles when the person faces the other side.\
-**N.B
+**N.B.: Set to false when sprinting.** *We consider that each limb "looks" to the right if the toe keypoint is to the right of the heel one. This is not always true, particularly during the swing phase of sprinting. Set it to false if you want timeseries to be continuous even when the participant switches their stance.*
 ```cmd
 sports2d --flip_left_right true # Default
 ```
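The flipping rule quoted above boils down to comparing the toe and heel x coordinates in image space. The snippet below only illustrates that rule; it is not the Sports2D implementation, and the keypoint names and sign convention are assumptions:

```python
def limb_faces_right(toe_x: float, heel_x: float) -> bool:
    """A limb 'looks' to the right when the toe is to the right of the heel (image x grows rightward)."""
    return toe_x > heel_x

def maybe_flip(angle_deg: float, toe_x: float, heel_x: float, flip_left_right: bool = True) -> float:
    """Mirror the angle when the limb faces left, so left- and right-facing strides share one convention."""
    if flip_left_right and not limb_faces_right(toe_x, heel_x):
        return -angle_deg
    return angle_deg

print(maybe_flip(15.0, toe_x=310, heel_x=350))   # limb faces left -> angle sign is flipped
```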
@@ -480,20 +497,25 @@ sports2d --help
 'config': ["C", "path to a toml configuration file"],
 
 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
+'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
 'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
 'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
 'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
 'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
+'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
+'perspective_value': ["", "Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'"],
+'perspective_unit': ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
+'do_ik': ["", "do inverse kinematics. false if not specified"],
+'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
 'load_trc_px': ["", "load trc file to avaid running pose estimation again. false if not specified"],
 'compare': ["", "visually compare motion with trc file. false if not specified"],
-'webcam_id': ["w", "webcam ID. 0 if not specified"],
-'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
 'video_dir': ["d", "current directory if not specified"],
 'result_dir': ["r", "current directory if not specified"],
+'webcam_id': ["w", "webcam ID. 0 if not specified"],
 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
-'save_graphs': ["", "save position and angle plots of raw and processed results.
+'save_graphs': ["", "save position and angle plots of raw and processed results. true if not specified"],
 'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
 'save_vid': ["V", "save processed video. true if not specified"],
@@ -514,11 +536,9 @@ sports2d --help
 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
 'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
 'save_calib': ["", "save calibration file. true if not specified"],
-'do_ik': ["", "do inverse kinematics. false if not specified"],
-'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
 'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
-'
-'
+'distortions': ["", "camera distortion coefficients [k1, k2, p1, p2, k3] or 'from_calib'. [0.0, 0.0, 0.0, 0.0, 0.0] if not specified"],
+'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
@@ -528,6 +548,7 @@ sports2d --help
 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
 'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., i.e., 30 percent"],
+'max_distance': ["", "If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one. in px or None, 100 by default."],
 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
@@ -539,15 +560,16 @@ sports2d --help
 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
 'sections_to_keep': ["", "all, largest, first, or last. Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one"],
+'min_chunk_size': ["", "Minimum number of valid frames in a row to keep a chunk of data for a person. 10 if not specified"],
 'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
 'filter': ["", "filter results. true if not specified"],
 'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],
+'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
 'order': ["", "order of the Butterworth filter. 4 if not specified"],
-'
+'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' is usually better, unless the signal is too short (noise can then be considered as signal -> trajectories not filtered). 'auto' if not specified"],
+'gcv_smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 1.0 if not specified"],
 'trust_ratio': ["", "trust ratio of the Kalman filter: How much more do you trust triangulation results (measurements), than the assumption of constant acceleration(process)? 500 if not specified"],
 'smooth': ["", "dual Kalman smoothing. true if not specified"],
-'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' if not specified"],
-'smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 0.1 if not specified"],
 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
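For the Butterworth defaults listed above (order 4, 6 Hz cut-off), a zero-phase low-pass can be written with SciPy as below. This is a generic sketch of that kind of filtering rather than the Sports2D code, and the 60 fps frame rate is an assumed example:

```python
import numpy as np
from scipy.signal import butter, filtfilt

def lowpass(trajectory: np.ndarray, fps: float, cut_off_hz: float = 6.0, order: int = 4) -> np.ndarray:
    """Zero-phase Butterworth low-pass of a 1D keypoint trajectory."""
    b, a = butter(order, cut_off_hz / (fps / 2), btype="low")
    return filtfilt(b, a, trajectory)   # forward-backward pass removes phase lag

fps = 60.0                              # assumed video frame rate
t = np.arange(0, 2, 1 / fps)
noisy = np.sin(2 * np.pi * 1.5 * t) + 0.1 * np.random.randn(t.size)
smooth = lowpass(noisy, fps)
print(smooth.shape)
```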
@@ -656,11 +678,11 @@ Sports2D:
 
 2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, the person bounding boxes can be tracked instead of detected every frame. Any RTMPose model can be used.
 
-3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames
+3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames, thanks to the 'sports2D' tracker. [See Release notes of v0.8.22 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.22).
 
 4. **Chooses which persons to analyze.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, you can choose the number of persons to analyze (`nb_persons_to_detect`), and how to order them (`person_ordering_method`). The ordering method can be 'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. `on_click` is default and lets the user click on the persons they are interested in, in the desired order.
 
-4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The
+4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The camera horizon angle and the floor level can either be detected automatically from the gait sequence, be manually specified, or obtained from a calibration file. The depth perspective effects are compensated thanks to the distance from the camera to the subject, the focal length, the field of view, or a calibration file. [See Release notes of v0.8.25 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.25).
 
 5. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
 
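Step 4 above amounts to rotating pixel coordinates so the estimated floor becomes horizontal, moving the origin to the chosen floor point, and scaling by the reference person's height. A minimal sketch of that chain with made-up numbers, leaving out the perspective and distortion corrections the tool also applies:

```python
import numpy as np

def px_to_m(points_px: np.ndarray, floor_angle_deg: float,
            xy_origin_px: np.ndarray, scale_m_per_px: float) -> np.ndarray:
    """Re-origin at the floor point, level the horizon, flip y upward, and scale to meters."""
    a = np.radians(floor_angle_deg)
    rot = np.array([[np.cos(a), np.sin(a)], [-np.sin(a), np.cos(a)]])
    centered = points_px - xy_origin_px     # origin at the chosen floor point
    rotated = centered @ rot.T              # compensate the camera horizon angle
    rotated[:, 1] *= -1                     # image y points down, world y points up
    return rotated * scale_m_per_px

pts = np.array([[960.0, 540.0], [980.0, 400.0]])
print(px_to_m(pts, floor_angle_deg=2.0, xy_origin_px=np.array([0.0, 940.0]),
              scale_m_per_px=1.65 / 540))
```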
README.md
@@ -12,6 +12,8 @@
 [](https://opensource.org/licenses/BSD-3-Clause)
 \
 [](https://discord.com/invite/4mXUdSFjmt)
+[](https://huggingface.co/spaces/DavidPagnon/sports2d)
+
 
 <!-- [](https://bit.ly/Sports2D_Colab)-->
 
@@ -24,7 +26,7 @@
 </br>
 
 > **`Announcements:`**
-> -
+> - Compensate for floor angle, floor height, depth perspective effects, generate a calibration file **New in v0.8.25!**
 > - Select only the persons you want to analyze **New in v0.8!**
 > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
 > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -52,7 +54,7 @@ https://github.com/user-attachments/assets/2ce62012-f28c-4e23-b3b8-f68931bacb77
 <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
 
 `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
-`Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal
+`Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal). The persons need to be filmed as parallel as possible to the motion plane.\
 If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
 
 <!--`Warning:` Google Colab does not follow the European GDPR requirements regarding data privacy. [Install locally](#installation) if this matters.-->
@@ -62,7 +64,8 @@ If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
 
 ## Contents
 1. [Installation and Demonstration](#installation-and-demonstration)
-   1. [
+   1. [Test it on Hugging face](#test-it-on-hugging-face)
+   1. [Local installation](#local-installation)
       1. [Quick install](#quick-install)
      2. [Full install](#full-install)
    2. [Demonstration](#demonstration)
@@ -91,7 +94,16 @@ If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
 
 ## Installation and Demonstration
 
-
+
+### Test it on Hugging face
+
+Test an online, limited version [on Hugging Face](https://huggingface.co/spaces/DavidPagnon/sports2d): [](https://huggingface.co/spaces/DavidPagnon/sports2d)
+
+<img src="Content/huggingface_demo.png" width="760">
+
+
+
+### Local installation
 
 <!--- OPTION 0: **Use Colab** \
 User-friendly (but full) version, also works on a phone or a tablet.\
@@ -266,30 +278,35 @@ sports2d --person_ordering_method on_click
 #### Get coordinates in meters:
 > **N.B.:** The Z coordinate (depth) should not be overly trusted.
 
-
+To convert from pixels to meters, you need at least the height of a participant. Better results can be obtained by also providing information on depth. The camera horizon angle and the floor height are generally automatically estimated. **N.B.: A calibration file will be generated.**
 
-
-
-
-
-
-**N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
+- The pixel-to-meters scale is computed from the ratio between the height of the participant in meters and in pixels. The height in pixels is automatically calculated; use the `--first_person_height` parameter to specify the height in meters.
+- Depth perspective effects can be compensated either with the camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or from a calibration file. Use the `--perspective_unit` ('distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib') and `--perspective_value` parameters (resp. in m, px, deg, rad, or '').
+- The camera horizon angle can be estimated from kinematics (`auto`), from a calibration file (`from_calib`), or manually (float). Use the `--floor_angle` parameter.
+- Likewise for the floor level. Use the `--xy_origin` parameter.
 
-
-sports2d --first_person_height 1.65 --visible_side auto front none
-```
-``` cmd
-sports2d --first_person_height 1.65 --visible_side auto front none `
---person_ordering_method on_click `
---floor_angle 0 --xy_origin 0 940
-```
+If one of these parameters is set to `from_calib`, then use `--calib_file`.
 
-2. **Or use a calibration file**:\
-It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
 
-
-
-
+``` cmd
+sports2d --first_person_height 1.65
+```
+``` cmd
+sports2d --first_person_height 1.65 `
+--floor_angle auto `
+--xy_origin auto `
+--perspective_unit distance_m --perspective_value 10
+```
+``` cmd
+sports2d --first_person_height 1.65 `
+--floor_angle 0 `
+--xy_origin from_calib `
+--perspective_unit from_calib --calib_file Sports2D\Demo\Calib_demo.toml
+```
+``` cmd
+sports2d --first_person_height 1.65 `
+--perspective_unit f_px --perspective_value 2520
+```
 
 <br>
 
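The first bullet above (pixel-to-meters scale from the reference person's height) reduces to a single ratio. A toy illustration, assuming the height in pixels has already been measured on the frames (Sports2D estimates it automatically):

```python
def m_per_px(first_person_height_m: float, person_height_px: float) -> float:
    """Scale factor so that the reference person measures first_person_height_m in the output."""
    return first_person_height_m / person_height_px

scale = m_per_px(1.65, 540.0)               # 540 px tall person, assumed measurement
hip_x_px, hip_y_px = 820.0, 610.0
print(hip_x_px * scale, hip_y_px * scale)   # meters, before floor/perspective corrections
```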
@@ -391,7 +408,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
 sports2d --calculate_angles false
 ```
 - Flip angles when the person faces the other side.\
-**N.B
+**N.B.: Set to false when sprinting.** *We consider that each limb "looks" to the right if the toe keypoint is to the right of the heel one. This is not always true, particularly during the swing phase of sprinting. Set it to false if you want timeseries to be continuous even when the participant switches their stance.*
 ```cmd
 sports2d --flip_left_right true # Default
 ```
@@ -452,20 +469,25 @@ sports2d --help
 'config': ["C", "path to a toml configuration file"],
 
 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
+'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
 'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
 'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
 'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
 'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
+'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
+'perspective_value': ["", "Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'"],
+'perspective_unit': ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
+'do_ik': ["", "do inverse kinematics. false if not specified"],
+'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
 'load_trc_px': ["", "load trc file to avaid running pose estimation again. false if not specified"],
 'compare': ["", "visually compare motion with trc file. false if not specified"],
-'webcam_id': ["w", "webcam ID. 0 if not specified"],
-'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
 'video_dir': ["d", "current directory if not specified"],
 'result_dir': ["r", "current directory if not specified"],
+'webcam_id': ["w", "webcam ID. 0 if not specified"],
 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
-'save_graphs': ["", "save position and angle plots of raw and processed results.
+'save_graphs': ["", "save position and angle plots of raw and processed results. true if not specified"],
 'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
 'save_vid': ["V", "save processed video. true if not specified"],
@@ -486,11 +508,9 @@ sports2d --help
 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
 'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
 'save_calib': ["", "save calibration file. true if not specified"],
-'do_ik': ["", "do inverse kinematics. false if not specified"],
-'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
 'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
-'
-'
+'distortions': ["", "camera distortion coefficients [k1, k2, p1, p2, k3] or 'from_calib'. [0.0, 0.0, 0.0, 0.0, 0.0] if not specified"],
+'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
@@ -500,6 +520,7 @@ sports2d --help
 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
 'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., i.e., 30 percent"],
+'max_distance': ["", "If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one. in px or None, 100 by default."],
 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
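The three `*_threshold` options above act as successive retention tests on every detected person. The sketch below mimics that logic on a plain list of keypoint likelihoods; it is illustrative only, with the default values copied from the help strings:

```python
def keep_person(likelihoods: list[float],
                keypoint_likelihood_threshold: float = 0.3,
                average_likelihood_threshold: float = 0.5,
                keypoint_number_threshold: float = 0.3) -> bool:
    """Keep a person only if enough keypoints are confident and their mean likelihood is high enough."""
    good = [p for p in likelihoods if p >= keypoint_likelihood_threshold]
    if not good or len(good) < keypoint_number_threshold * len(likelihoods):
        return False
    return sum(good) / len(good) >= average_likelihood_threshold

print(keep_person([0.9, 0.8, 0.2, 0.1, 0.7, 0.6]))   # True: 4/6 good keypoints, mean 0.75
```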
@@ -511,15 +532,16 @@ sports2d --help
 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
 'sections_to_keep': ["", "all, largest, first, or last. Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one"],
+'min_chunk_size': ["", "Minimum number of valid frames in a row to keep a chunk of data for a person. 10 if not specified"],
 'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
 'filter': ["", "filter results. true if not specified"],
 'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],
+'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
 'order': ["", "order of the Butterworth filter. 4 if not specified"],
-'
+'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' is usually better, unless the signal is too short (noise can then be considered as signal -> trajectories not filtered). 'auto' if not specified"],
+'gcv_smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 1.0 if not specified"],
 'trust_ratio': ["", "trust ratio of the Kalman filter: How much more do you trust triangulation results (measurements), than the assumption of constant acceleration(process)? 500 if not specified"],
 'smooth': ["", "dual Kalman smoothing. true if not specified"],
-'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' if not specified"],
-'smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 0.1 if not specified"],
 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
@@ -628,11 +650,11 @@ Sports2D:
 
 2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, the person bounding boxes can be tracked instead of detected every frame. Any RTMPose model can be used.
 
-3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames
+3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames, thanks to the 'sports2D' tracker. [See Release notes of v0.8.22 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.22).
 
 4. **Chooses which persons to analyze.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, you can choose the number of persons to analyze (`nb_persons_to_detect`), and how to order them (`person_ordering_method`). The ordering method can be 'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. `on_click` is default and lets the user click on the persons they are interested in, in the desired order.
 
-4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The
+4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The camera horizon angle and the floor level can either be detected automatically from the gait sequence, be manually specified, or obtained from a calibration file. The depth perspective effects are compensated thanks to the distance from the camera to the subject, the focal length, the field of view, or a calibration file. [See Release notes of v0.8.25 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.25).
 
 5. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
 
Sports2D/Demo/Config_demo.toml
@@ -97,7 +97,8 @@ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, har
 keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
 average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
 keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
-max_distance =
+max_distance = 250 # in px or None # If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one
+
 
 [px_to_meters_conversion]
 # Pixel to meters conversion
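The new `max_distance` setting bounds frame-to-frame association: a detection is matched to the nearest previously tracked person only if it has moved less than `max_distance` pixels, otherwise it is treated as a new person. A simplified nearest-neighbour sketch of that idea (not the actual 'sports2d' tracker, which also handles likelihoods and temporary disappearances):

```python
import math
from itertools import count

_new_id = count(start=100)   # arbitrary starting point for fresh IDs in this toy example

def associate(prev: dict[int, tuple[float, float]],
              detections: list[tuple[float, float]],
              max_distance: float = 250.0) -> dict[int, tuple[float, float]]:
    """Greedy nearest-neighbour matching of detections to previous person positions."""
    tracked, free = {}, dict(prev)
    for det in detections:
        best_id, best_d = None, max_distance
        for pid, pos in free.items():
            d = math.dist(det, pos)
            if d < best_d:
                best_id, best_d = pid, d
        if best_id is None:                  # too far from every known person -> new ID
            best_id = next(_new_id)
        else:
            free.pop(best_id)
        tracked[best_id] = det
    return tracked

print(associate({0: (100, 200), 1: (400, 220)}, [(110, 205), (900, 500)]))
```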
@@ -105,15 +106,20 @@ to_meters = true
 make_c3d = true
 save_calib = true
 
-#
-floor_angle = 'auto'
-xy_origin = ['auto']
+# Compensate for camera horizon
+floor_angle = 'auto' # float, 'from_kinematics', 'from_calib', or 'auto' # 'auto' is equivalent to 'from_kinematics', ie angle calculated from foot contacts. 'from_calib' calculates it from a toml calibration file. Use float to manually specify it in degrees
+xy_origin = ['auto'] # [px_x,px_y], or ['from kinematics'], ['from_calib'], or ['auto']. # BETWEEN BRACKETS! # ['auto'] is equivalent to ['from_kinematics'], ie origin estimated at first foot contact, direction is direction of motion. ['from_calib'] calculates it from a calibration file. Use [px_x,px_y] to manually specify it in pixels (px_y points downwards)
+
+# Compensate for perspective effects, which make the further limb look smaller. 1-2% coordinate error at 10 m, less if the camera is further away
+perspective_value = 10 # Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'
+perspective_unit = 'distance_m' # 'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'
+
+# Optional distortion coefficients
+distortions = [0.0, 0.0, 0.0, 0.0, 0.0] # [k1, k2, p1, p2, k3] or 'from_calib' (not implemented yet)
+
+# Optional calibration file
+calib_file = '' # Calibration file in the Pose2Sim toml format, or '' if not available
 
-# If conversion from a calibration file
-calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
-# subject_distance
-# focal_distance
-# recalculate_extrinsics
 
 [angles]
 display_angle_values_on = ['body', 'list'] # 'body', 'list', ['body', 'list'], 'none'. Display angle values on the body, as a list in the upper left of the image, both, or do not display them.
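The `[px_to_meters_conversion]` block above can be read with the standard library and sanity-checked before use. A minimal sketch, assuming Python 3.11+ for `tomllib` and key names exactly as they appear in Config_demo.toml; the checks reflect the comments above, not actual Sports2D validation code:

```python
import tomllib

VALID_UNITS = {"distance_m", "f_px", "fov_deg", "fov_rad", "from_calib"}

def load_px_to_meters(config_path: str) -> dict:
    """Return the [px_to_meters_conversion] section after basic consistency checks."""
    with open(config_path, "rb") as f:
        cfg = tomllib.load(f)
    section = cfg["px_to_meters_conversion"]
    unit = section["perspective_unit"]
    if unit not in VALID_UNITS:
        raise ValueError(f"perspective_unit must be one of {sorted(VALID_UNITS)}, got {unit!r}")
    if unit == "from_calib" and not section.get("calib_file"):
        raise ValueError("perspective_unit='from_calib' requires calib_file to be set")
    return section

# section = load_px_to_meters("Sports2D/Demo/Config_demo.toml")
```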
@@ -127,7 +133,7 @@ joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right h
 segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
 
 # Processing parameters
-flip_left_right =
+flip_left_right = false # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
 correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false if it is the floor which is actually tilted
 
 
@@ -209,7 +215,7 @@ use_custom_logging = false # if integrated in an API that already has logging
 #
 # Check your model hierarchy with: for pre, _, node in RenderTree(model):
 # print(f'{pre}{node.name} id={node.id}')
-[pose.CUSTOM]
+[[pose.CUSTOM]]
 name = "Hip"
 id = 19
 [[pose.CUSTOM.children]]
Sports2D/Sports2D.py
@@ -152,7 +152,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 'keypoint_likelihood_threshold': 0.3,
 'average_likelihood_threshold': 0.5,
 'keypoint_number_threshold': 0.3,
-'max_distance':
+'max_distance': 250,
 'CUSTOM': { 'name': 'Hip',
 'id': 19,
 'children': [{'name': 'RHip',
@@ -194,10 +194,13 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 'px_to_meters_conversion': {
 'to_meters': True,
 'make_c3d': True,
-'
+'save_calib': True,
+'perspective_value': 10.0,
+'perspective_unit': 'distance_m',
+'distortions': [0.0, 0.0, 0.0, 0.0, 0.0],
 'floor_angle': 'auto',
 'xy_origin': ['auto'],
-'
+'calib_file': '',
 },
 'angles': {'display_angle_values_on': ['body', 'list'],
 'fontSize': 0.3,
@@ -269,16 +272,21 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 
 CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
+'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
 'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
 'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
 'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
 'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
+'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
+'perspective_value': ["", "Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'"],
+'perspective_unit': ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
+'do_ik': ["", "do inverse kinematics. false if not specified"],
+'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
 'load_trc_px': ["", "load trc file to avaid running pose estimation again. false if not specified"],
 'compare': ["", "visually compare motion with trc file. false if not specified"],
-'webcam_id': ["w", "webcam ID. 0 if not specified"],
-'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
 'video_dir': ["d", "current directory if not specified"],
 'result_dir': ["r", "current directory if not specified"],
+'webcam_id': ["w", "webcam ID. 0 if not specified"],
 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
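CONFIG_HELP maps each option name to a short flag and a help string. One plausible way such a table can drive a command-line parser is sketched below; this only illustrates the data structure, not how Sports2D.py actually builds its CLI:

```python
import argparse

CONFIG_HELP = {
    "first_person_height": ["H", "height of the reference person in meters. 1.65 if not specified"],
    "perspective_unit": ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
}

def build_parser(help_table: dict) -> argparse.ArgumentParser:
    """Turn a {name: [short_flag, help]} table into argparse options."""
    parser = argparse.ArgumentParser(prog="sports2d")
    for name, (short, help_text) in help_table.items():
        flags = [f"--{name}"] + ([f"-{short}"] if short else [])
        parser.add_argument(*flags, help=help_text)
    return parser

print(build_parser(CONFIG_HELP).parse_args(["--first_person_height", "1.80"]))
```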
@@ -303,11 +311,9 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
 'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
 'save_calib': ["", "save calibration file. true if not specified"],
-'do_ik': ["", "do inverse kinematics. false if not specified"],
-'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
 'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
+'distortions': ["", "camera distortion coefficients [k1, k2, p1, p2, k3] or 'from_calib'. [0.0, 0.0, 0.0, 0.0, 0.0] if not specified"],
 'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
-'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
 'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
Sports2D/Utilities/common.py
@@ -36,6 +36,9 @@ __status__ = "Development"
 
 
 ## CONSTANTS
+# 4 points joint angle: between knee and ankle, and toe and heel. Add 90° offset and multiply by 1
+# 3 points joint angle: between ankle, knee, hip. -180° offset, multiply by -1
+# 2 points segment angle: between horizontal and ankle and knee, 0° offset, multiply by -1
 angle_dict = { # lowercase!
 # joint angles
 'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
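The three comments added above fix one (keypoints, name, offset, multiplier) convention per entry of `angle_dict`. A small sketch of the 2-point segment case (angle between a segment and the horizontal, 0° offset, multiplied by -1), assuming image coordinates with y pointing down; it mirrors the comment, not the actual Sports2D angle functions:

```python
import math

def segment_angle_deg(p_first: tuple[float, float], p_second: tuple[float, float],
                      offset_deg: float = 0.0, multiplier: int = -1) -> float:
    """Angle between a 2-point segment and the horizontal, following the (offset, multiplier) pattern of angle_dict."""
    dx = p_second[0] - p_first[0]
    dy = p_second[1] - p_first[1]        # image y grows downward
    raw = math.degrees(math.atan2(dy, dx))
    return multiplier * raw + offset_deg

# 'Right shank'-like example: ankle then knee, knee higher in the image (smaller y).
print(round(segment_angle_deg((360.0, 700.0), (352.0, 610.0)), 1))   # ~95 deg, a near-vertical shank
```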