sports2d 0.8.25__tar.gz → 0.8.27__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. sports2d-0.8.27/.github/workflows/sync_to_hf.yml.bak +19 -0
  2. sports2d-0.8.27/Content/huggingface_demo.png +0 -0
  3. {sports2d-0.8.25/sports2d.egg-info → sports2d-0.8.27}/PKG-INFO +34 -21
  4. {sports2d-0.8.25 → sports2d-0.8.27}/README.md +32 -19
  5. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Demo/Config_demo.toml +9 -6
  6. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Sports2D.py +2 -0
  7. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/process.py +192 -303
  8. {sports2d-0.8.25 → sports2d-0.8.27}/pyproject.toml +1 -1
  9. {sports2d-0.8.25 → sports2d-0.8.27/sports2d.egg-info}/PKG-INFO +34 -21
  10. {sports2d-0.8.25 → sports2d-0.8.27}/sports2d.egg-info/SOURCES.txt +2 -1
  11. sports2d-0.8.27/sports2d.egg-info/requires.txt +2 -0
  12. sports2d-0.8.25/Sports2D/Sports2D.ipynb +0 -3114
  13. sports2d-0.8.25/sports2d.egg-info/requires.txt +0 -2
  14. {sports2d-0.8.25 → sports2d-0.8.27}/.github/workflows/continuous-integration.yml +0 -0
  15. {sports2d-0.8.25 → sports2d-0.8.27}/.github/workflows/joss_pdf.yml +0 -0
  16. {sports2d-0.8.25 → sports2d-0.8.27}/.github/workflows/publish-on-release.yml +0 -0
  17. {sports2d-0.8.25 → sports2d-0.8.27}/.gitignore +0 -0
  18. {sports2d-0.8.25 → sports2d-0.8.27}/CITATION.cff +0 -0
  19. {sports2d-0.8.25 → sports2d-0.8.27}/Content/Demo_plots.png +0 -0
  20. {sports2d-0.8.25 → sports2d-0.8.27}/Content/Demo_results.png +0 -0
  21. {sports2d-0.8.25 → sports2d-0.8.27}/Content/Demo_terminal.png +0 -0
  22. {sports2d-0.8.25 → sports2d-0.8.27}/Content/Person_selection.png +0 -0
  23. {sports2d-0.8.25 → sports2d-0.8.27}/Content/Video_tuto_Sports2D_Colab.png +0 -0
  24. {sports2d-0.8.25 → sports2d-0.8.27}/Content/joint_convention.png +0 -0
  25. {sports2d-0.8.25 → sports2d-0.8.27}/Content/paper.bib +0 -0
  26. {sports2d-0.8.25 → sports2d-0.8.27}/Content/paper.md +0 -0
  27. {sports2d-0.8.25 → sports2d-0.8.27}/Content/sports2d_blender.gif +0 -0
  28. {sports2d-0.8.25 → sports2d-0.8.27}/Content/sports2d_opensim.gif +0 -0
  29. {sports2d-0.8.25 → sports2d-0.8.27}/LICENSE +0 -0
  30. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Demo/Calib_demo.toml +0 -0
  31. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Demo/demo.mp4 +0 -0
  32. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Utilities/__init__.py +0 -0
  33. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Utilities/common.py +0 -0
  34. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/Utilities/tests.py +0 -0
  35. {sports2d-0.8.25 → sports2d-0.8.27}/Sports2D/__init__.py +0 -0
  36. {sports2d-0.8.25 → sports2d-0.8.27}/setup.cfg +0 -0
  37. {sports2d-0.8.25 → sports2d-0.8.27}/sports2d.egg-info/dependency_links.txt +0 -0
  38. {sports2d-0.8.25 → sports2d-0.8.27}/sports2d.egg-info/entry_points.txt +0 -0
  39. {sports2d-0.8.25 → sports2d-0.8.27}/sports2d.egg-info/top_level.txt +0 -0
@@ -0,0 +1,19 @@
1
+ name: Sync to Hugging Face Space
2
+ on:
3
+ push:
4
+ branches: [ main ]
5
+ jobs:
6
+ sync:
7
+ runs-on: ubuntu-latest
8
+ steps:
9
+ - uses: actions/checkout@v4
10
+ - name: Push to Hugging Face Space
11
+ run: |
12
+ git clone https://${{ secrets.HUGGINGFACE_TOKEN }}@huggingface.co/spaces/DavidPagnon/sports2d
13
+ cd sports2d
14
+ git config --global user.name "DavidPagnon"
15
+ git config --global user.email "contact@david-pagnon.com"
16
+ cp -r ../Sports2D/* .
17
+ git add .
18
+ git commit -m "Sync from GitHub"
19
+ git push https://${{ secrets.HUGGINGFACE_TOKEN }}@huggingface.co/spaces/DavidPagnon/sports2d
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sports2d
3
- Version: 0.8.25
3
+ Version: 0.8.27
4
4
  Summary: Compute 2D human pose and angles from a video or a webcam.
5
5
  Author-email: David Pagnon <contact@david-pagnon.com>
6
6
  Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -23,7 +23,7 @@ Requires-Python: >=3.9
23
23
  Description-Content-Type: text/markdown
24
24
  License-File: LICENSE
25
25
  Requires-Dist: imageio_ffmpeg
26
- Requires-Dist: Pose2Sim>=0.10.38
26
+ Requires-Dist: Pose2Sim>=0.10.40
27
27
  Dynamic: license-file
28
28
 
29
29
 
@@ -40,6 +40,8 @@ Dynamic: license-file
40
40
  [![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
41
41
  \
42
42
  [![Discord](https://img.shields.io/discord/1183750225471492206?logo=Discord&label=Discord%20community)](https://discord.com/invite/4mXUdSFjmt)
43
+ [![Hugging Face Space](https://img.shields.io/badge/HuggingFace-Sports2D-yellow?logo=huggingface)](https://huggingface.co/spaces/DavidPagnon/sports2d)
44
+
43
45
 
44
46
  <!-- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://bit.ly/Sports2D_Colab)-->
45
47
 
@@ -52,7 +54,7 @@ Dynamic: license-file
52
54
  </br>
53
55
 
54
56
  > **`Announcements:`**
55
- > - Compensate for floor angle, floor height, depth perspective effects, generate a calibration file **New in v0.9!**
57
+ > - Compensate for floor angle, floor height, depth perspective effects, generate a calibration file **New in v0.8.25!**
56
58
  > - Select only the persons you want to analyze **New in v0.8!**
57
59
  > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
58
60
  > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -80,7 +82,7 @@ https://github.com/user-attachments/assets/2ce62012-f28c-4e23-b3b8-f68931bacb77
80
82
  <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
81
83
 
82
84
  `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
83
- `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\
85
+ `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal). The persons need to be filmed as parallel as possible to the motion plane.\
84
86
  If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
85
87
 
86
88
  <!--`Warning:` Google Colab does not follow the European GDPR requirements regarding data privacy. [Install locally](#installation) if this matters.-->
@@ -90,36 +92,46 @@ If you need 3D research-grade markerless joint kinematics, consider using severa
90
92
 
91
93
  ## Contents
92
94
  1. [Installation and Demonstration](#installation-and-demonstration)
93
- 1. [Installation](#installation)
95
+ 1. [Test it on Hugging Face](#test-it-on-hugging-face)
96
+ 1. [Local installation](#local-installation)
94
97
  1. [Quick install](#quick-install)
95
98
  2. [Full install](#full-install)
96
99
  2. [Demonstration](#demonstration)
97
100
  1. [Run the demo](#run-the-demo)
98
101
  2. [Visualize in OpenSim](#visualize-in-opensim)
99
102
  3. [Visualize in Blender](#visualize-in-blender)
100
- 3. [Play with the parameters](#play-with-the-parameters)
101
- 1. [Run on a custom video or on a webcam](#run-on-a-custom-video-or-on-a-webcam)
102
- 2. [Run for a specific time range](#run-for-a-specific-time-range)
103
- 3. [Select the persons you are interested in](#select-the-persons-you-are-interested-in)
104
- 4. [Get coordinates in meters](#get-coordinates-in-meters)
105
- 5. [Run inverse kinematics](#run-inverse-kinematics)
106
- 6. [Run on several videos at once](#run-on-several-videos-at-once)
107
- 7. [Use the configuration file or run within Python](#use-the-configuration-file-or-run-within-python)
108
- 8. [Get the angles the way you want](#get-the-angles-the-way-you-want)
109
- 9. [Customize your output](#customize-your-output)
110
- 10. [Use a custom pose estimation model](#use-a-custom-pose-estimation-model)
111
- 11. [All the parameters](#all-the-parameters)
112
- 2. [Go further](#go-further)
103
+ 2. [Play with the parameters](#play-with-the-parameters)
104
+ 1. [Run on a custom video or on a webcam](#run-on-a-custom-video-or-on-a-webcam)
105
+ 2. [Run for a specific time range](#run-for-a-specific-time-range)
106
+ 3. [Select the persons you are interested in](#select-the-persons-you-are-interested-in)
107
+ 4. [Get coordinates in meters](#get-coordinates-in-meters)
108
+ 5. [Run inverse kinematics](#run-inverse-kinematics)
109
+ 6. [Run on several videos at once](#run-on-several-videos-at-once)
110
+ 7. [Use the configuration file or run within Python](#use-the-configuration-file-or-run-within-python)
111
+ 8. [Get the angles the way you want](#get-the-angles-the-way-you-want)
112
+ 9. [Customize your output](#customize-your-output)
113
+ 10. [Use a custom pose estimation model](#use-a-custom-pose-estimation-model)
114
+ 11. [All the parameters](#all-the-parameters)
115
+ 3. [Go further](#go-further)
113
116
  1. [Too slow for you?](#too-slow-for-you)
114
117
  3. [Run inverse kinematics](#run-inverse-kinematics)
115
118
  4. [How it works](#how-it-works)
116
- 3. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
119
+ 4. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
117
120
 
118
121
  <br>
119
122
 
120
123
  ## Installation and Demonstration
121
124
 
122
- ### Installation
125
+
126
+ ### Test it on Hugging Face
127
+
128
+ Test an online, limited version [on Hugging Face](https://huggingface.co/spaces/DavidPagnon/sports2d): [![Hugging Face Space](https://img.shields.io/badge/HuggingFace-Sports2D-yellow?logo=huggingface)](https://huggingface.co/spaces/DavidPagnon/sports2d)
129
+
130
+ <img src="Content/huggingface_demo.png" width="760">
131
+
132
+
133
+
134
+ ### Local installation
123
135
 
124
136
  <!--- OPTION 0: **Use Colab** \
125
137
  User-friendly (but full) version, also works on a phone or a tablet.\
@@ -424,7 +436,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
424
436
  sports2d --calculate_angles false
425
437
  ```
426
438
  - Flip angles when the person faces the other side.\
427
- **N.B.:** *We consider that the person looks to the right if their toe keypoint is to the right of their heel. This is not always true when the person is sprinting, especially in the swing phase. Set it to false if you want timeseries to be continuous even when the participant switches their stance.*
439
+ **N.B.: Set to false when sprinting.** *We consider that each limb "looks" to the right if the toe keypoint is to the right of the heel one. This is not always true, particularly during the swing phase of sprinting. Set it to false if you want timeseries to be continuous even when the participant switches their stance.*
428
440
  ```cmd
429
441
  sports2d --flip_left_right true # Default
430
442
  ```
@@ -525,6 +537,7 @@ sports2d --help
525
537
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
526
538
  'save_calib': ["", "save calibration file. true if not specified"],
527
539
  'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
540
+ 'distortions': ["", "camera distortion coefficients [k1, k2, p1, p2, k3] or 'from_calib'. [0.0, 0.0, 0.0, 0.0, 0.0] if not specified"],
528
541
  'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
529
542
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
530
543
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
@@ -12,6 +12,8 @@
12
12
  [![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
13
13
  \
14
14
  [![Discord](https://img.shields.io/discord/1183750225471492206?logo=Discord&label=Discord%20community)](https://discord.com/invite/4mXUdSFjmt)
15
+ [![Hugging Face Space](https://img.shields.io/badge/HuggingFace-Sports2D-yellow?logo=huggingface)](https://huggingface.co/spaces/DavidPagnon/sports2d)
16
+
15
17
 
16
18
  <!-- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://bit.ly/Sports2D_Colab)-->
17
19
 
@@ -24,7 +26,7 @@
24
26
  </br>
25
27
 
26
28
  > **`Announcements:`**
27
- > - Compensate for floor angle, floor height, depth perspective effects, generate a calibration file **New in v0.9!**
29
+ > - Compensate for floor angle, floor height, depth perspective effects, generate a calibration file **New in v0.8.25!**
28
30
  > - Select only the persons you want to analyze **New in v0.8!**
29
31
  > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
30
32
  > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -52,7 +54,7 @@ https://github.com/user-attachments/assets/2ce62012-f28c-4e23-b3b8-f68931bacb77
52
54
  <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
53
55
 
54
56
  `Warning:` Angle estimation is only as good as the pose estimation algorithm, i.e., it is not perfect.\
55
- `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal plane). The persons need to be filmed as parallel as possible to the motion plane.\
57
+ `Warning:` Results are acceptable only if the persons move in the 2D plane (sagittal or frontal). The persons need to be filmed as parallel as possible to the motion plane.\
56
58
  If you need 3D research-grade markerless joint kinematics, consider using several cameras with **[Pose2Sim](https://github.com/perfanalytics/pose2sim)**.
57
59
 
58
60
  <!--`Warning:` Google Colab does not follow the European GDPR requirements regarding data privacy. [Install locally](#installation) if this matters.-->
@@ -62,36 +64,46 @@ If you need 3D research-grade markerless joint kinematics, consider using severa
62
64
 
63
65
  ## Contents
64
66
  1. [Installation and Demonstration](#installation-and-demonstration)
65
- 1. [Installation](#installation)
67
+ 1. [Test it on Hugging Face](#test-it-on-hugging-face)
68
+ 1. [Local installation](#local-installation)
66
69
  1. [Quick install](#quick-install)
67
70
  2. [Full install](#full-install)
68
71
  2. [Demonstration](#demonstration)
69
72
  1. [Run the demo](#run-the-demo)
70
73
  2. [Visualize in OpenSim](#visualize-in-opensim)
71
74
  3. [Visualize in Blender](#visualize-in-blender)
72
- 3. [Play with the parameters](#play-with-the-parameters)
73
- 1. [Run on a custom video or on a webcam](#run-on-a-custom-video-or-on-a-webcam)
74
- 2. [Run for a specific time range](#run-for-a-specific-time-range)
75
- 3. [Select the persons you are interested in](#select-the-persons-you-are-interested-in)
76
- 4. [Get coordinates in meters](#get-coordinates-in-meters)
77
- 5. [Run inverse kinematics](#run-inverse-kinematics)
78
- 6. [Run on several videos at once](#run-on-several-videos-at-once)
79
- 7. [Use the configuration file or run within Python](#use-the-configuration-file-or-run-within-python)
80
- 8. [Get the angles the way you want](#get-the-angles-the-way-you-want)
81
- 9. [Customize your output](#customize-your-output)
82
- 10. [Use a custom pose estimation model](#use-a-custom-pose-estimation-model)
83
- 11. [All the parameters](#all-the-parameters)
84
- 2. [Go further](#go-further)
75
+ 2. [Play with the parameters](#play-with-the-parameters)
76
+ 1. [Run on a custom video or on a webcam](#run-on-a-custom-video-or-on-a-webcam)
77
+ 2. [Run for a specific time range](#run-for-a-specific-time-range)
78
+ 3. [Select the persons you are interested in](#select-the-persons-you-are-interested-in)
79
+ 4. [Get coordinates in meters](#get-coordinates-in-meters)
80
+ 5. [Run inverse kinematics](#run-inverse-kinematics)
81
+ 6. [Run on several videos at once](#run-on-several-videos-at-once)
82
+ 7. [Use the configuration file or run within Python](#use-the-configuration-file-or-run-within-python)
83
+ 8. [Get the angles the way you want](#get-the-angles-the-way-you-want)
84
+ 9. [Customize your output](#customize-your-output)
85
+ 10. [Use a custom pose estimation model](#use-a-custom-pose-estimation-model)
86
+ 11. [All the parameters](#all-the-parameters)
87
+ 3. [Go further](#go-further)
85
88
  1. [Too slow for you?](#too-slow-for-you)
86
89
  3. [Run inverse kinematics](#run-inverse-kinematics)
87
90
  4. [How it works](#how-it-works)
88
- 3. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
91
+ 4. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
89
92
 
90
93
  <br>
91
94
 
92
95
  ## Installation and Demonstration
93
96
 
94
- ### Installation
97
+
98
+ ### Test it on Hugging Face
99
+
100
+ Test an online, limited version [on Hugging Face](https://huggingface.co/spaces/DavidPagnon/sports2d): [![Hugging Face Space](https://img.shields.io/badge/HuggingFace-Sports2D-yellow?logo=huggingface)](https://huggingface.co/spaces/DavidPagnon/sports2d)
101
+
102
+ <img src="Content/huggingface_demo.png" width="760">
103
+
104
+
105
+
106
+ ### Local installation
95
107
 
96
108
  <!--- OPTION 0: **Use Colab** \
97
109
  User-friendly (but full) version, also works on a phone or a tablet.\
@@ -396,7 +408,7 @@ sports2d --video_input demo.mp4 other_video.mp4 --time_range 1.2 2.7 0 3.5
396
408
  sports2d --calculate_angles false
397
409
  ```
398
410
  - Flip angles when the person faces the other side.\
399
- **N.B.:** *We consider that the person looks to the right if their toe keypoint is to the right of their heel. This is not always true when the person is sprinting, especially in the swing phase. Set it to false if you want timeseries to be continuous even when the participant switches their stance.*
411
+ **N.B.: Set to false when sprinting.** *We consider that each limb "looks" to the right if the toe keypoint is to the right of the heel one. This is not always true, particularly during the swing phase of sprinting. Set it to false if you want timeseries to be continuous even when the participant switches their stance.*
400
412
  ```cmd
401
413
  sports2d --flip_left_right true # Default
402
414
  ```
@@ -497,6 +509,7 @@ sports2d --help
497
509
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
498
510
  'save_calib': ["", "save calibration file. true if not specified"],
499
511
  'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
512
+ 'distortions': ["", "camera distortion coefficients [k1, k2, p1, p2, k3] or 'from_calib'. [0.0, 0.0, 0.0, 0.0, 0.0] if not specified"],
500
513
  'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
501
514
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
502
515
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
@@ -89,7 +89,7 @@ det_frequency = 4 # Run person detection only every N frames, and inbetwee
89
89
  device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
90
90
  backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
91
91
  tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned
92
- # deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
92
+ # deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
93
93
  # More robust in crowded scenes but tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
94
94
  # Requires `pip install torch torchvision torchreid gdown tensorboard`
95
95
 
@@ -106,13 +106,16 @@ to_meters = true
106
106
  make_c3d = true
107
107
  save_calib = true
108
108
 
109
+ # Compensate for camera horizon
110
+ floor_angle = 'auto' # float, 'from_kinematics', 'from_calib', or 'auto' # 'auto' is equivalent to 'from_kinematics', ie angle calculated from foot contacts. 'from_calib' calculates it from a toml calibration file. Use float to manually specify it in degrees
111
+ xy_origin = ['auto'] # [px_x,px_y], or ['from kinematics'], ['from_calib'], or ['auto']. # BETWEEN BRACKETS! # ['auto'] is equivalent to ['from_kinematics'], ie origin estimated at first foot contact, direction is direction of motion. ['from_calib'] calculates it from a calibration file. Use [px_x,px_y] to manually specify it in pixels (px_y points downwards)
112
+
109
113
  # Compensate for perspective effects, which make the further limb look smaller. 1-2% coordinate error at 10 m, less if the camera is further away
110
114
  perspective_value = 10 # Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'
111
115
  perspective_unit = 'distance_m' # 'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'
112
116
 
113
- # Compensate for camera horizon
114
- floor_angle = 'auto' # float, 'from_kinematics', 'from_calib', or 'auto' # 'auto' is equivalent to 'from_kinematics', ie angle calculated from foot contacts. 'from_calib' calculates it from a toml calibration file. Use float to manually specify it in degrees
115
- xy_origin = ['auto'] # [px_x,px_y], or ['from kinematics'], ['from_calib'], or ['auto']. # BETWEEN BRACKETS! # ['auto'] is equivalent to ['from_kinematics'], ie origin estimated at first foot contact, direction is direction of motion. ['from_calib'] calculates it from a calibration file. Use [px_x,px_y] to manually specify it in pixels (px_y points downwards)
117
+ # Optional distortion coefficients
118
+ distortions = [0.0, 0.0, 0.0, 0.0, 0.0] # [k1, k2, p1, p2, k3] or 'from_calib' (not implemented yet)
116
119
 
117
120
  # Optional calibration file
118
121
  calib_file = '' # Calibration file in the Pose2Sim toml format, or '' if not available
@@ -130,7 +133,7 @@ joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right h
130
133
  segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
131
134
 
132
135
  # Processing parameters
133
- flip_left_right = true # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
136
+ flip_left_right = false # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
134
137
  correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false if it is the floor which is actually tilted
135
138
 
136
139
 
@@ -212,7 +215,7 @@ use_custom_logging = false # if integrated in an API that already has logging
212
215
  #
213
216
  # Check your model hierarchy with: for pre, _, node in RenderTree(model):
214
217
  # print(f'{pre}{node.name} id={node.id}')
215
- [pose.CUSTOM]
218
+ [[pose.CUSTOM]]
216
219
  name = "Hip"
217
220
  id = 19
218
221
  [[pose.CUSTOM.children]]
@@ -197,6 +197,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
197
197
  'save_calib': True,
198
198
  'perspective_value': 10.0,
199
199
  'perspective_unit': 'distance_m',
200
+ 'distortions': [0.0, 0.0, 0.0, 0.0, 0.0],
200
201
  'floor_angle': 'auto',
201
202
  'xy_origin': ['auto'],
202
203
  'calib_file': '',
@@ -311,6 +312,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
311
312
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
312
313
  'save_calib': ["", "save calibration file. true if not specified"],
313
314
  'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
315
+ 'distortions': ["", "camera distortion coefficients [k1, k2, p1, p2, k3] or 'from_calib'. [0.0, 0.0, 0.0, 0.0, 0.0] if not specified"],
314
316
  'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
315
317
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
316
318
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],