sports2d 0.8.17.tar.gz → 0.8.19.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sports2d-0.8.17 → sports2d-0.8.19}/.github/workflows/continuous-integration.yml +3 -5
- {sports2d-0.8.17 → sports2d-0.8.19}/PKG-INFO +11 -9
- {sports2d-0.8.17 → sports2d-0.8.19}/README.md +9 -7
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Demo/Config_demo.toml +5 -4
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Sports2D.py +4 -4
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Utilities/tests.py +5 -5
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/process.py +221 -155
- {sports2d-0.8.17 → sports2d-0.8.19}/pyproject.toml +1 -1
- {sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/PKG-INFO +11 -9
- {sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/requires.txt +1 -1
- {sports2d-0.8.17 → sports2d-0.8.19}/.github/workflows/joss_pdf.yml +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/.github/workflows/publish-on-release.yml +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/.gitignore +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/CITATION.cff +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/Demo_plots.png +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/Demo_results.png +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/Demo_terminal.png +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/Person_selection.png +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/Video_tuto_Sports2D_Colab.png +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/joint_convention.png +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/paper.bib +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/paper.md +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/sports2d_blender.gif +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Content/sports2d_opensim.gif +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/LICENSE +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Demo/demo.mp4 +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Sports2D.ipynb +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Utilities/__init__.py +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Utilities/common.py +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/__init__.py +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/setup.cfg +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/SOURCES.txt +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/dependency_links.txt +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/entry_points.txt +0 -0
- {sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/top_level.txt +0 -0
{sports2d-0.8.17 → sports2d-0.8.19}/.github/workflows/continuous-integration.yml

@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
 
-name: Build on Win-MacOS-Ubuntu with Python 3.10-3.11
+name: Build on Win-MacOS-Ubuntu with Python 3.10-3.12
 
 on:
   push:
@@ -23,8 +23,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest, macos-13]
-        python-version: ["3.10", "3.11"]
+        os: [ubuntu-latest, windows-latest, macos-latest] #, macos-13] # opensim not supported on macos Intel AMD x64 beyond python 3.11
+        python-version: ["3.10", "3.11", "3.12"]
       include:
         - os: ubuntu-latest
          cache-path: ~/.cache/pip
@@ -32,8 +32,6 @@ jobs:
          cache-path: C:\Users\runneradmin\AppData\Local\pip\Cache
        - os: macos-latest
          cache-path: ~/Library/Caches/pip
-       - os: macos-13
-         cache-path: ~/Library/Caches/pip
 
     steps:
       - name: Checkout code
{sports2d-0.8.17 → sports2d-0.8.19}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sports2d
-Version: 0.8.17
+Version: 0.8.19
 Summary: Compute 2D human pose and angles from a video or a webcam.
 Author-email: David Pagnon <contact@david-pagnon.com>
 Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -35,7 +35,7 @@ Requires-Dist: ipython
 Requires-Dist: c3d
 Requires-Dist: rtmlib
 Requires-Dist: openvino
-Requires-Dist: opencv-python
+Requires-Dist: opencv-python<4.12
 Requires-Dist: imageio_ffmpeg
 Requires-Dist: deep-sort-realtime
 Requires-Dist: Pose2Sim>=0.10.33
@@ -145,7 +145,7 @@ If you need 3D research-grade markerless joint kinematics, consider using severa…
 
 > N.B.: Full install is required for OpenSim inverse kinematics.
 
-Open a terminal. Type `python -V` to make sure python >=3.10 <=3.11 is installed. If not, install it [from there](https://www.python.org/downloads/).
+Open a terminal. Type `python -V` to make sure python >=3.10 <=3.12 is installed. If not, install it [from there](https://www.python.org/downloads/).
 
 Run:
 ``` cmd
@@ -169,7 +169,7 @@ pip install .
 - Install Anaconda or [Miniconda](https://docs.conda.io/en/latest/miniconda.html):\
   Open an Anaconda prompt and create a virtual environment:
 ``` cmd
-conda create -n Sports2D python=3.…
+conda create -n Sports2D python=3.12 -y
 conda activate Sports2D
 ```
 - **Install OpenSim**:\
@@ -568,7 +568,7 @@ Note that any detection and pose models can be used (first [deploy them with MMP…
 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.zip',
 'pose_input_size':[192,256]}"""
 ```
-- Use `--det_frequency 50`: …
+- Use `--det_frequency 50`: Rtmlib is (by default) a top-down method: detects bounding boxes for every person in the frame, and then detects keypoints inside of each box. The person detection stage is much slower. You can choose to detect persons only every 50 frames (for example), and track bounding boxes inbetween, which is much faster.
 - Use `--load_trc_px <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.
 - Make sure you use `--tracking_mode sports2d`: Will use the default Sports2D tracker. Unlike DeepSort, it is faster, does not require any parametrization, and is as good in non-crowded scenes.
 
@@ -637,13 +637,13 @@ Sports2D:
 
 1. **Reads stream from a webcam, from one video, or from a list of videos**. Selects the specified time range to process.
 
-2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, …
+2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, the person bounding boxes can be tracked instead of detected every frame. Any RTMPose model can be used.
 
 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames. We crafted a 'sports2D' tracker which gives good results and runs in real time, but it is also possible to use `deepsort` in particularly challenging situations.
 
-4. **Chooses …
+4. **Chooses which persons to analyze.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, you can choose the number of persons to analyze (`nb_persons_to_detect`), and how to order them (`person_ordering_method`). The ordering method can be 'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. `on_click` is default and lets the user click on the persons they are interested in, in the desired order.
 
-4. **Converts the pixel coordinates to meters.** The user can provide …
+4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The floor angle and the coordinate origin can either be detected automatically from the gait sequence, or be manually specified. The depth coordinates are set to normative values, depending on whether the person is going left, right, facing the camera, or looking away.
 
 5. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
 
@@ -652,12 +652,14 @@ Sports2D:
 Draws the skeleton and the keypoints, with a green to red color scale to account for their confidence\
 Draws joint and segment angles on the body, and writes the values either near the joint/segment, or on the upper-left of the image with a progress bar
 
-6. **Interpolates and filters results:** Missing pose and angle sequences are interpolated unless gaps are too large …
+6. **Interpolates and filters results:** (1) Swaps between right and left limbs are corrected, (2) Missing pose and angle sequences are interpolated unless gaps are too large, (3) Outliers are rejected with a Hampel filter, and finally (4) Results are filtered, by default with a 6 Hz Butterworth filter. All of the above can be configured or deactivated, and other filters such as Kalman, GCV, Gaussian, LOESS, Median, and Butterworth on speeds are also available (see [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml))
 
 7. **Optionally show** processed images, saves them, or saves them as a video\
 **Optionally plots** pose and angle data before and after processing for comparison\
 **Optionally saves** poses for each person as a TRC file in pixels and meters, angles as a MOT file, and calibration data as a [Pose2Sim](https://github.com/perfanalytics/pose2sim) TOML file
 
+8. **Optionally runs scaling and inverse kinematics** with OpenSim via [Pose2Sim](https://github.com/perfanalytics/pose2sim).
+
 <br>
 
 **Joint angle conventions:**
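The expanded `--det_frequency 50` note above describes the usual top-down trade-off: detection is the slow stage, keypoint estimation the fast one. As a rough illustration of the control flow (not rtmlib's actual API; `detect_persons`, `track_boxes`, and `estimate_keypoints` are placeholder callables):

```python
import cv2

DET_FREQUENCY = 50  # as in --det_frequency 50

def run_top_down(video_path, detect_persons, track_boxes, estimate_keypoints):
    """Run the slow person detector only every DET_FREQUENCY frames and keep
    boxes updated by cheap tracking in between; keypoints run on every frame."""
    cap = cv2.VideoCapture(video_path)
    boxes, frame_idx = [], 0
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        if frame_idx % DET_FREQUENCY == 0:
            boxes = detect_persons(frame)       # slow detection stage, run sparsely
        else:
            boxes = track_boxes(frame, boxes)   # fast box update between detections
        keypoints = estimate_keypoints(frame, boxes)  # pose stage, every frame
        frame_idx += 1
    cap.release()
```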
{sports2d-0.8.17 → sports2d-0.8.19}/README.md

@@ -102,7 +102,7 @@ · @@ -126,7 +126,7 @@ · @@ -525,7 +525,7 @@ · @@ -594,13 +594,13 @@ · @@ -609,12 +609,14 @@

The README is embedded verbatim in PKG-INFO, so these five hunks are identical to the last five PKG-INFO hunks above (at a 43-line offset): python >=3.10 <=3.12, `conda create -n Sports2D python=3.12 -y`, the expanded `--det_frequency 50` explanation, the expanded workflow steps 2, 4, 4, and 6, and the new step 8 (OpenSim scaling and inverse kinematics via Pose2Sim).
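Workflow step 4 above ("Converts the pixel coordinates to meters") compresses several operations into one sentence: scale by a known person height, shift to a chosen origin, and undo the floor tilt. A minimal sketch of that geometry; the function and its exact formulation are illustrative, not Sports2D's internals:

```python
import numpy as np

def px_to_m(points_px, person_height_px, person_height_m, floor_angle_rad, origin_px):
    """Illustrative px->m conversion. points_px has shape (..., 2) with (x, y)
    pixel coordinates; origin_px is the chosen coordinate origin in pixels."""
    scale = person_height_m / person_height_px            # meters per pixel
    xy = (np.asarray(points_px, dtype=float) - np.asarray(origin_px, dtype=float)) * scale
    xy[..., 1] *= -1                                      # image y points down, world y up
    c, s = np.cos(floor_angle_rad), np.sin(floor_angle_rad)
    rot = np.array([[c, -s], [s, c]])                     # undo the estimated floor angle
    return xy @ rot.T
```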
{sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Demo/Config_demo.toml

@@ -139,11 +139,12 @@ reject_outliers = true   # Hampel filter for outlier rejection before other f…
 
 filter = true
 show_graphs = true # Show plots of raw and processed results
-filter_type = 'butterworth' # butterworth, gcv_spline, kalman, gaussian, loess, median, butterworth_on_speed
+filter_type = 'butterworth' # butterworth, kalman, gcv_spline, gaussian, loess, median, butterworth_on_speed
+
 # Most intuitive and standard filter in biomechanics
 [post-processing.butterworth]
-order = 4
 cut_off_frequency = 6 # Hz # Will be divided by slowmo_factor to be equivalent to non slowed-down video
+order = 4
 
 # Used in countless applications, this one is a simplified Kalman filter
 [post-processing.kalman]
@@ -153,8 +154,8 @@ filter_type = 'butterworth' # butterworth, gcv_spline, kalman, gaussian, loe…
 
 # Automatically determines optimal parameters for each point, which is good when some move faster than others (eg fingers vs hips).
 [post-processing.gcv_spline]
-gcv_cut_off_frequency = 'auto' …
-smoothing_factor = 0.1 …
+gcv_cut_off_frequency = 'auto' # 'auto' or int # If int, behaves like a Butterworth filter. 'auto' is usually better, unless the signal is too short (noise can then be considered as signal -> trajectories not filtered)
+gcv_smoothing_factor = 0.1 # >=0, ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1)
 
 [post-processing.loess]
 nb_values_used = 5 # = fraction of data used * nb frames
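The `cut_off_frequency` comment above says the value is divided by `slowmo_factor`, so a slowed-down video is filtered as if it played at the original speed. A minimal SciPy sketch of that default filter, given as an assumption about the math rather than the shipped implementation (which Sports2D delegates to Pose2Sim.filtering):

```python
from scipy.signal import butter, filtfilt

def butterworth_smooth(x, frame_rate, cut_off_frequency=6.0, order=4, slowmo_factor=1):
    # A 4x slow-motion capture of a 6 Hz movement only contains 1.5 Hz content,
    # hence the division by slowmo_factor.
    fc = cut_off_frequency / slowmo_factor
    b, a = butter(order, fc / (frame_rate / 2), btype='low')  # cutoff normalized by Nyquist
    return filtfilt(b, a, x)  # forward-backward pass: zero phase lag
```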
{sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Sports2D.py

@@ -239,7 +239,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
 'filter_type': 'butterworth',
 'butterworth': {'order': 4, 'cut_off_frequency': 6.0},
 'kalman': {'trust_ratio': 500.0, 'smooth':True},
-'gcv_spline': {'gcv_cut_off_frequency': 'auto', 'smoothing_factor': 0.1},
+'gcv_spline': {'gcv_cut_off_frequency': 'auto', 'gcv_smoothing_factor': 1.0},
 'gaussian': {'sigma_kernel': 1},
 'loess': {'nb_values_used': 5},
 'median': {'kernel_size': 3},
@@ -327,12 +327,12 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
 'filter': ["", "filter results. true if not specified"],
 'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],
+'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
 'order': ["", "order of the Butterworth filter. 4 if not specified"],
-'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
+'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' is usually better, unless the signal is too short (noise can then be considered as signal -> trajectories not filtered). 'auto' if not specified"],
+'gcv_smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 1.0 if not specified"],
 'trust_ratio': ["", "trust ratio of the Kalman filter: How much more do you trust triangulation results (measurements), than the assumption of constant acceleration(process)? 500 if not specified"],
 'smooth': ["", "dual Kalman smoothing. true if not specified"],
-'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' if not specified"],
-'smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 0.1 if not specified"],
 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
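CONFIG_HELP keeps the CLI flags flat (`--gcv_smoothing_factor`, `--order`, ...) while DEFAULT_CONFIG nests the same keys under per-filter tables. A hypothetical sketch of how a flat flag can be routed into the nested defaults; `route_flag` is illustrative and not the actual Sports2D code path:

```python
# Subset of the DEFAULT_CONFIG post-processing table shown above.
POST_PROCESSING_DEFAULTS = {
    'filter_type': 'butterworth',
    'butterworth': {'order': 4, 'cut_off_frequency': 6.0},
    'gcv_spline': {'gcv_cut_off_frequency': 'auto', 'gcv_smoothing_factor': 1.0},
}

def route_flag(config, key, value):
    """Update `key` inside whichever per-filter sub-table owns it,
    falling back to the top level for flat keys like filter_type."""
    for sub in config.values():
        if isinstance(sub, dict) and key in sub:
            sub[key] = value
            return
    config[key] = value

route_flag(POST_PROCESSING_DEFAULTS, 'gcv_smoothing_factor', 0.5)
assert POST_PROCESSING_DEFAULTS['gcv_spline']['gcv_smoothing_factor'] == 0.5
```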
{sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/Utilities/tests.py

@@ -63,14 +63,14 @@ def test_workflow():
 
     # Default
     demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False"]
-    subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')
+    subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # With loading a trc file, visible_side 'front', first_person_height '1.76", floor_angle 0, xy_origin [0, 928]
     demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
                 "--load_trc_px", os.path.join(root_dir, "demo_Sports2D", "demo_Sports2D_px_person01.trc"),
                 "--visible_side", "front", "--first_person_height", "1.76", "--time_range", "1.2", "2.7",
                 "--floor_angle", "0", "--xy_origin", "0", "928"]
-    subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
+    subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # With no pixels to meters conversion, one person to select, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
     demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
@@ -80,7 +80,7 @@ def test_workflow():
                 "--slowmo_factor", "4",
                 "--filter_type", "gaussian",
                 "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
-    subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')
+    subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # With a time range, inverse kinematics, marker augmentation
     demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False",
@@ -88,7 +88,7 @@ def test_workflow():
                 "--do_ik", "True", "--use_augmentation", "True",
                 "--nb_persons_to_detect", "all", "--first_person_height", "1.65",
                 "--visible_side", "auto", "front", "--participant_mass", "55.0", "67.0"]
-    subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8')
+    subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
     # From config file
     config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
@@ -98,7 +98,7 @@ def test_workflow():
     config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
     with open(config_path, 'w') as f: toml.dump(config_dict, f)
     demo_cmd5 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
-    subprocess.run(demo_cmd5, check=True, capture_output=True, text=True, encoding='utf-8')
+    subprocess.run(demo_cmd5, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
 
 
 if __name__ == "__main__":
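The only change in tests.py is `errors='replace'` on every `subprocess.run` call. Without it, a single byte that is not valid UTF-8 in the demo's captured output raises UnicodeDecodeError and fails the entire test. A minimal reproduction of the failure mode this guards against:

```python
import subprocess
import sys

# The child writes raw bytes; b'\xe9' on its own is not valid UTF-8.
cmd = [sys.executable, "-c", r"import sys; sys.stdout.buffer.write(b'ok \xe9')"]

result = subprocess.run(cmd, check=True, capture_output=True, text=True,
                        encoding='utf-8', errors='replace')
print(result.stdout)  # "ok �" : the undecodable byte becomes U+FFFD instead of raising
```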
{sports2d-0.8.17 → sports2d-0.8.19}/Sports2D/process.py

@@ -83,8 +83,14 @@ from Sports2D.Utilities.common import *
 from Pose2Sim.common import *
 from Pose2Sim.skeletons import *
 from Pose2Sim.triangulation import indices_of_first_last_non_nan_chunks
+from Pose2Sim.personAssociation import *
 from Pose2Sim.filtering import *
 
+# Not safe, but to be used until OpenMMLab/RTMlib's SSL certificates are updated
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
+
 
 DEFAULT_MASS = 70
 DEFAULT_HEIGHT = 1.7
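The diff itself flags the `ssl._create_unverified_context` patch as "Not safe": it disables certificate verification for every HTTPS connection in the process. A narrower alternative, given here as an assumption about what could be done rather than what Sports2D does, scopes the unverified context to the one model download:

```python
import ssl
import urllib.request

def download_unverified(url, dest):
    """Skip certificate verification for a single download only,
    instead of patching the process-wide default HTTPS context."""
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen(url, context=ctx) as response, open(dest, 'wb') as f:
        f.write(response.read())
```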
@@ -101,7 +107,7 @@ __status__ = "Development"
 
 
 # FUNCTIONS
-def setup_webcam(webcam_id, save_vid, vid_output_path, input_size):
+def setup_webcam(webcam_id, vid_output_path, input_size):
     '''
     Set up webcam capture with OpenCV.
 
@@ -127,29 +133,28 @@ def setup_webcam(webcam_id, save_vid, vid_output_path, input_size):
     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, input_size[1])
     cam_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
     fps = round(cap.get(cv2.CAP_PROP_FPS))
     if fps == 0: fps = 30
 
     if cam_width != input_size[0] or cam_height != input_size[1]:
         logging.warning(f"Warning: Your webcam does not support {input_size[0]}x{input_size[1]} resolution. Resolution set to the closest supported one: {cam_width}x{cam_height}.")
 
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-    out_vid = cv2.VideoWriter(vid_output_path, fourcc, fps, (cam_width, cam_height))
-    # logging.info("Failed to open video writer with 'avc1' (h264). Using 'mp4v' instead.")
+    # fourcc MJPG produces very large files but is faster. If it is too slow, consider using it and then converting the video to h264
+    # try:
+    #     fourcc = cv2.VideoWriter_fourcc(*'avc1') # =h264. better compression and quality but may fail on some systems
+    #     out_vid = cv2.VideoWriter(vid_output_path, fourcc, fps, (cam_width, cam_height))
+    #     if not out_vid.isOpened():
+    #         raise ValueError("Failed to open video writer with 'avc1' (h264)")
+    # except Exception:
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out_vid = cv2.VideoWriter(vid_output_path, fourcc, fps, (cam_width, cam_height))
+    # logging.info("Failed to open video writer with 'avc1' (h264). Using 'mp4v' instead.")
 
     return cap, out_vid, cam_width, cam_height, fps
 
 
-def setup_video(video_file_path, …
+def setup_video(video_file_path, vid_output_path, save_vid):
     '''
     Set up video capture with OpenCV.
 
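The new setup_webcam hardcodes the 'mp4v' fourcc and keeps the h264 ('avc1') attempt only as comments. A runnable version of that commented-out fallback, sketched under the assumption that probing codecs in order of preference is acceptable:

```python
import cv2

def open_writer(path, fps, size):
    """Try h264 ('avc1') first for smaller files and better quality,
    fall back to 'mp4v' if the codec is unavailable on this system."""
    for codec in ('avc1', 'mp4v'):
        fourcc = cv2.VideoWriter_fourcc(*codec)
        writer = cv2.VideoWriter(str(path), fourcc, fps, size)
        if writer.isOpened():
            return writer, codec
        writer.release()  # codec not supported, try the next one
    raise ValueError(f"Could not open a video writer for {path}")
```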
@@ -980,12 +985,13 @@ def get_personIDs_with_greatest_displacement(all_frames_X_homog, all_frames_Y_ho…
     return selected_persons
 
 
-def get_personIDs_on_click(frames, all_frames_X_homog, all_frames_Y_homog):
+def get_personIDs_on_click(video_file_path, frame_range, all_frames_X_homog, all_frames_Y_homog):
     '''
     Get the person IDs on click in the image
 
     INPUTS:
-    - frames: …
+    - video_file_path: path to video file
+    - frame_range: tuple (start_frame, end_frame)
     - all_frames_X_homog: shape (Nframes, Npersons, Nkpts)
     - all_frames_Y_homog: shape (Nframes, Npersons, Nkpts)
 
@@ -996,23 +1002,19 @@ def get_personIDs_on_click(frames, all_frames_X_homog, all_frames_Y_homog):
     # Reorganize the coordinates to shape (Nframes, Npersons, Nkpts, Ndims)
     all_pose_coords = np.stack((all_frames_X_homog, all_frames_Y_homog), axis=-1)
 
-    # Trim all_pose_coords and frames to the same size
-    min_frames = min(all_pose_coords.shape[0], len(frames))
-    all_pose_coords = all_pose_coords[:min_frames]
-    frames = frames[:min_frames]
-
     # Select person IDs on click on video/image
-    selected_persons = select_persons_on_vid(frames, all_pose_coords)
+    selected_persons = select_persons_on_vid(video_file_path, frame_range, all_pose_coords)
 
     return selected_persons
 
 
-def select_persons_on_vid(frames, all_pose_coords):
+def select_persons_on_vid(video_file_path, frame_range, all_pose_coords):
     '''
     Interactive UI to select persons from a video by clicking on their bounding boxes.
 
     INPUTS:
-    - frames: …
+    - video_file_path: path to video file
+    - frame_range: tuple (start_frame, end_frame)
     - all_pose_coords: keypoints coordinates. shape (Nframes, Npersons, Nkpts, Ndims)
 
     OUTPUT:
@@ -1026,93 +1028,42 @@ def select_persons_on_vid(frames, all_pose_coords):
     LINE_UNSELECTED_COLOR = 'white'
     LINE_SELECTED_COLOR = 'darkorange'
 
-    selected_persons = []
-
-    # Calculate bounding boxes for each person in each frame
-    n_frames, n_persons = all_pose_coords.shape[0], all_pose_coords.shape[1]
-    all_bboxes = []
-    for frame_idx in range(n_frames):
-        frame_bboxes = []
-        for person_idx in range(n_persons):
-            # Get keypoints for current person
-            keypoints = all_pose_coords[frame_idx, person_idx]
-            valid_keypoints = keypoints[~np.isnan(keypoints).all(axis=1)]
-            if len(valid_keypoints) > 0:
-                # Calculate bounding box
-                x_min, y_min = np.min(valid_keypoints, axis=0)
-                x_max, y_max = np.max(valid_keypoints, axis=0)
-                frame_bboxes.append((x_min, y_min, x_max, y_max))
-            else:
-                frame_bboxes.append((np.nan, np.nan, np.nan, np.nan)) # No valid bounding box for this person
-        all_bboxes.append(frame_bboxes)
-    all_bboxes = np.array(all_bboxes) # Shape: (Nframes, Npersons, 4)
-
-    # Create figure, axes, and slider
-    frame_height, frame_width = frames[0].shape[:2]
-    is_vertical = frame_height > frame_width
-    if is_vertical:
-        fig_height = frame_height / 250 # For vertical videos
-    else:
-        fig_height = max(frame_height / 300, 6) # For horizontal videos
-    fig = plt.figure(figsize=(8, fig_height), num=f'Select the persons to analyze in the desired order')
-    fig.patch.set_facecolor(BACKGROUND_COLOR)
-
-    video_axes_height = 0.7 if is_vertical else 0.6
-    ax_video = plt.axes([0.1, 0.2, 0.8, video_axes_height])
-    ax_video.axis('off')
-    ax_video.set_facecolor(BACKGROUND_COLOR)
-
-    # First image
-    frame_rgb = cv2.cvtColor(frames[0], cv2.COLOR_BGR2RGB)
-    rects, annotations = [], []
-    for person_idx, bbox in enumerate(all_bboxes[0]):
-        if ~np.isnan(bbox).any():
-            x_min, y_min, x_max, y_max = bbox.astype(int)
-            rect = plt.Rectangle(
-                (x_min, y_min), x_max - x_min, y_max - y_min,
-                linewidth=1, edgecolor=LINE_UNSELECTED_COLOR, facecolor=UNSELECTED_COLOR,
-                linestyle='-', path_effects=[patheffects.withSimplePatchShadow()], zorder=2
-            )
-            ax_video.add_patch(rect)
-            annotation = ax_video.text(
-                x_min, y_min - 10, f'{person_idx}', color=LINE_UNSELECTED_COLOR, fontsize=7, fontweight='normal',
-                bbox=dict(facecolor=UNSELECTED_COLOR, edgecolor=LINE_UNSELECTED_COLOR, boxstyle='square,pad=0.3', path_effects=[patheffects.withSimplePatchShadow()]), zorder=3
-            )
-            rects.append(rect)
-            annotations.append(annotation)
-    img_plot = ax_video.imshow(frame_rgb)
-
-    # Slider
-    ax_slider = plt.axes([ax_video.get_position().x0, ax_video.get_position().y0-0.05, ax_video.get_position().width, 0.04])
-    ax_slider.set_facecolor(BACKGROUND_COLOR)
-    frame_slider = Slider(
-        ax=ax_slider,
-        label='',
-        valmin=0,
-        valmax=len(all_pose_coords)-1,
-        valinit=0,
-        valstep=1,
-        valfmt=None
-    )
-    frame_slider.poly.set_edgecolor(SLIDER_EDGE_COLOR)
-    frame_slider.poly.set_facecolor(SLIDER_COLOR)
-    frame_slider.poly.set_linewidth(1)
-    frame_slider.valtext.set_visible(False)
-
-
-    # Status text and OK button
-    ax_status = plt.axes([ax_video.get_position().x0, ax_video.get_position().y0-0.1, 2*ax_video.get_position().width/3, 0.04])
-    ax_status.axis('off')
-    status_text = ax_status.text(0.0, 0.5, f"Selected: None", color='black', fontsize=10)
 
-
-
+    def get_frame(frame_idx):
+        """Get frame with caching"""
+        actual_frame_idx = start_frame + frame_idx
+
+        # Check cache first
+        if actual_frame_idx in frame_cache:
+            # Move to end of cache order (recently used)
+            cache_order.remove(actual_frame_idx)
+            cache_order.append(actual_frame_idx)
+            return frame_cache[actual_frame_idx]
+
+        # Load from video
+        cap.set(cv2.CAP_PROP_POS_FRAMES, actual_frame_idx)
+        success, frame = cap.read()
+        if not success:
+            raise ValueError(f"Could not read frame {actual_frame_idx}")
+
+        # Add to cache
+        frame_cache[actual_frame_idx] = frame.copy()
+        cache_order.append(actual_frame_idx)
+
+        # Remove old frames if cache too large
+        while len(frame_cache) > cache_size:
+            oldest_frame = cache_order.pop(0)
+            if oldest_frame in frame_cache:
+                del frame_cache[oldest_frame]
+
+        return frame
 
 
     def update_frame(val):
         # Update image
         frame_idx = int(frame_slider.val)
-        …
+        frame = get_frame(frame_idx)
+        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
         # Update bboxes and annotations
         for items in [rects, annotations]:
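The new `get_frame` implements a small LRU cache by hand with a dict plus a `cache_order` list (which costs a linear-time `remove` on every cache hit). An equivalent sketch with `collections.OrderedDict`, shown only as an alternative formulation, not as the shipped code:

```python
from collections import OrderedDict

class FrameCache:
    """Same LRU behavior as get_frame's dict + cache_order list,
    with OrderedDict doing the recency bookkeeping in O(1)."""
    def __init__(self, read_frame, max_size=20):
        self.read_frame = read_frame   # callable: frame index -> frame (e.g. a cap.read wrapper)
        self.max_size = max_size
        self.cache = OrderedDict()

    def get(self, idx):
        if idx in self.cache:
            self.cache.move_to_end(idx)        # mark as most recently used
            return self.cache[idx]
        frame = self.read_frame(idx)           # load from the video on a miss
        self.cache[idx] = frame
        if len(self.cache) > self.max_size:
            self.cache.popitem(last=False)     # evict the least recently used frame
        return frame
```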
@@ -1205,6 +1156,101 @@ def select_persons_on_vid(frames, all_pose_coords):
         plt.close(fig)
 
 
+    # Open video
+    cap = cv2.VideoCapture(video_file_path)
+    if not cap.isOpened():
+        raise ValueError(f"Could not open video: {video_file_path}")
+    start_frame, end_frame = frame_range
+
+
+    # Frame cache for efficiency - only keep recently accessed frames
+    frame_cache = {}
+    cache_size = 20 # Keep last 20 frames in memory
+    cache_order = []
+
+    # Calculate bounding boxes for each person in each frame
+    selected_persons = []
+    n_frames, n_persons = all_pose_coords.shape[0], all_pose_coords.shape[1]
+    all_bboxes = []
+    for frame_idx in range(n_frames):
+        frame_bboxes = []
+        for person_idx in range(n_persons):
+            # Get keypoints for current person
+            keypoints = all_pose_coords[frame_idx, person_idx]
+            valid_keypoints = keypoints[~np.isnan(keypoints).all(axis=1)]
+            if len(valid_keypoints) > 0:
+                # Calculate bounding box
+                x_min, y_min = np.min(valid_keypoints, axis=0)
+                x_max, y_max = np.max(valid_keypoints, axis=0)
+                frame_bboxes.append((x_min, y_min, x_max, y_max))
+            else:
+                frame_bboxes.append((np.nan, np.nan, np.nan, np.nan)) # No valid bounding box for this person
+        all_bboxes.append(frame_bboxes)
+    all_bboxes = np.array(all_bboxes) # Shape: (Nframes, Npersons, 4)
+
+    # Create figure, axes, and slider
+    first_frame = get_frame(0)
+    frame_height, frame_width = first_frame.shape[:2]
+    is_vertical = frame_height > frame_width
+    if is_vertical:
+        fig_height = frame_height / 250 # For vertical videos
+    else:
+        fig_height = max(frame_height / 300, 6) # For horizontal videos
+    fig = plt.figure(figsize=(8, fig_height), num=f'Select the persons to analyze in the desired order')
+    fig.patch.set_facecolor(BACKGROUND_COLOR)
+
+    video_axes_height = 0.7 if is_vertical else 0.6
+    ax_video = plt.axes([0.1, 0.2, 0.8, video_axes_height])
+    ax_video.axis('off')
+    ax_video.set_facecolor(BACKGROUND_COLOR)
+
+    # First image
+    frame_rgb = cv2.cvtColor(first_frame, cv2.COLOR_BGR2RGB)
+    rects, annotations = [], []
+    for person_idx, bbox in enumerate(all_bboxes[0]):
+        if ~np.isnan(bbox).any():
+            x_min, y_min, x_max, y_max = bbox.astype(int)
+            rect = plt.Rectangle(
+                (x_min, y_min), x_max - x_min, y_max - y_min,
+                linewidth=1, edgecolor=LINE_UNSELECTED_COLOR, facecolor=UNSELECTED_COLOR,
+                linestyle='-', path_effects=[patheffects.withSimplePatchShadow()], zorder=2
+            )
+            ax_video.add_patch(rect)
+            annotation = ax_video.text(
+                x_min, y_min - 10, f'{person_idx}', color=LINE_UNSELECTED_COLOR, fontsize=7, fontweight='normal',
+                bbox=dict(facecolor=UNSELECTED_COLOR, edgecolor=LINE_UNSELECTED_COLOR, boxstyle='square,pad=0.3', path_effects=[patheffects.withSimplePatchShadow()]), zorder=3
+            )
+            rects.append(rect)
+            annotations.append(annotation)
+    img_plot = ax_video.imshow(frame_rgb)
+
+    # Slider
+    ax_slider = plt.axes([ax_video.get_position().x0, ax_video.get_position().y0-0.05, ax_video.get_position().width, 0.04])
+    ax_slider.set_facecolor(BACKGROUND_COLOR)
+    frame_slider = Slider(
+        ax=ax_slider,
+        label='',
+        valmin=0,
+        valmax=len(all_pose_coords)-1,
+        valinit=0,
+        valstep=1,
+        valfmt=None
+    )
+    frame_slider.poly.set_edgecolor(SLIDER_EDGE_COLOR)
+    frame_slider.poly.set_facecolor(SLIDER_COLOR)
+    frame_slider.poly.set_linewidth(1)
+    frame_slider.valtext.set_visible(False)
+
+
+    # Status text and OK button
+    ax_status = plt.axes([ax_video.get_position().x0, ax_video.get_position().y0-0.1, 2*ax_video.get_position().width/3, 0.04])
+    ax_status.axis('off')
+    status_text = ax_status.text(0.0, 0.5, f"Selected: None", color='black', fontsize=10)
+
+    ax_button = plt.axes([ax_video.get_position().x0 + 3*ax_video.get_position().width/4, ax_video.get_position().y0-0.1, ax_video.get_position().width/4, 0.04])
+    ok_button = Button(ax_button, 'OK', color=BACKGROUND_COLOR)
+
+
     # Connect events
     frame_slider.on_changed(update_frame)
     fig.canvas.mpl_connect('button_press_event', on_click)
@@ -1441,29 +1487,31 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     sections_to_keep = config_dict.get('post-processing').get('sections_to_keep')
 
     do_filter = config_dict.get('post-processing').get('filter')
+    handle_LR_swap = config_dict.get('post-processing').get('handle_LR_swap', False)
     reject_outliers = config_dict.get('post-processing').get('reject_outliers', False)
     show_plots = config_dict.get('post-processing').get('show_graphs')
     filter_type = config_dict.get('post-processing').get('filter_type')
-    butterworth_filter_order = config_dict.get('post-processing').get('butterworth').get('order')
-    butterworth_filter_cutoff = config_dict.get('post-processing').get('butterworth').get('cut_off_frequency')
-    gcv_filter_cutoff = config_dict.get('post-processing').get('gcv_spline').get('gcv_cut_off_frequency')
-    …
-    kalman_filter_trust_ratio = config_dict.get('post-processing').get('kalman').get('trust_ratio')
-    kalman_filter_smooth = config_dict.get('post-processing').get('kalman').get('smooth')
-    gaussian_filter_kernel = config_dict.get('post-processing').get('gaussian').get('sigma_kernel')
-    loess_filter_kernel = config_dict.get('post-processing').get('loess').get('nb_values_used')
-    median_filter_kernel = config_dict.get('post-processing').get('median').get('kernel_size')
-    butterworthspeed_filter_order = config_dict.get('post-processing').get('butterworth_on_speed').get('order')
-    butterworthspeed_filter_cutoff = config_dict.get('post-processing').get('butterworth_on_speed').get('cut_off_frequency')
+    butterworth_filter_order = config_dict.get('post-processing').get('butterworth', {}).get('order')
+    butterworth_filter_cutoff = config_dict.get('post-processing').get('butterworth', {}).get('cut_off_frequency')
+    gcv_filter_cutoff = config_dict.get('post-processing').get('gcv_spline', {}).get('gcv_cut_off_frequency')
+    gcv_smoothing_factor = config_dict.get('post-processing').get('gcv_spline', {}).get('gcv_smoothing_factor')
+    kalman_filter_trust_ratio = config_dict.get('post-processing').get('kalman', {}).get('trust_ratio')
+    kalman_filter_smooth = config_dict.get('post-processing').get('kalman', {}).get('smooth')
+    gaussian_filter_kernel = config_dict.get('post-processing').get('gaussian', {}).get('sigma_kernel')
+    loess_filter_kernel = config_dict.get('post-processing').get('loess', {}).get('nb_values_used')
+    median_filter_kernel = config_dict.get('post-processing').get('median', {}).get('kernel_size')
+    butterworthspeed_filter_order = config_dict.get('post-processing').get('butterworth_on_speed', {}).get('order')
+    butterworthspeed_filter_cutoff = config_dict.get('post-processing').get('butterworth_on_speed', {}).get('cut_off_frequency')
 
     # Create output directories
     if video_file == "webcam":
         current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
-        output_dir_name = f'webcam_{current_date}'
+        output_dir_name = f'webcam_{current_date}_Sports2D'
+        video_file_path = result_dir / output_dir_name / f'webcam_{current_date}_raw.mp4'
     else:
-        video_file_path = video_dir / video_file
         video_file_stem = video_file.stem
         output_dir_name = f'{video_file_stem}_Sports2D'
+        video_file_path = video_dir / video_file
     output_dir = result_dir / output_dir_name
     img_output_dir = output_dir / f'{output_dir_name}_img'
     vid_output_path = output_dir / f'{output_dir_name}.mp4'
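The pattern change in this hunk is `.get('butterworth')` → `.get('butterworth', {})`. The reason the empty-dict default matters, in two lines:

```python
# .get('butterworth') returns None when the sub-table is absent from the user's
# config, and None.get('order') raises AttributeError; `, {}` makes it a no-op.
config = {'post-processing': {'filter_type': 'butterworth'}}  # no 'butterworth' table

# order = config['post-processing'].get('butterworth').get('order')    # AttributeError
order = config['post-processing'].get('butterworth', {}).get('order')  # None, no crash
```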
@@ -1485,21 +1533,19 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     trimmed_extrema_percent = config_dict.get('kinematics').get('trimmed_extrema_percent')
     close_to_zero_speed_px = config_dict.get('kinematics').get('close_to_zero_speed_px')
     close_to_zero_speed_m = config_dict.get('kinematics').get('close_to_zero_speed_m')
-
+    # Create a Pose2Sim dictionary and fill in missing keys
+    recursivedict = lambda: defaultdict(recursivedict)
+    Pose2Sim_config_dict = recursivedict()
+    if do_ik or use_augmentation:
         try:
             if use_augmentation:
                 from Pose2Sim.markerAugmentation import augment_markers_all
             if do_ik:
                 from Pose2Sim.kinematics import kinematics_all
-            if do_filter:
-                from Pose2Sim.filtering import filter_all
         except ImportError:
             logging.error("OpenSim package is not installed. Please install it to use inverse kinematics or marker augmentation features (see 'Full install' section of the documentation).")
             raise ImportError("OpenSim package is not installed. Please install it to use inverse kinematics or marker augmentation features (see 'Full install' section of the documentation).")
 
-    # Create a Pose2Sim dictionary and fill in missing keys
-    recursivedict = lambda: defaultdict(recursivedict)
-    Pose2Sim_config_dict = recursivedict()
     # Fill Pose2Sim dictionary (height and mass will be filled later)
     Pose2Sim_config_dict['project']['project_dir'] = str(output_dir)
     Pose2Sim_config_dict['markerAugmentation']['make_c3d'] = make_c3d
@@ -1511,14 +1557,14 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         kinematics_dir.mkdir(parents=True, exist_ok=True)
 
     if do_filter:
-
+        Pose2Sim_config_dict['personAssociation']['handle_LR_swap'] = handle_LR_swap
         Pose2Sim_config_dict['filtering']['reject_outliers'] = reject_outliers
         Pose2Sim_config_dict['filtering']['filter'] = do_filter
        Pose2Sim_config_dict['filtering']['type'] = filter_type
-        Pose2Sim_config_dict['filtering']['butterworth']['order'] = butterworth_filter_order
-        Pose2Sim_config_dict['filtering']['butterworth']['cut_off_frequency'] = butterworth_filter_cutoff
         Pose2Sim_config_dict['filtering']['gcv_spline']['cut_off_frequency'] = gcv_filter_cutoff
-        Pose2Sim_config_dict['filtering']['gcv_spline']['smoothing_factor'] = …
+        Pose2Sim_config_dict['filtering']['gcv_spline']['smoothing_factor'] = gcv_smoothing_factor
+        Pose2Sim_config_dict['filtering']['butterworth']['cut_off_frequency'] = butterworth_filter_cutoff
+        Pose2Sim_config_dict['filtering']['butterworth']['order'] = butterworth_filter_order
         Pose2Sim_config_dict['filtering']['kalman']['trust_ratio'] = kalman_filter_trust_ratio
         Pose2Sim_config_dict['filtering']['kalman']['smooth'] = kalman_filter_smooth
         Pose2Sim_config_dict['filtering']['gaussian']['sigma_kernel'] = gaussian_filter_kernel
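The `recursivedict` one-liner that moved out of the import guard is worth unpacking: it is a defaultdict whose default factory is itself, so every missing key materializes another nested dict and arbitrarily deep assignments need no intermediate setup. A self-contained demo:

```python
from collections import defaultdict

recursivedict = lambda: defaultdict(recursivedict)

cfg = recursivedict()
# No need to create cfg['filtering'] or cfg['filtering']['butterworth'] first:
cfg['filtering']['butterworth']['order'] = 4
print(cfg['filtering']['butterworth']['order'])  # 4
```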
@@ -1530,12 +1576,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
     # Set up video capture
     if video_file == "webcam":
-        cap, out_vid, cam_width, cam_height, fps = setup_webcam(webcam_id, save_vid, vid_output_path, input_size)
+        cap, out_vid, cam_width, cam_height, fps = setup_webcam(webcam_id, vid_output_path, input_size)
+        frame_rate = fps
         frame_range = [0,sys.maxsize]
         frame_iterator = range(*frame_range)
         logging.warning('Webcam input: the framerate may vary. If results are filtered, Sports2D will use the average framerate as input.')
     else:
-        cap, out_vid, cam_width, cam_height, fps = setup_video(video_file_path, …
+        cap, out_vid, cam_width, cam_height, fps = setup_video(video_file_path, vid_output_path, save_vid)
         fps *= slowmo_factor
         start_time = get_start_time_ffmpeg(video_file_path)
         frame_range = [int((time_range[0]-start_time) * frame_rate), int((time_range[1]-start_time) * frame_rate)] if time_range else [0, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))]
@@ -1632,10 +1679,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     all_frames_X, all_frames_X_flipped, all_frames_Y, all_frames_scores, all_frames_angles = [], [], [], [], []
     frame_processing_times = []
     frame_count = 0
-
+    first_frame = max(int(t0 * fps), frame_range[0])
+    # frames = []
     while cap.isOpened():
         # Skip to the starting frame
-        if frame_count < frame_range[0]:
+        if frame_count < first_frame:
             cap.read()
             frame_count += 1
             continue
@@ -1655,9 +1703,9 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             if save_angles:
                 all_frames_angles.append([])
             continue
-        else: # does not store all frames in memory if they are not saved or used for ordering
-            if save_img or save_vid or person_ordering_method == 'on_click':
-                frames.append(frame.copy())
+        # else: # does not store all frames in memory if they are not saved or used for ordering
+        #     if save_img or save_vid or person_ordering_method == 'on_click':
+        #         frames.append(frame.copy())
 
         # Retrieve pose or Estimate pose and track people
         if load_trc_px:
@@ -1666,6 +1714,10 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             keypoints = keypoints_all[frame_nb]
             scores = scores_all[frame_nb]
         else:
+            # Save video on the fly if the input is a webcam
+            if video_file == "webcam":
+                out_vid.write(frame)
+
             # Detect poses
             keypoints, scores = pose_tracker(frame)
 
@@ -1771,8 +1823,11 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
     # End of the video is reached
     cap.release()
     logging.info(f"Video processing completed.")
-    if save_vid:
+    if save_vid or video_file == "webcam":
         out_vid.release()
+    if video_file == "webcam":
+        vid_output_path.absolute().rename(video_file_path)
+
     if show_realtime_results:
         cv2.destroyAllWindows()
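Together, these two hunks implement a record-then-rename flow for webcams: raw frames are written to a `*_raw.mp4` as they arrive (nothing is kept in memory), and the file is renamed to the final video path once the stream ends, so later passes can re-read the footage from disk. A condensed illustration of the pattern; the function boundary and `max_frames` stop condition are assumptions, not the actual process.py structure:

```python
import cv2
from pathlib import Path

def record_then_rename(cap, out_vid, raw_path: Path, final_path: Path, max_frames=300):
    """Write webcam frames straight to disk, then promote the temp file.
    max_frames stands in for the real stop condition (e.g. the user quitting)."""
    n = 0
    while cap.isOpened() and n < max_frames:
        ok, frame = cap.read()
        if not ok:
            break
        out_vid.write(frame)   # save on the fly, no in-memory frame list
        n += 1
    cap.release()
    out_vid.release()
    raw_path.absolute().rename(final_path)   # same call as in the hunk above
```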
@@ -1809,7 +1864,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         nb_persons_to_detect = nb_detected_persons
 
     if person_ordering_method == 'on_click':
-        selected_persons = get_personIDs_on_click(frames, all_frames_X_homog, all_frames_Y_homog)
+        selected_persons = get_personIDs_on_click(video_file_path, frame_range, all_frames_X_homog, all_frames_Y_homog)
         if len(selected_persons) == 0:
             logging.warning('No persons selected. Analyzing all detected persons.')
             selected_persons = list(range(nb_detected_persons))
@@ -1886,8 +1941,13 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         all_frames_Y_person_interp.replace(np.nan, 0, inplace=True)
 
         # Filter
+        # if handle_LR_swap:
+        #     logging.info(f'Handling left-right swaps.')
+        #     all_frames_X_person_interp = all_frames_X_person_interp.apply(LR_unswap, axis=0)
+        #     all_frames_Y_person_interp = all_frames_Y_person_interp.apply(LR_unswap, axis=0)
+
         if reject_outliers:
-            logging.info('Rejecting outliers with Hampel filter.')
+            logging.info('Rejecting outliers with a Hampel filter.')
             all_frames_X_person_interp = all_frames_X_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
             all_frames_Y_person_interp = all_frames_Y_person_interp.apply(hampel_filter, axis=0, args = [round(7*frame_rate/30), 2])
 
@@ -2136,7 +2196,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
 
         # Filter
         if reject_outliers:
-            logging.info(f'Rejecting outliers with Hampel filter.')
+            logging.info(f'Rejecting outliers with a Hampel filter.')
             all_frames_angles_person_interp = all_frames_angles_person_interp.apply(hampel_filter, axis=0)
 
         if not do_filter:
@@ -2168,7 +2228,7 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
             logging.error(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
             raise ValueError(f"Invalid filter_type: {filter_type}. Must be 'butterworth', 'gcv_spline', 'kalman', 'gaussian', 'loess', or 'median'.")
 
-        logging.info(f'Filtering with {args}…
+        logging.info(f'Filtering with {args}')
         all_frames_angles_person_filt = all_frames_angles_person_interp.apply(filter1d, axis=0, args = [Pose2Sim_config_dict, filter_type, frame_rate])
 
         # Add floor_angle_estim to segment angles
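`hampel_filter` is called above with positional args `[round(7*frame_rate/30), 2]`: a window scaled to the frame rate and a 2-robust-SD threshold. A plausible sketch of such a filter; the real implementation is imported from the Pose2Sim/Sports2D utilities and may differ in detail:

```python
import pandas as pd

def hampel_filter(series, window=7, n_sigmas=2):
    """Replace points further than n_sigmas robust standard deviations from the
    rolling median with the median itself (a classic Hampel outlier rejector)."""
    s = pd.Series(series)
    med = s.rolling(window, center=True, min_periods=1).median()
    mad = (s - med).abs().rolling(window, center=True, min_periods=1).median()
    sigma = 1.4826 * mad                      # MAD -> SD under a Gaussian assumption
    outlier = (s - med).abs() > n_sigmas * sigma
    return s.mask(outlier, med)
```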
@@ -2224,22 +2284,28 @@ def process_fun(config_dict, video_file, time_range, frame_rate, result_dir):
         new_keypoints_ids = list(range(len(new_keypoints_ids)))
 
     # Draw pose and angles
+    first_frame, last_frame = frame_range
     if 'first_trim' not in locals():
-        first_trim, last_trim = …
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
+        first_trim, last_trim = first_frame, last_frame
+    cap = cv2.VideoCapture(video_file_path)
+    cap.set(cv2.CAP_PROP_POS_FRAMES, first_frame+first_trim)
+    for i in range(first_trim, last_trim):
+        success, frame = cap.read()
+        if not success:
+            raise ValueError(f"Could not read frame {i}")
+        img = frame.copy()
+        img = draw_bounding_box(img, all_frames_X_processed[i], all_frames_Y_processed[i], colors=colors, fontSize=fontSize, thickness=thickness)
+        img = draw_keypts(img, all_frames_X_processed[i], all_frames_Y_processed[i], all_frames_scores_processed[i], cmap_str='RdYlGn')
+        img = draw_skel(img, all_frames_X_processed[i], all_frames_Y_processed[i], pose_model_with_new_ids)
+        if calculate_angles:
+            img = draw_angles(img, all_frames_X_processed[i], all_frames_Y_processed[i], all_frames_angles_processed[i], all_frames_X_flipped_processed[i], new_keypoints_ids, new_keypoints_names, angle_names, display_angle_values_on=display_angle_values_on, colors=colors, fontSize=fontSize, thickness=thickness)
+
+        # Save video or images
+        if save_vid:
+            out_vid.write(img)
+        if save_img:
+            cv2.imwrite(str((img_output_dir / f'{output_dir_name}_{(i+frame_range[0]):06d}.png')), img)
+    cap.release()
 
     if save_vid:
         out_vid.release()
{sports2d-0.8.17 → sports2d-0.8.19}/sports2d.egg-info/PKG-INFO

@@ -1,6 +1,6 @@ · @@ -35,7 +35,7 @@ · @@ -145,7 +145,7 @@ · @@ -169,7 +169,7 @@ · @@ -568,7 +568,7 @@ · @@ -637,13 +637,13 @@ · @@ -652,12 +652,14 @@

sports2d.egg-info/PKG-INFO is a build artifact with the same content as PKG-INFO, and its seven hunks are identical to the PKG-INFO hunks above: version bump to 0.8.19, the opencv-python<4.12 pin, the python/conda version bumps to 3.12, the expanded --det_frequency and workflow-step text, and the new step 8 on OpenSim scaling and inverse kinematics.