sports2d 0.8.23.tar.gz → 0.8.25.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {sports2d-0.8.23/sports2d.egg-info → sports2d-0.8.25}/PKG-INFO +46 -50
  2. {sports2d-0.8.23 → sports2d-0.8.25}/README.md +45 -34
  3. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Demo/Config_demo.toml +12 -9
  4. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Sports2D.py +12 -8
  5. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Utilities/common.py +3 -0
  6. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Utilities/tests.py +3 -2
  7. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/process.py +278 -114
  8. {sports2d-0.8.23 → sports2d-0.8.25}/pyproject.toml +0 -15
  9. {sports2d-0.8.23 → sports2d-0.8.25/sports2d.egg-info}/PKG-INFO +46 -50
  10. sports2d-0.8.25/sports2d.egg-info/requires.txt +2 -0
  11. sports2d-0.8.23/sports2d.egg-info/requires.txt +0 -17
  12. {sports2d-0.8.23 → sports2d-0.8.25}/.github/workflows/continuous-integration.yml +0 -0
  13. {sports2d-0.8.23 → sports2d-0.8.25}/.github/workflows/joss_pdf.yml +0 -0
  14. {sports2d-0.8.23 → sports2d-0.8.25}/.github/workflows/publish-on-release.yml +0 -0
  15. {sports2d-0.8.23 → sports2d-0.8.25}/.gitignore +0 -0
  16. {sports2d-0.8.23 → sports2d-0.8.25}/CITATION.cff +0 -0
  17. {sports2d-0.8.23 → sports2d-0.8.25}/Content/Demo_plots.png +0 -0
  18. {sports2d-0.8.23 → sports2d-0.8.25}/Content/Demo_results.png +0 -0
  19. {sports2d-0.8.23 → sports2d-0.8.25}/Content/Demo_terminal.png +0 -0
  20. {sports2d-0.8.23 → sports2d-0.8.25}/Content/Person_selection.png +0 -0
  21. {sports2d-0.8.23 → sports2d-0.8.25}/Content/Video_tuto_Sports2D_Colab.png +0 -0
  22. {sports2d-0.8.23 → sports2d-0.8.25}/Content/joint_convention.png +0 -0
  23. {sports2d-0.8.23 → sports2d-0.8.25}/Content/paper.bib +0 -0
  24. {sports2d-0.8.23 → sports2d-0.8.25}/Content/paper.md +0 -0
  25. {sports2d-0.8.23 → sports2d-0.8.25}/Content/sports2d_blender.gif +0 -0
  26. {sports2d-0.8.23 → sports2d-0.8.25}/Content/sports2d_opensim.gif +0 -0
  27. {sports2d-0.8.23 → sports2d-0.8.25}/LICENSE +0 -0
  28. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Demo/Calib_demo.toml +0 -0
  29. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Demo/demo.mp4 +0 -0
  30. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Sports2D.ipynb +0 -0
  31. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/Utilities/__init__.py +0 -0
  32. {sports2d-0.8.23 → sports2d-0.8.25}/Sports2D/__init__.py +0 -0
  33. {sports2d-0.8.23 → sports2d-0.8.25}/setup.cfg +0 -0
  34. {sports2d-0.8.23 → sports2d-0.8.25}/sports2d.egg-info/SOURCES.txt +0 -0
  35. {sports2d-0.8.23 → sports2d-0.8.25}/sports2d.egg-info/dependency_links.txt +0 -0
  36. {sports2d-0.8.23 → sports2d-0.8.25}/sports2d.egg-info/entry_points.txt +0 -0
  37. {sports2d-0.8.23 → sports2d-0.8.25}/sports2d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sports2d
3
- Version: 0.8.23
3
+ Version: 0.8.25
4
4
  Summary: Compute 2D human pose and angles from a video or a webcam.
5
5
  Author-email: David Pagnon <contact@david-pagnon.com>
6
6
  Maintainer-email: David Pagnon <contact@david-pagnon.com>
@@ -22,22 +22,7 @@ Classifier: Topic :: Multimedia :: Graphics :: 3D Modeling
22
22
  Requires-Python: >=3.9
23
23
  Description-Content-Type: text/markdown
24
24
  License-File: LICENSE
25
- Requires-Dist: toml
26
- Requires-Dist: numpy>=1.19
27
- Requires-Dist: matplotlib
28
- Requires-Dist: PyQt5
29
- Requires-Dist: tqdm
30
- Requires-Dist: anytree
31
- Requires-Dist: pandas>=1.5
32
- Requires-Dist: scipy
33
- Requires-Dist: statsmodels
34
- Requires-Dist: ipython
35
- Requires-Dist: c3d
36
- Requires-Dist: rtmlib
37
- Requires-Dist: openvino
38
- Requires-Dist: opencv-python<4.12
39
25
  Requires-Dist: imageio_ffmpeg
40
- Requires-Dist: deep-sort-realtime
41
26
  Requires-Dist: Pose2Sim>=0.10.38
42
27
  Dynamic: license-file
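  Note: most runtime requirements are no longer declared directly by sports2d; apart from `imageio_ffmpeg`, they are presumably pulled in transitively through `Pose2Sim>=0.10.38`. A quick way to check what an installed distribution actually declares is the standard-library metadata API; this is only a verification sketch, not part of the package:

  ``` python
  # Sketch: list the dependencies declared by the installed sports2d distribution.
  # Standard library only; run after `pip install sports2d`.
  from importlib.metadata import requires, version

  print(version("sports2d"))            # e.g. 0.8.25
  for req in requires("sports2d") or []:
      print(req)                        # expected: imageio_ffmpeg, Pose2Sim>=0.10.38
  ```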
43
28
 
@@ -67,7 +52,7 @@ Dynamic: license-file
67
52
  </br>
68
53
 
69
54
  > **`Announcements:`**
70
- > - Generate or import a calibration file, OpenSim skeleton overlay **New in v0.9!**
55
+ > - Compensate for floor angle, floor height, and depth perspective effects; generate a calibration file **New in v0.9!**
71
56
  > - Select only the persons you want to analyze **New in v0.8!**
72
57
  > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
73
58
  > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -88,7 +73,9 @@ Works on any smartphone!**\
88
73
  </br>
89
74
 
90
75
 
91
- https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
76
+ https://github.com/user-attachments/assets/2ce62012-f28c-4e23-b3b8-f68931bacb77
77
+
78
+ <!-- https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad -->
92
79
 
93
80
  <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
94
81
 
@@ -307,30 +294,35 @@ sports2d --person_ordering_method on_click
307
294
  #### Get coordinates in meters:
308
295
  > **N.B.:** The Z coordinate (depth) should not be overly trusted.
309
296
 
310
- You may want coordinates in meters rather than pixels. 2 options to do so:
297
+ To convert from pixels to meters, you need at a minimum the height of a participant. Better results can be obtained by also providing information about depth. The camera horizon angle and the floor height are generally estimated automatically. **N.B.: A calibration file will be generated.**
311
298
 
312
- 1. **Just provide the height of a reference person**:
313
- - Their height in meters is be compared with their height in pixels to get a pixel-to-meter conversion factor.
314
- - To estimate the depth coordinates, specify which side of the person is visible: `left`, `right`, `front`, or `back`. Use `auto` if you want it to be automatically determined (only works for motions in the sagittal plane), or `none` if you want to keep 2D coordinates instead of 3D (if the person turns around, for example).
315
- - The floor angle is automatically estimated from gait, as well as the origin of the xy axis. The person trajectory is corrected accordingly. You can use the `--floor_angle` and `--xy_origin` parameters to manually specify them if your subject is not travelling horizontally or if you want the origin not to be under their feet (note that the `y` axis points down).
316
-
317
- **N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
299
+ - The pixel-to-meters scale is computed from the ratio between the height of the participant in meters and in pixels. The height in pixels is automatically calculated; use the `--first_person_height` parameter to specify the height in meters.
300
+ - Depth perspective effects can be compensated for with either the camera-to-person distance (m), the focal length (px), the field of view (degrees or radians), or a calibration file. Use the `--perspective_unit` ('distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib') and `--perspective_value` parameters (in m, px, deg, rad, or '', respectively); see the sketch after the examples below.
301
+ - The camera horizon angle can be estimated from kinematics (`auto`), from a calibration file (`from_calib`), or manually (float). Use the `--floor_angle` parameter.
302
+ - Likewise for the floor level. Use the `--xy_origin` parameter.
318
303
 
319
- ``` cmd
320
- sports2d --first_person_height 1.65 --visible_side auto front none
321
- ```
322
- ``` cmd
323
- sports2d --first_person_height 1.65 --visible_side auto front none `
324
- --person_ordering_method on_click `
325
- --floor_angle 0 --xy_origin 0 940
326
- ```
304
+ If any of these parameters is set to `from_calib`, also provide `--calib_file`.
327
305
 
328
- 2. **Or use a calibration file**:\
329
- It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
330
306
 
331
- ``` cmd
332
- sports2d --calib_file Calib_demo.toml --visible_side auto front none
333
- ```
307
+ ``` cmd
308
+ sports2d --first_person_height 1.65
309
+ ```
310
+ ``` cmd
311
+ sports2d --first_person_height 1.65 `
312
+ --floor_angle auto `
313
+ --xy_origin auto `
314
+ --perspective_unit distance_m --perspective_value 10
315
+ ```
316
+ ``` cmd
317
+ sports2d --first_person_height 1.65 `
318
+ --floor_angle 0 `
319
+ --xy_origin from_calib `
320
+ --perspective_unit from_calib --calib_file Sports2D\Demo\Calib_demo.toml
321
+ ```
322
+ ``` cmd
323
+ sports2d --first_person_height 1.65 `
324
+ --perspective_unit f_px --perspective_value 2520
325
+ ```
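+ A minimal sketch of the conversions described above, assuming a simple pinhole model: the pixel-to-meter scale comes from the participant's known height, and the `--perspective_unit` options (field of view, focal length in pixels, camera-to-person distance) are different ways of pinning down the same focal length. The function names and the exact formulas Sports2D uses internally may differ; this only illustrates the relationships:
+
+ ``` python
+ import math
+
+ def focal_px_from_fov(fov_deg, image_width_px):
+     # Pinhole model: f_px = (W / 2) / tan(FOV / 2)
+     return (image_width_px / 2) / math.tan(math.radians(fov_deg) / 2)
+
+ def px_to_m_scale(person_height_m, person_height_px):
+     # Height of the reference person in meters divided by their height in pixels
+     return person_height_m / person_height_px
+
+ def camera_distance_m(focal_px, person_height_m, person_height_px):
+     # Similar triangles: distance = f_px * real_height / pixel_height
+     return focal_px * person_height_m / person_height_px
+
+ # Made-up numbers: 1.65 m person, 500 px tall in a 1920 px wide image, 40 deg FOV
+ f_px = focal_px_from_fov(40, 1920)          # ~2638 px
+ scale = px_to_m_scale(1.65, 500)            # meters per pixel at the person's depth
+ dist = camera_distance_m(f_px, 1.65, 500)   # ~8.7 m
+ ```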
334
326
 
335
327
  <br>
336
328
 
@@ -493,20 +485,25 @@ sports2d --help
493
485
  'config': ["C", "path to a toml configuration file"],
494
486
 
495
487
  'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
488
+ 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
496
489
  'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
497
490
  'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
498
491
  'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
499
492
  'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
493
+ 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
494
+ 'perspective_value': ["", "Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'"],
495
+ 'perspective_unit': ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
496
+ 'do_ik': ["", "do inverse kinematics. false if not specified"],
497
+ 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
500
498
  'load_trc_px': ["", "load trc file to avoid running pose estimation again. false if not specified"],
501
499
  'compare': ["", "visually compare motion with trc file. false if not specified"],
502
- 'webcam_id': ["w", "webcam ID. 0 if not specified"],
503
- 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
504
500
  'video_dir': ["d", "current directory if not specified"],
505
501
  'result_dir': ["r", "current directory if not specified"],
502
+ 'webcam_id': ["w", "webcam ID. 0 if not specified"],
506
503
  'show_realtime_results': ["R", "show results in real-time. true if not specified"],
507
504
  'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
508
505
  'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
509
- 'save_graphs': ["", "save position and angle plots of raw and processed results. false if not specified"],
506
+ 'save_graphs': ["", "save position and angle plots of raw and processed results. true if not specified"],
510
507
  'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
511
508
  'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
512
509
  'save_vid': ["V", "save processed video. true if not specified"],
@@ -527,11 +524,8 @@ sports2d --help
527
524
  'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
528
525
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
529
526
  'save_calib': ["", "save calibration file. true if not specified"],
530
- 'do_ik': ["", "do inverse kinematics. false if not specified"],
531
- 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
532
527
  'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
533
- 'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine. false if not specified"],
534
- 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
528
+ 'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
535
529
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
536
530
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
537
531
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
@@ -541,6 +535,7 @@ sports2d --help
541
535
  'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
542
536
  'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
543
537
  'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
538
+ 'max_distance': ["", "If a person is detected farther than max_distance from its position in the previous frame, it is considered a new person. In px or None; 250 by default."],
544
539
  'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
545
540
  'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
546
541
  'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
@@ -552,15 +547,16 @@ sports2d --help
552
547
  'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
553
548
  'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
554
549
  'sections_to_keep': ["", "all, largest, first, or last. Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one"],
550
+ 'min_chunk_size': ["", "Minimum number of valid frames in a row to keep a chunk of data for a person. 10 if not specified"],
555
551
  'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
556
552
  'filter': ["", "filter results. true if not specified"],
557
553
  'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],
554
+ 'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
558
555
  'order': ["", "order of the Butterworth filter. 4 if not specified"],
559
- 'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 3 if not specified"],
556
+ 'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' is usually better, unless the signal is too short (noise can then be considered as signal -> trajectories not filtered). 'auto' if not specified"],
557
+ 'gcv_smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if gcv_cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 1.0 if not specified"],
560
558
  'trust_ratio': ["", "trust ratio of the Kalman filter: How much more do you trust triangulation results (measurements), than the assumption of constant acceleration(process)? 500 if not specified"],
561
559
  'smooth': ["", "dual Kalman smoothing. true if not specified"],
562
- 'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' if not specified"],
563
- 'smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 0.1 if not specified"],
564
560
  'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
565
561
  'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
566
562
  'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
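  For reference, a zero-phase Butterworth filter with the defaults listed above (order 4, 6 Hz cut-off) can be reproduced with SciPy. This is only an illustration of what those two parameters mean, not the package's own filtering code:

  ``` python
  import numpy as np
  from scipy.signal import butter, filtfilt

  def butterworth_lowpass(signal, fps, cut_off_frequency=6.0, order=4):
      # Cut-off is normalized by the Nyquist frequency (fps / 2).
      # filtfilt runs the filter forward and backward, so no phase lag is introduced.
      b, a = butter(order, cut_off_frequency / (fps / 2), btype="low")
      return filtfilt(b, a, signal)

  fps = 60
  t = np.arange(0, 2, 1 / fps)
  noisy_angle = 30 * np.sin(2 * np.pi * t) + np.random.randn(t.size)  # fake joint-angle trace
  smoothed = butterworth_lowpass(noisy_angle, fps)
  ```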
@@ -669,11 +665,11 @@ Sports2D:
669
665
 
670
666
  2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, the person bounding boxes can be tracked instead of detected every frame. Any RTMPose model can be used.
671
667
 
672
- 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames. We crafted a 'sports2D' tracker which gives good results and runs in real time, but it is also possible to use `deepsort` in particularly challenging situations.
668
+ 3. **Tracks people** so that their IDs are consistent across frames. A person is matched to the nearest detection in the next frame when the distance between them is small. IDs remain consistent even if the person disappears for a few frames, thanks to the 'sports2D' tracker (a simplified association sketch follows this list). [See Release notes of v0.8.22 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.22).
673
669
 
674
670
  4. **Chooses which persons to analyze.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, you can choose the number of persons to analyze (`nb_persons_to_detect`), and how to order them (`person_ordering_method`). The ordering method can be 'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. `on_click` is default and lets the user click on the persons they are interested in, in the desired order.
675
671
 
676
- 4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The floor angle and the coordinate origin can either be detected automatically from the gait sequence, or be manually specified. The depth coordinates are set to normative values, depending on whether the person is going left, right, facing the camera, or looking away.
672
+ 4. **Converts the pixel coordinates to meters.** The user can provide the height of a specified person to scale results accordingly. The camera horizon angle and the floor level can either be detected automatically from the gait sequence, be specified manually, or be obtained from a calibration file. Depth perspective effects are compensated for using the camera-to-subject distance, the focal length, the field of view, or a calibration file. [See Release notes of v0.8.25 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.25).
677
673
 
678
674
  5. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
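  A toy version of the frame-to-frame association described in step 3, with the `max_distance` gate (default 250 px in this release): each tracked person is matched to the nearest detection in the new frame, and detections farther than `max_distance` from everyone start a new ID. The real 'sports2D' tracker is more involved (see the v0.8.22 release notes); this is only meant to make the idea concrete:

  ``` python
  import numpy as np

  def associate(prev_positions, new_positions, max_distance=250):
      """Greedy nearest-neighbour association between two consecutive frames.

      prev_positions: dict {person_id: (x, y)} from the previous frame
      new_positions:  list of (x, y) detections in the current frame
      """
      matches, used = {}, set()
      next_id = max(prev_positions, default=-1) + 1
      for pid, (px, py) in prev_positions.items():
          # Distance from this tracked person to every not-yet-used detection
          dists = {i: np.hypot(x - px, y - py)
                   for i, (x, y) in enumerate(new_positions) if i not in used}
          if dists:
              i = min(dists, key=dists.get)
              if dists[i] <= max_distance:      # gate: beyond this, do not match
                  matches[pid] = new_positions[i]
                  used.add(i)
      for i, pos in enumerate(new_positions):
          if i not in used:                     # unmatched detection -> new person ID
              matches[next_id] = pos
              next_id += 1
      return matches
  ```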
679
675
 
@@ -24,7 +24,7 @@
24
24
  </br>
25
25
 
26
26
  > **`Announcements:`**
27
- > - Generate or import a calibration file, OpenSim skeleton overlay **New in v0.9!**
27
+ > - Compensate for floor angle, floor height, and depth perspective effects; generate a calibration file **New in v0.9!**
28
28
  > - Select only the persons you want to analyze **New in v0.8!**
29
29
  > - MarkerAugmentation and Inverse Kinematics for accurate 3D motion with OpenSim. **New in v0.7!**
30
30
  > - Any detector and pose estimation model can be used. **New in v0.6!**
@@ -45,7 +45,9 @@ Works on any smartphone!**\
45
45
  </br>
46
46
 
47
47
 
48
- https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad
48
+ https://github.com/user-attachments/assets/2ce62012-f28c-4e23-b3b8-f68931bacb77
49
+
50
+ <!-- https://github.com/user-attachments/assets/6a444474-4df1-4134-af0c-e9746fa433ad -->
49
51
 
50
52
  <!-- https://github.com/user-attachments/assets/1c6e2d6b-d0cf-4165-864e-d9f01c0b8a0e -->
51
53
 
@@ -264,30 +266,35 @@ sports2d --person_ordering_method on_click
264
266
  #### Get coordinates in meters:
265
267
  > **N.B.:** The Z coordinate (depth) should not be overly trusted.
266
268
 
267
- You may want coordinates in meters rather than pixels. 2 options to do so:
269
+ To convert from pixels to meters, you need at a minimum the height of a participant. Better results can be obtained by also providing information about depth. The camera horizon angle and the floor height are generally estimated automatically. **N.B.: A calibration file will be generated.**
268
270
 
269
- 1. **Just provide the height of a reference person**:
270
- - Their height in meters is be compared with their height in pixels to get a pixel-to-meter conversion factor.
271
- - To estimate the depth coordinates, specify which side of the person is visible: `left`, `right`, `front`, or `back`. Use `auto` if you want it to be automatically determined (only works for motions in the sagittal plane), or `none` if you want to keep 2D coordinates instead of 3D (if the person turns around, for example).
272
- - The floor angle is automatically estimated from gait, as well as the origin of the xy axis. The person trajectory is corrected accordingly. You can use the `--floor_angle` and `--xy_origin` parameters to manually specify them if your subject is not travelling horizontally or if you want the origin not to be under their feet (note that the `y` axis points down).
273
-
274
- **N.B.: A calibration file will be generated.** By convention, the camera-to-subject distance is set to 10 meters.
271
+ - The pixel-to-meters scale is computed from the ratio between the height of the participant in meters and in pixels. The height in pixels is automatically calculated; use the `--first_person_height` parameter to specify the height in meters.
272
+ - Depth perspective effects can be compensated for with either the camera-to-person distance (m), the focal length (px), the field of view (degrees or radians), or a calibration file. Use the `--perspective_unit` ('distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib') and `--perspective_value` parameters (in m, px, deg, rad, or '', respectively).
273
+ - The camera horizon angle can be estimated from kinematics (`auto`), from a calibration file (`from_calib`), or manually (float). Use the `--floor_angle` parameter.
274
+ - Likewise for the floor level. Use the `--xy_origin` parameter.
275
275
 
276
- ``` cmd
277
- sports2d --first_person_height 1.65 --visible_side auto front none
278
- ```
279
- ``` cmd
280
- sports2d --first_person_height 1.65 --visible_side auto front none `
281
- --person_ordering_method on_click `
282
- --floor_angle 0 --xy_origin 0 940
283
- ```
276
+ If any of these parameters is set to `from_calib`, also provide `--calib_file`.
284
277
 
285
- 2. **Or use a calibration file**:\
286
- It can either be a `.toml` calibration file previously generated by Sports2D, or a more accurate one coming from another system. For example, [Pose2Sim](https://github.com/perfanalytics/pose2sim) can be used to accurately calculate calibration, or to convert calibration files from Qualisys, Vicon, OpenCap, FreeMoCap, etc.
287
278
 
288
- ``` cmd
289
- sports2d --calib_file Calib_demo.toml --visible_side auto front none
290
- ```
279
+ ``` cmd
280
+ sports2d --first_person_height 1.65
281
+ ```
282
+ ``` cmd
283
+ sports2d --first_person_height 1.65 `
284
+ --floor_angle auto `
285
+ --xy_origin auto `
286
+ --perspective_unit distance_m --perspective_value 10
287
+ ```
288
+ ``` cmd
289
+ sports2d --first_person_height 1.65 `
290
+ --floor_angle 0 `
291
+ --xy_origin from_calib `
292
+ --perspective_unit from_calib --calib_file Sports2D\Demo\Calib_demo.toml
293
+ ```
294
+ ``` cmd
295
+ sports2d --first_person_height 1.65 `
296
+ --perspective_unit f_px --perspective_value 2520
297
+ ```
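+ A small sketch of the horizon and floor-level correction described in the bullets above: once a floor angle and an xy origin are known (estimated from kinematics, read from a calibration file, or given manually), pixel coordinates are shifted to that origin and rotated so the floor becomes horizontal, before the pixel-to-meter scaling is applied. The function name and sign convention here are illustrative, not Sports2D's internals; remember that the pixel `y` axis points down:
+
+ ``` python
+ import numpy as np
+
+ def level_coordinates(points_px, floor_angle_deg, xy_origin_px):
+     """Shift pixel coordinates to xy_origin_px, then rotate by the floor angle
+     so the floor line becomes horizontal (sign depends on how the angle was measured)."""
+     a = np.radians(floor_angle_deg)
+     rot = np.array([[np.cos(a), np.sin(a)],
+                     [-np.sin(a), np.cos(a)]])
+     return (np.asarray(points_px, dtype=float) - np.asarray(xy_origin_px)) @ rot.T
+
+ # Example: origin under the feet at (0, 940) px, 2-degree camera tilt, 1.65 m / 500 px scale
+ leveled = level_coordinates([[320, 700], [350, 640]], floor_angle_deg=2.0, xy_origin_px=[0, 940])
+ coords_m = leveled * (1.65 / 500)
+ ```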
291
298
 
292
299
  <br>
293
300
 
@@ -450,20 +457,25 @@ sports2d --help
450
457
  'config': ["C", "path to a toml configuration file"],
451
458
 
452
459
  'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
460
+ 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
453
461
  'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
454
462
  'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
455
463
  'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
456
464
  'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
465
+ 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
466
+ 'perspective_value': ["", "Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'"],
467
+ 'perspective_unit': ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
468
+ 'do_ik': ["", "do inverse kinematics. false if not specified"],
469
+ 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
457
470
  'load_trc_px': ["", "load trc file to avoid running pose estimation again. false if not specified"],
458
471
  'compare': ["", "visually compare motion with trc file. false if not specified"],
459
- 'webcam_id': ["w", "webcam ID. 0 if not specified"],
460
- 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
461
472
  'video_dir': ["d", "current directory if not specified"],
462
473
  'result_dir': ["r", "current directory if not specified"],
474
+ 'webcam_id': ["w", "webcam ID. 0 if not specified"],
463
475
  'show_realtime_results': ["R", "show results in real-time. true if not specified"],
464
476
  'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
465
477
  'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
466
- 'save_graphs': ["", "save position and angle plots of raw and processed results. false if not specified"],
478
+ 'save_graphs': ["", "save position and angle plots of raw and processed results. true if not specified"],
467
479
  'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
468
480
  'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
469
481
  'save_vid': ["V", "save processed video. true if not specified"],
@@ -484,11 +496,8 @@ sports2d --help
484
496
  'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
485
497
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
486
498
  'save_calib': ["", "save calibration file. true if not specified"],
487
- 'do_ik': ["", "do inverse kinematics. false if not specified"],
488
- 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
489
499
  'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
490
- 'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine. false if not specified"],
491
- 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
500
+ 'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
492
501
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
493
502
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
494
503
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
@@ -498,6 +507,7 @@ sports2d --help
498
507
  'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
499
508
  'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
500
509
  'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
510
+ 'max_distance': ["", "If a person is detected farther than max_distance from its position in the previous frame, it is considered a new person. In px or None; 250 by default."],
501
511
  'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
502
512
  'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
503
513
  'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
@@ -509,15 +519,16 @@ sports2d --help
509
519
  'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
510
520
  'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
511
521
  'sections_to_keep': ["", "all, largest, first, or last. Keep 'all' valid sections even when they are interspersed with undetected chunks, or the 'largest' valid section, or the 'first' one, or the 'last' one"],
522
+ 'min_chunk_size': ["", "Minimum number of valid frames in a row to keep a chunk of data for a person. 10 if not specified"],
512
523
  'reject_outliers': ["", "reject outliers with Hampel filter before other filtering methods. true if not specified"],
513
524
  'filter': ["", "filter results. true if not specified"],
514
525
  'filter_type': ["", "butterworth, kalman, gcv_spline, gaussian, median, or loess. butterworth if not specified"],
526
+ 'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 6 if not specified"],
515
527
  'order': ["", "order of the Butterworth filter. 4 if not specified"],
516
- 'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 3 if not specified"],
528
+ 'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' is usually better, unless the signal is too short (noise can then be considered as signal -> trajectories not filtered). 'auto' if not specified"],
529
+ 'gcv_smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if gcv_cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 1.0 if not specified"],
517
530
  'trust_ratio': ["", "trust ratio of the Kalman filter: How much more do you trust triangulation results (measurements), than the assumption of constant acceleration(process)? 500 if not specified"],
518
531
  'smooth': ["", "dual Kalman smoothing. true if not specified"],
519
- 'gcv_cut_off_frequency': ["", "cut-off frequency of the GCV spline filter. 'auto' if not specified"],
520
- 'smoothing_factor': ["", "smoothing factor of the GCV spline filter (>=0). Ignored if cut_off_frequency != 'auto'. Biases results towards more smoothing (>1) or more fidelity to data (<1). 0.1 if not specified"],
521
532
  'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
522
533
  'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
523
534
  'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
@@ -626,11 +637,11 @@ Sports2D:
626
637
 
627
638
  2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, the person bounding boxes can be tracked instead of detected every frame. Any RTMPose model can be used.
628
639
 
629
- 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames. We crafted a 'sports2D' tracker which gives good results and runs in real time, but it is also possible to use `deepsort` in particularly challenging situations.
640
+ 3. **Tracks people** so that their IDs are consistent across frames. A person is matched to the nearest detection in the next frame when the distance between them is small. IDs remain consistent even if the person disappears for a few frames, thanks to the 'sports2D' tracker. [See Release notes of v0.8.22 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.22).
630
641
 
631
642
  4. **Chooses which persons to analyze.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, you can choose the number of persons to analyze (`nb_persons_to_detect`), and how to order them (`person_ordering_method`). The ordering method can be 'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. `on_click` is default and lets the user click on the persons they are interested in, in the desired order.
632
643
 
633
- 4. **Converts the pixel coordinates to meters.** The user can provide the size of a specified person to scale results accordingly. The floor angle and the coordinate origin can either be detected automatically from the gait sequence, or be manually specified. The depth coordinates are set to normative values, depending on whether the person is going left, right, facing the camera, or looking away.
644
+ 4. **Converts the pixel coordinates to meters.** The user can provide the height of a specified person to scale results accordingly. The camera horizon angle and the floor level can either be detected automatically from the gait sequence, be specified manually, or be obtained from a calibration file. Depth perspective effects are compensated for using the camera-to-subject distance, the focal length, the field of view, or a calibration file. [See Release notes of v0.8.25 for more information](https://github.com/davidpagnon/Sports2D/releases/tag/v0.8.25).
634
645
 
635
646
  5. **Computes the selected joint and segment angles**, and flips them on the left/right side if the respective foot is pointing to the left/right.
636
647
 
@@ -97,7 +97,8 @@ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower, har
97
97
  keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
98
98
  average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
99
99
  keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
100
- max_distance = 100 # in px or None # If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one
100
+ max_distance = 250 # in px or None # If a person is detected further than max_distance from its position on the previous frame, it will be considered as a new one
101
+
101
102
 
102
103
  [px_to_meters_conversion]
103
104
  # Pixel to meters conversion
@@ -105,15 +106,17 @@ to_meters = true
105
106
  make_c3d = true
106
107
  save_calib = true
107
108
 
108
- # If conversion from first_person_height
109
- floor_angle = 'auto' # 'auto' or a value in degrees, eg 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)
110
- xy_origin = ['auto'] # ['auto'] or [px_x,px_y]. N.B.: px_y points downwards. If ['auto'], direction estimated from the start to the end of the line formed by the toes when they are on the ground
109
+ # Compensate for perspective effects, which make the farther limb look smaller. 1-2% coordinate error at 10 m; less if the camera is farther away
110
+ perspective_value = 10 # Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'
111
+ perspective_unit = 'distance_m' # 'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'
112
+
113
+ # Compensate for camera horizon
114
+ floor_angle = 'auto' # float, 'from_kinematics', 'from_calib', or 'auto' # 'auto' is equivalent to 'from_kinematics', ie angle calculated from foot contacts. 'from_calib' calculates it from a toml calibration file. Use float to manually specify it in degrees
115
+ xy_origin = ['auto'] # [px_x,px_y], or ['from_kinematics'], ['from_calib'], or ['auto']. # BETWEEN BRACKETS! # ['auto'] is equivalent to ['from_kinematics'], i.e. origin estimated at first foot contact, direction is direction of motion. ['from_calib'] calculates it from a calibration file. Use [px_x,px_y] to manually specify it in pixels (px_y points downwards)
116
+
117
+ # Optional calibration file
118
+ calib_file = '' # Calibration file in the Pose2Sim toml format, or '' if not available
111
119
 
112
- # If conversion from a calibration file
113
- calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
114
- # subject_distance
115
- # focal_distance
116
- # recalculate_extrinsics
117
120
 
118
121
  [angles]
119
122
  display_angle_values_on = ['body', 'list'] # 'body', 'list', ['body', 'list'], 'none'. Display angle values on the body, as a list in the upper left of the image, both, or do not display them.
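  Since the `[px_to_meters_conversion]` block above is plain TOML, a quick way to inspect or tweak it before a run is to load it with a TOML reader such as the `toml` package. A minimal sketch, using only the section and key names shown above:

  ``` python
  import toml

  cfg = toml.load("Config_demo.toml")
  p2m = cfg["px_to_meters_conversion"]

  # Switch from a camera distance to an explicit focal length, keep everything else
  p2m["perspective_unit"] = "f_px"
  p2m["perspective_value"] = 2520
  p2m["floor_angle"] = "auto"          # or 'from_kinematics', 'from_calib', or a float in degrees
  p2m["xy_origin"] = ["auto"]          # always a list, e.g. [0, 940] for manual pixels

  with open("Config_custom.toml", "w") as f:
      toml.dump(cfg, f)
  ```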
@@ -152,7 +152,7 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
152
152
  'keypoint_likelihood_threshold': 0.3,
153
153
  'average_likelihood_threshold': 0.5,
154
154
  'keypoint_number_threshold': 0.3,
155
- 'max_distance': 100,
155
+ 'max_distance': 250,
156
156
  'CUSTOM': { 'name': 'Hip',
157
157
  'id': 19,
158
158
  'children': [{'name': 'RHip',
@@ -194,10 +194,12 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
194
194
  'px_to_meters_conversion': {
195
195
  'to_meters': True,
196
196
  'make_c3d': True,
197
- 'calib_file': '',
197
+ 'save_calib': True,
198
+ 'perspective_value': 10.0,
199
+ 'perspective_unit': 'distance_m',
198
200
  'floor_angle': 'auto',
199
201
  'xy_origin': ['auto'],
200
- 'save_calib': True
202
+ 'calib_file': '',
201
203
  },
202
204
  'angles': {'display_angle_values_on': ['body', 'list'],
203
205
  'fontSize': 0.3,
@@ -269,16 +271,21 @@ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
269
271
 
270
272
  CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
271
273
  'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
274
+ 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
272
275
  'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
273
276
  'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'largest_size', 'smallest_size', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
274
277
  'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
275
278
  'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
279
+ 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
280
+ 'perspective_value': ["", "Either camera-to-person distance (m), or focal length (px), or field-of-view (degrees or radians), or '' if perspective_unit=='from_calib'"],
281
+ 'perspective_unit': ["", "'distance_m', 'f_px', 'fov_deg', 'fov_rad', or 'from_calib'"],
282
+ 'do_ik': ["", "do inverse kinematics. false if not specified"],
283
+ 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
276
284
  'load_trc_px': ["", "load trc file to avaid running pose estimation again. false if not specified"],
277
285
  'compare': ["", "visually compare motion with trc file. false if not specified"],
278
- 'webcam_id': ["w", "webcam ID. 0 if not specified"],
279
- 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
280
286
  'video_dir': ["d", "current directory if not specified"],
281
287
  'result_dir': ["r", "current directory if not specified"],
288
+ 'webcam_id': ["w", "webcam ID. 0 if not specified"],
282
289
  'show_realtime_results': ["R", "show results in real-time. true if not specified"],
283
290
  'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
284
291
  'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
@@ -303,11 +310,8 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
303
310
  'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
304
311
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
305
312
  'save_calib': ["", "save calibration file. true if not specified"],
306
- 'do_ik': ["", "do inverse kinematics. false if not specified"],
307
- 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
308
313
  'feet_on_floor': ["", "offset marker augmentation results so that feet are at floor level. true if not specified"],
309
314
  'use_simple_model': ["", "IK 10+ times faster, but no muscles or flexible spine, no patella. false if not specified"],
310
- 'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
311
315
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
312
316
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
313
317
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
@@ -36,6 +36,9 @@ __status__ = "Development"
36
36
 
37
37
 
38
38
  ## CONSTANTS
39
+ # 4 points joint angle: between the knee-ankle line and the toe-heel line. Add 90° offset and multiply by 1
40
+ # 3 points joint angle: angle at the middle point, e.g. between the ankle-knee and knee-hip lines. -180° offset, multiply by -1
41
+ # 2 points segment angle: between the horizontal and the segment, e.g. ankle-knee. 0° offset, multiply by -1
39
42
  angle_dict = { # lowercase!
40
43
  # joint angles
41
44
  'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1],
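  A sketch of how an entry like the one above can be read, following the offset/multiplier convention in the new comments: a segment angle is measured from the horizontal between two keypoints, a joint angle between two such lines, then the listed offset is added and the result multiplied by the listed sign. Point order and sign conventions here are illustrative only, not the package's own implementation (image `y` points down, hence the flipped y difference):

  ``` python
  import math

  def segment_angle_deg(p1, p2):
      # Angle of the p1 -> p2 segment relative to the horizontal (image y points down)
      return math.degrees(math.atan2(p1[1] - p2[1], p2[0] - p1[0]))

  def apply_convention(raw_angle_deg, offset_deg, multiplier):
      # Same pattern as the offset and multiplier columns of angle_dict
      return (raw_angle_deg + offset_deg) * multiplier

  # 'right ankle': [['RKnee', 'RAnkle', 'RBigToe', 'RHeel'], 'dorsiflexion', 90, 1]
  knee, ankle, big_toe, heel = (410, 520), (400, 660), (460, 690), (380, 700)
  shank_angle = segment_angle_deg(knee, ankle)
  foot_angle = segment_angle_deg(big_toe, heel)
  dorsiflexion = apply_convention(foot_angle - shank_angle, offset_deg=90, multiplier=1)
  ```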
@@ -96,7 +96,7 @@ def test_workflow():
96
96
 
97
97
  # With no pixels to meters conversion, one person to select, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
98
98
  demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
99
- # "--calib_file", "calib_demo.toml",
99
+ "--floor_angle", "from_calib", "--xy_origin", "from_calib", "--perspective_unit", "from_calib", "--calib_file", os.path.join(root_dir, "demo_Sports2D", "demo_Sports2D_calib.toml"),
100
100
  "--nb_persons_to_detect", "1", "--person_ordering_method", "greatest_displacement",
101
101
  "--mode", "lightweight", "--det_frequency", "50",
102
102
  "--slowmo_factor", "4",
@@ -104,9 +104,10 @@ def test_workflow():
104
104
  "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
105
105
  subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8', errors='replace')
106
106
 
107
- # With a time range, inverse kinematics, marker augmentation
107
+ # With a time range, inverse kinematics, marker augmentation, perspective value in fov
108
108
  demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False", "--save_graphs", "False",
109
109
  "--time_range", "1.2", "2.7",
110
+ "--perspective_value", "40", "--perspective_unit", "fov_deg",
110
111
  "--do_ik", "True", "--use_augmentation", "True",
111
112
  "--nb_persons_to_detect", "all", "--first_person_height", "1.65",
112
113
  "--visible_side", "auto", "front", "--participant_mass", "55.0", "67.0"]