sports2d 0.5.6__tar.gz → 0.6.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25) hide show
  1. {sports2d-0.5.6 → sports2d-0.6.2}/PKG-INFO +108 -9
  2. {sports2d-0.5.6 → sports2d-0.6.2}/README.md +105 -7
  3. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/Demo/Config_demo.toml +116 -8
  4. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/Sports2D.py +23 -10
  5. sports2d-0.6.2/Sports2D/Utilities/common.py +1042 -0
  6. sports2d-0.6.2/Sports2D/Utilities/skeletons.py +1002 -0
  7. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/Utilities/tests.py +12 -5
  8. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/process.py +238 -565
  9. {sports2d-0.5.6 → sports2d-0.6.2}/setup.cfg +2 -1
  10. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/PKG-INFO +108 -9
  11. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/requires.txt +1 -0
  12. sports2d-0.5.6/Sports2D/Utilities/common.py +0 -400
  13. sports2d-0.5.6/Sports2D/Utilities/skeletons.py +0 -491
  14. {sports2d-0.5.6 → sports2d-0.6.2}/LICENSE +0 -0
  15. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/Demo/demo.mp4 +0 -0
  16. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/Utilities/__init__.py +0 -0
  17. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/Utilities/filter.py +0 -0
  18. {sports2d-0.5.6 → sports2d-0.6.2}/Sports2D/__init__.py +0 -0
  19. {sports2d-0.5.6 → sports2d-0.6.2}/pyproject.toml +0 -0
  20. {sports2d-0.5.6 → sports2d-0.6.2}/setup.py +0 -0
  21. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/SOURCES.txt +0 -0
  22. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/dependency_links.txt +0 -0
  23. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/entry_points.txt +0 -0
  24. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/not-zip-safe +0 -0
  25. {sports2d-0.5.6 → sports2d-0.6.2}/sports2d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.2
2
2
  Name: sports2d
3
- Version: 0.5.6
3
+ Version: 0.6.2
4
4
  Summary: Detect pose and compute 2D joint angles from a video.
5
5
  Home-page: https://github.com/davidpagnon/Sports2D
6
6
  Author: David Pagnon
@@ -37,6 +37,7 @@ Requires-Dist: rtmlib
37
37
  Requires-Dist: openvino
38
38
  Requires-Dist: tqdm
39
39
  Requires-Dist: imageio_ffmpeg
40
+ Requires-Dist: deep-sort-realtime
40
41
 
41
42
 
42
43
  [![Continuous integration](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml/badge.svg?branch=main)](https://github.com/davidpagnon/sports2d/actions/workflows/continuous-integration.yml)
@@ -96,7 +97,8 @@ If you need 3D research-grade markerless joint kinematics, consider using severa
96
97
  2. [Go further](#go-further)
97
98
  1. [Too slow for you?](#too-slow-for-you)
98
99
  2. [What you need is what you get](#what-you-need-is-what-you-get)
99
- 3. [How it works](#how-it-works)
100
+ 3. [All the parameters](#all-the-parameters)
101
+ 4. [How it works](#how-it-works)
100
102
  3. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
101
103
 
102
104
  <br>
@@ -160,12 +162,13 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
160
162
  - One person walking in the sagittal plane
161
163
  - One person doing jumping jacks in the frontal plane. This person then performs a flip while being backlit, both of which are challenging for the pose detection algorithm
162
164
  - One tiny person flickering in the background who needs to be ignored
165
+ - The first person is starting high and ending low on the image, which messes up the automatic floor angle calculation. You can set it up manually with the parameter `--floor_angle 0`
163
166
 
164
167
  <br>
165
168
 
166
169
  ### Play with the parameters
167
170
 
168
- For a full list of the available parameters, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type:
171
+ For a full list of the available parameters, see [this section](#all-the-parameters) of the documentation, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file, or type:
169
172
  ``` cmd
170
173
  sports2d --help
171
174
  ```
@@ -208,7 +211,10 @@ Note that it does not take distortions into account, and that it will be less ac
208
211
  sports2d --show_graphs False --time_range 1.2 2.7 --result_dir path_to_result_dir --slowmo_factor 4
209
212
  ```
210
213
  ``` cmd
211
- sports2d --multiperson false --mode lightweight --det_frequency 50
214
+ sports2d --multiperson false --pose_model Body --mode lightweight --det_frequency 50
215
+ ```
216
+ ``` cmd
217
+ sports2d --tracking_mode deepsort --deepsort_params """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}"""
212
218
  ```
213
219
  <br>
214
220
 
@@ -234,10 +240,20 @@ Note that it does not take distortions into account, and that it will be less ac
234
240
 
235
241
  **Quick fixes:**
236
242
  - Use ` --save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
237
- - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.
243
+ - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.\
244
+ Note that any detection and pose models can be used (first [deploy them with MMPose](https://mmpose.readthedocs.io/en/latest/user_guides/how_to_deploy.html#onnx) if you do not have their .onnx or .zip files), with the following formalism:
245
+ ```
246
+ --mode """{'det_class':'YOLOX',
247
+ 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_nano_8xb8-300e_humanart-40f6f0d0.zip',
248
+ 'det_input_size':[416,416],
249
+ 'pose_class':'RTMPose',
250
+ 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.zip',
251
+ 'pose_input_size':[192,256]}"""
252
+ ```
238
253
  - Use `--det_frequency 50`: Will detect poses only every 50 frames, and track keypoints in between, which is faster.
239
254
  - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
240
255
  - Use `--load_trc <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.
256
+ - Use `--tracking_mode sports2d`: Will use the default Sports2D tracker. Unlike DeepSort, it is faster, does not require any parametrization, and is as good in non-crowded scenes.
241
257
 
242
258
  <br>
243
259
 
@@ -278,9 +294,9 @@ Will be much faster, with no impact on accuracy. However, the installation takes
278
294
  <br>
279
295
 
280
296
  #### Customize your output:
281
- - Choose whether you want video, images, trc pose file, angle mot file, and real-time display:
297
+ - Choose whether you want video, images, trc pose file, angle mot file, real-time display, and plots:
282
298
  ```cmd
283
- sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false
299
+ sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false --show_graphs false
284
300
  ```
285
301
  - Choose which angles you need:
286
302
  ```cmd
@@ -355,6 +371,85 @@ sports2d --time_range 1.2 2.7 --ik true --person_orientation front none left
355
371
 
356
372
  <br>
357
373
 
374
+
375
+ ### All the parameters
376
+
377
+ For a full list of the available parameters, have a look at the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type:
378
+
379
+ ``` cmd
380
+ sports2d --help
381
+ ```
382
+
383
+ ```
384
+ 'config': ["C", "path to a toml configuration file"],
385
+
386
+ 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
387
+ 'person_height': ["H", "height of the person in meters. 1.70 if not specified"],
388
+ 'load_trc': ["", "load trc file to avoid running pose estimation again. false if not specified"],
389
+ 'compare': ["", "visually compare motion with trc file. false if not specified"],
390
+ 'webcam_id': ["w", "webcam ID. 0 if not specified"],
391
+ 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
392
+ 'video_dir': ["d", "current directory if not specified"],
393
+ 'result_dir': ["r", "current directory if not specified"],
394
+ 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
395
+ 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
396
+ 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
397
+ 'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
398
+ 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
399
+ 'save_vid': ["V", "save processed video. true if not specified"],
400
+ 'save_img': ["I", "save processed images. true if not specified"],
401
+ 'save_pose': ["P", "save pose as trc files. true if not specified"],
402
+ 'calculate_angles': ["c", "calculate joint and segment angles. true if not specified"],
403
+ 'save_angles': ["A", "save angles as mot files. true if not specified"],
404
+ 'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
405
+ 'pose_model': ["p", "only body_with_feet is available for now. body_with_feet if not specified"],
406
+ 'mode': ["m", "lightweight, balanced, or performance. balanced if not specified"],
407
+ 'det_frequency': ["f", "run person detection only every N frames, and in between track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
408
+ Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
409
+ 'to_meters': ["M", "convert pixels to meters. true if not specified"],
410
+
411
+ 'backend': ["", "Backend for pose estimation can be 'auto', 'onnxruntime', 'openvino', or 'opencv'"],
412
+ 'device': ["", "Device for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
413
+ 'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
414
+ 'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
415
+ 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
416
+ 'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
417
+ 'save_calib': ["", "save calibration file. true if not specified"],
418
+ 'do_ik': ["", "do inverse kinematics. false if not specified"],
419
+ 'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
420
+ 'person_orientation': ["", "front, back, left, right, auto, or none. 'front none left' if not specified. If 'auto', will be either left or right depending on the direction of the motion."],
421
+ 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
422
+ 'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
423
+ 'tracking_mode': ["", "sports2d or deepsort. sports2d is faster, needs no parametrization, and is as accurate in non-crowded scenes. sports2d if not specified"],
424
+ 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
425
+ More information here: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
426
+ 'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
427
+ 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
428
+ 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
429
+ 'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
430
+ 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
431
+ 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
432
+ 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
433
+ 'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
434
+ 'fontSize': ["", "font size for angle values. 0.3 if not specified"],
435
+ 'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participant switches their stance. true if not specified"],
436
+ 'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles with regard to the floor angle. Set to false if the floor is tilted instead. True if not specified"],
437
+ 'interpolate': ["", "interpolate missing data. true if not specified"],
438
+ 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
439
+ 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
440
+ 'filter': ["", "filter results. true if not specified"],
441
+ 'filter_type': ["", "butterworth, gaussian, median, or loess. butterworth if not specified"],
442
+ 'order': ["", "order of the Butterworth filter. 4 if not specified"],
443
+ 'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 3 if not specified"],
444
+ 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
445
+ 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
446
+ 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
447
+ 'use_custom_logging': ["", "use custom logging. false if not specified"]
448
+ ```
449
+
450
+ <br>
451
+
452
+
358
453
  ### How it works
359
454
 
360
455
  Sports2D:
@@ -372,7 +467,7 @@ Sports2D:
372
467
 
373
468
  2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, keypoints can be tracked instead of detected for a certain number of frames. Any RTMPose model can be used.
374
469
 
375
- 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames. This carefully crafted `sports2d` tracker runs at a comparable speed as the RTMlib one but is much more robust. The user can still choose the RTMLib method if they need it by specifying it in the Config.toml file.
470
+ 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears for a few frames. We crafted a `sports2d` tracker which gives good results and runs in real time, but it is also possible to use `deepsort` in particularly challenging situations.
376
471
 
377
472
  4. **Chooses the right persons to keep.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, only retrieves the keypoints with high enough confidence, and only keeps the persons with high enough average confidence over each frame.
378
473
 
@@ -455,7 +550,11 @@ If you want to contribute to Sports2D, please follow [this guide](https://docs.g
455
550
  - [x] Option to only save one person (with the highest average score, or with the most frames and fastest speed)
456
551
  - [x] Run again without pose estimation with the option `--load_trc` for px .trc file.
457
552
  - [x] **Convert positions to meters** by providing the person height, a calibration file, or 3D points [to click on the image](https://stackoverflow.com/questions/74248955/how-to-display-the-coordinates-of-the-points-clicked-on-the-image-in-google-cola)
553
+ - [x] Support any detection and/or pose estimation model.
554
+
458
555
  - [ ] Perform **Inverse kinematics and dynamics** with OpenSim (cf. [Pose2Sim](https://github.com/perfanalytics/pose2sim), but in 2D). Update [this model](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Utilities/2D_gait.osim) (add arms, markers, remove muscles and contact spheres). Add pipeline example.
556
+ - [ ] Optionally let user select the person of interest in single_person mode:\
557
+ `multiperson = true # true, or 'single_auto', or 'single_click'. 'single_auto' selects the person with highest average likelihood, and 'single_click' lets the user manually select the person of interest.`
459
558
  - [ ] Run with the option `--compare_to` to visually compare motion with a trc file. If run with a webcam input, the user can follow the motion of the trc file. Further calculation can then be done to compare specific variables.
460
559
  - [ ] **Colab version**: more user-friendly, usable on a smartphone.
461
560
  - [ ] **GUI applications** for Windows, Mac, and Linux, as well as for Android and iOS.
@@ -56,7 +56,8 @@ If you need 3D research-grade markerless joint kinematics, consider using severa
56
56
  2. [Go further](#go-further)
57
57
  1. [Too slow for you?](#too-slow-for-you)
58
58
  2. [What you need is what you get](#what-you-need-is-what-you-get)
59
- 3. [How it works](#how-it-works)
59
+ 3. [All the parameters](#all-the-parameters)
60
+ 4. [How it works](#how-it-works)
60
61
  3. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)
61
62
 
62
63
  <br>
@@ -120,12 +121,13 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
120
121
  - One person walking in the sagittal plane
121
122
  - One person doing jumping jacks in the frontal plane. This person then performs a flip while being backlit, both of which are challenging for the pose detection algorithm
122
123
  - One tiny person flickering in the background who needs to be ignored
124
+ - The first person is starting high and ending low on the image, which messes up the automatic floor angle calculation. You can set it up manually with the parameter `--floor_angle 0`
123
125
 
124
126
  <br>
125
127
 
126
128
  ### Play with the parameters
127
129
 
128
- For a full list of the available parameters, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type:
130
+ For a full list of the available parameters, see [this section](#all-the-parameters) of the documentation, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file, or type:
129
131
  ``` cmd
130
132
  sports2d --help
131
133
  ```
@@ -168,7 +170,10 @@ Note that it does not take distortions into account, and that it will be less ac
168
170
  sports2d --show_graphs False --time_range 1.2 2.7 --result_dir path_to_result_dir --slowmo_factor 4
169
171
  ```
170
172
  ``` cmd
171
- sports2d --multiperson false --mode lightweight --det_frequency 50
173
+ sports2d --multiperson false --pose_model Body --mode lightweight --det_frequency 50
174
+ ```
175
+ ``` cmd
176
+ sports2d --tracking_mode deepsort --deepsort_params """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}"""
172
177
  ```
173
178
  <br>
174
179
 
@@ -194,10 +199,20 @@ Note that it does not take distortions into account, and that it will be less ac
194
199
 
195
200
  **Quick fixes:**
196
201
  - Use ` --save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
197
- - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.
202
+ - Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.\
203
+ Note that any detection and pose models can be used (first [deploy them with MMPose](https://mmpose.readthedocs.io/en/latest/user_guides/how_to_deploy.html#onnx) if you do not have their .onnx or .zip files), with the following formalism:
204
+ ```
205
+ --mode """{'det_class':'YOLOX',
206
+ 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_nano_8xb8-300e_humanart-40f6f0d0.zip',
207
+ 'det_input_size':[416,416],
208
+ 'pose_class':'RTMPose',
209
+ 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.zip',
210
+ 'pose_input_size':[192,256]}"""
211
+ ```
198
212
  - Use `--det_frequency 50`: Will detect poses only every 50 frames, and track keypoints in between, which is faster.
199
213
  - Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
200
214
  - Use `--load_trc <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.
215
+ - Use `--tracking_mode sports2d`: Will use the default Sports2D tracker. Unlike DeepSort, it is faster, does not require any parametrization, and is as good in non-crowded scenes.
201
216
 
202
217
  <br>
203
218
 
@@ -238,9 +253,9 @@ Will be much faster, with no impact on accuracy. However, the installation takes
238
253
  <br>
239
254
 
240
255
  #### Customize your output:
241
- - Choose whether you want video, images, trc pose file, angle mot file, and real-time display:
256
+ - Choose whether you want video, images, trc pose file, angle mot file, real-time display, and plots:
242
257
  ```cmd
243
- sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false
258
+ sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false --show_graphs false
244
259
  ```
245
260
  - Choose which angles you need:
246
261
  ```cmd
@@ -315,6 +330,85 @@ sports2d --time_range 1.2 2.7 --ik true --person_orientation front none left
315
330
 
316
331
  <br>
317
332
 
333
+
334
+ ### All the parameters
335
+
336
+ For a full list of the available parameters, have a look at the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type:
337
+
338
+ ``` cmd
339
+ sports2d --help
340
+ ```
341
+
342
+ ```
343
+ 'config': ["C", "path to a toml configuration file"],
344
+
345
+ 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
346
+ 'person_height': ["H", "height of the person in meters. 1.70 if not specified"],
347
+ 'load_trc': ["", "load trc file to avoid running pose estimation again. false if not specified"],
348
+ 'compare': ["", "visually compare motion with trc file. false if not specified"],
349
+ 'webcam_id': ["w", "webcam ID. 0 if not specified"],
350
+ 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
351
+ 'video_dir': ["d", "current directory if not specified"],
352
+ 'result_dir': ["r", "current directory if not specified"],
353
+ 'show_realtime_results': ["R", "show results in real-time. true if not specified"],
354
+ 'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
355
+ 'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
356
+ 'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
357
+ 'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
358
+ 'save_vid': ["V", "save processed video. true if not specified"],
359
+ 'save_img': ["I", "save processed images. true if not specified"],
360
+ 'save_pose': ["P", "save pose as trc files. true if not specified"],
361
+ 'calculate_angles': ["c", "calculate joint and segment angles. true if not specified"],
362
+ 'save_angles': ["A", "save angles as mot files. true if not specified"],
363
+ 'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
364
+ 'pose_model': ["p", "only body_with_feet is available for now. body_with_feet if not specified"],
365
+ 'mode': ["m", "lightweight, balanced, or performance. balanced if not specified"],
366
+ 'det_frequency': ["f", "run person detection only every N frames, and in between track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
367
+ Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
368
+ 'to_meters': ["M", "convert pixels to meters. true if not specified"],
369
+
370
+ 'backend': ["", "Backend for pose estimation can be 'auto', 'onnxruntime', 'openvino', or 'opencv'"],
371
+ 'device': ["", "Device for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
372
+ 'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
373
+ 'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
374
+ 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
375
+ 'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
376
+ 'save_calib': ["", "save calibration file. true if not specified"],
377
+ 'do_ik': ["", "do inverse kinematics. false if not specified"],
378
+ 'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
379
+ 'person_orientation': ["", "front, back, left, right, auto, or none. 'front none left' if not specified. If 'auto', will be either left or right depending on the direction of the motion."],
380
+ 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
381
+ 'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
382
+ 'tracking_mode': ["", "sports2d or deepsort. sports2d is faster, needs no parametrization, and is as accurate in non-crowded scenes. sports2d if not specified"],
383
+ 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
384
+ More information here: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
385
+ 'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
386
+ 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
387
+ 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
388
+ 'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., 30 percent"],
389
+ 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
390
+ 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
391
+ 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
392
+ 'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
393
+ 'fontSize': ["", "font size for angle values. 0.3 if not specified"],
394
+ 'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participant switches their stance. true if not specified"],
395
+ 'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles with regard to the floor angle. Set to false if the floor is tilted instead. True if not specified"],
396
+ 'interpolate': ["", "interpolate missing data. true if not specified"],
397
+ 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
398
+ 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
399
+ 'filter': ["", "filter results. true if not specified"],
400
+ 'filter_type': ["", "butterworth, gaussian, median, or loess. butterworth if not specified"],
401
+ 'order': ["", "order of the Butterworth filter. 4 if not specified"],
402
+ 'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 3 if not specified"],
403
+ 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
404
+ 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
405
+ 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
406
+ 'use_custom_logging': ["", "use custom logging. false if not specified"]
407
+ ```
408
+
409
+ <br>
410
+
411
+
318
412
  ### How it works
319
413
 
320
414
  Sports2D:
@@ -332,7 +426,7 @@ Sports2D:
332
426
 
333
427
  2. **Sets up pose estimation with RTMLib.** It can be run in lightweight, balanced, or performance mode, and for faster inference, keypoints can be tracked instead of detected for a certain number of frames. Any RTMPose model can be used.
334
428
 
335
- 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears from a few frames. This carefully crafted `sports2d` tracker runs at a comparable speed as the RTMlib one but is much more robust. The user can still choose the RTMLib method if they need it by specifying it in the Config.toml file.
429
+ 3. **Tracks people** so that their IDs are consistent across frames. A person is associated to another in the next frame when they are at a small distance. IDs remain consistent even if the person disappears for a few frames. We crafted a `sports2d` tracker which gives good results and runs in real time, but it is also possible to use `deepsort` in particularly challenging situations.
336
430
 
337
431
  4. **Chooses the right persons to keep.** In single-person mode, only keeps the person with the highest average scores over the sequence. In multi-person mode, only retrieves the keypoints with high enough confidence, and only keeps the persons with high enough average confidence over each frame.
338
432
 
@@ -415,7 +509,11 @@ If you want to contribute to Sports2D, please follow [this guide](https://docs.g
415
509
  - [x] Option to only save one person (with the highest average score, or with the most frames and fastest speed)
416
510
  - [x] Run again without pose estimation with the option `--load_trc` for px .trc file.
417
511
  - [x] **Convert positions to meters** by providing the person height, a calibration file, or 3D points [to click on the image](https://stackoverflow.com/questions/74248955/how-to-display-the-coordinates-of-the-points-clicked-on-the-image-in-google-cola)
512
+ - [x] Support any detection and/or pose estimation model.
513
+
418
514
  - [ ] Perform **Inverse kinematics and dynamics** with OpenSim (cf. [Pose2Sim](https://github.com/perfanalytics/pose2sim), but in 2D). Update [this model](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Utilities/2D_gait.osim) (add arms, markers, remove muscles and contact spheres). Add pipeline example.
515
+ - [ ] Optionally let user select the person of interest in single_person mode:\
516
+ `multiperson = true # true, or 'single_auto', or 'single_click'. 'single_auto' selects the person with highest average likelihood, and 'single_click' lets the user manually select the person of interest.`
419
517
  - [ ] Run with the option `--compare_to` to visually compare motion with a trc file. If run with a webcam input, the user can follow the motion of the trc file. Further calculation can then be done to compare specific variables.
420
518
  - [ ] **Colab version**: more user-friendly, usable on a smartphone.
421
519
  - [ ] **GUI applications** for Windows, Mac, and Linux, as well as for Android and iOS.
@@ -13,7 +13,7 @@
13
13
 
14
14
  [project]
15
15
  video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi>', ...]
16
- # Time ranges can be different for each video. All other processing arguments will be identical.
16
+ # On Windows, replace '\' with '/'
17
17
  # Beware that images won't be saved if paths contain non ASCII characters.
18
18
  person_height = 1.70 # Height of the person in meters (for pixels -> meters conversion)
19
19
  load_trc = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
@@ -21,6 +21,7 @@ compare = false # Not implemented yet
21
21
 
22
22
  # Video parameters
23
23
  time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
24
+ # Time ranges can be different for each video.
24
25
  video_dir = '' # If empty, video dir is current dir
25
26
 
26
27
  # Webcam parameters
@@ -48,16 +49,39 @@ result_dir = '' # If empty, project dir is current dir
48
49
  slowmo_factor = 1 # 1 for normal speed. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8
49
50
 
50
51
  # Pose detection parameters
51
- pose_model = 'body_with_feet' # Only body_with_feet is available for now
52
- mode = 'balanced' # 'lightweight', 'balanced', or 'performance'
53
- det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
52
+ pose_model = 'Body_with_feet' # With RTMLib: Body_with_feet (default HALPE_26 model), Whole_body (COCO_133: body + feet + hands), Body (COCO_17), CUSTOM (see example at the end of the file), or any from skeletons.py
53
+ mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
54
+
55
+ # A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
56
+ # Models can be local paths or URLs.
57
+ # Make sure the input_sizes are within square brackets, and that they are in the opposite order from the one in the model path (for example, it would be [192,256] for rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip).
58
+ # If your pose_model is not provided in skeletons.py, you may have to create your own one (see example at the end of the file).
59
+ # Example, equivalent to mode='balanced':
60
+ # mode = """{'det_class':'YOLOX',
61
+ # 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
62
+ # 'det_input_size':[640, 640],
63
+ # 'pose_class':'RTMPose',
64
+ # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
65
+ # 'pose_input_size':[192,256]}"""
66
+ # Example with one-stage RTMO model (Requires pose_model = 'Body'):
67
+ # mode = """{'pose_class':'RTMO',
68
+ # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
69
+ # 'pose_input_size':[640, 640]}"""
70
+
71
+ det_frequency = 4 # Run person detection only every N frames, and in between, track previously detected bounding boxes (keypoint detection is still run on all frames).
54
72
  # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
55
- tracking_mode = 'sports2d' # 'rtmlib' or 'sports2d'. 'sports2d' is generally much more accurate and comparable in speed
73
+ device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
74
+ backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
75
+ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
76
+ deepsort_params = """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}""" # """{dictionary between 3 double quotes}"""
77
+ # More robust in crowded scenes but can be tricky to parametrize. More information here: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
78
+ # Note: For even more robust tracking, use 'embedder':'torchreid', which runs osnet_ain_x1_0 by default. Install additional dependencies with: `pip install torchreid gdown tensorboard`
79
+
56
80
 
57
81
  # Processing parameters
58
82
  keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
59
83
  average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
60
- keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints is less than this fraction
84
+ keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
61
85
 
62
86
 
63
87
  [px_to_meters_conversion]
@@ -83,13 +107,14 @@ fontSize = 0.3
83
107
 
84
108
  # Select joint angles among
85
109
  # ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
86
- joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow']
110
+ joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
87
111
  # Select segment angles among
88
112
  # ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
89
113
  segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
90
114
 
91
115
  # Processing parameters
92
116
  flip_left_right = true # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
117
+ correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead
93
118
 
94
119
 
95
120
  [post-processing]
@@ -121,5 +146,88 @@ person_orientation = ['front', 'none', 'left'] # Choose among 'auto', 'none', 'f
121
146
  osim_setup_path = '../OpenSim_setup' # Path to the OpenSim setup folder
122
147
  close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
123
148
 
149
+
124
150
  [logging]
125
- use_custom_logging = false # if integrated in an API that already has logging
151
+ use_custom_logging = false # if integrated in an API that already has logging
152
+
153
+
154
+
155
+ # CUSTOM skeleton
156
+ # If you use a model with different keypoints and/or different ordering
157
+ # Useful if you trained your own model, from DeepLabCut or MMPose for example.
158
+ # Make sure the ids are set in the right order and start from zero.
159
+ #
160
+ # If you want to perform inverse kinematics, you will also need to create an OpenSim model
161
+ # and add to its markerset the location where you expect the triangulated keypoints to be detected.
162
+ #
163
+ # In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
164
+ # You can create as many custom skeletons as you want, just add them further down and rename them.
165
+ #
166
+ # Check your model hierarchy with: for pre, _, node in RenderTree(model):
167
+ # print(f'{pre}{node.name} id={node.id}')
168
+ [pose.CUSTOM]
169
+ name = "Hip"
170
+ id = 19
171
+ [[pose.CUSTOM.children]]
172
+ name = "RHip"
173
+ id = 12
174
+ [[pose.CUSTOM.children.children]]
175
+ name = "RKnee"
176
+ id = 14
177
+ [[pose.CUSTOM.children.children.children]]
178
+ name = "RAnkle"
179
+ id = 16
180
+ [[pose.CUSTOM.children.children.children.children]]
181
+ name = "RBigToe"
182
+ id = 21
183
+ [[pose.CUSTOM.children.children.children.children.children]]
184
+ name = "RSmallToe"
185
+ id = 23
186
+ [[pose.CUSTOM.children.children.children.children]]
187
+ name = "RHeel"
188
+ id = 25
189
+ [[pose.CUSTOM.children]]
190
+ name = "LHip"
191
+ id = 11
192
+ [[pose.CUSTOM.children.children]]
193
+ name = "LKnee"
194
+ id = 13
195
+ [[pose.CUSTOM.children.children.children]]
196
+ name = "LAnkle"
197
+ id = 15
198
+ [[pose.CUSTOM.children.children.children.children]]
199
+ name = "LBigToe"
200
+ id = 20
201
+ [[pose.CUSTOM.children.children.children.children.children]]
202
+ name = "LSmallToe"
203
+ id = 22
204
+ [[pose.CUSTOM.children.children.children.children]]
205
+ name = "LHeel"
206
+ id = 24
207
+ [[pose.CUSTOM.children]]
208
+ name = "Neck"
209
+ id = 18
210
+ [[pose.CUSTOM.children.children]]
211
+ name = "Head"
212
+ id = 17
213
+ [[pose.CUSTOM.children.children.children]]
214
+ name = "Nose"
215
+ id = 0
216
+ [[pose.CUSTOM.children.children]]
217
+ name = "RShoulder"
218
+ id = 6
219
+ [[pose.CUSTOM.children.children.children]]
220
+ name = "RElbow"
221
+ id = 8
222
+ [[pose.CUSTOM.children.children.children.children]]
223
+ name = "RWrist"
224
+ id = 10
225
+ [[pose.CUSTOM.children.children]]
226
+ name = "LShoulder"
227
+ id = 5
228
+ [[pose.CUSTOM.children.children.children]]
229
+ name = "LElbow"
230
+ id = 7
231
+ [[pose.CUSTOM.children.children.children.children]]
232
+ name = "LWrist"
233
+ id = 9