sports2d 0.5.5.tar.gz → 0.6.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sports2d-0.5.5 → sports2d-0.6.1}/PKG-INFO +102 -10
- {sports2d-0.5.5 → sports2d-0.6.1}/README.md +99 -7
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/Demo/Config_demo.toml +111 -6
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/Sports2D.py +18 -9
- sports2d-0.6.1/Sports2D/Utilities/common.py +691 -0
- sports2d-0.6.1/Sports2D/Utilities/skeletons.py +1002 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/Utilities/tests.py +12 -5
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/process.py +223 -310
- {sports2d-0.5.5 → sports2d-0.6.1}/setup.cfg +2 -2
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/PKG-INFO +102 -10
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/requires.txt +1 -1
- sports2d-0.5.5/Sports2D/Utilities/common.py +0 -400
- sports2d-0.5.5/Sports2D/Utilities/skeletons.py +0 -491
- {sports2d-0.5.5 → sports2d-0.6.1}/LICENSE +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/Demo/demo.mp4 +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/Utilities/__init__.py +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/Utilities/filter.py +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/Sports2D/__init__.py +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/pyproject.toml +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/setup.py +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/SOURCES.txt +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/dependency_links.txt +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/entry_points.txt +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/not-zip-safe +0 -0
- {sports2d-0.5.5 → sports2d-0.6.1}/sports2d.egg-info/top_level.txt +0 -0
````diff
--- sports2d-0.5.5/PKG-INFO
+++ sports2d-0.6.1/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.2
 Name: sports2d
-Version: 0.5.5
+Version: 0.6.1
 Summary: Detect pose and compute 2D joint angles from a video.
 Home-page: https://github.com/davidpagnon/Sports2D
 Author: David Pagnon
@@ -33,7 +33,7 @@ Requires-Dist: opencv-python
 Requires-Dist: matplotlib
 Requires-Dist: PyQt5
 Requires-Dist: statsmodels
-Requires-Dist:
+Requires-Dist: rtmlib_pose2sim
 Requires-Dist: openvino
 Requires-Dist: tqdm
 Requires-Dist: imageio_ffmpeg
@@ -96,7 +96,8 @@ If you need 3D research-grade markerless joint kinematics, consider using severa
 2. [Go further](#go-further)
    1. [Too slow for you?](#too-slow-for-you)
    2. [What you need is what you get](#what-you-need-is-what-you-get)
-   3. [How it works](#how-it-works)
+   3. [All the parameters](#all-the-parameters)
+   4. [How it works](#how-it-works)
 3. [How to cite and how to contribute](#how-to-cite-and-how-to-contribute)

 <br>
@@ -160,12 +161,13 @@ The Demo video is voluntarily challenging to demonstrate the robustness of the p
 - One person walking in the sagittal plane
 - One person doing jumping jacks in the frontal plane. This person then performs a flip while being backlit, both of which are challenging for the pose detection algorithm
 - One tiny person flickering in the background who needs to be ignored
+- The first person is starting high and ending low on the image, which messes up the automatic floor angle calculation. You can set it up manually with the parameter `--floor_angle 0`

 <br>

 ### Play with the parameters

-For a full list of the available parameters, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type:
+For a full list of the available parameters, see [this section](#all-the-parameters) of the documentation, check the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file, or type:
 ``` cmd
 sports2d --help
 ```
@@ -208,7 +210,7 @@ Note that it does not take distortions into account, and that it will be less ac
 sports2d --show_graphs False --time_range 1.2 2.7 --result_dir path_to_result_dir --slowmo_factor 4
 ```
 ``` cmd
-sports2d --multiperson false --mode lightweight --det_frequency 50
+sports2d --multiperson false --pose_model Body --mode lightweight --det_frequency 50
 ```
 <br>

````
````diff
@@ -233,9 +235,19 @@ Note that it does not take distortions into account, and that it will be less ac
 ### Too slow for you?

 **Quick fixes:**
-- Use
-- Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate
+- Use ` --save_vid false --save_img false --show_realtime_results false`: Will not save images or videos, and will not display the results in real time.
+- Use `--mode lightweight`: Will use a lighter version of RTMPose, which is faster but less accurate.\
+Note that any detection and pose models can be used (first [deploy them with MMPose](https://mmpose.readthedocs.io/en/latest/user_guides/how_to_deploy.html#onnx) if you do not have their .onnx or .zip files), with the following formalism:
+```
+--mode """{'det_class':'YOLOX',
+'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_nano_8xb8-300e_humanart-40f6f0d0.zip',
+'det_input_size':[416,416],
+'pose_class':'RTMPose',
+'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.zip',
+'pose_input_size':[192,256]}"""
+```
 - Use `--det_frequency 50`: Will detect poses only every 50 frames, and track keypoints in between, which is faster.
+- Use `--multiperson false`: Can be used if one single person is present in the video. Otherwise, persons' IDs may be mixed up.
 - Use `--load_trc <path_to_file_px.trc>`: Will use pose estimation results from a file. Useful if you want to use different parameters for pixel to meter conversion or angle calculation without running detection and pose estimation all over.

 <br>
````
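The custom `--mode` value shown in the hunk above is a single triple-quoted string containing a Python dictionary literal. As a minimal sketch (an assumption about usage, not Sports2D's internal code), such a string can be turned into a regular dict with `ast.literal_eval` before the detector and pose estimator are configured:

```python
# Sketch: parse a --mode value like the one above into a dict.
# ast.literal_eval only evaluates literals (strings, numbers, lists, dicts),
# so it is safe for command-line input. Sports2D's actual parsing may differ.
import ast

mode_arg = """{'det_class':'YOLOX',
'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_nano_8xb8-300e_humanart-40f6f0d0.zip',
'det_input_size':[416,416],
'pose_class':'RTMPose',
'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-t_simcc-body7_pt-body7_420e-256x192-026a1439_20230504.zip',
'pose_input_size':[192,256]}"""

mode = ast.literal_eval(mode_arg)
print(mode['det_class'], mode['det_input_size'])   # -> YOLOX [416, 416]
```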
````diff
@@ -277,9 +289,9 @@ Will be much faster, with no impact on accuracy. However, the installation takes
 <br>

 #### Customize your output:
-- Choose whether you want video, images, trc pose file, angle mot file,
+- Choose whether you want video, images, trc pose file, angle mot file, real-time display, and plots:
 ```cmd
-sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false
+sports2d --save_vid false --save_img true --save_pose false --save_angles true --show_realtime_results false --show_graphs false
 ```
 - Choose which angles you need:
 ```cmd
@@ -354,6 +366,82 @@ sports2d --time_range 1.2 2.7 --ik true --person_orientation front none left

 <br>

+
+### All the parameters
+
+Have a look at the [Config_Demo.toml](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Demo/Config_demo.toml) file or type for a full list of the available parameters:
+
+``` cmd
+sports2d --help
+```
+
+```
+['config': "C", "path to a toml configuration file"],
+
+'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
+'person_height': ["H", "height of the person in meters. 1.70 if not specified"],
+'load_trc': ["", "load trc file to avaid running pose estimation again. false if not specified"],
+'compare': ["", "visually compare motion with trc file. false if not specified"],
+'webcam_id': ["w", "webcam ID. 0 if not specified"],
+'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
+'video_dir': ["d", "current directory if not specified"],
+'result_dir': ["r", "current directory if not specified"],
+'show_realtime_results': ["R", "show results in real-time. true if not specified"],
+'display_angle_values_on': ["a", '"body", "list", "body" "list", or "none". body list if not specified'],
+'show_graphs': ["G", "show plots of raw and processed results. true if not specified"],
+'joint_angles': ["j", '"Right ankle" "Left ankle" "Right knee" "Left knee" "Right hip" "Left hip" "Right shoulder" "Left shoulder" "Right elbow" "Left elbow" if not specified'],
+'segment_angles': ["s", '"Right foot" "Left foot" "Right shank" "Left shank" "Right thigh" "Left thigh" "Pelvis" "Trunk" "Shoulders" "Head" "Right arm" "Left arm" "Right forearm" "Left forearm" if not specified'],
+'save_vid': ["V", "save processed video. true if not specified"],
+'save_img': ["I", "save processed images. true if not specified"],
+'save_pose': ["P", "save pose as trc files. true if not specified"],
+'calculate_angles': ["c", "calculate joint and segment angles. true if not specified"],
+'save_angles': ["A", "save angles as mot files. true if not specified"],
+'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
+'pose_model': ["p", "only body_with_feet is available for now. body_with_feet if not specified"],
+'mode': ["m", "light, balanced, or performance. balanced if not specified"],
+'det_frequency': ["f", "run person detection only every N frames, and inbetween track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
+Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
+'to_meters': ["M", "convert pixels to meters. true if not specified"],
+
+'backend': ["", "Backend for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
+'device': ["", "Device for pose estimatino can be 'auto', 'openvino', 'onnxruntime', 'opencv'"],
+'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
+'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
+'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
+'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
+'save_calib': ["", "save calibration file. true if not specified"],
+'do_ik': ["", "do inverse kinematics. false if not specified"],
+'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
+'person_orientation': ["", "front, back, left, right, auto, or none. 'front none left' if not specified. If 'auto', will be either left or right depending on the direction of the motion."],
+'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
+'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"], 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
+'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
+'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
+'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
+'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., i.e., 30 percent"],
+'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
+'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
+'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
+'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
+'fontSize': ["", "font size for angle values. 0.3 if not specified"],
+'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participent switches their stance. true if not specified"],
+'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false is the floor is tilted instead. True if not specified"],
+'interpolate': ["", "interpolate missing data. true if not specified"],
+'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
+'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
+'filter': ["", "filter results. true if not specified"],
+'filter_type': ["", "butterworth, gaussian, median, or loess. butterworth if not specified"],
+'order': ["", "order of the Butterworth filter. 4 if not specified"],
+'cut_off_frequency': ["", "cut-off frequency of the Butterworth filter. 3 if not specified"],
+'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
+'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
+'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
+'use_custom_logging': ["", "use custom logging. false if not specified"]
+```
+
+<br>
+
+
 ### How it works

 Sports2D:
@@ -454,7 +542,11 @@ If you want to contribute to Sports2D, please follow [this guide](https://docs.g
 - [x] Option to only save one person (with the highest average score, or with the most frames and fastest speed)
 - [x] Run again without pose estimation with the option `--load_trc` for px .trc file.
 - [x] **Convert positions to meters** by providing the person height, a calibration file, or 3D points [to click on the image](https://stackoverflow.com/questions/74248955/how-to-display-the-coordinates-of-the-points-clicked-on-the-image-in-google-cola)
+- [x] Support any detection and/or pose estimation model.
+
 - [ ] Perform **Inverse kinematics and dynamics** with OpenSim (cf. [Pose2Sim](https://github.com/perfanalytics/pose2sim), but in 2D). Update [this model](https://github.com/davidpagnon/Sports2D/blob/main/Sports2D/Utilities/2D_gait.osim) (add arms, markers, remove muscles and contact spheres). Add pipeline example.
+- [ ] Optionally let user select the person of interest in single_person mode:\
+`multiperson = true # true, or 'single_auto', or 'single_click'. 'single_auto' selects the person with highest average likelihood, and 'single_click' lets the user manually select the person of interest.`
 - [ ] Run with the option `--compare_to` to visually compare motion with a trc file. If run with a webcam input, the user can follow the motion of the trc file. Further calculation can then be done to compare specific variables.
 - [ ] **Colab version**: more user-friendly, usable on a smartphone.
 - [ ] **GUI applications** for Windows, Mac, and Linux, as well as for Android and iOS.
````
README.md changes (+99 -7): PKG-INFO embeds the README as its long description, so the seven README hunks are identical in content to the PKG-INFO hunks shown above, only at shifted line numbers: @@ -56,7 +56,8 @@ (table of contents), @@ -120,12 +121,13 @@ (demo description and parameters link), @@ -168,7 +170,7 @@ (example command with `--pose_model Body`), @@ -193,9 +195,19 @@ (Too slow for you?), @@ -237,9 +249,9 @@ (customize your output), @@ -314,6 +326,82 @@ (new "All the parameters" section), and @@ -414,7 +502,11 @@ (roadmap).
````diff
--- sports2d-0.5.5/Sports2D/Demo/Config_demo.toml
+++ sports2d-0.6.1/Sports2D/Demo/Config_demo.toml
@@ -13,7 +13,7 @@

 [project]
 video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi>', ...]
-#
+# On Windows, replace '\' with '/'
 # Beware that images won't be saved if paths contain non ASCII characters.
 person_height = 1.70 # Height of the person in meters (for pixels -> meters conversion)
 load_trc = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
@@ -21,7 +21,8 @@ compare = false # Not implemented yet

 # Video parameters
 time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
-
+# Time ranges can be different for each video.
+video_dir = '' # If empty, video dir is current dir

 # Webcam parameters
 webcam_id = 0 # your webcam id (0 is default)
````
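As the new comment above notes, `time_range` accepts three shapes: `[]`, one `[start, end]` pair, or one pair per video. A small illustrative helper (the function name and exact behaviour are assumptions, not Sports2D code) shows how these shapes can be normalised to one range per video:

```python
# Sketch: normalise the three accepted time_range shapes to one entry per video.
def normalize_time_ranges(time_range, n_videos):
    if not time_range:                                # [] -> whole video(s)
        return [None] * n_videos
    if isinstance(time_range[0], (int, float)):       # [start, end] -> same range for every video
        return [tuple(time_range)] * n_videos
    return [tuple(tr) for tr in time_range]           # [[s1, e1], [s2, e2], ...] -> per-video ranges

print(normalize_time_ranges([], 2))                   # [None, None]
print(normalize_time_ranges([1.2, 2.7], 2))           # [(1.2, 2.7), (1.2, 2.7)]
print(normalize_time_ranges([[0, 5], [2, 8]], 2))     # [(0, 5), (2, 8)]
```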
````diff
@@ -48,12 +49,32 @@ result_dir = '' # If empty, project dir is current dir
 slowmo_factor = 1 # 1 for normal speed. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8

 # Pose detection parameters
-pose_model = '
-mode = 'balanced'
+pose_model = 'Body_with_feet' #With RTMLib: Body_with_feet (default HALPE_26 model), Whole_body (COCO_133: body + feet + hands), Body (COCO_17), CUSTOM (see example at the end of the file), or any from skeletons.py
+mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
+
+# A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
+# Models can be local paths or URLs.
+# Make sure the input_sizes are within triple quotes, and that they are in the opposite order from the one in the model path (for example, it would be [192,256] for rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip).
+# If your pose_model is not provided in skeletons.py, you may have to create your own one (see example at the end of the file).
+# Example, equivalent to mode='balanced':
+# mode = """{'det_class':'YOLOX',
+#         'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
+#         'det_input_size':[640, 640],
+#         'pose_class':'RTMPose',
+#         'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
+#         'pose_input_size':[192,256]}"""
+# Example with one-stage RTMO model (Requires pose_model = 'Body'):
+# mode = """{'pose_class':'RTMO',
+#         'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
+#         'pose_input_size':[640, 640]}"""
+
 det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
+device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
+backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
 tracking_mode = 'sports2d' # 'rtmlib' or 'sports2d'. 'sports2d' is generally much more accurate and comparable in speed

+
 # Processing parameters
 keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
 average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
````
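A short sketch of reading these new keys from the demo config. It assumes Python 3.11's `tomllib` and that the keys shown above live in a `[pose]` table (as the `[pose.CUSTOM]` example further down suggests); adjust the section name if your copy of the file differs:

```python
# Sketch: load Config_demo.toml and pick up the pose-detection settings added in 0.6.1.
# Assumption: the keys sit under a [pose] table; 'mode' may be a keyword or a """{dict}""" string.
import ast
import tomllib  # Python >= 3.11; the third-party 'toml' package behaves similarly

with open('Config_demo.toml', 'rb') as f:
    pose_cfg = tomllib.load(f)['pose']

mode = pose_cfg['mode']
if isinstance(mode, str) and mode.strip().startswith('{'):
    mode = ast.literal_eval(mode)            # custom detection/pose dictionary

print(pose_cfg['pose_model'], pose_cfg['device'], pose_cfg['backend'], pose_cfg['det_frequency'], mode)
```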
````diff
@@ -83,13 +104,14 @@ fontSize = 0.3

 # Select joint angles among
 # ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
-joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow']
+joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
 # Select segment angles among
 # ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
 segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']

 # Processing parameters
 flip_left_right = true # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participent switches their stance.
+correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false is the floor is tilted instead


 [post-processing]
@@ -121,5 +143,88 @@ person_orientation = ['front', 'none', 'left'] # Choose among 'auto', 'none', 'f
 osim_setup_path = '../OpenSim_setup' # Path to the OpenSim setup folder
 close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame

+
 [logging]
-use_custom_logging = false # if integrated in an API that already has logging
+use_custom_logging = false # if integrated in an API that already has logging
+
+
+
+# CUSTOM skeleton
+# If you use a model with different keypoints and/or different ordering
+# Useful if you trained your own model, from DeepLabCut or MMPose for example.
+# Make sure the ids are set in the right order and start from zero.
+#
+# If you want to perform inverse kinematics, you will also need to create an OpenSim model
+# and add to its markerset the location where you expect the triangulated keypoints to be detected.
+#
+# In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
+# You can create as many custom skeletons as you want, just add them further down and rename them.
+#
+# Check your model hierarchy with: for pre, _, node in RenderTree(model):
+#                                      print(f'{pre}{node.name} id={node.id}')
+[pose.CUSTOM]
+name = "Hip"
+id = 19
+[[pose.CUSTOM.children]]
+name = "RHip"
+id = 12
+[[pose.CUSTOM.children.children]]
+name = "RKnee"
+id = 14
+[[pose.CUSTOM.children.children.children]]
+name = "RAnkle"
+id = 16
+[[pose.CUSTOM.children.children.children.children]]
+name = "RBigToe"
+id = 21
+[[pose.CUSTOM.children.children.children.children.children]]
+name = "RSmallToe"
+id = 23
+[[pose.CUSTOM.children.children.children.children]]
+name = "RHeel"
+id = 25
+[[pose.CUSTOM.children]]
+name = "LHip"
+id = 11
+[[pose.CUSTOM.children.children]]
+name = "LKnee"
+id = 13
+[[pose.CUSTOM.children.children.children]]
+name = "LAnkle"
+id = 15
+[[pose.CUSTOM.children.children.children.children]]
+name = "LBigToe"
+id = 20
+[[pose.CUSTOM.children.children.children.children.children]]
+name = "LSmallToe"
+id = 22
+[[pose.CUSTOM.children.children.children.children]]
+name = "LHeel"
+id = 24
+[[pose.CUSTOM.children]]
+name = "Neck"
+id = 18
+[[pose.CUSTOM.children.children]]
+name = "Head"
+id = 17
+[[pose.CUSTOM.children.children.children]]
+name = "Nose"
+id = 0
+[[pose.CUSTOM.children.children]]
+name = "RShoulder"
+id = 6
+[[pose.CUSTOM.children.children.children]]
+name = "RElbow"
+id = 8
+[[pose.CUSTOM.children.children.children.children]]
+name = "RWrist"
+id = 10
+[[pose.CUSTOM.children.children]]
+name = "LShoulder"
+id = 5
+[[pose.CUSTOM.children.children.children]]
+name = "LElbow"
+id = 7
+[[pose.CUSTOM.children.children.children.children]]
+name = "LWrist"
+id = 9
````
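The config comment above suggests checking the CUSTOM hierarchy with `RenderTree`. Here is a hedged sketch of doing that end to end: it assumes the `anytree` package and a small illustrative converter from the parsed TOML table to `Node` objects (Sports2D ships its own skeleton handling in skeletons.py, which may differ):

```python
# Sketch: build an anytree hierarchy from the [pose.CUSTOM] table in Config_demo.toml
# and print it the way the config comment suggests.
import tomllib  # Python >= 3.11; the third-party 'toml' package works similarly
from anytree import Node, RenderTree

def dict_to_node(d, parent=None):
    """Recursively turn a {'name': ..., 'id': ..., 'children': [...]} mapping into anytree Nodes."""
    node = Node(d['name'], id=d['id'], parent=parent)
    for child in d.get('children', []):
        dict_to_node(child, parent=node)
    return node

with open('Config_demo.toml', 'rb') as f:
    config = tomllib.load(f)

model = dict_to_node(config['pose']['CUSTOM'])
for pre, _, node in RenderTree(model):      # same check as in the config comment
    print(f'{pre}{node.name} id={node.id}')
```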
````diff
--- sports2d-0.5.5/Sports2D/Sports2D.py
+++ sports2d-0.6.1/Sports2D/Sports2D.py
@@ -143,6 +143,8 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
 'pose_model': 'body_with_feet',
 'mode': 'balanced',
 'det_frequency': 4,
+'device': 'auto',
+'backend': 'auto',
 'tracking_mode': 'sports2d',
 'keypoint_likelihood_threshold': 0.3,
 'average_likelihood_threshold': 0.5,
@@ -171,7 +173,9 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
 'Right shoulder',
 'Left shoulder',
 'Right elbow',
-'Left elbow'
+'Left elbow',
+'Right wrist',
+'Left wrist'],
 'segment_angles': [ 'Right foot',
 'Left foot',
 'Right shank',
@@ -186,7 +190,8 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
 'Left arm',
 'Right forearm',
 'Left forearm'],
-'flip_left_right': True
+'flip_left_right': True,
+'correct_segment_angles_with_floor_angle': True
 },
 'post-processing': {'interpolate': True,
 'interp_gap_smaller_than': 10,
@@ -228,9 +233,11 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'save_angles': ["A", "save angles as mot files. true if not specified"],
 'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
 'pose_model': ["p", "only body_with_feet is available for now. body_with_feet if not specified"],
-'mode': ["m",
+'mode': ["m", 'light, balanced, performance, or a """{dictionary within triple quote}""". balanced if not specified. Use a dictionary to specify your own detection and/or pose estimation models (more about in the documentation).'],
 'det_frequency': ["f", "run person detection only every N frames, and inbetween track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
 Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
+'backend': ["", "Backend for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
+'device': ["", "Device for pose estimatino can be 'auto', 'openvino', 'onnxruntime', 'opencv'"],
 'to_meters': ["M", "convert pixels to meters. true if not specified"],
 'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
 'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
@@ -252,6 +259,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
 'fontSize': ["", "font size for angle values. 0.3 if not specified"],
 'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participent switches their stance. true if not specified"],
+'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false is the floor is tilted instead. True if not specified"],
 'interpolate': ["", "interpolate missing data. true if not specified"],
 'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
 'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
````
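CONFIG_HELP pairs each parameter name with a short flag and a help string. Purely as an illustration of that layout (not Sports2D's actual CLI construction), entries of this shape can be wired into argparse like so:

```python
# Illustration only: map ('name': [short_flag, help_text]) entries onto argparse options.
import argparse

CONFIG_HELP = {
    'mode': ["m", "light, balanced, performance, or a dictionary within triple quotes"],
    'det_frequency': ["f", "run person detection only every N frames"],
    'backend': ["", "'auto', 'cpu', 'cuda', 'mps', or 'rocm'"],
}

parser = argparse.ArgumentParser(prog='sports2d')
for name, (short, help_text) in CONFIG_HELP.items():
    flags = [f'--{name}'] + ([f'-{short}'] if short else [])
    parser.add_argument(*flags, help=help_text)   # dest is derived from the long flag

args = parser.parse_args(['-m', 'lightweight', '--det_frequency', '50'])
print(args.mode, args.det_frequency, args.backend)   # lightweight 50 None
```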
````diff
@@ -320,7 +328,7 @@ def base_params(config_dict):
     video = cv2.VideoCapture(str(video_dir / video_file)) if video_dir else cv2.VideoCapture(str(video_file))
     if not video.isOpened():
         raise FileNotFoundError(f'Error: Could not open {video_dir/video_file}. Check that the file exists.')
-    frame_rate = video.get(cv2.CAP_PROP_FPS)
+    frame_rate = round(video.get(cv2.CAP_PROP_FPS))
     if frame_rate == 0:
         frame_rate = 30
         logging.warning(f'Error: Could not retrieve frame rate from {video_dir/video_file}. Defaulting to 30fps.')
````
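The change above rounds the FPS reported by OpenCV (which is often a non-integer such as 29.97 or a slightly off value) and keeps the 30 fps fallback when it cannot be read. A self-contained sketch of the same pattern, not the actual `base_params` function:

```python
# Sketch of the frame-rate logic shown in the hunk above.
import logging
import cv2

def get_frame_rate(video_path, default_fps=30):
    video = cv2.VideoCapture(str(video_path))
    if not video.isOpened():
        raise FileNotFoundError(f'Error: Could not open {video_path}. Check that the file exists.')
    frame_rate = round(video.get(cv2.CAP_PROP_FPS))   # round the reported FPS
    if frame_rate == 0:                               # OpenCV returns 0 when FPS is unknown
        frame_rate = default_fps
        logging.warning(f'Could not retrieve frame rate from {video_path}. Defaulting to {default_fps} fps.')
    video.release()
    return frame_rate
```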
````diff
@@ -435,7 +443,7 @@ def process(config='Config_demo.toml'):

         process_fun(config_dict, video_file, time_range, frame_rate, result_dir)

-        elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
+        elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
         logging.info(f'\nProcessing {video_file} took {elapsed_time:.2f} s.')

     logging.shutdown()
@@ -501,10 +509,11 @@ def main():
     # Override dictionary with command-line arguments if provided
     leaf_keys = get_leaf_keys(new_config)
     for leaf_key, default_value in leaf_keys.items():
-        leaf_name = leaf_key.split('.')[-1]
-        cli_value = getattr(args, leaf_name)
-        if cli_value is not None:
-            set_nested_value(new_config, leaf_key, cli_value)
+        if not 'CUSTOM' in leaf_key:
+            leaf_name = leaf_key.split('.')[-1]
+            cli_value = getattr(args, leaf_name)
+            if cli_value is not None:
+                set_nested_value(new_config, leaf_key, cli_value)

     # Run process with the new configuration dictionary
     Sports2D.process(new_config)
````
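The override loop above relies on `get_leaf_keys` and `set_nested_value`, whose definitions are not part of this diff. The following illustrative versions (an assumption about their behaviour, not the real implementations) show the dotted-key flattening and in-place override that the loop implies:

```python
# Illustrative helpers: flatten a nested config into dotted leaf keys, and write a value back.
def get_leaf_keys(config, prefix=''):
    """Flatten a nested dict into {'section.subsection.key': value}."""
    leaves = {}
    for key, value in config.items():
        full_key = f'{prefix}{key}'
        if isinstance(value, dict):
            leaves.update(get_leaf_keys(value, prefix=f'{full_key}.'))
        else:
            leaves[full_key] = value
    return leaves

def set_nested_value(config, leaf_key, value):
    """Walk the dotted path and overwrite the leaf value in place."""
    *path, last = leaf_key.split('.')
    target = config
    for part in path:
        target = target[part]
    target[last] = value

config = {'project': {'video_input': ['demo.mp4'], 'person_height': 1.70}}
print(get_leaf_keys(config))                         # {'project.video_input': ['demo.mp4'], 'project.person_height': 1.7}
set_nested_value(config, 'project.person_height', 1.85)
print(config['project']['person_height'])            # 1.85
```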