sports2d 0.7.3-py3-none-any.whl → 0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,15 +11,18 @@
  # Sports2D.process('Config_demo.toml')


- [project]
+ [base]
  video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi>', ...]
  # On Windows, replace '\' with '/'
  # Beware that images won't be saved if paths contain non ASCII characters.
- px_to_m_from_person_id = 2 # Person to use for pixels to meters conversion (not used if a calibration file is provided)
- px_to_m_person_height = 1.65 # Height of the reference person in meters (for pixels -> meters conversion).
- visible_side = ['front', 'none', 'auto'] # Choose visible side among ['right', 'left', 'front', 'back', 'auto', 'none']. String or list of strings.
+
+ nb_persons_to_detect = 'all' # int or 'all' # Limiting or not the number of persons to be analyzed
+ person_ordering_method = 'highest_likelihood' # 'on_click', 'highest_likelihood', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'
+ first_person_height = 1.65 # Height of the reference person in meters (for pixels -> meters conversion: not used if a calibration file is provided)
+ visible_side = ['auto', 'front', 'none'] # Choose visible side among ['right', 'left', 'front', 'back', 'auto', 'none']. String or list of strings.
  # if 'auto', will be either 'left', 'right', or 'front' depending on the direction of the motion
  # if 'none', coordinates will be left in 2D rather than 3D
+
  load_trc_px = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
  compare = false # Not implemented yet

@@ -32,10 +35,6 @@ video_dir = '' # If empty, video dir is current dir
  webcam_id = 0 # your webcam id (0 is default)
  input_size = [1280, 720] # [W, H]. Lower resolution will be faster but less precise.

-
- [process]
- multiperson = true # Saving the motions of all the persons detected and tracked in the video.
- # If false, the person saved will be the one with the highest sum of keypoint scores over the video
  show_realtime_results = true
  save_vid = true
  save_img = true
@@ -107,7 +106,7 @@ to_meters = true
  make_c3d = true
  save_calib = true # Coming soon!

- # If conversion from px_to_m_person_height
+ # If conversion from first_person_height
  floor_angle = 'auto' # 'auto' or a value in degrees, eg 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)
  xy_origin = ['auto'] # ['auto'] or [px_x,px_y]. N.B.: px_y points downwards. If ['auto'], direction estimated from the start to the end of the line formed by the toes when they are on the ground

@@ -154,7 +153,7 @@ filter_type = 'butterworth' # butterworth, gaussian, LOESS, median
  do_ik = false # Do scaling and inverse kinematics?
  use_augmentation = false # true or false (lowercase) # Set to true if you want to use the model with augmented markers
  use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
- participant_mass = [67.0, 55.0] # kg # defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)
+ participant_mass = [55.0, 67.0] # kg # defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)
  right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)

  # Choosing best frames to scale the model
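For illustration, the new [base] keys can be driven from Python the same way the updated tests further down in this diff do: load Config_demo.toml, override the keys that replace the old [project]/[process] entries, and hand the dict to Sports2D. A minimal sketch, assuming Sports2D.process accepts the loaded configuration dictionary (the call itself is only hinted at in the comment at the top of the config file); the override values are illustrative:

    import toml
    from Sports2D import Sports2D

    # Load the demo configuration shipped with the package
    config_dict = toml.load('Config_demo.toml')

    # 0.8.0: former [project]/[process] keys now live under a single [base] section
    config_dict.get('base').update({
        'nb_persons_to_detect': 1,                       # int or 'all'
        'person_ordering_method': 'highest_likelihood',  # or 'on_click', 'first_detected', ...
        'first_person_height': 1.80,                     # replaces px_to_m_person_height
        'visible_side': ['auto'],
    })

    # Assumed entry point, mirroring the hint '# Sports2D.process('Config_demo.toml')' above
    Sports2D.process(config_dict)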
Sports2D/Sports2D.py CHANGED
@@ -29,7 +29,7 @@
  sports2d --video_input webcam
  - Run with custom parameters (all non specified are set to default):
  sports2d --show_plots False --time_range 0 2.1 --result_dir path_to_result_dir
- sports2d --multiperson false --mode lightweight --det_frequency 50
+ sports2d --person_detection_method highest_likelihood --mode lightweight --det_frequency 50
  - Run with a toml configuration file:
  sports2d --config path_to_config.toml

@@ -122,18 +122,17 @@ from Sports2D import Sports2D


  ## CONSTANTS
- DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
- 'px_to_m_from_person_id': 2,
- 'px_to_m_person_height': 1.65,
- 'visible_side': ['front', 'none', 'auto'],
+ DEFAULT_CONFIG = {'base': {'video_input': ['demo.mp4'],
+ 'nb_persons_to_detect': 'all',
+ 'person_ordering_method': 'highest_likelihood',
+ 'first_person_height': 1.65,
+ 'visible_side': ['auto', 'front', 'none'],
  'load_trc_px': '',
  'compare': False,
  'time_range': [],
  'video_dir': '',
  'webcam_id': 0,
- 'input_size': [1280, 720]
- },
- 'process': {'multiperson': True,
+ 'input_size': [1280, 720],
  'show_realtime_results': True,
  'save_vid': True,
  'save_img': True,
@@ -149,7 +148,7 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
  'device': 'auto',
  'backend': 'auto',
  'tracking_mode': 'sports2d',
- 'deepsort_params': """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}""",
+ 'deepsort_params': """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""",
  'keypoint_likelihood_threshold': 0.3,
  'average_likelihood_threshold': 0.5,
  'keypoint_number_threshold': 0.3
@@ -207,7 +206,7 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
  'kinematics':{'do_ik': False,
  'use_augmentation': False,
  'use_contacts_muscles': True,
- 'participant_mass': [67.0, 55.0],
+ 'participant_mass': [55.0, 67.0],
  'right_left_symmetry': True,
  'default_height': 1.70,
  'remove_individual_scaling_setup': True,
@@ -224,8 +223,10 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],

  CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non ASCII characters"],
- 'px_to_m_person_height': ["H", "height of the person in meters. 1.70 if not specified"],
- 'visible_side': ["", "front, back, left, right, auto, or none. 'front none auto' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
+ 'nb_persons_to_detect': ["n", "number of persons to detect. int or 'all'. 'all' if not specified"],
+ 'person_ordering_method': ["", "'on_click', 'highest_likelihood', 'greatest_displacement', 'least_displacement', 'first_detected', or 'last_detected'. 'on_click' if not specified"],
+ 'first_person_height': ["H", "height of the reference person in meters. 1.65 if not specified. Not used if a calibration file is provided"],
+ 'visible_side': ["", "front, back, left, right, auto, or none. 'auto front none' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
  'load_trc_px': ["", "load trc file to avaid running pose estimation again. false if not specified"],
  'compare': ["", "visually compare motion with trc file. false if not specified"],
  'webcam_id': ["w", "webcam ID. 0 if not specified"],
@@ -251,7 +252,6 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'device': ["", "Device for pose estimatino can be 'auto', 'openvino', 'onnxruntime', 'opencv'"],
  'to_meters': ["M", "convert pixels to meters. true if not specified"],
  'make_c3d': ["", "Convert trc to c3d file. true if not specified"],
- 'px_to_m_from_person_id': ["", "person ID to calibrate on. 0 if not specified"],
  'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
  'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
  'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
@@ -261,7 +261,6 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'use_contacts_muscles': ["", "Use model with contact spheres and muscles. false if not specified"],
  'participant_mass': ["", "mass of the participant in kg or none. Defaults to 70 if not provided. No influence on kinematics (motion), only on kinetics (forces)"],
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
- 'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
  'tracking_mode': ["", "'sports2d' or 'deepsort'. 'deepsort' is slower, harder to parametrize but can be more robust if correctly tuned"],
  'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
  Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
@@ -270,6 +269,10 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
  'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
  'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
  'keypoint_number_threshold': ["", "detected persons are not retained if number of detected keypoints is below this threshold. 0.3 if not specified, i.e., i.e., 30 percent"],
+ 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
+ 'close_to_zero_speed_px': ["", "Sum for all keypoints: about 50 px/frame or 0.2 m/frame. Defaults to 50"],
+ 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise. Defaults to 45"],
+ 'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
  'fontSize': ["", "font size for angle values. 0.3 if not specified"],
  'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participent switches their stance. true if not specified"],
  'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles as regards to the floor angle. Set to false is the floor is tilted instead. True if not specified"],
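The help entries above document the command-line flags that supersede --multiperson. As a hedged illustration, the new person-selection options can be driven from Python via subprocess, modelled on the updated tests at the end of this diff (the values shown are placeholders):

    import subprocess

    # Sketch: a 0.8.0 CLI call using the flags that replace --multiperson
    cmd = ["sports2d",
           "--nb_persons_to_detect", "1",
           "--person_ordering_method", "highest_likelihood",
           "--mode", "lightweight", "--det_frequency", "50",
           "--show_realtime_results", "False", "--show_graphs", "False"]
    subprocess.run(cmd, check=True, capture_output=True, text=True, encoding='utf-8')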
@@ -324,16 +327,16 @@ def base_params(config_dict):
  '''

  # video_dir and result_dir
- video_dir = config_dict.get('project').get('video_dir')
+ video_dir = config_dict.get('base').get('video_dir')
  if video_dir == '': video_dir = Path.cwd()
  else: video_dir = Path(video_dir).resolve()

- result_dir = config_dict.get('process').get('result_dir')
+ result_dir = config_dict.get('base').get('result_dir')
  if result_dir == '': result_dir = Path.cwd()
  else: result_dir = Path(result_dir).resolve()

  # video_files, frame_rates, time_ranges
- video_input = config_dict.get('project').get('video_input')
+ video_input = config_dict.get('base').get('video_input')
  if video_input == "webcam" or video_input == ["webcam"]:
  video_files = ['webcam'] # No video files for webcam
  frame_rates = [None] # No frame rate for webcam
@@ -359,7 +362,7 @@ def base_params(config_dict):
  video.release()

  # time_ranges
- time_ranges = np.array(config_dict.get('project').get('time_range'))
+ time_ranges = np.array(config_dict.get('base').get('time_range'))
  # No time range provided
  if time_ranges.shape == (0,):
  time_ranges = [None] * len(video_files)
@@ -394,7 +397,7 @@ def get_leaf_keys(config, prefix=''):

  def update_nested_dict(config, key_path, value):
  '''
- Update a nested dictionary based on a key path string like 'process.multiperson'.
+ Update a nested dictionary based on a key path string like 'base.nb_persons_to_detect'.
  '''

  keys = key_path.split('.')
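For context, update_nested_dict resolves a dotted key path such as 'base.nb_persons_to_detect' against the nested configuration dictionary. An illustrative sketch of the idea (not the package's exact implementation), assuming every intermediate key already exists:

    def update_nested_dict_sketch(config, key_path, value):
        # e.g. key_path = 'base.nb_persons_to_detect'
        keys = key_path.split('.')
        d = config
        for key in keys[:-1]:    # walk down to the parent of the leaf
            d = d[key]
        d[keys[-1]] = value      # overwrite the leaf value

    cfg = {'base': {'nb_persons_to_detect': 'all'}}
    update_nested_dict_sketch(cfg, 'base.nb_persons_to_detect', 1)
    # cfg == {'base': {'nb_persons_to_detect': 1}}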
@@ -492,7 +495,7 @@ def main():
  sports2d --video_input webcam
  - Run with custom parameters (all non specified are set to default):
  sports2d --show_plots False --time_range 0 2.1 --result_dir path_to_result_dir
- sports2d --multiperson false --mode lightweight --det_frequency 50
+ sports2d --mode lightweight --det_frequency 50
  - Run with a toml configuration file:
  sports2d --config path_to_config.toml
  '''
@@ -527,7 +530,7 @@ def main():
  else:
  new_config = DEFAULT_CONFIG.copy()
  if not args.video_input:
- new_config.get('project').update({'video_dir': Path(__file__).resolve().parent / 'Demo'})
+ new_config.get('base').update({'video_dir': Path(__file__).resolve().parent / 'Demo'})

  # Override dictionary with command-line arguments if provided
  leaf_keys = get_leaf_keys(new_config)
@@ -15,7 +15,6 @@


  ## INIT
- import sys
  from importlib.metadata import version
  import subprocess
  from pathlib import Path
@@ -7,13 +7,14 @@
  Disable the real-time results and plots to avoid any GUI issues.

  Usage:
- cd Sports2D/Utilities
+ tests_sports2d
+ OR
  python tests.py
  '''

-
  ## INIT
  from importlib.metadata import version
+ import os
  import toml
  import subprocess
  from pathlib import Path
@@ -36,15 +37,20 @@ def test_workflow():
  Test the workflow of Sports2D.
  '''

+ root_dir = os.path.dirname(os.path.abspath(__file__))
+ os.chdir(root_dir)
+
  #############################
  ## From Python ##
  #############################

  # Default
- config_path = Path.cwd().parent / 'Demo' / 'Config_demo.toml'
+ config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
  config_dict = toml.load(config_path)
- config_dict.get("project").update({"video_dir":'../Demo'})
- config_dict.get("process").update({"show_realtime_results":False})
+ video_dir = Path(__file__).resolve().parent.parent / 'Demo'
+ config_dict.get("base").update({"video_dir": str(video_dir)})
+ config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
+ config_dict.get("base").update({"show_realtime_results":False})
  config_dict.get("post-processing").update({"show_graphs":False})

  from Sports2D import Sports2D
@@ -56,32 +62,44 @@ def test_workflow():
  #############################
  #############################
  # Default
- demo_cmd = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False"]
+ demo_cmd = ["sports2d", "--person_ordering_method", "highest_likelihood", "--show_realtime_results", "False", "--show_graphs", "False"]
  subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')

- # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
- demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
+ # With loading a trc file, visible_side 'front', first_person_height '1.76", floor_angle 0, xy_origin [0, 928]
+ demo_cmd2 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
+ "--load_trc_px", os.path.join(root_dir, "demo_Sports2D", "demo_Sports2D_px_person01.trc"),
+ "--visible_side", "front", "--first_person_height", "1.76", "--time_range", "1.2", "2.7",
+ "--floor_angle", "0", "--xy_origin", "0", "928"]
+ subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
+
+ # With no pixels to meters conversion, one person to select, lightweight mode, detection frequency, slowmo factor, gaussian filter, RTMO body pose model
+ demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
  "--to_meters", "False",
- "--multiperson", "False",
+ "--nb_persons_to_detect", "1", "--person_ordering_method", "greatest_displacement",
  "--mode", "lightweight", "--det_frequency", "50",
  "--slowmo_factor", "4",
  "--filter_type", "gaussian",
  "--pose_model", "body", "--mode", """{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}"""]
- subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
+ subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')

  # With a time range, inverse kinematics, marker augmentation, body pose_model and custom RTMO mode
- demo_cmd3 = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False",
+ demo_cmd4 = ["sports2d", "--person_ordering_method", "greatest_displacement", "--show_realtime_results", "False", "--show_graphs", "False",
  "--time_range", "1.2", "2.7",
  "--do_ik", "True", "--use_augmentation", "True",
- "--px_to_m_from_person_id", "1", "--px_to_m_person_height", "1.65",
- "--visible_side", "front", "auto", "--participant_mass", "55.0", "67.0"]
- subprocess.run(demo_cmd3, check=True, capture_output=True, text=True, encoding='utf-8')
+ "--nb_persons_to_detect", "all", "--first_person_height", "1.65",
+ "--visible_side", "auto", "front", "--participant_mass", "55.0", "67.0"]
+ subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8')

  # From config file
- cli_config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
- config_dict = toml.load(cli_config_path)
- cli_video_dir = Path(__file__).resolve().parent.parent / 'Demo'
- config_dict.get("project").update({"video_dir": str(cli_video_dir)})
- with open(cli_config_path, 'w') as f: toml.dump(config_dict, f)
- demo_cmd4 = ["sports2d", "--config", str(cli_config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
+ config_path = Path(__file__).resolve().parent.parent / 'Demo' / 'Config_demo.toml'
+ config_dict = toml.load(config_path)
+ video_dir = Path(__file__).resolve().parent.parent / 'Demo'
+ config_dict.get("base").update({"video_dir": str(video_dir)})
+ config_dict.get("base").update({"person_ordering_method": "highest_likelihood"})
+ with open(config_path, 'w') as f: toml.dump(config_dict, f)
+ demo_cmd4 = ["sports2d", "--config", str(config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
  subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8')
+
+
+ if __name__ == "__main__":
+ test_workflow()
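Finally, the test module now has a direct entry point. A minimal sketch of calling it programmatically; the import path is inferred from the removed 'cd Sports2D/Utilities' hint and may differ in the installed package (the tests_sports2d console script or 'python tests.py' remain the documented ways to run it):

    # Assumed module location: Sports2D/Utilities/tests.py
    from Sports2D.Utilities.tests import test_workflow

    test_workflow()  # runs the Python-API and CLI demo workflows end to end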