sports2d 0.5.6__py3-none-any.whl → 0.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,7 +13,7 @@
13
13
 
14
14
  [project]
15
15
  video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi>', ...]
16
- # Time ranges can be different for each video. All other processing arguments will be identical.
16
+ # On Windows, replace '\' with '/'
17
17
  # Beware that images won't be saved if paths contain non ASCII characters.
18
18
  person_height = 1.70 # Height of the person in meters (for pixels -> meters conversion)
19
19
  load_trc = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
@@ -21,6 +21,7 @@ compare = false # Not implemented yet
21
21
 
22
22
  # Video parameters
23
23
  time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
24
+ # Time ranges can be different for each video.
24
25
  video_dir = '' # If empty, video dir is current dir
25
26
 
26
27
  # Webcam parameters
@@ -48,16 +49,39 @@ result_dir = '' # If empty, project dir is current dir
48
49
  slowmo_factor = 1 # 1 for normal speed. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8
49
50
 
50
51
  # Pose detection parameters
51
- pose_model = 'body_with_feet' # Only body_with_feet is available for now
52
- mode = 'balanced' # 'lightweight', 'balanced', or 'performance'
53
- det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
52
+ pose_model = 'Body_with_feet' #With RTMLib: Body_with_feet (default HALPE_26 model), Whole_body (COCO_133: body + feet + hands), Body (COCO_17), CUSTOM (see example at the end of the file), or any from skeletons.py
53
+ mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}""" (see below)
54
+
55
+ # A dictionary (WITHIN THREE DOUBLE QUOTES) allows you to manually select the person detection (if top_down approach) and/or pose estimation models (see https://github.com/Tau-J/rtmlib).
56
+ # Models can be local paths or URLs.
57
+ # Make sure the input_sizes are within square brackets, and that they are in the opposite order from the one in the model path (for example, it would be [192,256] for rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip).
58
+ # If your pose_model is not provided in skeletons.py, you may have to create your own one (see example at the end of the file).
59
+ # Example, equivalent to mode='balanced':
60
+ # mode = """{'det_class':'YOLOX',
61
+ # 'det_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/yolox_m_8xb8-300e_humanart-c2c7a14a.zip',
62
+ # 'det_input_size':[640, 640],
63
+ # 'pose_class':'RTMPose',
64
+ # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-body7_pt-body7-halpe26_700e-256x192-4d3e73dd_20230605.zip',
65
+ # 'pose_input_size':[192,256]}"""
66
+ # Example with one-stage RTMO model (Requires pose_model = 'Body'):
67
+ # mode = """{'pose_class':'RTMO',
68
+ # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
69
+ # 'pose_input_size':[640, 640]}"""
70
+
71
+ det_frequency = 4 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
54
72
  # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
55
- tracking_mode = 'sports2d' # 'rtmlib' or 'sports2d'. 'sports2d' is generally much more accurate and comparable in speed
73
+ device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
74
+ backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
75
+ tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
76
+ deepsort_params = """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}""" # """{dictionary between 3 double quotes}"""
77
+ # More robust in crowded scenes but can be tricky to parametrize. More information here: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
78
+ # Note: For even more robust tracking, use 'embedder':'torchreid', which runs osnet_ain_x1_0 by default. Install additional dependencies with: `pip install torchreid gdown tensorboard`
79
+
56
80
 
57
81
  # Processing parameters
58
82
  keypoint_likelihood_threshold = 0.3 # Keypoints whose likelihood is lower will not be taken into account
59
83
  average_likelihood_threshold = 0.5 # Person will be ignored if average likelihood of good keypoints is lower than this value
60
- keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints is less than this fraction
84
+ keypoint_number_threshold = 0.3 # Person will be ignored if the number of good keypoints (above keypoint_likelihood_threshold) is less than this fraction
61
85
 
62
86
 
63
87
  [px_to_meters_conversion]
@@ -83,13 +107,14 @@ fontSize = 0.3
83
107
 
84
108
  # Select joint angles among
85
109
  # ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
86
- joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow']
110
+ joint_angles = ['Right ankle', 'Left ankle', 'Right knee', 'Left knee', 'Right hip', 'Left hip', 'Right shoulder', 'Left shoulder', 'Right elbow', 'Left elbow', 'Right wrist', 'Left wrist']
87
111
  # Select segment angles among
88
112
  # ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
89
113
  segment_angles = ['Right foot', 'Left foot', 'Right shank', 'Left shank', 'Right thigh', 'Left thigh', 'Pelvis', 'Trunk', 'Shoulders', 'Head', 'Right arm', 'Left arm', 'Right forearm', 'Left forearm']
90
114
 
91
115
  # Processing parameters
92
116
  flip_left_right = true # Same angles whether the participant faces left/right. Set it to false if you want timeseries to be continuous even when the participant switches their stance.
117
+ correct_segment_angles_with_floor_angle = true # If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead
93
118
 
94
119
 
95
120
  [post-processing]
@@ -121,5 +146,88 @@ person_orientation = ['front', 'none', 'left'] # Choose among 'auto', 'none', 'f
121
146
  osim_setup_path = '../OpenSim_setup' # Path to the OpenSim setup folder
122
147
  close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
123
148
 
149
+
124
150
  [logging]
125
- use_custom_logging = false # if integrated in an API that already has logging
151
+ use_custom_logging = false # if integrated in an API that already has logging
152
+
153
+
154
+
155
+ # CUSTOM skeleton
156
+ # If you use a model with different keypoints and/or different ordering
157
+ # Useful if you trained your own model, from DeepLabCut or MMPose for example.
158
+ # Make sure the ids are set in the right order and start from zero.
159
+ #
160
+ # If you want to perform inverse kinematics, you will also need to create an OpenSim model
161
+ # and add to its markerset the location where you expect the triangulated keypoints to be detected.
162
+ #
163
+ # In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
164
+ # You can create as many custom skeletons as you want, just add them further down and rename them.
165
+ #
166
+ # Check your model hierarchy with: for pre, _, node in RenderTree(model):
167
+ # print(f'{pre}{node.name} id={node.id}')
168
+ [pose.CUSTOM]
169
+ name = "Hip"
170
+ id = 19
171
+ [[pose.CUSTOM.children]]
172
+ name = "RHip"
173
+ id = 12
174
+ [[pose.CUSTOM.children.children]]
175
+ name = "RKnee"
176
+ id = 14
177
+ [[pose.CUSTOM.children.children.children]]
178
+ name = "RAnkle"
179
+ id = 16
180
+ [[pose.CUSTOM.children.children.children.children]]
181
+ name = "RBigToe"
182
+ id = 21
183
+ [[pose.CUSTOM.children.children.children.children.children]]
184
+ name = "RSmallToe"
185
+ id = 23
186
+ [[pose.CUSTOM.children.children.children.children]]
187
+ name = "RHeel"
188
+ id = 25
189
+ [[pose.CUSTOM.children]]
190
+ name = "LHip"
191
+ id = 11
192
+ [[pose.CUSTOM.children.children]]
193
+ name = "LKnee"
194
+ id = 13
195
+ [[pose.CUSTOM.children.children.children]]
196
+ name = "LAnkle"
197
+ id = 15
198
+ [[pose.CUSTOM.children.children.children.children]]
199
+ name = "LBigToe"
200
+ id = 20
201
+ [[pose.CUSTOM.children.children.children.children.children]]
202
+ name = "LSmallToe"
203
+ id = 22
204
+ [[pose.CUSTOM.children.children.children.children]]
205
+ name = "LHeel"
206
+ id = 24
207
+ [[pose.CUSTOM.children]]
208
+ name = "Neck"
209
+ id = 18
210
+ [[pose.CUSTOM.children.children]]
211
+ name = "Head"
212
+ id = 17
213
+ [[pose.CUSTOM.children.children.children]]
214
+ name = "Nose"
215
+ id = 0
216
+ [[pose.CUSTOM.children.children]]
217
+ name = "RShoulder"
218
+ id = 6
219
+ [[pose.CUSTOM.children.children.children]]
220
+ name = "RElbow"
221
+ id = 8
222
+ [[pose.CUSTOM.children.children.children.children]]
223
+ name = "RWrist"
224
+ id = 10
225
+ [[pose.CUSTOM.children.children]]
226
+ name = "LShoulder"
227
+ id = 5
228
+ [[pose.CUSTOM.children.children.children]]
229
+ name = "LElbow"
230
+ id = 7
231
+ [[pose.CUSTOM.children.children.children.children]]
232
+ name = "LWrist"
233
+ id = 9
Sports2D/Sports2D.py CHANGED
@@ -143,7 +143,10 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
143
143
  'pose_model': 'body_with_feet',
144
144
  'mode': 'balanced',
145
145
  'det_frequency': 4,
146
+ 'device': 'auto',
147
+ 'backend': 'auto',
146
148
  'tracking_mode': 'sports2d',
149
+ 'deepsort_params': """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}""",
147
150
  'keypoint_likelihood_threshold': 0.3,
148
151
  'average_likelihood_threshold': 0.5,
149
152
  'keypoint_number_threshold': 0.3
@@ -171,7 +174,9 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
171
174
  'Right shoulder',
172
175
  'Left shoulder',
173
176
  'Right elbow',
174
- 'Left elbow'],
177
+ 'Left elbow',
178
+ 'Right wrist',
179
+ 'Left wrist'],
175
180
  'segment_angles': [ 'Right foot',
176
181
  'Left foot',
177
182
  'Right shank',
@@ -186,7 +191,8 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
186
191
  'Left arm',
187
192
  'Right forearm',
188
193
  'Left forearm'],
189
- 'flip_left_right': True
194
+ 'flip_left_right': True,
195
+ 'correct_segment_angles_with_floor_angle': True
190
196
  },
191
197
  'post-processing': {'interpolate': True,
192
198
  'interp_gap_smaller_than': 10,
@@ -228,9 +234,11 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
228
234
  'save_angles': ["A", "save angles as mot files. true if not specified"],
229
235
  'slowmo_factor': ["", "slow-motion factor. For a video recorded at 240 fps and exported to 30 fps, it would be 240/30 = 8. 1 if not specified"],
230
236
  'pose_model': ["p", "body_with_feet (default), whole_body, body, or a custom skeleton from skeletons.py. body_with_feet if not specified"],
231
- 'mode': ["m", "light, balanced, or performance. balanced if not specified"],
237
+ 'mode': ["m", 'light, balanced, performance, or a """{dictionary within triple quote}""". balanced if not specified. Use a dictionary to specify your own detection and/or pose estimation models (more about in the documentation).'],
232
238
  'det_frequency': ["f", "run person detection only every N frames, and inbetween track previously detected bounding boxes. keypoint detection is still run on all frames.\n\
233
239
  Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. 1 if not specified: detection runs on all frames"],
240
+ 'backend': ["", "Backend for pose estimation can be 'auto', 'openvino', 'onnxruntime', or 'opencv'"],
241
+ 'device': ["", "Device for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
234
242
  'to_meters': ["M", "convert pixels to meters. true if not specified"],
235
243
  'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
236
244
  'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
@@ -241,7 +249,10 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
241
249
  'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
242
250
  'person_orientation': ["", "front, back, left, right, auto, or none. 'front none left' if not specified. If 'auto', will be either left or right depending on the direction of the motion."],
243
251
  'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
244
- 'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"], 'tracking_mode': ["", "sports2d or rtmlib. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
252
+ 'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
253
+ 'tracking_mode': ["", "sports2d or deepsort. deepsort is slower but more robust in difficult configurations. sports2d if not specified"],
254
+ 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
255
+ More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'], #
245
256
  'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
246
257
  'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
247
258
  'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
@@ -252,6 +263,7 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
252
263
  'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 50"],
253
264
  'fontSize': ["", "font size for angle values. 0.3 if not specified"],
254
265
  'flip_left_right': ["", "true or false. true to get consistent angles with people facing both left and right sides. Set it to false if you want timeseries to be continuous even when the participant switches their stance. true if not specified"],
266
+ 'correct_segment_angles_with_floor_angle': ["", "true or false. If the camera is tilted, corrects segment angles with respect to the floor angle. Set to false if the floor is tilted instead. True if not specified"],
255
267
  'interpolate': ["", "interpolate missing data. true if not specified"],
256
268
  'interp_gap_smaller_than': ["", "interpolate sequences of missing data if they are less than N frames long. 10 if not specified"],
257
269
  'fill_large_gaps_with': ["", "last_value, nan, or zeros. last_value if not specified"],
@@ -320,7 +332,7 @@ def base_params(config_dict):
320
332
  video = cv2.VideoCapture(str(video_dir / video_file)) if video_dir else cv2.VideoCapture(str(video_file))
321
333
  if not video.isOpened():
322
334
  raise FileNotFoundError(f'Error: Could not open {video_dir/video_file}. Check that the file exists.')
323
- frame_rate = video.get(cv2.CAP_PROP_FPS)
335
+ frame_rate = round(video.get(cv2.CAP_PROP_FPS))
324
336
  if frame_rate == 0:
325
337
  frame_rate = 30
326
338
  logging.warning(f'Error: Could not retrieve frame rate from {video_dir/video_file}. Defaulting to 30fps.')
@@ -435,7 +447,7 @@ def process(config='Config_demo.toml'):
435
447
 
436
448
  process_fun(config_dict, video_file, time_range, frame_rate, result_dir)
437
449
 
438
- elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
450
+ elapsed_time = (datetime.now() - currentDateAndTime).total_seconds()
439
451
  logging.info(f'\nProcessing {video_file} took {elapsed_time:.2f} s.')
440
452
 
441
453
  logging.shutdown()
@@ -501,10 +513,11 @@ def main():
501
513
  # Override dictionary with command-line arguments if provided
502
514
  leaf_keys = get_leaf_keys(new_config)
503
515
  for leaf_key, default_value in leaf_keys.items():
504
- leaf_name = leaf_key.split('.')[-1]
505
- cli_value = getattr(args, leaf_name)
506
- if cli_value is not None:
507
- set_nested_value(new_config, leaf_key, cli_value)
516
+ if not 'CUSTOM' in leaf_key:
517
+ leaf_name = leaf_key.split('.')[-1]
518
+ cli_value = getattr(args, leaf_name)
519
+ if cli_value is not None:
520
+ set_nested_value(new_config, leaf_key, cli_value)
508
521
 
509
522
  # Run process with the new configuration dictionary
510
523
  Sports2D.process(new_config)