sports2d 0.6.2__py3-none-any.whl → 0.6.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,12 +12,16 @@
 
 
 [project]
- video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi', ...]
- # On Windows, replace '\' with '/'
- # Beware that images won't be saved if paths contain non-ASCII characters.
- person_height = 1.70 # Height of the person in meters (for pixels -> meters conversion)
- load_trc = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
- compare = false # Not implemented yet
+ video_input = 'demo.mp4' # 'webcam' or '<video_path.ext>', or ['video1_path.mp4', 'video2_path.avi', ...]
+ # On Windows, replace '\' with '/'
+ # Beware that images won't be saved if paths contain non-ASCII characters.
+ px_to_m_from_person_id = 2 # Person to use for pixels to meters conversion (not used if a calibration file is provided)
+ px_to_m_person_height = 1.65 # Height of the reference person in meters (for pixels -> meters conversion).
+ visible_side = ['front', 'none', 'auto'] # Choose visible side among ['right', 'left', 'front', 'back', 'auto', 'none']. String or list of strings.
+ # if 'auto', will be either 'left', 'right', or 'front' depending on the direction of the motion
+ # if 'none', no processing will be performed on the corresponding person
+ load_trc_px = '' # If you do not want to recalculate pose, load it from a trc file (in px, not in m)
+ compare = false # Not implemented yet
 
 # Video parameters
 time_range = [] # [] for the whole video, or [start_time, end_time] (in seconds), or [[start_time1, end_time1], [start_time2, end_time2], ...]
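For context, the new `[project]` keys are per-person where it matters: `visible_side` takes one entry per detected person, while `px_to_m_from_person_id` picks the single reference person for the pixel-to-meter scale. A minimal sketch of the equivalent Python-side configuration; passing a partial config dict to `Sports2D.process()` is an assumption based on the package's documented entry point, which this diff does not show:

```python
from Sports2D import Sports2D  # import path as shown in the Sports2D.py hunks below

# Hypothetical partial config mirroring the new [project] keys above
config = {
    'project': {
        'video_input': ['demo.mp4'],
        'px_to_m_from_person_id': 0,                # reference person for the px -> m scale
        'px_to_m_person_height': 1.65,              # that person's height, in meters
        'visible_side': ['front', 'none', 'auto'],  # person 0 front, person 1 skipped, person 2 from motion
        'load_trc_px': '',                          # set to a .trc path (in px) to skip pose estimation
    },
}
Sports2D.process(config)  # assumption: process() merges this with DEFAULT_CONFIG
```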
@@ -67,15 +71,19 @@ mode = 'balanced' # 'lightweight', 'balanced', 'performance', or """{dictionary}
 # mode = """{'pose_class':'RTMO',
 # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip',
 # 'pose_input_size':[640, 640]}"""
+ # Example with animal pose estimation:
+ # mode = """{'pose_class':'RTMPose',
+ # 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/onnx_sdk/rtmpose-m_simcc-ap10k_pt-aic-coco_210e-256x256-7a041aa1_20230206.zip',
+ # 'pose_input_size':[256,256]}"""
 
 det_frequency = 4 # Run person detection only every N frames, and in between track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
 device = 'auto' # 'auto', 'CPU', 'CUDA', 'MPS', 'ROCM'
 backend = 'auto' # 'auto', 'openvino', 'onnxruntime', 'opencv'
 tracking_mode = 'sports2d' # 'sports2d' or 'deepsort'. 'deepsort' is slower but more robust in difficult configurations
- deepsort_params = """{'max_age':30, 'n_init':3, 'nms_max_overlap':0.8, 'max_cosine_distance':0.3, 'nn_budget':200, 'max_iou_distance':0.8, 'embedder_gpu': True}""" # """{dictionary between 3 double quotes}"""
- # More robust in crowded scenes but Can be tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
- # Note: For even more robust tracking, use 'embedder':'torchreid', which runs osnet_ain_x1_0 by default. Install additional dependencies with: `pip install torchreid gdown tensorboard`
+ deepsort_params = """{'max_age':30, 'n_init':3, 'max_cosine_distance':0.3, 'max_iou_distance':0.8, 'embedder_gpu': True, 'embedder':'torchreid'}""" # """{dictionary between 3 double quotes}"""
+ # More robust in crowded scenes but tricky to parametrize. More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51
+ # Requires `pip install torch torchvision torchreid gdown tensorboard`
 
 
 # Processing parameters
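The new default `deepsort_params` map directly onto the constructor of deep_sort_realtime's `DeepSort` class linked above. A hedged sketch of how such a tracker is driven; the detection values are made up:

```python
import numpy as np
from deep_sort_realtime.deepsort_tracker import DeepSort

# Same keys as deepsort_params above; argument names per the linked
# deep_sort_realtime source. 'torchreid' needs the extra pip installs noted above.
tracker = DeepSort(max_age=30, n_init=3, max_cosine_distance=0.3,
                   max_iou_distance=0.8, embedder_gpu=True, embedder='torchreid')

frame = np.zeros((720, 1280, 3), dtype=np.uint8)         # stand-in for a video frame
detections = [([100, 50, 80, 200], 0.9, 'person')]       # ([left, top, w, h], confidence, class)
tracks = tracker.update_tracks(detections, frame=frame)  # Track objects carrying track_id
```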
@@ -87,13 +95,15 @@ keypoint_number_threshold = 0.3 # Person will be ignored if the number of go
 [px_to_meters_conversion]
 # Pixel to meters conversion
 to_meters = true
- # If conversion from a calibration file
- calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
- # If conversion from person_height
- calib_on_person_id = 0 # Person to use for calibration
+ make_c3d = true
+ save_calib = true # Coming soon!
+
+ # If conversion from px_to_m_person_height
 floor_angle = 'auto' # 'auto' or a value in degrees, eg 2.3. If 'auto', estimated from the line formed by the toes when they are on the ground (where speed = 0)
 xy_origin = ['auto'] # ['auto'] or [px_x,px_y]. N.B.: px_y points downwards. If ['auto'], direction estimated from the start to the end of the line formed by the toes when they are on the ground
- save_calib = true
+
+ # If conversion from a calibration file
+ calib_file = '' # Calibration in the Pose2Sim format. 'calib_demo.toml', or '' if not available
 
 fastest_frames_to_remove_percent = 0.1 # Frames with high speed are considered as outliers
 close_to_zero_speed_px = 50 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
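When no calibration file is given, the height-based conversion reduces to a single scale factor derived from the reference person's apparent height in pixels. An illustrative sketch; the helper below is not Sports2D's actual implementation:

```python
import numpy as np

def px_to_m_scale(head_y_px, heel_y_px, px_to_m_person_height=1.65):
    '''Illustrative only: derive a pixels -> meters factor from the
    reference person's apparent height, per the keys above.'''
    height_px = abs(heel_y_px - head_y_px)
    return px_to_m_person_height / height_px

scale = px_to_m_scale(head_y_px=120, heel_y_px=820)  # ~0.00236 m per pixel
ankle_m = np.array([640, 700]) * scale               # y still points down until xy_origin is applied
```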
@@ -136,15 +146,20 @@ filter_type = 'butterworth' # butterworth, gaussian, LOESS, median
 kernel_size = 3
 
 
- [inverse-kinematics]
+ [kinematics]
 do_ik = false # Do scaling and inverse kinematics?
- person_orientation = ['front', 'none', 'left'] # Choose among 'auto', 'none', 'front', 'back', 'left', 'right'
- # if 'none', no IK will be performed on the corresponding person
- # if 'auto', will be either 'left' or 'right' depending on the direction of the motion
- # Example with one person on one video: ['front']
- # Or ['front', 'none', 'left'] with 3 persons on one video
+ use_augmentation = true # true or false (lowercase) # Set to true if you want to use the model with augmented markers
+ use_contacts_muscles = true # true or false (lowercase) # If true, contact spheres and muscles are added to the model
+
 osim_setup_path = '../OpenSim_setup' # Path to the OpenSim setup folder
- close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
+ right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
+ # default_height = 1.7 # meters # If automatic height calculation did not work, this value is used to scale the model
+ remove_individual_scaling_setup = true # true or false (lowercase) # If true, the individual scaling setup files are removed to avoid cluttering
+ remove_individual_ik_setup = true # true or false (lowercase) # If true, the individual IK setup files are removed to avoid cluttering
+ fastest_frames_to_remove_percent = 0.1 # Frames with high speed are considered as outliers
+ close_to_zero_speed_m = 0.2 # Sum for all keypoints: about 50 px/frame or 0.2 m/frame
+ large_hip_knee_angles = 45 # Hip and knee angles below this value are considered as imprecise
+ trimmed_extrema_percent = 0.5 # Proportion of the most extreme segment values to remove before calculating their mean
 
 
 [logging]
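`trimmed_extrema_percent = 0.5` means the most extreme half of the segment-length samples are discarded before averaging, which keeps tracking glitches out of the scaling estimate. An illustrative sketch, not the package's code:

```python
import numpy as np

def trimmed_mean(values, trimmed_extrema_percent=0.5):
    '''Illustrative only: drop the given proportion of extreme values,
    split between both tails, then average the rest.'''
    values = np.sort(np.asarray(values))
    n_trim = int(len(values) * trimmed_extrema_percent / 2)
    return values[n_trim:len(values) - n_trim].mean()

thigh_lengths = [0.38, 0.40, 0.41, 0.41, 0.42, 0.55]  # meters; 0.55 is a tracking glitch
print(trimmed_mean(thigh_lengths))  # 0.41, the outlier no longer skews the estimate
```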
Sports2D/Sports2D.py CHANGED
@@ -122,8 +122,10 @@ from Sports2D import Sports2D
 
 ## CONSTANTS
 DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
- 'person_height': 1.70,
- 'load_trc': '',
+ 'px_to_m_from_person_id': 2,
+ 'px_to_m_person_height': 1.65,
+ 'visible_side': ['front', 'auto'],
+ 'load_trc_px': '',
 'compare': False,
 'time_range': [],
 'video_dir': '',
@@ -153,8 +155,8 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
 },
 'px_to_meters_conversion': {
 'to_meters': True,
+ 'make_c3d': True,
 'calib_file': '',
- 'calib_on_person_id': 0,
 'floor_angle': 'auto',
 'xy_origin': ['auto'],
 'save_calib': True,
@@ -205,18 +207,27 @@ DEFAULT_CONFIG = {'project': {'video_input': ['demo.mp4'],
 'loess': {'nb_values_used': 5},
 'median': {'kernel_size': 3}
 },
- 'inverse-kinematics':{'do_ik': False,
- 'person_orientation': ['front', '', 'left'],
- 'osim_setup_path': '../OpenSim_setup',
- 'close_to_zero_speed_m': 0.2
+ 'kinematics':{'do_ik': False,
+ 'use_augmentation': False,
+ 'use_contacts_muscles': False,
+ 'right_left_symmetry': True,
+ 'default_height': 1.70,
+ 'remove_individual_scaling_setup': True,
+ 'remove_individual_ik_setup': True,
+ 'fastest_frames_to_remove_percent': 0.1,
+ 'close_to_zero_speed_m': 0.2,
+ 'large_hip_knee_angles': 45,
+ 'trimmed_extrema_percent': 0.5,
+ 'osim_setup_path': '../OpenSim_setup'
 },
 'logging': {'use_custom_logging': False}
 }
 
 CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'video_input': ["i", "webcam, or video_path.mp4, or video1_path.avi video2_path.mp4 ... Beware that images won't be saved if paths contain non-ASCII characters"],
- 'person_height': ["H", "height of the person in meters. 1.70 if not specified"],
- 'load_trc': ["", "load trc file to avoid running pose estimation again. false if not specified"],
+ 'px_to_m_person_height': ["H", "height of the person in meters. 1.70 if not specified"],
+ 'visible_side': ["", "front, back, left, right, auto, or none. 'front auto' if not specified. If 'auto', will be either left or right depending on the direction of the motion. If 'none', no IK for this person"],
+ 'load_trc_px': ["", "load trc file to avoid running pose estimation again. false if not specified"],
 'compare': ["", "visually compare motion with trc file. false if not specified"],
 'webcam_id': ["w", "webcam ID. 0 if not specified"],
 'time_range': ["t", "start_time end_time. In seconds. Whole video if not specified. start_time1 end_time1 start_time2 end_time2 ... if multiple videos with different time ranges"],
@@ -240,19 +251,21 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'backend': ["", "Backend for pose estimation can be 'auto', 'openvino', 'onnxruntime', 'opencv'"],
 'device': ["", "Device for pose estimation can be 'auto', 'cpu', 'cuda', 'mps' (for MacOS), or 'rocm' (for AMD GPUs)"],
 'to_meters': ["M", "convert pixels to meters. true if not specified"],
- 'calib_on_person_id': ["", "person ID to calibrate on. 0 if not specified"],
+ 'make_c3d': ["", "Convert trc to c3d file. true if not specified"],
+ 'px_to_m_from_person_id': ["", "person ID to calibrate on. 0 if not specified"],
 'floor_angle': ["", "angle of the floor. 'auto' if not specified"],
 'xy_origin': ["", "origin of the xy plane. 'auto' if not specified"],
 'calib_file': ["", "path to calibration file. '' if not specified, eg no calibration file"],
 'save_calib': ["", "save calibration file. true if not specified"],
 'do_ik': ["", "do inverse kinematics. false if not specified"],
- 'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
- 'person_orientation': ["", "front, back, left, right, auto, or none. 'front none left' if not specified. If 'auto', will be either left or right depending on the direction of the motion."],
+ 'use_augmentation': ["", "Use LSTM marker augmentation. false if not specified"],
+ 'use_contacts_muscles': ["", "Use model with contact spheres and muscles. false if not specified"],
 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
 'multiperson': ["", "multiperson involves tracking: will be faster if set to false. true if not specified"],
 'tracking_mode': ["", "sports2d or deepsort. sports2d is generally much more accurate and comparable in speed. sports2d if not specified"],
 'deepsort_params': ["", 'Deepsort tracking parameters: """{dictionary between 3 double quotes}""". \n\
- More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'], #
+ Default: max_age:30, n_init:3, nms_max_overlap:0.8, max_cosine_distance:0.3, nn_budget:200, max_iou_distance:0.8, embedder_gpu: True\n\
+ More information there: https://github.com/levan92/deep_sort_realtime/blob/master/deep_sort_realtime/deepsort_tracker.py#L51'],
 'input_size': ["", "width, height. 1280, 720 if not specified. Lower resolution will be faster but less precise"],
 'keypoint_likelihood_threshold': ["", "detected keypoints are not retained if likelihood is below this threshold. 0.3 if not specified"],
 'average_likelihood_threshold': ["", "detected persons are not retained if average keypoint likelihood is below this threshold. 0.5 if not specified"],
@@ -274,6 +287,15 @@ CONFIG_HELP = {'config': ["C", "path to a toml configuration file"],
 'sigma_kernel': ["", "sigma of the gaussian filter. 1 if not specified"],
 'nb_values_used': ["", "number of values used for the loess filter. 5 if not specified"],
 'kernel_size': ["", "kernel size of the median filter. 3 if not specified"],
+ 'osim_setup_path': ["", "path to OpenSim setup. '../OpenSim_setup' if not specified"],
+ 'right_left_symmetry': ["", "right left symmetry. true if not specified"],
+ 'default_height': ["", "default height for scaling. 1.70 if not specified"],
+ 'remove_individual_scaling_setup': ["", "remove individual scaling setup files generated during scaling. true if not specified"],
+ 'remove_individual_ik_setup': ["", "remove individual IK setup files generated during IK. true if not specified"],
+ 'fastest_frames_to_remove_percent': ["", "Frames with high speed are considered as outliers. Defaults to 0.1"],
+ 'close_to_zero_speed_m': ["","Sum for all keypoints: about 50 px/frame or 0.2 m/frame"],
+ 'large_hip_knee_angles': ["", "Hip and knee angles below this value are considered as imprecise and ignored. Defaults to 45"],
+ 'trimmed_extrema_percent': ["", "Proportion of the most extreme segment values to remove before calculating their mean. Defaults to 0.5"],
 'use_custom_logging': ["", "use custom logging. false if not specified"]
 }
 
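These CONFIG_HELP entries double as CLI flags. A sketch in the style of the package's own test suite below; the exact flag semantics are inferred from the help strings, not verified here:

```python
import subprocess

# Flags mirror the CONFIG_HELP keys above; '-H' is the declared short form
# for px_to_m_person_height. Values and CLI behavior are assumptions.
cmd = ["sports2d",
       "--px_to_m_person_height", "1.65",
       "--visible_side", "front", "none", "auto",
       "--make_c3d", "true",
       "--show_realtime_results", "False", "--show_graphs", "False"]
subprocess.run(cmd, check=True, capture_output=True, text=True, encoding='utf-8')
```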
@@ -29,6 +29,7 @@ import pandas as pd
 from scipy import interpolate
 import imageio_ffmpeg as ffmpeg
 import cv2
+ import c3d
 
 import matplotlib.pyplot as plt
 from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QVBoxLayout
@@ -84,6 +85,32 @@ angle_dict = { # lowercase!
 'left hand': [['LIndex', 'LWrist'], 'horizontal', 0, -1]
 }
 
+ marker_Z_positions = {'right':
+ {"RHip": 0.105, "RKnee": 0.0886, "RAnkle": 0.0972, "RBigToe":0.0766, "RHeel":0.0883, "RSmallToe": 0.1200,
+ "RShoulder": 0.2016, "RElbow": 0.1613, "RWrist": 0.120, "RThumb": 0.1625, "RIndex": 0.1735, "RPinky": 0.1740, "REye": 0.0311,
+ "LHip": -0.105, "LKnee": -0.0886, "LAnkle": -0.0972, "LBigToe": -0.0766, "LHeel": -0.0883, "LSmallToe": -0.1200,
+ "LShoulder": -0.2016, "LElbow": -0.1613, "LWrist": -0.120, "LThumb": -0.1625, "LIndex": -0.1735, "LPinky": -0.1740, "LEye": -0.0311,
+ "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
+ 'left':
+ {"RHip": -0.105, "RKnee": -0.0886, "RAnkle": -0.0972, "RBigToe": -0.0766, "RHeel": -0.0883, "RSmallToe": -0.1200,
+ "RShoulder": -0.2016, "RElbow": -0.1613, "RWrist": -0.120, "RThumb": -0.1625, "RIndex": -0.1735, "RPinky": -0.1740, "REye": -0.0311,
+ "LHip": 0.105, "LKnee": 0.0886, "LAnkle": 0.0972, "LBigToe":0.0766, "LHeel":0.0883, "LSmallToe": 0.1200,
+ "LShoulder": 0.2016, "LElbow": 0.1613, "LWrist": 0.120, "LThumb": 0.1625, "LIndex": 0.1735, "LPinky": 0.1740, "LEye": 0.0311,
+ "Hip": 0.0, "Neck": 0.0, "Head":0.0, "Nose": 0.0},
+ 'front':
+ {"RHip": 0.0301, "RKnee": 0.0179, "RAnkle": 0.0230, "RBigToe": 0.2179, "RHeel": -0.0119, "RSmallToe": 0.1804,
+ "RShoulder": -0.01275, "RElbow": 0.0119, "RWrist": 0.0002, "RThumb": 0.0106, "RIndex": -0.0004, "RPinky": -0.0009, "REye": 0.0702,
+ "LHip": -0.0301, "LKnee": -0.0179, "LAnkle": 0.0230, "LBigToe": 0.2179, "LHeel": -0.0119, "LSmallToe": 0.1804,
+ "LShoulder": 0.01275, "LElbow": -0.0119, "LWrist": -0.0002, "LThumb": -0.0106, "LIndex": 0.0004, "LPinky": 0.0009, "LEye": -0.0702,
+ "Hip": 0.0301, "Neck": -0.0008, "Head": 0.0655, "Nose": 0.1076},
+ 'back':
+ {"RHip": -0.0301, "RKnee": -0.0179, "RAnkle": -0.0230, "RBigToe": -0.2179, "RHeel": 0.0119, "RSmallToe": -0.1804,
+ "RShoulder": 0.01275, "RElbow": -0.0119, "RWrist": -0.0002, "RThumb": -0.0106, "RIndex": 0.0004, "RPinky": 0.0009, "REye": -0.0702,
+ "LHip": 0.0301, "LKnee": 0.0179, "LAnkle": -0.0230, "LBigToe": -0.2179, "LHeel": 0.0119, "LSmallToe": -0.1804,
+ "LShoulder": -0.01275, "LElbow": 0.0119, "LWrist": 0.0002, "LThumb": 0.0106, "LIndex": -0.0004, "LPinky": -0.0009, "LEye": 0.0702,
+ "Hip": 0.0301, "Neck": -0.0008, "Head": -0.0655, "Nose": 0.1076},
+ }
+
 colors = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0), (255, 255, 255),
 (125, 0, 0), (0, 125, 0), (0, 0, 125), (125, 125, 0), (125, 0, 125), (0, 125, 125),
 (255, 125, 125), (125, 255, 125), (125, 125, 255), (255, 255, 125), (255, 125, 255), (125, 255, 255), (125, 125, 125),
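These constant, side-dependent Z offsets (in meters) are what allow a single 2D view to be lifted into a 3D marker set, consistent with the `visible_side` setting. An illustrative sketch of that idea, not the package's actual function:

```python
import numpy as np

# Tiny subset of the marker_Z_positions table above ('right' side)
Z_RIGHT = {"RHip": 0.105, "Hip": 0.0, "LHip": -0.105}

def add_depth(xy_m, marker_names, z_table=Z_RIGHT):
    '''Illustrative only: append a constant per-marker Z to 2D coordinates.
    xy_m: (n_frames, n_markers, 2) array in meters.'''
    z = np.array([z_table[m] for m in marker_names])               # (n_markers,)
    z = np.broadcast_to(z[None, :, None], (len(xy_m), len(z), 1))
    return np.concatenate([xy_m, z], axis=-1)                      # (n_frames, n_markers, 3)

xyz = add_depth(np.zeros((10, 3, 2)), ["RHip", "Hip", "LHip"])
print(xyz.shape)  # (10, 3, 3)
```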
@@ -172,6 +199,85 @@ def read_trc(trc_path):
 raise ValueError(f"Error reading TRC file at {trc_path}: {e}")
 
 
+ def extract_trc_data(trc_path):
+     '''
+     Extract marker names and coordinates from a trc file.
+ 
+     INPUTS:
+     - trc_path: Path to the trc file
+ 
+     OUTPUTS:
+     - marker_names: List of marker names
+     - marker_coords: Array of marker coordinates (n_frames, t+3*n_markers)
+     '''
+ 
+     # marker names
+     with open(trc_path, 'r') as file:
+         lines = file.readlines()
+         marker_names_line = lines[3]
+         marker_names = marker_names_line.strip().split('\t')[2::3]
+ 
+     # time and marker coordinates
+     trc_data_np = np.genfromtxt(trc_path, skip_header=5, delimiter = '\t')[:,1:]
+ 
+     return marker_names, trc_data_np
+ 
+ 
+ def create_c3d_file(c3d_path, marker_names, trc_data_np):
+     '''
+     Create a c3d file from the data extracted from a trc file.
+ 
+     INPUTS:
+     - c3d_path: Path to the c3d file
+     - marker_names: List of marker names
+     - trc_data_np: Array of marker coordinates (n_frames, t+3*n_markers)
+ 
+     OUTPUTS:
+     - c3d file
+     '''
+ 
+     # retrieve frame rate
+     times = trc_data_np[:,0]
+     frame_rate = round((len(times)-1) / (times[-1] - times[0]))
+ 
+     # write c3d file
+     writer = c3d.Writer(point_rate=frame_rate, analog_rate=0, point_scale=1.0, point_units='mm', gen_scale=-1.0)
+     writer.set_point_labels(marker_names)
+     writer.set_screen_axis(X='+Z', Y='+Y')
+ 
+     for frame in trc_data_np:
+         residuals = np.full((len(marker_names), 1), 0.0)
+         cameras = np.zeros((len(marker_names), 1))
+         coords = frame[1:].reshape(-1,3)*1000
+         points = np.hstack((coords, residuals, cameras))
+         writer.add_frames([(points, np.array([]))])
+ 
+     writer.set_start_frame(0)
+     writer._set_last_frame(len(trc_data_np)-1)
+ 
+     with open(c3d_path, 'wb') as handle:
+         writer.write(handle)
+ 
+ 
+ def convert_to_c3d(trc_path):
+     '''
+     Make Visual3D compatible c3d files from a trc path
+ 
+     INPUT:
+     - trc_path: string, trc file to convert
+ 
+     OUTPUT:
+     - c3d file
+     '''
+ 
+     trc_path = str(trc_path)
+     c3d_path = trc_path.replace('.trc', '.c3d')
+     marker_names, trc_data_np = extract_trc_data(trc_path)
+     create_c3d_file(c3d_path, marker_names, trc_data_np)
+ 
+     return c3d_path
+ 
+ 
 def interpolate_zeros_nans(col, *args):
     '''
     Interpolate missing points (of value zero),
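With `make_c3d` enabled, each exported TRC can then be converted in one call. A usage sketch; the path is hypothetical, and `pip install c3d` is required for the new import above:

```python
# Usage sketch of the convert_to_c3d() function added above
c3d_path = convert_to_c3d('demo_Sports2D/demo_px_person00.trc')
print(c3d_path)  # demo_Sports2D/demo_px_person00.c3d
```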
@@ -85,10 +85,10 @@ HALPE_26 = Node("Hip", id=19, children=[
 ])
 
 
- '''COCO_133_wrist (full-body with hands and face, from AlphaPose, MMPose, etc.)
+ '''COCO_133_WRIST (full-body with hands and face, from AlphaPose, MMPose, etc.)
 https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md
 https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
- COCO_133_wrist = Node("CHip", id=None, children=[
+ COCO_133_WRIST = Node("Hip", id=None, children=[
 Node("RHip", id=12, children=[
 Node("RKnee", id=14, children=[
 Node("RAnkle", id=16, children=[
@@ -139,7 +139,7 @@ COCO_133_wrist = Node("CHip", id=None, children=[
 '''COCO_133 (full-body with hands and face, from AlphaPose, MMPose, etc.)
 https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md
 https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
- COCO_133 = Node("CHip", id=None, children=[
+ COCO_133 = Node("Hip", id=None, children=[
 Node("RHip", id=12, children=[
 Node("RKnee", id=14, children=[
 Node("RAnkle", id=16, children=[
@@ -359,9 +359,7 @@ COCO_133 = Node("CHip", id=None, children=[
 Node("Mouth17", id=87, children=[
 Node("Mouth18", id=88, children=[
 Node("Mouth19", id=89, children=[
- Node("Mouth20", id=90, children=[
- Node("Mouth21", id=91)
- ]),
+ Node("Mouth20", id=90)
 ]),
 ]),
 ]),
@@ -387,7 +385,7 @@ COCO_133 = Node("CHip", id=None, children=[
 
 '''COCO_17 (full-body without hands and feet, from OpenPose, AlphaPose, OpenPifPaf, YOLO-pose, MMPose, etc.)
 https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
- COCO_17 = Node("CHip", id=None, children=[
+ COCO_17 = Node("Hip", id=None, children=[
 Node("RHip", id=12, children=[
 Node("RKnee", id=14, children=[
 Node("RAnkle", id=16),
@@ -645,9 +643,10 @@ FACE_106 = Node("root", id=None, children=[
 ]),
 ])
 
+
 '''ANIMAL2D_17 (full-body animal)
 https://github.com/AlexTheBad/AP-10K/'''
- ANIMAL2D_17 = Node("CHip", id=4, children=[
+ ANIMAL2D_17 = Node("Hip", id=4, children=[
 Node("RHip", id=14, children=[
 Node("RKnee", id=15, children=[
 Node("RAnkle", id=16),
@@ -56,11 +56,11 @@ def test_workflow():
 
 # Default
 demo_cmd = ["sports2d", "--show_realtime_results", "False", "--show_graphs", "False"]
- subprocess.run(demo_cmd, check=True, capture_output=True, text=True)
+ subprocess.run(demo_cmd, check=True, capture_output=True, text=True, encoding='utf-8')
 
 # With no pixels to meters conversion, no multiperson, lightweight mode, detection frequency, time range and slowmo factor
 demo_cmd2 = ["sports2d", "--to_meters", "False", "--multiperson", "False", "--mode", "lightweight", "--det_frequency", "50", "--time_range", "1.2", "2.7", "--slowmo_factor", "4", "--show_realtime_results", "False", "--show_graphs", "False"]
- subprocess.run(demo_cmd2, check=True, capture_output=True, text=True)
+ subprocess.run(demo_cmd2, check=True, capture_output=True, text=True, encoding='utf-8')
 
 # With inverse kinematics, body pose_model and custom RTMO mode
 # demo_cmd3 = ["sports2d", "--do_ik", "--person_orientation", "front none left", "--pose_model", "body", "--mode", "{'pose_class':'RTMO', 'pose_model':'https://download.openmmlab.com/mmpose/v1/projects/rtmo/onnx_sdk/rtmo-m_16xb16-600e_body7-640x640-39e78cc4_20231211.zip', 'pose_input_size':[640, 640]}", "--show_realtime_results", "False", "--show_graphs", "False"]
@@ -74,4 +74,4 @@ def test_workflow():
 with open(cli_config_path, 'w') as f: toml.dump(config_dict, f)
 
 demo_cmd4 = ["sports2d", "--config", str(cli_config_path), "--show_realtime_results", "False", "--show_graphs", "False"]
- subprocess.run(demo_cmd4, check=True, capture_output=True, text=True)
+ subprocess.run(demo_cmd4, check=True, capture_output=True, text=True, encoding='utf-8')