kinemotion 0.45.1__py3-none-any.whl → 0.47.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic; see the registry's advisory page for more details.

kinemotion/api.py CHANGED
@@ -39,7 +39,7 @@ from .core.pipeline_utils import (
39
39
  )
40
40
  from .core.pose import PoseTracker
41
41
  from .core.quality import assess_jump_quality
42
- from .core.timing import PerformanceTimer
42
+ from .core.timing import NULL_TIMER, PerformanceTimer, Timer
43
43
  from .core.video_io import VideoProcessor
44
44
  from .dropjump.analysis import (
45
45
  detect_ground_contact,
@@ -86,7 +86,7 @@ def _generate_debug_video(
86
86
  smoothed_landmarks: list,
87
87
  contact_states: list,
88
88
  metrics: DropJumpMetrics,
89
- timer: PerformanceTimer | None,
89
+ timer: Timer | None,
90
90
  verbose: bool,
91
91
  ) -> None:
92
92
  """Generate debug video with overlay."""
@@ -96,6 +96,7 @@ def _generate_debug_video(
96
96
  if not frames:
97
97
  return
98
98
 
99
+ timer = timer or NULL_TIMER
99
100
  debug_h, debug_w = frames[0].shape[:2]
100
101
 
101
102
  if video_fps > 30:
@@ -129,11 +130,7 @@ def _generate_debug_video(
129
130
  timer=timer,
130
131
  )
131
132
 
132
- if timer:
133
- with timer.measure("debug_video_generation"):
134
- with renderer_context as renderer:
135
- _render_frames(renderer)
136
- else:
133
+ with timer.measure("debug_video_generation"):
137
134
  with renderer_context as renderer:
138
135
  _render_frames(renderer)
139
136
 
@@ -154,7 +151,7 @@ def process_dropjump_video(
154
151
  detection_confidence: float | None = None,
155
152
  tracking_confidence: float | None = None,
156
153
  verbose: bool = False,
157
- timer: PerformanceTimer | None = None,
154
+ timer: Timer | None = None,
158
155
  pose_tracker: "PoseTracker | None" = None,
159
156
  ) -> DropJumpMetrics:
160
157
  """
@@ -175,7 +172,7 @@ def process_dropjump_video(
175
172
  detection_confidence: Optional override for pose detection confidence
176
173
  tracking_confidence: Optional override for pose tracking confidence
177
174
  verbose: Print processing details
178
- timer: Optional PerformanceTimer for measuring operations
175
+ timer: Optional Timer for measuring operations
179
176
  pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
180
177
 
181
178
  Returns:
@@ -263,6 +260,7 @@ def process_dropjump_video(
263
260
  visibilities=visibilities,
264
261
  window_length=params.smoothing_window,
265
262
  polyorder=params.polyorder,
263
+ timer=timer,
266
264
  )
267
265
 
268
266
  if verbose:
@@ -277,6 +275,7 @@ def process_dropjump_video(
277
275
  smoothing_window=params.smoothing_window,
278
276
  polyorder=params.polyorder,
279
277
  use_curvature=params.use_curvature,
278
+ timer=timer,
280
279
  )
281
280
 
282
281
  if verbose:
@@ -391,13 +390,7 @@ def process_dropjump_video(
391
390
  metrics.result_metadata = result_metadata
392
391
 
393
392
  if json_output:
394
- if timer:
395
- with timer.measure("json_serialization"):
396
- output_path = Path(json_output)
397
- metrics_dict = metrics.to_dict()
398
- json_str = json.dumps(metrics_dict, indent=2)
399
- output_path.write_text(json_str)
400
- else:
393
+ with timer.measure("json_serialization"):
401
394
  output_path = Path(json_output)
402
395
  metrics_dict = metrics.to_dict()
403
396
  json_str = json.dumps(metrics_dict, indent=2)
@@ -528,7 +521,7 @@ def process_cmj_video(
528
521
  detection_confidence: float | None = None,
529
522
  tracking_confidence: float | None = None,
530
523
  verbose: bool = False,
531
- timer: PerformanceTimer | None = None,
524
+ timer: Timer | None = None,
532
525
  pose_tracker: "PoseTracker | None" = None,
533
526
  ) -> CMJMetrics:
534
527
  """
@@ -550,7 +543,7 @@ def process_cmj_video(
550
543
  detection_confidence: Optional override for pose detection confidence
551
544
  tracking_confidence: Optional override for pose tracking confidence
552
545
  verbose: Print processing details
553
- timer: Optional PerformanceTimer for measuring operations
546
+ timer: Optional Timer for measuring operations
554
547
  pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
555
548
 
556
549
  Returns:
@@ -644,6 +637,7 @@ def process_cmj_video(
644
637
  window_length=params.smoothing_window,
645
638
  polyorder=params.polyorder,
646
639
  landing_positions=foot_positions,
640
+ timer=timer,
647
641
  )
648
642
 
649
643
  if phases is None:
@@ -738,23 +732,7 @@ def process_cmj_video(
738
732
  step = max(1, int(video.fps / 30.0))
739
733
  debug_fps = video.fps / step
740
734
 
741
- if timer:
742
- with timer.measure("debug_video_generation"):
743
- with CMJDebugOverlayRenderer(
744
- output_video,
745
- debug_w,
746
- debug_h,
747
- debug_w,
748
- debug_h,
749
- debug_fps,
750
- timer=timer,
751
- ) as renderer:
752
- for frame, idx in zip(frames, frame_indices, strict=True):
753
- annotated = renderer.render_frame(
754
- frame, smoothed_landmarks[idx], idx, metrics
755
- )
756
- renderer.write_frame(annotated)
757
- else:
735
+ with timer.measure("debug_video_generation"):
758
736
  with CMJDebugOverlayRenderer(
759
737
  output_video,
760
738
  debug_w,
@@ -799,13 +777,7 @@ def process_cmj_video(
799
777
  metrics.result_metadata = result_metadata
800
778
 
801
779
  if json_output:
802
- if timer:
803
- with timer.measure("json_serialization"):
804
- output_path = Path(json_output)
805
- metrics_dict = metrics.to_dict()
806
- json_str = json.dumps(metrics_dict, indent=2)
807
- output_path.write_text(json_str)
808
- else:
780
+ with timer.measure("json_serialization"):
809
781
  output_path = Path(json_output)
810
782
  metrics_dict = metrics.to_dict()
811
783
  json_str = json.dumps(metrics_dict, indent=2)
@@ -7,6 +7,7 @@ from scipy.signal import savgol_filter
7
7
 
8
8
  from ..core.experimental import unused
9
9
  from ..core.smoothing import compute_acceleration_from_derivative
10
+ from ..core.timing import NULL_TIMER, Timer
10
11
 
11
12
 
12
13
  def compute_signed_velocity(
@@ -545,6 +546,7 @@ def detect_cmj_phases(
545
546
  window_length: int = 5,
546
547
  polyorder: int = 2,
547
548
  landing_positions: np.ndarray | None = None,
549
+ timer: Timer | None = None,
548
550
  ) -> tuple[float | None, float, float, float] | None:
549
551
  """
550
552
  Detect all phases of a counter movement jump using a simplified, robust approach.
@@ -562,18 +564,22 @@ def detect_cmj_phases(
562
564
  polyorder: Polynomial order for Savitzky-Golay filter
563
565
  landing_positions: Optional array of positions for landing detection
564
566
  (e.g., Feet). If None, uses `positions` (Hips) for landing too.
567
+ timer: Optional Timer for measuring operations
565
568
 
566
569
  Returns:
567
570
  Tuple of (standing_end_frame, lowest_point_frame, takeoff_frame, landing_frame)
568
571
  with fractional precision, or None if phases cannot be detected.
569
572
  """
573
+ timer = timer or NULL_TIMER
574
+
570
575
  # Compute SIGNED velocities and accelerations for primary signal (Hips)
571
- velocities = compute_signed_velocity(
572
- positions, window_length=window_length, polyorder=polyorder
573
- )
574
- accelerations = compute_acceleration_from_derivative(
575
- positions, window_length=window_length, polyorder=polyorder
576
- )
576
+ with timer.measure("cmj_compute_derivatives"):
577
+ velocities = compute_signed_velocity(
578
+ positions, window_length=window_length, polyorder=polyorder
579
+ )
580
+ accelerations = compute_acceleration_from_derivative(
581
+ positions, window_length=window_length, polyorder=polyorder
582
+ )
577
583
 
578
584
  # Step 1: Find peak height (global minimum y = highest point in frame)
579
585
  peak_height_frame = int(np.argmin(positions))
@@ -581,34 +587,42 @@ def detect_cmj_phases(
581
587
  return None # Peak too early, invalid
582
588
 
583
589
  # Step 2-4: Find all phases using helper functions
584
- takeoff_frame = find_takeoff_frame(velocities, peak_height_frame, fps)
585
- lowest_point = find_lowest_frame(velocities, positions, takeoff_frame, fps)
590
+ with timer.measure("cmj_find_takeoff"):
591
+ takeoff_frame = find_takeoff_frame(velocities, peak_height_frame, fps)
592
+
593
+ with timer.measure("cmj_find_lowest_point"):
594
+ lowest_point = find_lowest_frame(velocities, positions, takeoff_frame, fps)
586
595
 
587
596
  # Determine landing frame
588
- if landing_positions is not None:
589
- # Use specific landing signal (Feet) for landing detection
590
- landing_velocities = compute_signed_velocity(
591
- landing_positions, window_length=window_length, polyorder=polyorder
592
- )
593
- landing_accelerations = compute_acceleration_from_derivative(
594
- landing_positions, window_length=window_length, polyorder=polyorder
595
- )
596
- # We still reference peak_height_frame from Hips, as Feet peak
597
- # might be different/noisy but generally they align in time.
598
- landing_frame = find_landing_frame(
599
- landing_accelerations,
600
- landing_velocities,
601
- peak_height_frame,
602
- fps,
603
- )
604
- else:
605
- # Use primary signal (Hips)
606
- landing_frame = find_landing_frame(
607
- accelerations,
608
- velocities,
609
- peak_height_frame,
610
- fps,
597
+ with timer.measure("cmj_find_landing"):
598
+ if landing_positions is not None:
599
+ # Use specific landing signal (Feet) for landing detection
600
+ landing_velocities = compute_signed_velocity(
601
+ landing_positions, window_length=window_length, polyorder=polyorder
602
+ )
603
+ landing_accelerations = compute_acceleration_from_derivative(
604
+ landing_positions, window_length=window_length, polyorder=polyorder
605
+ )
606
+ # We still reference peak_height_frame from Hips, as Feet peak
607
+ # might be different/noisy but generally they align in time.
608
+ landing_frame = find_landing_frame(
609
+ landing_accelerations,
610
+ landing_velocities,
611
+ peak_height_frame,
612
+ fps,
613
+ )
614
+ else:
615
+ # Use primary signal (Hips)
616
+ landing_frame = find_landing_frame(
617
+ accelerations,
618
+ velocities,
619
+ peak_height_frame,
620
+ fps,
621
+ )
622
+
623
+ with timer.measure("cmj_find_standing_end"):
624
+ standing_end = find_standing_end(
625
+ velocities, lowest_point, positions, accelerations
611
626
  )
612
627
 
613
- standing_end = find_standing_end(velocities, lowest_point, positions, accelerations)
614
628
  return (standing_end, lowest_point, takeoff_frame, landing_frame)
@@ -22,7 +22,14 @@ from .smoothing import (
22
22
  smooth_landmarks,
23
23
  smooth_landmarks_advanced,
24
24
  )
25
- from .timing import PerformanceTimer
25
+ from .timing import (
26
+ NULL_TIMER,
27
+ CompositeTimer,
28
+ NullTimer,
29
+ OpenTelemetryTimer,
30
+ PerformanceTimer,
31
+ Timer,
32
+ )
26
33
  from .video_io import VideoProcessor
27
34
 
28
35
  __all__ = [
@@ -49,6 +56,11 @@ __all__ = [
49
56
  "calculate_position_stability",
50
57
  # Timing
51
58
  "PerformanceTimer",
59
+ "Timer",
60
+ "NullTimer",
61
+ "NULL_TIMER",
62
+ "CompositeTimer",
63
+ "OpenTelemetryTimer",
52
64
  # Video I/O
53
65
  "VideoProcessor",
54
66
  ]
@@ -10,7 +10,7 @@ from typing import Self
10
10
  import cv2
11
11
  import numpy as np
12
12
 
13
- from .timing import PerformanceTimer
13
+ from .timing import NULL_TIMER, Timer
14
14
 
15
15
 
16
16
  def create_video_writer(
@@ -107,7 +107,7 @@ class BaseDebugOverlayRenderer:
107
107
  display_width: int,
108
108
  display_height: int,
109
109
  fps: float,
110
- timer: PerformanceTimer | None = None,
110
+ timer: Timer | None = None,
111
111
  ):
112
112
  """
113
113
  Initialize overlay renderer.
@@ -119,12 +119,12 @@ class BaseDebugOverlayRenderer:
119
119
  display_width: Display width (considering SAR)
120
120
  display_height: Display height (considering SAR)
121
121
  fps: Frames per second
122
- timer: Optional PerformanceTimer for measuring operations
122
+ timer: Optional Timer for measuring operations
123
123
  """
124
124
  self.output_path = output_path
125
125
  self.width = width
126
126
  self.height = height
127
- self.timer = timer
127
+ self.timer = timer or NULL_TIMER
128
128
 
129
129
  # Optimize debug video resolution: Cap max dimension to 720p
130
130
  # Reduces software encoding time on single-core Cloud Run instances.
@@ -166,26 +166,14 @@ class BaseDebugOverlayRenderer:
166
166
 
167
167
  # Resize to display dimensions if needed (to handle SAR)
168
168
  if self.needs_resize:
169
- if self.timer:
170
- with self.timer.measure("debug_video_resize"):
171
- frame = cv2.resize(
172
- frame,
173
- (self.display_width, self.display_height),
174
- interpolation=cv2.INTER_LINEAR,
175
- )
176
- else:
169
+ with self.timer.measure("debug_video_resize"):
177
170
  frame = cv2.resize(
178
171
  frame,
179
172
  (self.display_width, self.display_height),
180
173
  interpolation=cv2.INTER_LINEAR,
181
174
  )
182
175
 
183
- if self.timer:
184
- with self.timer.measure("debug_video_write"):
185
- write_overlay_frame(
186
- self.writer, frame, self.display_width, self.display_height
187
- )
188
- else:
176
+ with self.timer.measure("debug_video_write"):
189
177
  write_overlay_frame(
190
178
  self.writer, frame, self.display_width, self.display_height
191
179
  )
@@ -12,7 +12,7 @@ from ..dropjump.analysis import compute_average_foot_position
12
12
  from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
13
13
  from .pose import PoseTracker
14
14
  from .smoothing import smooth_landmarks, smooth_landmarks_advanced
15
- from .timing import PerformanceTimer
15
+ from .timing import NULL_TIMER, Timer
16
16
  from .video_io import VideoProcessor
17
17
 
18
18
  TResult = TypeVar("TResult")
@@ -182,7 +182,7 @@ def process_all_frames(
182
182
  video: VideoProcessor,
183
183
  tracker: PoseTracker,
184
184
  verbose: bool,
185
- timer: PerformanceTimer | None = None,
185
+ timer: Timer | None = None,
186
186
  close_tracker: bool = True,
187
187
  target_debug_fps: float = 30.0,
188
188
  max_debug_dim: int = 720,
@@ -193,7 +193,7 @@ def process_all_frames(
193
193
  video: Video processor to read frames from
194
194
  tracker: Pose tracker for landmark detection
195
195
  verbose: Print progress messages
196
- timer: Optional PerformanceTimer for measuring operations
196
+ timer: Optional Timer for measuring operations
197
197
  close_tracker: Whether to close the tracker after processing (default: True)
198
198
  target_debug_fps: Target FPS for debug video (default: 30.0)
199
199
  max_debug_dim: Max dimension for debug video frames (default: 720)
@@ -207,6 +207,7 @@ def process_all_frames(
207
207
  if verbose:
208
208
  print("Tracking pose landmarks...")
209
209
 
210
+ timer = timer or NULL_TIMER
210
211
  step = max(1, int(video.fps / target_debug_fps))
211
212
 
212
213
  w, h = video.display_width, video.display_height
@@ -218,12 +219,7 @@ def process_all_frames(
218
219
  debug_h = int(h * scale) // 2 * 2
219
220
  should_resize = (debug_w != video.width) or (debug_h != video.height)
220
221
 
221
- if timer:
222
- with timer.measure("pose_tracking"):
223
- debug_frames, landmarks_sequence, frame_indices = _process_frames_loop(
224
- video, tracker, step, should_resize, debug_w, debug_h
225
- )
226
- else:
222
+ with timer.measure("pose_tracking"):
227
223
  debug_frames, landmarks_sequence, frame_indices = _process_frames_loop(
228
224
  video, tracker, step, should_resize, debug_w, debug_h
229
225
  )
@@ -241,7 +237,7 @@ def apply_smoothing(
241
237
  landmarks_sequence: list,
242
238
  params: AnalysisParameters,
243
239
  verbose: bool,
244
- timer: PerformanceTimer | None = None,
240
+ timer: Timer | None = None,
245
241
  ) -> list:
246
242
  """Apply smoothing to landmark sequence with auto-tuned parameters.
247
243
 
@@ -249,11 +245,12 @@ def apply_smoothing(
249
245
  landmarks_sequence: Sequence of landmarks from all frames
250
246
  params: Auto-tuned parameters containing smoothing settings
251
247
  verbose: Print progress messages
252
- timer: Optional PerformanceTimer for measuring operations
248
+ timer: Optional Timer for measuring operations
253
249
 
254
250
  Returns:
255
251
  Smoothed landmarks sequence
256
252
  """
253
+ timer = timer or NULL_TIMER
257
254
  use_advanced = params.outlier_rejection or params.bilateral_filter
258
255
 
259
256
  if verbose:
@@ -273,6 +270,7 @@ def apply_smoothing(
273
270
  polyorder=params.polyorder,
274
271
  use_outlier_rejection=params.outlier_rejection,
275
272
  use_bilateral=params.bilateral_filter,
273
+ timer=timer,
276
274
  )
277
275
  else:
278
276
  return smooth_landmarks(
@@ -281,10 +279,8 @@ def apply_smoothing(
281
279
  polyorder=params.polyorder,
282
280
  )
283
281
 
284
- if timer:
285
- with timer.measure("smoothing"):
286
- return _run_smoothing()
287
- return _run_smoothing()
282
+ with timer.measure("smoothing"):
283
+ return _run_smoothing()
288
284
 
289
285
 
290
286
  def calculate_foot_visibility(frame_landmarks: dict) -> float:
@@ -341,7 +337,7 @@ def convert_timer_to_stage_names(
341
337
  """Convert timer metric names to human-readable stage names.
342
338
 
343
339
  Args:
344
- timer_metrics: Dictionary from PerformanceTimer.get_metrics()
340
+ timer_metrics: Dictionary from Timer.get_metrics()
345
341
 
346
342
  Returns:
347
343
  Dictionary with human-readable stage names as keys
@@ -366,6 +362,24 @@ def convert_timer_to_stage_names(
366
362
  "debug_video_copy": "Debug video frame copy",
367
363
  "debug_video_draw": "Debug video drawing",
368
364
  "debug_video_write": "Debug video encoding",
365
+ # Granular metrics
366
+ "frame_conversion": "Frame BGR-RGB conversion",
367
+ "mediapipe_inference": "MediaPipe inference",
368
+ "landmark_extraction": "Landmark extraction",
369
+ "smoothing_outlier_rejection": "Smoothing (outlier rejection)",
370
+ "smoothing_bilateral": "Smoothing (bilateral)",
371
+ "smoothing_savgol": "Smoothing (Savitzky-Golay)",
372
+ "cmj_compute_derivatives": "CMJ derivatives computation",
373
+ "cmj_find_takeoff": "CMJ takeoff detection",
374
+ "cmj_find_lowest_point": "CMJ lowest point detection",
375
+ "cmj_find_landing": "CMJ landing detection",
376
+ "cmj_find_standing_end": "CMJ standing end detection",
377
+ "dj_compute_velocity": "DJ velocity computation",
378
+ "dj_find_contact_frames": "DJ contact frame search",
379
+ "dj_detect_drop_start": "DJ drop start detection",
380
+ "dj_find_phases": "DJ phase finding",
381
+ "dj_identify_contact": "DJ contact identification",
382
+ "dj_analyze_flight": "DJ flight analysis",
369
383
  }
370
384
  return {mapping.get(k, k): v for k, v in timer_metrics.items()}
371
385
 
kinemotion/core/pose.py CHANGED
@@ -4,7 +4,7 @@ import cv2
4
4
  import mediapipe as mp
5
5
  import numpy as np
6
6
 
7
- from .timing import PerformanceTimer
7
+ from .timing import NULL_TIMER, Timer
8
8
 
9
9
 
10
10
  class PoseTracker:
@@ -14,7 +14,7 @@ class PoseTracker:
14
14
  self,
15
15
  min_detection_confidence: float = 0.5,
16
16
  min_tracking_confidence: float = 0.5,
17
- timer: PerformanceTimer | None = None,
17
+ timer: Timer | None = None,
18
18
  ) -> None:
19
19
  """
20
20
  Initialize the pose tracker.
@@ -22,9 +22,9 @@ class PoseTracker:
22
22
  Args:
23
23
  min_detection_confidence: Minimum confidence for pose detection
24
24
  min_tracking_confidence: Minimum confidence for pose tracking
25
- timer: Optional PerformanceTimer for measuring operations
25
+ timer: Optional Timer for measuring operations
26
26
  """
27
- self.timer = timer
27
+ self.timer = timer or NULL_TIMER
28
28
  self.mp_pose = mp.solutions.pose
29
29
  self.pose = self.mp_pose.Pose(
30
30
  static_image_mode=False, # Use tracking mode for better performance
@@ -47,42 +47,41 @@ class PoseTracker:
47
47
  or None if no pose detected. Coordinates are normalized (0-1).
48
48
  """
49
49
  # Convert BGR to RGB
50
- rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
50
+ with self.timer.measure("frame_conversion"):
51
+ rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
51
52
 
52
53
  # Process the frame
53
- if self.timer:
54
- with self.timer.measure("mediapipe_inference"):
55
- results = self.pose.process(rgb_frame)
56
- else:
54
+ with self.timer.measure("mediapipe_inference"):
57
55
  results = self.pose.process(rgb_frame)
58
56
 
59
57
  if not results.pose_landmarks:
60
58
  return None
61
59
 
62
60
  # Extract key landmarks for feet tracking and CoM estimation
63
- landmarks = {}
64
- landmark_names = {
65
- # Feet landmarks
66
- self.mp_pose.PoseLandmark.LEFT_ANKLE: "left_ankle",
67
- self.mp_pose.PoseLandmark.RIGHT_ANKLE: "right_ankle",
68
- self.mp_pose.PoseLandmark.LEFT_HEEL: "left_heel",
69
- self.mp_pose.PoseLandmark.RIGHT_HEEL: "right_heel",
70
- self.mp_pose.PoseLandmark.LEFT_FOOT_INDEX: "left_foot_index",
71
- self.mp_pose.PoseLandmark.RIGHT_FOOT_INDEX: "right_foot_index",
72
- # Torso landmarks for CoM estimation
73
- self.mp_pose.PoseLandmark.LEFT_HIP: "left_hip",
74
- self.mp_pose.PoseLandmark.RIGHT_HIP: "right_hip",
75
- self.mp_pose.PoseLandmark.LEFT_SHOULDER: "left_shoulder",
76
- self.mp_pose.PoseLandmark.RIGHT_SHOULDER: "right_shoulder",
77
- # Additional landmarks for better CoM estimation
78
- self.mp_pose.PoseLandmark.NOSE: "nose",
79
- self.mp_pose.PoseLandmark.LEFT_KNEE: "left_knee",
80
- self.mp_pose.PoseLandmark.RIGHT_KNEE: "right_knee",
81
- }
82
-
83
- for landmark_id, name in landmark_names.items():
84
- lm = results.pose_landmarks.landmark[landmark_id]
85
- landmarks[name] = (lm.x, lm.y, lm.visibility)
61
+ with self.timer.measure("landmark_extraction"):
62
+ landmarks = {}
63
+ landmark_names = {
64
+ # Feet landmarks
65
+ self.mp_pose.PoseLandmark.LEFT_ANKLE: "left_ankle",
66
+ self.mp_pose.PoseLandmark.RIGHT_ANKLE: "right_ankle",
67
+ self.mp_pose.PoseLandmark.LEFT_HEEL: "left_heel",
68
+ self.mp_pose.PoseLandmark.RIGHT_HEEL: "right_heel",
69
+ self.mp_pose.PoseLandmark.LEFT_FOOT_INDEX: "left_foot_index",
70
+ self.mp_pose.PoseLandmark.RIGHT_FOOT_INDEX: "right_foot_index",
71
+ # Torso landmarks for CoM estimation
72
+ self.mp_pose.PoseLandmark.LEFT_HIP: "left_hip",
73
+ self.mp_pose.PoseLandmark.RIGHT_HIP: "right_hip",
74
+ self.mp_pose.PoseLandmark.LEFT_SHOULDER: "left_shoulder",
75
+ self.mp_pose.PoseLandmark.RIGHT_SHOULDER: "right_shoulder",
76
+ # Additional landmarks for better CoM estimation
77
+ self.mp_pose.PoseLandmark.NOSE: "nose",
78
+ self.mp_pose.PoseLandmark.LEFT_KNEE: "left_knee",
79
+ self.mp_pose.PoseLandmark.RIGHT_KNEE: "right_knee",
80
+ }
81
+
82
+ for landmark_id, name in landmark_names.items():
83
+ lm = results.pose_landmarks.landmark[landmark_id]
84
+ landmarks[name] = (lm.x, lm.y, lm.visibility)
86
85
 
87
86
  return landmarks
88
87