kinemotion 0.46.0__py3-none-any.whl → 0.47.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


kinemotion/api.py CHANGED
@@ -39,7 +39,7 @@ from .core.pipeline_utils import (
 )
 from .core.pose import PoseTracker
 from .core.quality import assess_jump_quality
-from .core.timing import PerformanceTimer
+from .core.timing import NULL_TIMER, PerformanceTimer, Timer
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
     detect_ground_contact,
@@ -86,7 +86,7 @@ def _generate_debug_video(
     smoothed_landmarks: list,
     contact_states: list,
     metrics: DropJumpMetrics,
-    timer: PerformanceTimer | None,
+    timer: Timer | None,
     verbose: bool,
 ) -> None:
     """Generate debug video with overlay."""
@@ -96,6 +96,7 @@ def _generate_debug_video(
     if not frames:
         return
 
+    timer = timer or NULL_TIMER
     debug_h, debug_w = frames[0].shape[:2]
 
     if video_fps > 30:
@@ -129,11 +130,7 @@ def _generate_debug_video(
         timer=timer,
     )
 
-    if timer:
-        with timer.measure("debug_video_generation"):
-            with renderer_context as renderer:
-                _render_frames(renderer)
-    else:
+    with timer.measure("debug_video_generation"):
         with renderer_context as renderer:
             _render_frames(renderer)
 
@@ -154,7 +151,7 @@ def process_dropjump_video(
     detection_confidence: float | None = None,
     tracking_confidence: float | None = None,
     verbose: bool = False,
-    timer: PerformanceTimer | None = None,
+    timer: Timer | None = None,
     pose_tracker: "PoseTracker | None" = None,
 ) -> DropJumpMetrics:
     """
@@ -175,7 +172,7 @@ def process_dropjump_video(
         detection_confidence: Optional override for pose detection confidence
         tracking_confidence: Optional override for pose tracking confidence
         verbose: Print processing details
-        timer: Optional PerformanceTimer for measuring operations
+        timer: Optional Timer for measuring operations
         pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
 
     Returns:
@@ -263,6 +260,7 @@ def process_dropjump_video(
         visibilities=visibilities,
         window_length=params.smoothing_window,
         polyorder=params.polyorder,
+        timer=timer,
     )
 
     if verbose:
@@ -277,6 +275,7 @@ def process_dropjump_video(
         smoothing_window=params.smoothing_window,
         polyorder=params.polyorder,
         use_curvature=params.use_curvature,
+        timer=timer,
     )
 
     if verbose:
@@ -391,13 +390,7 @@ def process_dropjump_video(
     metrics.result_metadata = result_metadata
 
     if json_output:
-        if timer:
-            with timer.measure("json_serialization"):
-                output_path = Path(json_output)
-                metrics_dict = metrics.to_dict()
-                json_str = json.dumps(metrics_dict, indent=2)
-                output_path.write_text(json_str)
-        else:
+        with timer.measure("json_serialization"):
             output_path = Path(json_output)
             metrics_dict = metrics.to_dict()
             json_str = json.dumps(metrics_dict, indent=2)
@@ -528,7 +521,7 @@ def process_cmj_video(
     detection_confidence: float | None = None,
     tracking_confidence: float | None = None,
     verbose: bool = False,
-    timer: PerformanceTimer | None = None,
+    timer: Timer | None = None,
     pose_tracker: "PoseTracker | None" = None,
 ) -> CMJMetrics:
     """
@@ -550,7 +543,7 @@ def process_cmj_video(
         detection_confidence: Optional override for pose detection confidence
         tracking_confidence: Optional override for pose tracking confidence
         verbose: Print processing details
-        timer: Optional PerformanceTimer for measuring operations
+        timer: Optional Timer for measuring operations
         pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
 
     Returns:
@@ -644,6 +637,7 @@ def process_cmj_video(
         window_length=params.smoothing_window,
         polyorder=params.polyorder,
         landing_positions=foot_positions,
+        timer=timer,
    )
 
     if phases is None:
@@ -738,23 +732,7 @@ def process_cmj_video(
         step = max(1, int(video.fps / 30.0))
         debug_fps = video.fps / step
 
-        if timer:
-            with timer.measure("debug_video_generation"):
-                with CMJDebugOverlayRenderer(
-                    output_video,
-                    debug_w,
-                    debug_h,
-                    debug_w,
-                    debug_h,
-                    debug_fps,
-                    timer=timer,
-                ) as renderer:
-                    for frame, idx in zip(frames, frame_indices, strict=True):
-                        annotated = renderer.render_frame(
-                            frame, smoothed_landmarks[idx], idx, metrics
-                        )
-                        renderer.write_frame(annotated)
-        else:
+        with timer.measure("debug_video_generation"):
             with CMJDebugOverlayRenderer(
                 output_video,
                 debug_w,
@@ -799,13 +777,7 @@ def process_cmj_video(
     metrics.result_metadata = result_metadata
 
     if json_output:
-        if timer:
-            with timer.measure("json_serialization"):
-                output_path = Path(json_output)
-                metrics_dict = metrics.to_dict()
-                json_str = json.dumps(metrics_dict, indent=2)
-                output_path.write_text(json_str)
-        else:
+        with timer.measure("json_serialization"):
             output_path = Path(json_output)
             metrics_dict = metrics.to_dict()
             json_str = json.dumps(metrics_dict, indent=2)
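
Since `process_dropjump_video` and `process_cmj_video` now accept any object satisfying the `Timer` protocol, opting into profiling is a one-argument change. A minimal sketch, assuming the video path is the first positional argument and that `PerformanceTimer()` takes no constructor arguments (neither detail is shown in this diff):

    from kinemotion.api import process_dropjump_video
    from kinemotion.core.timing import PerformanceTimer

    timer = PerformanceTimer()  # collects per-stage wall-clock durations
    metrics = process_dropjump_video("dropjump.mp4", timer=timer, verbose=True)
    print(timer.get_metrics())  # e.g. {"pose_tracking": ..., "smoothing": ...}

Passing no timer now falls back to the NULL_TIMER singleton internally instead of branching on None at every call site.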

kinemotion/cmj/analysis.py CHANGED
@@ -7,6 +7,7 @@ from scipy.signal import savgol_filter
 
 from ..core.experimental import unused
 from ..core.smoothing import compute_acceleration_from_derivative
+from ..core.timing import NULL_TIMER, Timer
 
 
 def compute_signed_velocity(
@@ -545,6 +546,7 @@ def detect_cmj_phases(
     window_length: int = 5,
     polyorder: int = 2,
     landing_positions: np.ndarray | None = None,
+    timer: Timer | None = None,
 ) -> tuple[float | None, float, float, float] | None:
     """
     Detect all phases of a counter movement jump using a simplified, robust approach.
@@ -562,18 +564,22 @@ def detect_cmj_phases(
         polyorder: Polynomial order for Savitzky-Golay filter
         landing_positions: Optional array of positions for landing detection
             (e.g., Feet). If None, uses `positions` (Hips) for landing too.
+        timer: Optional Timer for measuring operations
 
     Returns:
         Tuple of (standing_end_frame, lowest_point_frame, takeoff_frame, landing_frame)
         with fractional precision, or None if phases cannot be detected.
     """
+    timer = timer or NULL_TIMER
+
     # Compute SIGNED velocities and accelerations for primary signal (Hips)
-    velocities = compute_signed_velocity(
-        positions, window_length=window_length, polyorder=polyorder
-    )
-    accelerations = compute_acceleration_from_derivative(
-        positions, window_length=window_length, polyorder=polyorder
-    )
+    with timer.measure("cmj_compute_derivatives"):
+        velocities = compute_signed_velocity(
+            positions, window_length=window_length, polyorder=polyorder
+        )
+        accelerations = compute_acceleration_from_derivative(
+            positions, window_length=window_length, polyorder=polyorder
+        )
 
     # Step 1: Find peak height (global minimum y = highest point in frame)
     peak_height_frame = int(np.argmin(positions))
@@ -581,34 +587,42 @@ def detect_cmj_phases(
         return None  # Peak too early, invalid
 
     # Step 2-4: Find all phases using helper functions
-    takeoff_frame = find_takeoff_frame(velocities, peak_height_frame, fps)
-    lowest_point = find_lowest_frame(velocities, positions, takeoff_frame, fps)
+    with timer.measure("cmj_find_takeoff"):
+        takeoff_frame = find_takeoff_frame(velocities, peak_height_frame, fps)
+
+    with timer.measure("cmj_find_lowest_point"):
+        lowest_point = find_lowest_frame(velocities, positions, takeoff_frame, fps)
 
     # Determine landing frame
-    if landing_positions is not None:
-        # Use specific landing signal (Feet) for landing detection
-        landing_velocities = compute_signed_velocity(
-            landing_positions, window_length=window_length, polyorder=polyorder
-        )
-        landing_accelerations = compute_acceleration_from_derivative(
-            landing_positions, window_length=window_length, polyorder=polyorder
-        )
-        # We still reference peak_height_frame from Hips, as Feet peak
-        # might be different/noisy but generally they align in time.
-        landing_frame = find_landing_frame(
-            landing_accelerations,
-            landing_velocities,
-            peak_height_frame,
-            fps,
-        )
-    else:
-        # Use primary signal (Hips)
-        landing_frame = find_landing_frame(
-            accelerations,
-            velocities,
-            peak_height_frame,
-            fps,
+    with timer.measure("cmj_find_landing"):
+        if landing_positions is not None:
+            # Use specific landing signal (Feet) for landing detection
+            landing_velocities = compute_signed_velocity(
+                landing_positions, window_length=window_length, polyorder=polyorder
+            )
+            landing_accelerations = compute_acceleration_from_derivative(
+                landing_positions, window_length=window_length, polyorder=polyorder
+            )
+            # We still reference peak_height_frame from Hips, as Feet peak
+            # might be different/noisy but generally they align in time.
+            landing_frame = find_landing_frame(
+                landing_accelerations,
+                landing_velocities,
+                peak_height_frame,
+                fps,
+            )
+        else:
+            # Use primary signal (Hips)
+            landing_frame = find_landing_frame(
+                accelerations,
+                velocities,
+                peak_height_frame,
+                fps,
+            )
+
+    with timer.measure("cmj_find_standing_end"):
+        standing_end = find_standing_end(
+            velocities, lowest_point, positions, accelerations
         )
 
-    standing_end = find_standing_end(velocities, lowest_point, positions, accelerations)
     return (standing_end, lowest_point, takeoff_frame, landing_frame)

kinemotion/core/__init__.py CHANGED
@@ -22,7 +22,14 @@ from .smoothing import (
     smooth_landmarks,
     smooth_landmarks_advanced,
 )
-from .timing import NULL_TIMER, NullTimer, PerformanceTimer, Timer
+from .timing import (
+    NULL_TIMER,
+    CompositeTimer,
+    NullTimer,
+    OpenTelemetryTimer,
+    PerformanceTimer,
+    Timer,
+)
 from .video_io import VideoProcessor
 
 __all__ = [
@@ -52,6 +59,8 @@ __all__ = [
     "Timer",
     "NullTimer",
     "NULL_TIMER",
+    "CompositeTimer",
+    "OpenTelemetryTimer",
     # Video I/O
     "VideoProcessor",
 ]
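
Both new timers are re-exported from `kinemotion.core`, so downstream code needs only one import path. A short sketch of the surface this hunk exposes:

    from kinemotion.core import (
        NULL_TIMER,          # shared no-op singleton
        CompositeTimer,      # fans measurements out to several timers
        OpenTelemetryTimer,  # spans via opentelemetry-api
        PerformanceTimer,    # local wall-clock metrics
        Timer,               # the Protocol they all satisfy
    )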

kinemotion/core/debug_overlay_utils.py CHANGED
@@ -10,7 +10,7 @@ from typing import Self
 import cv2
 import numpy as np
 
-from .timing import PerformanceTimer
+from .timing import NULL_TIMER, Timer
 
 
 def create_video_writer(
@@ -107,7 +107,7 @@ class BaseDebugOverlayRenderer:
         display_width: int,
         display_height: int,
         fps: float,
-        timer: PerformanceTimer | None = None,
+        timer: Timer | None = None,
     ):
         """
         Initialize overlay renderer.
@@ -119,12 +119,12 @@ class BaseDebugOverlayRenderer:
             display_width: Display width (considering SAR)
             display_height: Display height (considering SAR)
             fps: Frames per second
-            timer: Optional PerformanceTimer for measuring operations
+            timer: Optional Timer for measuring operations
         """
         self.output_path = output_path
         self.width = width
         self.height = height
-        self.timer = timer
+        self.timer = timer or NULL_TIMER
 
         # Optimize debug video resolution: Cap max dimension to 720p
         # Reduces software encoding time on single-core Cloud Run instances.
@@ -166,26 +166,14 @@ class BaseDebugOverlayRenderer:
 
         # Resize to display dimensions if needed (to handle SAR)
         if self.needs_resize:
-            if self.timer:
-                with self.timer.measure("debug_video_resize"):
-                    frame = cv2.resize(
-                        frame,
-                        (self.display_width, self.display_height),
-                        interpolation=cv2.INTER_LINEAR,
-                    )
-            else:
+            with self.timer.measure("debug_video_resize"):
                 frame = cv2.resize(
                     frame,
                     (self.display_width, self.display_height),
                     interpolation=cv2.INTER_LINEAR,
                 )
 
-        if self.timer:
-            with self.timer.measure("debug_video_write"):
-                write_overlay_frame(
-                    self.writer, frame, self.display_width, self.display_height
-                )
-        else:
+        with self.timer.measure("debug_video_write"):
             write_overlay_frame(
                 self.writer, frame, self.display_width, self.display_height
             )

kinemotion/core/pipeline_utils.py CHANGED
@@ -12,7 +12,7 @@ from ..dropjump.analysis import compute_average_foot_position
 from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
 from .pose import PoseTracker
 from .smoothing import smooth_landmarks, smooth_landmarks_advanced
-from .timing import PerformanceTimer
+from .timing import NULL_TIMER, Timer
 from .video_io import VideoProcessor
 
 TResult = TypeVar("TResult")
@@ -182,7 +182,7 @@ def process_all_frames(
     video: VideoProcessor,
     tracker: PoseTracker,
     verbose: bool,
-    timer: PerformanceTimer | None = None,
+    timer: Timer | None = None,
     close_tracker: bool = True,
     target_debug_fps: float = 30.0,
     max_debug_dim: int = 720,
@@ -193,7 +193,7 @@ def process_all_frames(
         video: Video processor to read frames from
         tracker: Pose tracker for landmark detection
         verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
+        timer: Optional Timer for measuring operations
         close_tracker: Whether to close the tracker after processing (default: True)
         target_debug_fps: Target FPS for debug video (default: 30.0)
         max_debug_dim: Max dimension for debug video frames (default: 720)
@@ -207,6 +207,7 @@ def process_all_frames(
     if verbose:
         print("Tracking pose landmarks...")
 
+    timer = timer or NULL_TIMER
     step = max(1, int(video.fps / target_debug_fps))
 
     w, h = video.display_width, video.display_height
@@ -218,12 +219,7 @@ def process_all_frames(
     debug_h = int(h * scale) // 2 * 2
     should_resize = (debug_w != video.width) or (debug_h != video.height)
 
-    if timer:
-        with timer.measure("pose_tracking"):
-            debug_frames, landmarks_sequence, frame_indices = _process_frames_loop(
-                video, tracker, step, should_resize, debug_w, debug_h
-            )
-    else:
+    with timer.measure("pose_tracking"):
         debug_frames, landmarks_sequence, frame_indices = _process_frames_loop(
             video, tracker, step, should_resize, debug_w, debug_h
         )
@@ -241,7 +237,7 @@ def apply_smoothing(
     landmarks_sequence: list,
     params: AnalysisParameters,
     verbose: bool,
-    timer: PerformanceTimer | None = None,
+    timer: Timer | None = None,
 ) -> list:
     """Apply smoothing to landmark sequence with auto-tuned parameters.
 
@@ -249,11 +245,12 @@ def apply_smoothing(
         landmarks_sequence: Sequence of landmarks from all frames
         params: Auto-tuned parameters containing smoothing settings
         verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
+        timer: Optional Timer for measuring operations
 
     Returns:
         Smoothed landmarks sequence
     """
+    timer = timer or NULL_TIMER
    use_advanced = params.outlier_rejection or params.bilateral_filter
 
     if verbose:
@@ -273,6 +270,7 @@ def apply_smoothing(
             polyorder=params.polyorder,
             use_outlier_rejection=params.outlier_rejection,
             use_bilateral=params.bilateral_filter,
+            timer=timer,
         )
     else:
         return smooth_landmarks(
@@ -281,10 +279,8 @@ def apply_smoothing(
             window_length=params.smoothing_window,
             polyorder=params.polyorder,
         )
 
-    if timer:
-        with timer.measure("smoothing"):
-            return _run_smoothing()
-    return _run_smoothing()
+    with timer.measure("smoothing"):
+        return _run_smoothing()
 
 
 def calculate_foot_visibility(frame_landmarks: dict) -> float:
@@ -341,7 +337,7 @@ def convert_timer_to_stage_names(
     """Convert timer metric names to human-readable stage names.
 
     Args:
-        timer_metrics: Dictionary from PerformanceTimer.get_metrics()
+        timer_metrics: Dictionary from Timer.get_metrics()
 
     Returns:
         Dictionary with human-readable stage names as keys
@@ -366,6 +362,24 @@ def convert_timer_to_stage_names(
         "debug_video_copy": "Debug video frame copy",
         "debug_video_draw": "Debug video drawing",
         "debug_video_write": "Debug video encoding",
+        # Granular metrics
+        "frame_conversion": "Frame BGR-RGB conversion",
+        "mediapipe_inference": "MediaPipe inference",
+        "landmark_extraction": "Landmark extraction",
+        "smoothing_outlier_rejection": "Smoothing (outlier rejection)",
+        "smoothing_bilateral": "Smoothing (bilateral)",
+        "smoothing_savgol": "Smoothing (Savitzky-Golay)",
+        "cmj_compute_derivatives": "CMJ derivatives computation",
+        "cmj_find_takeoff": "CMJ takeoff detection",
+        "cmj_find_lowest_point": "CMJ lowest point detection",
+        "cmj_find_landing": "CMJ landing detection",
+        "cmj_find_standing_end": "CMJ standing end detection",
+        "dj_compute_velocity": "DJ velocity computation",
+        "dj_find_contact_frames": "DJ contact frame search",
+        "dj_detect_drop_start": "DJ drop start detection",
+        "dj_find_phases": "DJ phase finding",
+        "dj_identify_contact": "DJ contact identification",
+        "dj_analyze_flight": "DJ flight analysis",
     }
     return {mapping.get(k, k): v for k, v in timer_metrics.items()}
 
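`convert_timer_to_stage_names` maps these metric keys to display labels, passing unknown keys through unchanged (`mapping.get(k, k)`). A small sketch with made-up timings:

    from kinemotion.core.pipeline_utils import convert_timer_to_stage_names

    timer_metrics = {"mediapipe_inference": 12.4, "smoothing_savgol": 0.08}
    print(convert_timer_to_stage_names(timer_metrics))
    # {'MediaPipe inference': 12.4, 'Smoothing (Savitzky-Golay)': 0.08}
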
kinemotion/core/pose.py CHANGED
@@ -4,7 +4,7 @@ import cv2
 import mediapipe as mp
 import numpy as np
 
-from .timing import PerformanceTimer
+from .timing import NULL_TIMER, Timer
 
 
 class PoseTracker:
@@ -14,7 +14,7 @@ class PoseTracker:
         self,
         min_detection_confidence: float = 0.5,
         min_tracking_confidence: float = 0.5,
-        timer: PerformanceTimer | None = None,
+        timer: Timer | None = None,
     ) -> None:
         """
         Initialize the pose tracker.
@@ -22,9 +22,9 @@ class PoseTracker:
         Args:
             min_detection_confidence: Minimum confidence for pose detection
             min_tracking_confidence: Minimum confidence for pose tracking
-            timer: Optional PerformanceTimer for measuring operations
+            timer: Optional Timer for measuring operations
         """
-        self.timer = timer
+        self.timer = timer or NULL_TIMER
         self.mp_pose = mp.solutions.pose
         self.pose = self.mp_pose.Pose(
             static_image_mode=False,  # Use tracking mode for better performance
@@ -47,42 +47,41 @@ class PoseTracker:
             or None if no pose detected. Coordinates are normalized (0-1).
         """
         # Convert BGR to RGB
-        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        with self.timer.measure("frame_conversion"):
+            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
         # Process the frame
-        if self.timer:
-            with self.timer.measure("mediapipe_inference"):
-                results = self.pose.process(rgb_frame)
-        else:
+        with self.timer.measure("mediapipe_inference"):
             results = self.pose.process(rgb_frame)
 
         if not results.pose_landmarks:
             return None
 
         # Extract key landmarks for feet tracking and CoM estimation
-        landmarks = {}
-        landmark_names = {
-            # Feet landmarks
-            self.mp_pose.PoseLandmark.LEFT_ANKLE: "left_ankle",
-            self.mp_pose.PoseLandmark.RIGHT_ANKLE: "right_ankle",
-            self.mp_pose.PoseLandmark.LEFT_HEEL: "left_heel",
-            self.mp_pose.PoseLandmark.RIGHT_HEEL: "right_heel",
-            self.mp_pose.PoseLandmark.LEFT_FOOT_INDEX: "left_foot_index",
-            self.mp_pose.PoseLandmark.RIGHT_FOOT_INDEX: "right_foot_index",
-            # Torso landmarks for CoM estimation
-            self.mp_pose.PoseLandmark.LEFT_HIP: "left_hip",
-            self.mp_pose.PoseLandmark.RIGHT_HIP: "right_hip",
-            self.mp_pose.PoseLandmark.LEFT_SHOULDER: "left_shoulder",
-            self.mp_pose.PoseLandmark.RIGHT_SHOULDER: "right_shoulder",
-            # Additional landmarks for better CoM estimation
-            self.mp_pose.PoseLandmark.NOSE: "nose",
-            self.mp_pose.PoseLandmark.LEFT_KNEE: "left_knee",
-            self.mp_pose.PoseLandmark.RIGHT_KNEE: "right_knee",
-        }
-
-        for landmark_id, name in landmark_names.items():
-            lm = results.pose_landmarks.landmark[landmark_id]
-            landmarks[name] = (lm.x, lm.y, lm.visibility)
+        with self.timer.measure("landmark_extraction"):
+            landmarks = {}
+            landmark_names = {
+                # Feet landmarks
+                self.mp_pose.PoseLandmark.LEFT_ANKLE: "left_ankle",
+                self.mp_pose.PoseLandmark.RIGHT_ANKLE: "right_ankle",
+                self.mp_pose.PoseLandmark.LEFT_HEEL: "left_heel",
+                self.mp_pose.PoseLandmark.RIGHT_HEEL: "right_heel",
+                self.mp_pose.PoseLandmark.LEFT_FOOT_INDEX: "left_foot_index",
+                self.mp_pose.PoseLandmark.RIGHT_FOOT_INDEX: "right_foot_index",
+                # Torso landmarks for CoM estimation
+                self.mp_pose.PoseLandmark.LEFT_HIP: "left_hip",
+                self.mp_pose.PoseLandmark.RIGHT_HIP: "right_hip",
+                self.mp_pose.PoseLandmark.LEFT_SHOULDER: "left_shoulder",
+                self.mp_pose.PoseLandmark.RIGHT_SHOULDER: "right_shoulder",
+                # Additional landmarks for better CoM estimation
+                self.mp_pose.PoseLandmark.NOSE: "nose",
+                self.mp_pose.PoseLandmark.LEFT_KNEE: "left_knee",
+                self.mp_pose.PoseLandmark.RIGHT_KNEE: "right_knee",
+            }
+
+            for landmark_id, name in landmark_names.items():
+                lm = results.pose_landmarks.landmark[landmark_id]
+                landmarks[name] = (lm.x, lm.y, lm.visibility)
 
         return landmarks
 

kinemotion/core/smoothing.py CHANGED
@@ -9,6 +9,7 @@ from .filtering import (
     bilateral_temporal_filter,
     reject_outliers,
 )
+from .timing import NULL_TIMER, Timer
 
 # Type aliases for landmark data structures
 LandmarkCoord: TypeAlias = tuple[float, float, float]  # (x, y, visibility)
@@ -347,6 +348,7 @@ def smooth_landmarks_advanced(
     ransac_threshold: float = 0.02,
     bilateral_sigma_spatial: float = 3.0,
     bilateral_sigma_intensity: float = 0.02,
+    timer: Timer | None = None,
 ) -> LandmarkSequence:
     """
     Advanced landmark smoothing with outlier rejection and bilateral filtering.
@@ -365,10 +367,12 @@ def smooth_landmarks_advanced(
         ransac_threshold: Threshold for RANSAC outlier detection
         bilateral_sigma_spatial: Spatial sigma for bilateral filter
         bilateral_sigma_intensity: Intensity sigma for bilateral filter
+        timer: Optional Timer for measuring operations
 
     Returns:
         Smoothed landmark sequence with same structure as input
     """
+    timer = timer or NULL_TIMER
     if len(landmark_sequence) < window_length:
         return landmark_sequence
 
@@ -382,37 +386,40 @@ def smooth_landmarks_advanced(
 
     # Step 1: Outlier rejection
     if use_outlier_rejection:
-        x_array, _ = reject_outliers(
-            x_array,
-            use_ransac=True,
-            use_median=True,
-            ransac_threshold=ransac_threshold,
-        )
-        y_array, _ = reject_outliers(
-            y_array,
-            use_ransac=True,
-            use_median=True,
-            ransac_threshold=ransac_threshold,
-        )
+        with timer.measure("smoothing_outlier_rejection"):
+            x_array, _ = reject_outliers(
+                x_array,
+                use_ransac=True,
+                use_median=True,
+                ransac_threshold=ransac_threshold,
+            )
+            y_array, _ = reject_outliers(
+                y_array,
+                use_ransac=True,
+                use_median=True,
+                ransac_threshold=ransac_threshold,
+            )
 
     # Step 2: Smoothing (bilateral or Savitzky-Golay)
     if use_bilateral:
-        x_smooth = bilateral_temporal_filter(
-            x_array,
-            window_size=window_length,
-            sigma_spatial=bilateral_sigma_spatial,
-            sigma_intensity=bilateral_sigma_intensity,
-        )
-        y_smooth = bilateral_temporal_filter(
-            y_array,
-            window_size=window_length,
-            sigma_spatial=bilateral_sigma_spatial,
-            sigma_intensity=bilateral_sigma_intensity,
-        )
+        with timer.measure("smoothing_bilateral"):
+            x_smooth = bilateral_temporal_filter(
+                x_array,
+                window_size=window_length,
+                sigma_spatial=bilateral_sigma_spatial,
+                sigma_intensity=bilateral_sigma_intensity,
+            )
+            y_smooth = bilateral_temporal_filter(
+                y_array,
+                window_size=window_length,
+                sigma_spatial=bilateral_sigma_spatial,
+                sigma_intensity=bilateral_sigma_intensity,
+            )
     else:
         # Standard Savitzky-Golay
-        x_smooth = savgol_filter(x_array, window_length, polyorder)
-        y_smooth = savgol_filter(y_array, window_length, polyorder)
+        with timer.measure("smoothing_savgol"):
+            x_smooth = savgol_filter(x_array, window_length, polyorder)
+            y_smooth = savgol_filter(y_array, window_length, polyorder)
 
     return x_smooth, y_smooth
 
kinemotion/core/timing.py CHANGED
@@ -26,9 +26,43 @@ Example:
 """
 
 import time
-from contextlib import AbstractContextManager
+from contextlib import AbstractContextManager, ExitStack, contextmanager
 from typing import Protocol, runtime_checkable
 
+# OpenTelemetry related imports, guarded by try-except for optional dependency
+_trace_module = None  # This will hold the actual 'trace' module if imported
+_otel_tracer_class = None  # This will hold the actual 'Tracer' class if imported
+
+try:
+    import opentelemetry.trace as _trace_module_import  # Import the module directly
+
+    _otel_tracer_class = (
+        _trace_module_import.Tracer
+    )  # Get the Tracer class from the module
+    _trace_module = (
+        _trace_module_import  # Expose the trace module globally after successful import
+    )
+except ImportError:
+    pass  # No OTel, so these remain None
+
+# Now define the global/module-level variables used elsewhere
+# Conditionally expose 'trace' and 'Tracer' aliases
+trace = _trace_module  # This will be the actual module or None
+
+
+class Tracer:  # Dummy for type hints if actual Tracer is not available
+    pass
+
+
+if _otel_tracer_class:
+    Tracer = _otel_tracer_class  # Override dummy if actual Tracer is available
+
+# This _OPENTELEMETRY_AVAILABLE variable is assigned only once,
+# after the try-except block
+_OPENTELEMETRY_AVAILABLE = bool(
+    _otel_tracer_class
+)  # True if Tracer class was successfully loaded
+
 
 @runtime_checkable
 class Timer(Protocol):
@@ -59,6 +93,87 @@ class Timer(Protocol):
         ...
 
 
+class _NullContext(AbstractContextManager[None]):
+    """Singleton null context manager with zero overhead.
+
+    Implements the context manager protocol but performs no operations.
+    Optimized away by the Python interpreter for minimal overhead.
+    """
+
+    __slots__ = ()
+
+    def __enter__(self) -> None:
+        """No-op entry - returns immediately."""
+        return None
+
+    def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> bool:
+        """No-op exit - returns immediately.
+
+        Args:
+            exc_type: Exception type (ignored)
+            exc_val: Exception value (ignored)
+            exc_tb: Exception traceback (ignored)
+
+        Returns:
+            False (does not suppress exceptions)
+        """
+        return False
+
+
+class NullTimer:
+    """No-op timer implementing the Null Object Pattern.
+
+    Provides zero-overhead instrumentation when profiling is disabled.
+    All methods are no-ops that optimize away at runtime.
+
+    Performance: ~20-30 nanoseconds overhead per measure() call.
+    This is negligible compared to any actual work being measured.
+
+    Use Cases:
+    - Production deployments (profiling disabled)
+    - Performance-critical paths
+    - Testing without timing dependencies
+
+    Example:
+        # Use global singleton for zero allocation overhead
+        tracker = PoseTracker(timer=NULL_TIMER)
+
+        # No overhead - measure() call optimizes to nothing
+        with tracker.timer.measure("operation"):
+            do_work()
+    """
+
+    __slots__ = ()
+
+    def measure(self, name: str) -> AbstractContextManager[None]:
+        """Return a no-op context manager.
+
+        This method does nothing and is optimized away by the Python interpreter.
+        The context manager protocol (__enter__/__exit__) has minimal overhead.
+
+        Args:
+            name: Ignored - kept for protocol compatibility
+
+        Returns:
+            Singleton null context manager
+        """
+        return _NULL_CONTEXT
+
+    def get_metrics(self) -> dict[str, float]:
+        """Return empty metrics dictionary.
+
+        Returns:
+            Empty dictionary (no metrics collected)
+        """
+        return {}
+
+
+# Singleton instances for global reuse
+# Use these instead of creating new instances to avoid allocation overhead
+_NULL_CONTEXT = _NullContext()
+NULL_TIMER: Timer = NullTimer()
+
+
 class _MeasureContext(AbstractContextManager[None]):
     """Optimized context manager for active timing.
 
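This block is what makes the release-wide `timer = timer or NULL_TIMER` normalization work: every call site always holds a usable `Timer`, so the old `if timer:` branches disappear. A sketch of the resulting calling convention (`process_stage` and `do_work` are hypothetical names):

    def process_stage(data, timer: Timer | None = None) -> None:
        timer = timer or NULL_TIMER  # never None past this point

        # Single code path whether profiling is enabled or not; with a
        # NullTimer the context manager is a shared no-op singleton.
        with timer.measure("stage"):
            do_work(data)
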
@@ -165,82 +280,103 @@ class PerformanceTimer:
         return self.metrics.copy()
 
 
-class _NullContext(AbstractContextManager[None]):
-    """Singleton null context manager with zero overhead.
+@contextmanager
+def _composite_context_manager(contexts: list[AbstractContextManager[None]]):
+    """Helper to combine multiple context managers into one.
 
-    Implements the context manager protocol but performs no operations.
-    Optimized away by the Python interpreter for minimal overhead.
+    Uses ExitStack to manage entering and exiting multiple contexts transparently.
     """
+    with ExitStack() as stack:
+        for ctx in contexts:
+            stack.enter_context(ctx)
+        yield
 
-    __slots__ = ()
 
-    def __enter__(self) -> None:
-        """No-op entry - returns immediately."""
-        return None
+class CompositeTimer:
+    """Timer that delegates measurements to multiple underlying timers.
 
-    def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> bool:
-        """No-op exit - returns immediately.
+    Useful for enabling both local performance timing (for JSON output)
+    and distributed tracing (OpenTelemetry) simultaneously.
+    """
+
+    __slots__ = ("timers",)
+
+    def __init__(self, timers: list[Timer]) -> None:
+        """Initialize composite timer.
 
         Args:
-            exc_type: Exception type (ignored)
-            exc_val: Exception value (ignored)
-            exc_tb: Exception traceback (ignored)
+            timers: List of timer instances to delegate to
+        """
+        self.timers = timers
+
+    def measure(self, name: str) -> AbstractContextManager[None]:
+        """Measure using all underlying timers.
+
+        Args:
+            name: Name of the operation
 
         Returns:
-            False (does not suppress exceptions)
+            Context manager that manages all underlying timers
         """
-        return False
+        contexts = [timer.measure(name) for timer in self.timers]
+        return _composite_context_manager(contexts)
 
+    def get_metrics(self) -> dict[str, float]:
+        """Get combined metrics from all timers.
 
-class NullTimer:
-    """No-op timer implementing the Null Object Pattern.
+        Returns:
+            Merged dictionary of metrics
+        """
+        metrics = {}
+        for timer in self.timers:
+            metrics.update(timer.get_metrics())
+        return metrics
 
-    Provides zero-overhead instrumentation when profiling is disabled.
-    All methods are no-ops that optimize away at runtime.
 
-    Performance: ~20-30 nanoseconds overhead per measure() call.
-    This is negligible compared to any actual work being measured.
+class OpenTelemetryTimer:
+    """Timer implementation that creates OpenTelemetry spans.
 
-    Use Cases:
-    - Production deployments (profiling disabled)
-    - Performance-critical paths
-    - Testing without timing dependencies
+    Maps 'measure' calls to OTel spans. Requires opentelemetry-api installed.
+    """
 
-    Example:
-        # Use global singleton for zero allocation overhead
-        tracker = PoseTracker(timer=NULL_TIMER)
+    __slots__ = ("tracer",)
 
-        # No overhead - measure() call optimizes to nothing
-        with tracker.timer.measure("operation"):
-            do_work()
-    """
+    def __init__(self, tracer: Tracer | None = None) -> None:
+        """Initialize OTel timer.
 
-    __slots__ = ()
+        Args:
+            tracer: Optional OTel tracer. If None, gets tracer for module name.
+        """
+        if not _OPENTELEMETRY_AVAILABLE:
+            self.tracer = None  # Always initialize self.tracer for __slots__
+            return
+
+        if trace is not None:
+            self.tracer = tracer or trace.get_tracer(__name__)
+        else:
+            # This branch should ideally not be reached if _OPENTELEMETRY_AVAILABLE
+            # is True but trace is None (meaning import succeeded but trace was not what
+            # expected). Defensive programming: ensure self.tracer is set.
+            self.tracer = None
 
     def measure(self, name: str) -> AbstractContextManager[None]:
-        """Return a no-op context manager.
-
-        This method does nothing and is optimized away by the Python interpreter.
-        The context manager protocol (__enter__/__exit__) has minimal overhead.
+        """Start an OpenTelemetry span.
 
         Args:
-            name: Ignored - kept for protocol compatibility
+            name: Name of the span
 
         Returns:
-            Singleton null context manager
+            Span context manager (compatible with AbstractContextManager)
         """
-        return _NULL_CONTEXT
+        if not _OPENTELEMETRY_AVAILABLE or self.tracer is None:
+            return _NULL_CONTEXT  # Return the no-op context
+
+        return self.tracer.start_as_current_span(name)
 
     def get_metrics(self) -> dict[str, float]:
-        """Return empty metrics dictionary.
+        """Return empty metrics (OTel handles export asynchronously).
 
         Returns:
-            Empty dictionary (no metrics collected)
+            Empty dictionary
         """
         return {}
-
-
-# Singleton instances for global reuse
-# Use these instead of creating new instances to avoid allocation overhead
-_NULL_CONTEXT = _NullContext()
-NULL_TIMER: Timer = NullTimer()
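
`CompositeTimer` lets a single run feed both the local JSON metrics and a distributed trace. A sketch, assuming `opentelemetry-api` is installed and that `PerformanceTimer()` needs no constructor arguments (its constructor is not shown in this diff):

    from kinemotion.core.timing import (
        CompositeTimer,
        OpenTelemetryTimer,
        PerformanceTimer,
    )

    perf = PerformanceTimer()
    timer = CompositeTimer([perf, OpenTelemetryTimer()])

    with timer.measure("pose_tracking"):  # opens an OTel span and times it locally
        ...

    print(perf.get_metrics())  # local durations; spans export via the OTel SDK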

kinemotion/core/video_io.py CHANGED
@@ -7,7 +7,7 @@ import warnings
 import cv2
 import numpy as np
 
-from .timing import PerformanceTimer
+from .timing import NULL_TIMER, Timer
 
 
 class VideoProcessor:
@@ -18,16 +18,16 @@ class VideoProcessor:
     No dimensions are hardcoded - all dimensions are extracted from actual frame data.
     """
 
-    def __init__(self, video_path: str, timer: PerformanceTimer | None = None) -> None:
+    def __init__(self, video_path: str, timer: Timer | None = None) -> None:
         """
         Initialize video processor.
 
         Args:
             video_path: Path to input video file
-            timer: Optional PerformanceTimer for measuring operations
+            timer: Optional Timer for measuring operations
         """
         self.video_path = video_path
-        self.timer = timer
+        self.timer = timer or NULL_TIMER
         self.cap = cv2.VideoCapture(video_path)
 
         if not self.cap.isOpened():
@@ -179,28 +179,14 @@ class VideoProcessor:
         OpenCV ignores rotation metadata, so we manually apply rotation
         based on the display matrix metadata extracted from the video.
         """
-        if self.timer:
-            with self.timer.measure("frame_read"):
-                ret, frame = self.cap.read()
-        else:
+        with self.timer.measure("frame_read"):
             ret, frame = self.cap.read()
 
         if not ret:
             return None
 
         # Apply rotation if video has rotation metadata
-        if self.timer:
-            with self.timer.measure("frame_rotation"):
-                if self.rotation == -90 or self.rotation == 270:
-                    # -90 degrees = rotate 90 degrees clockwise
-                    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
-                elif self.rotation == 90 or self.rotation == -270:
-                    # 90 degrees = rotate 90 degrees counter-clockwise
-                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
-                elif self.rotation == 180 or self.rotation == -180:
-                    # 180 degrees rotation
-                    frame = cv2.rotate(frame, cv2.ROTATE_180)
-        else:
+        with self.timer.measure("frame_rotation"):
             if self.rotation == -90 or self.rotation == 270:
                 # -90 degrees = rotate 90 degrees clockwise
                 frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

kinemotion/dropjump/analysis.py CHANGED
@@ -10,6 +10,7 @@ from ..core.smoothing import (
     compute_velocity_from_derivative,
     interpolate_threshold_crossing,
 )
+from ..core.timing import NULL_TIMER, Timer
 
 
 class ContactState(Enum):
@@ -345,6 +346,7 @@ def detect_ground_contact(
     visibilities: np.ndarray | None = None,
     window_length: int = 5,
     polyorder: int = 2,
+    timer: Timer | None = None,
 ) -> list[ContactState]:
     """
     Detect when feet are in contact with ground based on vertical motion.
@@ -361,19 +363,22 @@ def detect_ground_contact(
         visibilities: Array of visibility scores for each frame
         window_length: Window size for velocity derivative calculation (must be odd)
         polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
+        timer: Optional Timer for measuring operations
 
     Returns:
         List of ContactState for each frame
     """
+    timer = timer or NULL_TIMER
     n_frames = len(foot_positions)
 
     if n_frames < 2:
         return [ContactState.UNKNOWN] * n_frames
 
     # Compute vertical velocity using derivative-based method
-    velocities = compute_velocity_from_derivative(
-        foot_positions, window_length=window_length, polyorder=polyorder
-    )
+    with timer.measure("dj_compute_velocity"):
+        velocities = compute_velocity_from_derivative(
+            foot_positions, window_length=window_length, polyorder=polyorder
+        )
 
     # Detect stationary frames based on velocity threshold
     is_stationary = np.abs(velocities) < velocity_threshold
@@ -384,7 +389,8 @@ def detect_ground_contact(
     )
 
     # Find frames with sustained contact
-    contact_frames = _find_contact_frames(is_stationary, min_contact_frames)
+    with timer.measure("dj_find_contact_frames"):
+        contact_frames = _find_contact_frames(is_stationary, min_contact_frames)
 
     # Assign states
     return _assign_contact_states(

kinemotion/dropjump/debug_overlay.py CHANGED
@@ -138,10 +138,7 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
         Returns:
             Frame with debug overlay
         """
-        if self.timer:
-            with self.timer.measure("debug_video_copy"):
-                annotated = frame.copy()
-        else:
+        with self.timer.measure("debug_video_copy"):
             annotated = frame.copy()
 
         def _draw_overlays() -> None:
@@ -181,10 +178,7 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
         if metrics:
             self._draw_phase_labels(annotated, frame_idx, metrics)
 
-        if self.timer:
-            with self.timer.measure("debug_video_draw"):
-                _draw_overlays()
-        else:
+        with self.timer.measure("debug_video_draw"):
             _draw_overlays()
 
         return annotated

kinemotion/dropjump/kinematics.py CHANGED
@@ -7,6 +7,7 @@ from numpy.typing import NDArray
 
 from ..core.formatting import format_float_metric, format_int_metric
 from ..core.smoothing import compute_acceleration_from_derivative
+from ..core.timing import NULL_TIMER, Timer
 from .analysis import (
     ContactState,
     detect_drop_start,
@@ -433,6 +434,7 @@ def calculate_drop_jump_metrics(
     smoothing_window: int = 5,
     polyorder: int = 2,
     use_curvature: bool = True,
+    timer: Timer | None = None,
 ) -> DropJumpMetrics:
     """
     Calculate drop-jump metrics from contact states and positions.
@@ -450,16 +452,19 @@ def calculate_drop_jump_metrics(
             (must be odd)
         polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
         use_curvature: Whether to use curvature analysis for refining transitions
+        timer: Optional Timer for measuring operations
 
     Returns:
         DropJumpMetrics object with calculated values
     """
+    timer = timer or NULL_TIMER
    metrics = DropJumpMetrics()
 
     # Determine drop start frame
-    drop_start_frame_value = _determine_drop_start_frame(
-        drop_start_frame, foot_y_positions, fps, smoothing_window
-    )
+    with timer.measure("dj_detect_drop_start"):
+        drop_start_frame_value = _determine_drop_start_frame(
+            drop_start_frame, foot_y_positions, fps, smoothing_window
+        )
 
     # Store drop start frame in metrics
     metrics.drop_start_frame = (
@@ -467,15 +472,16 @@ def calculate_drop_jump_metrics(
     )
 
     # Find contact phases
-    phases = find_contact_phases(contact_states)
-    interpolated_phases = find_interpolated_phase_transitions_with_curvature(
-        foot_y_positions,
-        contact_states,
-        velocity_threshold,
-        smoothing_window,
-        polyorder,
-        use_curvature,
-    )
+    with timer.measure("dj_find_phases"):
+        phases = find_contact_phases(contact_states)
+        interpolated_phases = find_interpolated_phase_transitions_with_curvature(
+            foot_y_positions,
+            contact_states,
+            velocity_threshold,
+            smoothing_window,
+            polyorder,
+            use_curvature,
+        )
 
     if not phases:
         return metrics
@@ -504,9 +510,10 @@ def calculate_drop_jump_metrics(
         return metrics
 
     # Identify main contact phase
-    contact_start, contact_end, _ = _identify_main_contact_phase(
-        phases, ground_phases, air_phases_indexed, foot_y_positions
-    )
+    with timer.measure("dj_identify_contact"):
+        contact_start, contact_end, _ = _identify_main_contact_phase(
+            phases, ground_phases, air_phases_indexed, foot_y_positions
+        )
 
     # Store integer frame indices
     metrics.contact_start_frame = contact_start
@@ -524,15 +531,16 @@ def calculate_drop_jump_metrics(
     metrics.contact_end_frame_precise = contact_end_frac
 
     # Analyze flight phase and calculate jump height
-    _analyze_flight_phase(
-        metrics,
-        phases,
-        interpolated_phases,
-        contact_end,
-        foot_y_positions,
-        fps,
-        smoothing_window,
-        polyorder,
-    )
+    with timer.measure("dj_analyze_flight"):
+        _analyze_flight_phase(
+            metrics,
+            phases,
+            interpolated_phases,
+            contact_end,
+            foot_y_positions,
+            fps,
+            smoothing_window,
+            polyorder,
+        )
 
     return metrics

kinemotion-0.46.0.dist-info/METADATA → kinemotion-0.47.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.46.0
+Version: 0.47.0
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion

kinemotion-0.46.0.dist-info/RECORD → kinemotion-0.47.0.dist-info/RECORD CHANGED
@@ -1,40 +1,40 @@
 kinemotion/__init__.py,sha256=wPItmyGJUOFM6GPRVhAEvRz0-ErI7e2qiUREYJ9EfPQ,943
-kinemotion/api.py,sha256=AWURqiz0SI1BGh6mTlywTOWKFGrXyoZJbmo_t6sRkjQ,32538
+kinemotion/api.py,sha256=K3E5kEQQyPZrEWYaIczJNxxWREWfclIvQYjXcX--9-k,31185
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
-kinemotion/cmj/analysis.py,sha256=qtULzp9uYzm5M0_Qu5YGJpuwjg9fz1VKAg6xg4NJxvM,21639
+kinemotion/cmj/analysis.py,sha256=YDj7HpSCzrsw6mPtE3phDfYuAWQV0w-CCiLiQjkh3Mg,22196
 kinemotion/cmj/cli.py,sha256=HpZgLWoLjcgsfOZu6EQ_26tg6QwTgFjR-Ly8WCBg24c,9904
 kinemotion/cmj/debug_overlay.py,sha256=fXmWoHhqMLGo4vTtB6Ezs3yLUDOLw63zLIgU2gFlJQU,15892
 kinemotion/cmj/joint_angles.py,sha256=HmheIEiKcQz39cRezk4h-htorOhGNPsqKIR9RsAEKts,9960
 kinemotion/cmj/kinematics.py,sha256=Lq9m9MNQxnXv31VhKmXVrlM7rRkhi8PxW50N_CC8_8Y,11860
 kinemotion/cmj/metrics_validator.py,sha256=V_fmlczYH06SBtwqESv-IfGi3wDsIy3RQbd7VwOyNo0,31359
 kinemotion/cmj/validation_bounds.py,sha256=9ZTo68fl3ooyWjXXyTMRLpK9tFANa_rQf3oHhq7iQGE,11995
-kinemotion/core/__init__.py,sha256=mIsuXS9L7jk-3TCSlEdQ5nlgEAMXl7v5xfRFycwDn80,1430
+kinemotion/core/__init__.py,sha256=rBIEx9sW6E-nyVdWmoVGJYhfPikLukoDp7lxKri7RTQ,1543
 kinemotion/core/auto_tuning.py,sha256=wtCUMOhBChVJNXfEeku3GCMW4qED6MF-O_mv2sPTiVQ,11324
 kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
-kinemotion/core/debug_overlay_utils.py,sha256=Eu4GXm8VeaDhU7voDjPJ4JvR-7ypT1mYmCz0d-M39N4,9027
+kinemotion/core/debug_overlay_utils.py,sha256=-goE3w4gBij99y1U4ckU5iaQPS0SupcHplT04DDWzUo,8579
 kinemotion/core/determinism.py,sha256=NwVrHqJiVxxFHTBPVy8aDBJH2SLIcYIpdGFp7glblB8,2515
 kinemotion/core/experimental.py,sha256=IK05AF4aZS15ke85hF3TWCqRIXU1AlD_XKzFz735Ua8,3640
 kinemotion/core/filtering.py,sha256=GsC9BB71V07LJJHgS2lsaxUAtJsupcUiwtZFDgODh8c,11417
 kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
 kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
-kinemotion/core/pipeline_utils.py,sha256=n6ee90xOYfBGkDCM1_F2rpYVsC3wWyKSTtWpAFz0Fh0,14161
-kinemotion/core/pose.py,sha256=Tq4VS0YmMzrprVUsELm6FQczyLhP8UKurM9ccYn1LLU,8959
+kinemotion/core/pipeline_utils.py,sha256=0u7o-UFZX6cOu3NaWpFmEy5ejS0WUKggZ1HSdeZXhoA,14964
+kinemotion/core/pose.py,sha256=z1OGuwnc-NdK6Aoc9UYCyPBzomw4eInexOWonZbsEoA,9057
 kinemotion/core/quality.py,sha256=dPGQp08y8DdEUbUdjTThnUOUsALgF0D2sdz50cm6wLI,13098
-kinemotion/core/smoothing.py,sha256=GAfC-jxu1eqNyDjsUXqUBicKx9um5hrk49wz1FxfRNM,15219
-kinemotion/core/timing.py,sha256=Zjhue9LBM1kOcYhqYx3K-OIulnMN8yJer_m3V9i_vqo,7730
+kinemotion/core/smoothing.py,sha256=FZmv3rumn0mYKU2y3JPKz46EvD8TVmQ6_GsN_Vp3BdU,15650
+kinemotion/core/timing.py,sha256=mXwFTEYcB2cfAqQZAlucPN8cqPbVs7as2qjVMPToBdw,12024
 kinemotion/core/validation.py,sha256=LmKfSl4Ayw3DgwKD9IrhsPdzp5ia4drLsHA2UuU1SCM,6310
-kinemotion/core/video_io.py,sha256=HyLwn22fKe37j18853YYYrQi0JQWAwxpepPLNkuZKnQ,8586
+kinemotion/core/video_io.py,sha256=vCwpWnlW2y29l48dFXokdehQn42w_IQvayxbVTjpXqQ,7863
 kinemotion/dropjump/__init__.py,sha256=tC3H3BrCg8Oj-db-Vrtx4PH_llR1Ppkd5jwaOjhQcLg,862
-kinemotion/dropjump/analysis.py,sha256=MjxO-vps0nz_hXlnGk7cgq3jFenJYzsM0VVpHwnHXsM,27935
+kinemotion/dropjump/analysis.py,sha256=p7nnCe7V6vnhQKZVYk--_nhsTvVa_WY-A3zXmyplsew,28211
 kinemotion/dropjump/cli.py,sha256=eLIA0rnx60vqD__PinB1-5nQ8_xQUhCGplwsB0u9MgU,15824
-kinemotion/dropjump/debug_overlay.py,sha256=9nlnDYB_ZJO4dC1uMhDa4UOYGMBsDpyPQD3WbJjbwpM,6130
-kinemotion/dropjump/kinematics.py,sha256=kH-XM66wlOCYMpjvyb6_Qh5ZebyOfFZ47rmhgE1Tww4,19404
+kinemotion/dropjump/debug_overlay.py,sha256=8XVuDyZ3nuNoCYkxcUWC7wyEoHyBxx77Sb--B1KiYWw,5974
+kinemotion/dropjump/kinematics.py,sha256=PATlGaClutGKJslL-LRIXHmTsvb-xEB8PUIMScU_K4c,19849
 kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
 kinemotion/dropjump/validation_bounds.py,sha256=5b4I3CKPybuvrbn-nP5yCcGF_sH4Vtyw3a5AWWvWnBk,4645
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.46.0.dist-info/METADATA,sha256=IRNoNMIpHqtIEc1LZzTvL6k4_8SzAaPLlY6SqI1RzsM,26020
-kinemotion-0.46.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-kinemotion-0.46.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.46.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.46.0.dist-info/RECORD,,
+kinemotion-0.47.0.dist-info/METADATA,sha256=Kz7ptLupptK9uFRaatqfcVBvsaN_N6vqf0zKFitX1-o,26020
+kinemotion-0.47.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.47.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.47.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.47.0.dist-info/RECORD,,