kinemotion 0.71.0-py3-none-any.whl → 0.72.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of kinemotion might be problematic.

Files changed (36)
  1. kinemotion/__init__.py +1 -1
  2. kinemotion/api.py +2 -2
  3. kinemotion/cli.py +1 -1
  4. kinemotion/cmj/analysis.py +2 -4
  5. kinemotion/cmj/api.py +9 -7
  6. kinemotion/cmj/debug_overlay.py +154 -286
  7. kinemotion/cmj/joint_angles.py +96 -31
  8. kinemotion/cmj/metrics_validator.py +22 -29
  9. kinemotion/cmj/validation_bounds.py +1 -18
  10. kinemotion/core/__init__.py +0 -2
  11. kinemotion/core/auto_tuning.py +95 -100
  12. kinemotion/core/debug_overlay_utils.py +142 -15
  13. kinemotion/core/experimental.py +55 -51
  14. kinemotion/core/filtering.py +15 -11
  15. kinemotion/core/overlay_constants.py +61 -0
  16. kinemotion/core/pipeline_utils.py +1 -1
  17. kinemotion/core/pose.py +47 -98
  18. kinemotion/core/smoothing.py +65 -51
  19. kinemotion/core/types.py +15 -0
  20. kinemotion/core/validation.py +6 -7
  21. kinemotion/core/video_io.py +14 -9
  22. kinemotion/{dropjump → dj}/__init__.py +2 -2
  23. kinemotion/{dropjump → dj}/analysis.py +192 -75
  24. kinemotion/{dropjump → dj}/api.py +13 -17
  25. kinemotion/{dropjump → dj}/cli.py +62 -78
  26. kinemotion/dj/debug_overlay.py +241 -0
  27. kinemotion/{dropjump → dj}/kinematics.py +106 -44
  28. kinemotion/{dropjump → dj}/metrics_validator.py +1 -1
  29. kinemotion/{dropjump → dj}/validation_bounds.py +1 -1
  30. {kinemotion-0.71.0.dist-info → kinemotion-0.72.0.dist-info}/METADATA +1 -1
  31. kinemotion-0.72.0.dist-info/RECORD +50 -0
  32. kinemotion/dropjump/debug_overlay.py +0 -182
  33. kinemotion-0.71.0.dist-info/RECORD +0 -49
  34. {kinemotion-0.71.0.dist-info → kinemotion-0.72.0.dist-info}/WHEEL +0 -0
  35. {kinemotion-0.71.0.dist-info → kinemotion-0.72.0.dist-info}/entry_points.txt +0 -0
  36. {kinemotion-0.71.0.dist-info → kinemotion-0.72.0.dist-info}/licenses/LICENSE +0 -0
@@ -181,6 +181,18 @@ def _smooth_landmarks_core(  # NOSONAR(S1172) - polyorder used via closure
     return smoothed_sequence
 
 
+def _ensure_odd_window_length(window_length: int) -> int:
+    """Ensure window_length is odd (required for Savitzky-Golay filter).
+
+    Args:
+        window_length: Desired window length
+
+    Returns:
+        Odd window length (increments by 1 if even)
+    """
+    return window_length + 1 if window_length % 2 == 0 else window_length
+
+
 def smooth_landmarks(
     landmark_sequence: LandmarkSequence,
     window_length: int = 5,
@@ -200,9 +212,7 @@ def smooth_landmarks(
     if len(landmark_sequence) < window_length:
         return landmark_sequence
 
-    # Ensure window_length is odd
-    if window_length % 2 == 0:
-        window_length += 1
+    window_length = _ensure_odd_window_length(window_length)
 
     def savgol_smoother(
         x_coords: list[float], y_coords: list[float], _valid_frames: list[int]
@@ -231,61 +241,87 @@ def compute_velocity(positions: np.ndarray, fps: float, smooth_window: int = 3)
 
     # Smooth velocity if we have enough data
     if len(velocity) >= smooth_window and smooth_window > 1:
-        if smooth_window % 2 == 0:
-            smooth_window += 1
+        smooth_window = _ensure_odd_window_length(smooth_window)
         for dim in range(velocity.shape[1]):
            velocity[:, dim] = savgol_filter(velocity[:, dim], smooth_window, 1)
 
     return velocity
 
 
-def compute_velocity_from_derivative(
+def _compute_derivative(
     positions: np.ndarray,
+    deriv_order: int,
     window_length: int = 5,
     polyorder: int = 2,
 ) -> np.ndarray:
     """
-    Compute velocity as derivative of smoothed position trajectory.
-
-    Uses Savitzky-Golay filter to compute the derivative directly, which provides
-    a much smoother and more accurate velocity estimate than frame-to-frame differences.
+    Compute nth derivative using Savitzky-Golay filter.
 
-    This method:
-    1. Fits a polynomial to the position data in a sliding window
-    2. Analytically computes the derivative of that polynomial
-    3. Returns smooth velocity values
+    This unified function handles both velocity (first derivative) and
+    acceleration (second derivative) computation with a single implementation.
 
     Args:
         positions: 1D array of position values (e.g., foot y-positions)
+        deriv_order: Order of derivative (1 for velocity, 2 for acceleration)
         window_length: Window size for smoothing (must be odd, >= polyorder + 2)
         polyorder: Polynomial order for Savitzky-Golay filter (typically 2 or 3)
 
     Returns:
-        Array of absolute velocity values (magnitude of derivative)
+        Array of derivative values
     """
     if len(positions) < window_length:
         # Fallback to simple differences for short sequences
-        return np.abs(np.diff(positions, prepend=positions[0]))
+        if deriv_order == 1:
+            return np.abs(np.diff(positions, prepend=positions[0]))
+        # Second derivative fallback
+        velocity = np.diff(positions, prepend=positions[0])
+        return np.diff(velocity, prepend=velocity[0])
 
-    # Ensure window_length is odd
-    if window_length % 2 == 0:
-        window_length += 1
+    window_length = _ensure_odd_window_length(window_length)
 
     # Compute derivative using Savitzky-Golay filter
-    # deriv=1: compute first derivative
-    # delta=1.0: frame spacing (velocity per frame)
+    # delta=1.0: frame spacing
     # mode='interp': interpolate at boundaries
-    velocity = savgol_filter(
+    result = savgol_filter(
         positions,
         window_length,
         polyorder,
-        deriv=1,  # First derivative
-        delta=1.0,  # Frame spacing
+        deriv=deriv_order,
+        delta=1.0,
         mode="interp",
     )
 
-    # Return absolute velocity (magnitude only)
-    return np.abs(velocity)
+    # Return absolute values for velocity (first derivative)
+    return np.abs(result) if deriv_order == 1 else result
+
+
+def compute_velocity_from_derivative(
+    positions: np.ndarray,
+    window_length: int = 5,
+    polyorder: int = 2,
+) -> np.ndarray:
+    """
+    Compute velocity as derivative of smoothed position trajectory.
+
+    Uses Savitzky-Golay filter to compute the derivative directly, which provides
+    a much smoother and more accurate velocity estimate than frame-to-frame differences.
+
+    This method:
+    1. Fits a polynomial to the position data in a sliding window
+    2. Analytically computes the derivative of that polynomial
+    3. Returns smooth velocity values
+
+    Args:
+        positions: 1D array of position values (e.g., foot y-positions)
+        window_length: Window size for smoothing (must be odd, >= polyorder + 2)
+        polyorder: Polynomial order for Savitzky-Golay filter (typically 2 or 3)
+
+    Returns:
+        Array of absolute velocity values (magnitude of derivative)
+    """
+    return _compute_derivative(
+        positions, deriv_order=1, window_length=window_length, polyorder=polyorder
+    )
 
 
 def compute_acceleration_from_derivative(
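The refactor above folds both derivative orders into a single savgol_filter call. As a quick illustration (a minimal sketch using SciPy directly, not package code), the deriv argument returns the smoothed first or second derivative in one pass; with delta=1.0 the result is per frame, so multiplying by fps (or fps**2 for acceleration) converts to per-second units:

    import numpy as np
    from scipy.signal import savgol_filter

    frames = np.arange(30, dtype=float)
    positions = 0.5 * 0.002 * frames**2  # synthetic parabolic trajectory

    # First and second derivatives computed the same way _compute_derivative does,
    # differing only in the derivative order.
    velocity = savgol_filter(positions, 7, 2, deriv=1, delta=1.0, mode="interp")
    acceleration = savgol_filter(positions, 7, 2, deriv=2, delta=1.0, mode="interp")

    print(np.allclose(velocity, 0.002 * frames))  # True: exact for a quadratic
    print(np.allclose(acceleration, 0.002))       # True: constant second derivative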
@@ -314,30 +350,10 @@ def compute_acceleration_from_derivative(
     Returns:
         Array of acceleration values (second derivative of position)
     """
-    if len(positions) < window_length:
-        # Fallback to simple second differences for short sequences
-        velocity = np.diff(positions, prepend=positions[0])
-        return np.diff(velocity, prepend=velocity[0])
-
-    # Ensure window_length is odd
-    if window_length % 2 == 0:
-        window_length += 1
-
-    # Compute second derivative using Savitzky-Golay filter
-    # deriv=2: compute second derivative (acceleration/curvature)
-    # delta=1.0: frame spacing
-    # mode='interp': interpolate at boundaries
-    acceleration = savgol_filter(
-        positions,
-        window_length,
-        polyorder,
-        deriv=2,  # Second derivative
-        delta=1.0,  # Frame spacing
-        mode="interp",
+    return _compute_derivative(
+        positions, deriv_order=2, window_length=window_length, polyorder=polyorder
     )
 
-    return acceleration
-
 
 def smooth_landmarks_advanced(
     landmark_sequence: LandmarkSequence,
@@ -376,9 +392,7 @@ def smooth_landmarks_advanced(
     if len(landmark_sequence) < window_length:
         return landmark_sequence
 
-    # Ensure window_length is odd
-    if window_length % 2 == 0:
-        window_length += 1
+    window_length = _ensure_odd_window_length(window_length)
 
     def advanced_smoother(
         x_coords: list[float], y_coords: list[float], _valid_frames: list[int]
kinemotion/core/types.py CHANGED
@@ -29,6 +29,19 @@ LandmarkSequence: TypeAlias = list[LandmarkFrame]
 # - Wrapper structures: e.g. {"data": {...actual metrics...}}
 MetricsDict: TypeAlias = dict[str, Any]
 
+# MediaPipe foot landmark names used for position and visibility tracking
+FOOT_KEYS: tuple[str, ...] = (
+    "left_ankle",
+    "right_ankle",
+    "left_heel",
+    "right_heel",
+    "left_foot_index",
+    "right_foot_index",
+)
+
+# MediaPipe hip landmark names used for position tracking
+HIP_KEYS: tuple[str, ...] = ("left_hip", "right_hip")
+
 __all__ = [
     "FloatArray",
     "Float64Array",
@@ -39,4 +52,6 @@ __all__ = [
     "LandmarkFrame",
     "LandmarkSequence",
     "MetricsDict",
+    "FOOT_KEYS",
+    "HIP_KEYS",
 ]
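The new FOOT_KEYS and HIP_KEYS constants replace ad-hoc landmark name lists at call sites (see compute_average_foot_position further down). A minimal usage sketch, assuming the per-frame landmark dict maps names to (x, y, visibility) tuples as elsewhere in this diff:

    from kinemotion.core.types import FOOT_KEYS

    frame_landmarks = {
        "left_ankle": (0.42, 0.91, 0.88),
        "right_ankle": (0.47, 0.92, 0.81),
        "left_hip": (0.45, 0.55, 0.95),
    }
    # Keep only the foot landmarks that are present in this frame.
    foot_points = {name: frame_landmarks[name] for name in FOOT_KEYS if name in frame_landmarks}
    print(sorted(foot_points))  # ['left_ankle', 'right_ankle']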
@@ -78,19 +78,18 @@ class MetricBounds:
 
     def contains(self, value: float, profile: AthleteProfile) -> bool:
         """Check if value is within bounds for athlete profile."""
-        if profile == AthleteProfile.ELDERLY:
+        # ELDERLY and UNTRAINED use same bounds (practical to recreational)
+        if profile in (AthleteProfile.ELDERLY, AthleteProfile.UNTRAINED):
             return self.practical_min <= value <= self.recreational_max
-        elif profile == AthleteProfile.UNTRAINED:
-            return self.practical_min <= value <= self.recreational_max
-        elif profile == AthleteProfile.RECREATIONAL:
+        if profile == AthleteProfile.RECREATIONAL:
             return self.recreational_min <= value <= self.recreational_max
-        elif profile == AthleteProfile.TRAINED:
+        if profile == AthleteProfile.ELITE:
+            return self.elite_min <= value <= self.elite_max
+        if profile == AthleteProfile.TRAINED:
             # Trained athletes: midpoint between recreational and elite
             trained_min = (self.recreational_min + self.elite_min) / 2
             trained_max = (self.recreational_max + self.elite_max) / 2
             return trained_min <= value <= trained_max
-        elif profile == AthleteProfile.ELITE:
-            return self.elite_min <= value <= self.elite_max
         return False
 
     def is_physically_possible(self, value: float) -> bool:
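The TRAINED branch keeps its midpoint rule: each end of the trained range is the average of the corresponding recreational and elite bound. A worked example with illustrative numbers (not the package's actual bounds):

    recreational_min, recreational_max = 20.0, 40.0
    elite_min, elite_max = 35.0, 60.0

    trained_min = (recreational_min + elite_min) / 2  # (20 + 35) / 2 = 27.5
    trained_max = (recreational_max + elite_max) / 2  # (40 + 60) / 2 = 50.0

    value = 31.0
    print(trained_min <= value <= trained_max)  # True: within the trained range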
@@ -18,6 +18,17 @@ class VideoProcessor:
     No dimensions are hardcoded - all dimensions are extracted from actual frame data.
     """
 
+    # Mapping of rotation angles to OpenCV rotation operations
+    # Keys are normalized angles (equivalent angles grouped)
+    _ROTATION_OPS: dict[int, int] = {
+        -90: cv2.ROTATE_90_CLOCKWISE,
+        270: cv2.ROTATE_90_CLOCKWISE,
+        90: cv2.ROTATE_90_COUNTERCLOCKWISE,
+        -270: cv2.ROTATE_90_COUNTERCLOCKWISE,
+        180: cv2.ROTATE_180,
+        -180: cv2.ROTATE_180,
+    }
+
     def __init__(self, video_path: str, timer: Timer | None = None) -> None:
         """
         Initialize video processor.
@@ -216,15 +227,9 @@ class VideoProcessor:
 
         # Apply rotation if video has rotation metadata
         with self.timer.measure("frame_rotation"):
-            if self.rotation == -90 or self.rotation == 270:
-                # -90 degrees = rotate 90 degrees clockwise
-                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
-            elif self.rotation == 90 or self.rotation == -270:
-                # 90 degrees = rotate 90 degrees counter-clockwise
-                frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
-            elif self.rotation == 180 or self.rotation == -180:
-                # 180 degrees rotation
-                frame = cv2.rotate(frame, cv2.ROTATE_180)
+            rotation_op = self._ROTATION_OPS.get(self.rotation)
+            if rotation_op is not None:
+                frame = cv2.rotate(frame, rotation_op)
 
         self._frame_index += 1
         return frame
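The if/elif chain is replaced by a class-level lookup table. A standalone sketch of the same table-driven rotation (assumes opencv-python and numpy are installed; the frame here is a dummy array rather than a decoded video frame):

    import cv2
    import numpy as np

    ROTATION_OPS = {
        -90: cv2.ROTATE_90_CLOCKWISE, 270: cv2.ROTATE_90_CLOCKWISE,
        90: cv2.ROTATE_90_COUNTERCLOCKWISE, -270: cv2.ROTATE_90_COUNTERCLOCKWISE,
        180: cv2.ROTATE_180, -180: cv2.ROTATE_180,
    }

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # landscape dummy frame
    rotation = -90  # e.g. read from video rotation metadata

    op = ROTATION_OPS.get(rotation)
    if op is not None:  # unknown angles (e.g. 0) fall through untouched
        frame = cv2.rotate(frame, op)
    print(frame.shape)  # (1280, 720, 3): width and height swapped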
@@ -9,7 +9,7 @@ from .analysis import (
     find_interpolated_phase_transitions_with_curvature,
     refine_transition_with_curvature,
 )
-from .debug_overlay import DebugOverlayRenderer
+from .debug_overlay import DropJumpDebugOverlayRenderer
 from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 
 __all__ = [
@@ -25,5 +25,5 @@ __all__ = [
     "DropJumpMetrics",
     "calculate_drop_jump_metrics",
     # Debug overlay
-    "DebugOverlayRenderer",
+    "DropJumpDebugOverlayRenderer",
 ]
@@ -11,7 +11,7 @@ from ..core.smoothing import (
     interpolate_threshold_crossing,
 )
 from ..core.timing import NULL_TIMER, Timer
-from ..core.types import BoolArray, FloatArray
+from ..core.types import FOOT_KEYS, BoolArray, FloatArray
 
 
 class ContactState(Enum):
@@ -334,6 +334,48 @@ def _assign_contact_states(
     return states
 
 
+def _compute_near_ground_mask(
+    foot_positions: FloatArray,
+    height_tolerance: float = 0.35,
+) -> BoolArray:
+    """Compute mask for frames where feet are near ground level.
+
+    Uses position-based filtering to identify frames near ground baseline.
+    In normalized coordinates: y=1 is bottom (ground), y=0 is top.
+
+    The ground baseline is established as the 90th percentile of positions,
+    which represents the typical ground level while handling outliers.
+
+    The tolerance is set at 35% of the position range by default, which is
+    generous enough to capture the full reactive contact phase (where athletes
+    maintain an athletic stance) while still filtering out the jump apex
+    (where y is much lower than ground level).
+
+    Args:
+        foot_positions: Array of foot y-positions (normalized, 0-1)
+        height_tolerance: Fraction of position range allowed above ground (default 35%)
+
+    Returns:
+        Boolean array where True indicates frame is near ground level
+    """
+    # Ground baseline: 90th percentile (where feet are typically on ground)
+    # Using 90th instead of 95th to be less sensitive to final landing positions
+    ground_baseline = float(np.percentile(foot_positions, 90))
+
+    # Compute position range for tolerance calculation
+    position_range = float(np.max(foot_positions) - np.min(foot_positions))
+
+    # Minimum absolute tolerance to handle small movements
+    min_tolerance = 0.03  # 3% of normalized range
+
+    # Height tolerance: percentage of position range or minimum
+    tolerance = max(position_range * height_tolerance, min_tolerance)
+
+    # Frames are near ground if y >= ground_baseline - tolerance
+    # (Remember: higher y = closer to ground in normalized coords)
+    return foot_positions >= (ground_baseline - tolerance)
+
+
 def detect_ground_contact(
     foot_positions: FloatArray,
     velocity_threshold: float = 0.02,
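A self-contained sketch of the percentile-based gating on synthetic data (not package code), including the way detect_ground_contact below ANDs it with the velocity test so that low-velocity frames at the jump apex are no longer treated as ground contact:

    import numpy as np

    rng = np.random.default_rng(0)
    ground = 0.90 + rng.normal(0.0, 0.005, 40)  # feet on the ground (y near 1)
    flight = np.linspace(0.90, 0.55, 15)        # ascent toward the apex (y decreases)
    foot_y = np.concatenate([ground, flight, flight[::-1], ground])

    baseline = np.percentile(foot_y, 90)
    tolerance = max(0.35 * (foot_y.max() - foot_y.min()), 0.03)
    near_ground = foot_y >= baseline - tolerance  # False around the apex

    velocity = np.gradient(foot_y)              # stand-in for the Savitzky-Golay velocity
    stationary = np.abs(velocity) < 0.02
    on_ground_candidate = stationary & near_ground  # apex frames are filtered out
    print(int(on_ground_candidate.sum()), "candidate ground-contact frames")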
@@ -343,13 +385,14 @@
     window_length: int = 5,
     polyorder: int = 2,
     timer: Timer | None = None,
+    height_tolerance: float = 0.35,
 ) -> list[ContactState]:
     """
-    Detect when feet are in contact with ground based on vertical motion.
+    Detect when feet are in contact with ground based on vertical motion AND position.
 
-    Uses derivative-based velocity calculation via Savitzky-Goyal filter for smooth,
-    accurate velocity estimates. This is consistent with the velocity calculation used
-    throughout the pipeline for sub-frame interpolation and curvature analysis.
+    Uses derivative-based velocity calculation via Savitzky-Golay filter for smooth,
+    accurate velocity estimates. Additionally uses position-based filtering to prevent
+    false ON_GROUND classification at jump apex where velocity approaches zero.
 
     Args:
         foot_positions: Array of foot y-positions (normalized, 0-1, where 1 is bottom)
@@ -360,6 +403,7 @@
         window_length: Window size for velocity derivative calculation (must be odd)
         polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
         timer: Optional Timer for measuring operations
+        height_tolerance: Fraction of position range to allow above ground baseline (default 35%)
 
     Returns:
         List of ContactState for each frame
@@ -379,6 +423,14 @@
     # Detect stationary frames based on velocity threshold
     is_stationary = np.abs(velocities) < velocity_threshold
 
+    # Position-based filtering to prevent false ON_GROUND at jump apex
+    # In normalized coords: y=1 is bottom (ground), y=0 is top
+    # Ground baseline is the 95th percentile (handles outliers)
+    is_near_ground = _compute_near_ground_mask(foot_positions, height_tolerance)
+
+    # Both conditions must be true: low velocity AND near ground
+    is_stationary = is_stationary & is_near_ground
+
     # Apply visibility filter
     is_stationary = _filter_stationary_with_visibility(
         is_stationary, visibilities, visibility_threshold
@@ -424,6 +476,57 @@ def find_contact_phases(
     return phases
 
 
+def _interpolate_phase_boundary(
+    boundary_idx: int,
+    state: ContactState,
+    velocities: FloatArray,
+    velocity_threshold: float,
+    is_start: bool,
+) -> float:
+    """Interpolate phase boundary with sub-frame precision.
+
+    Args:
+        boundary_idx: Index of the boundary frame
+        state: Contact state of the phase
+        velocities: Velocity array
+        velocity_threshold: Threshold value for crossing detection
+        is_start: True for phase start, False for phase end
+
+    Returns:
+        Fractional frame index, or float(boundary_idx) if no interpolation.
+    """
+    n_velocities = len(velocities)
+
+    if is_start:
+        # For start boundary, look at velocity before and at the boundary
+        if boundary_idx <= 0 or boundary_idx >= n_velocities:
+            return float(boundary_idx)
+        vel_before = velocities[boundary_idx - 1]
+        vel_at = velocities[boundary_idx]
+        # Check threshold crossing based on state
+        is_crossing = (
+            state == ContactState.ON_GROUND and vel_before > velocity_threshold > vel_at
+        ) or (state == ContactState.IN_AIR and vel_before < velocity_threshold < vel_at)
+        if is_crossing:
+            offset = interpolate_threshold_crossing(vel_before, vel_at, velocity_threshold)
+            return (boundary_idx - 1) + offset
+        return float(boundary_idx)
+
+    # For end boundary, look at velocity at and after the boundary
+    if boundary_idx + 1 >= n_velocities:
+        return float(boundary_idx)
+    vel_at = velocities[boundary_idx]
+    vel_after = velocities[boundary_idx + 1]
+    # Check threshold crossing based on state
+    is_crossing = (
+        state == ContactState.ON_GROUND and vel_at < velocity_threshold < vel_after
+    ) or (state == ContactState.IN_AIR and vel_at > velocity_threshold > vel_after)
+    if is_crossing:
+        offset = interpolate_threshold_crossing(vel_at, vel_after, velocity_threshold)
+        return boundary_idx + offset
+    return float(boundary_idx)
+
+
 def _interpolate_phase_start(
     start_idx: int,
     state: ContactState,
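The refined boundary comes from interpolate_threshold_crossing in kinemotion/core/smoothing.py, which is not part of this diff; the sketch below assumes simple linear interpolation between the two samples that bracket the threshold, which is enough to show how a fractional frame index is produced:

    def linear_crossing(vel_before: float, vel_at: float, threshold: float) -> float:
        """Fraction (0..1) between the two samples where the threshold is crossed."""
        return (threshold - vel_before) / (vel_at - vel_before)

    # Landing: velocity drops through the threshold between frame 41 and frame 42.
    vel_before, vel_at, threshold = 0.05, 0.01, 0.02
    offset = linear_crossing(vel_before, vel_at, threshold)  # 0.75
    print(41 + offset)  # 41.75 -- the fractional boundary frame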
@@ -435,21 +538,9 @@
     Returns:
         Fractional start frame, or float(start_idx) if no interpolation.
     """
-    if start_idx <= 0 or start_idx >= len(velocities):
-        return float(start_idx)
-
-    vel_before = velocities[start_idx - 1]
-    vel_at = velocities[start_idx]
-
-    # Check threshold crossing based on state
-    is_landing = state == ContactState.ON_GROUND and vel_before > velocity_threshold > vel_at
-    is_takeoff = state == ContactState.IN_AIR and vel_before < velocity_threshold < vel_at
-
-    if is_landing or is_takeoff:
-        offset = interpolate_threshold_crossing(vel_before, vel_at, velocity_threshold)
-        return (start_idx - 1) + offset
-
-    return float(start_idx)
+    return _interpolate_phase_boundary(
+        start_idx, state, velocities, velocity_threshold, is_start=True
+    )
 
 
 def _interpolate_phase_end(
@@ -457,28 +548,16 @@
     state: ContactState,
     velocities: FloatArray,
     velocity_threshold: float,
-    max_idx: int,
+    _max_idx: int,
 ) -> float:
     """Interpolate end boundary of a phase with sub-frame precision.
 
     Returns:
         Fractional end frame, or float(end_idx) if no interpolation.
     """
-    if end_idx >= max_idx - 1 or end_idx + 1 >= len(velocities):
-        return float(end_idx)
-
-    vel_at = velocities[end_idx]
-    vel_after = velocities[end_idx + 1]
-
-    # Check threshold crossing based on state
-    is_takeoff = state == ContactState.ON_GROUND and vel_at < velocity_threshold < vel_after
-    is_landing = state == ContactState.IN_AIR and vel_at > velocity_threshold > vel_after
-
-    if is_takeoff or is_landing:
-        offset = interpolate_threshold_crossing(vel_at, vel_after, velocity_threshold)
-        return end_idx + offset
-
-    return float(end_idx)
+    return _interpolate_phase_boundary(
+        end_idx, state, velocities, velocity_threshold, is_start=False
+    )
 
 
 def find_interpolated_phase_transitions(
@@ -689,25 +768,28 @@ def find_landing_from_acceleration(
     accelerations: FloatArray,
     takeoff_frame: int,
     fps: float,
-    search_duration: float = 0.7,
+    search_duration: float = 1.5,
 ) -> int:
     """
-    Find landing frame by detecting impact acceleration after takeoff.
+    Find landing frame using position-based detection with acceleration refinement.
+
+    Primary method: Find when feet return to near-takeoff level after peak.
+    Secondary: Refine with acceleration spike if present.
 
-    Detects the moment of initial ground contact, characterized by a sharp
-    deceleration (positive acceleration spike) as downward velocity is arrested.
+    For drop jumps, landing is defined as the first ground contact after the
+    reactive jump, when feet return to approximately the same level as takeoff.
 
     Args:
-        positions: Array of vertical positions (normalized 0-1)
+        positions: Array of vertical positions (normalized 0-1, where higher = closer to ground)
         accelerations: Array of accelerations (second derivative)
         takeoff_frame: Frame at takeoff (end of ground contact)
         fps: Video frame rate
-        search_duration: Duration in seconds to search for landing (default: 0.7s)
+        search_duration: Duration in seconds to search for landing (default: 1.5s)
 
     Returns:
         Landing frame index (integer)
     """
-    # Find peak height (minimum y value = highest point)
+    # Extended search window to capture full flight
     search_start = takeoff_frame
     search_end = min(len(positions), takeoff_frame + int(fps * search_duration))
 
@@ -715,61 +797,91 @@
         return min(len(positions) - 1, takeoff_frame + int(fps * 0.3))
 
     flight_positions = positions[search_start:search_end]
+
+    # Find peak height (minimum y value = highest point)
     peak_idx = int(np.argmin(flight_positions))
     peak_frame = search_start + peak_idx
 
-    # After peak, look for landing (impact with ground)
-    # Landing is detected by maximum positive acceleration (deceleration on impact)
-    landing_search_start = peak_frame + 2
-    landing_search_end = min(len(accelerations), landing_search_start + int(fps * 0.6))
+    # Get takeoff position as reference for landing detection
+    takeoff_position = positions[takeoff_frame]
+
+    # Position-based landing: find first frame after peak where position
+    # returns to within 5% of takeoff level (or 95% of the way back)
+    landing_threshold = takeoff_position - 0.05 * (takeoff_position - positions[peak_frame])
 
-    if landing_search_end <= landing_search_start:
-        return min(len(positions) - 1, peak_frame + int(fps * 0.2))
+    # Search for landing after peak
+    landing_frame = None
+    for i in range(peak_frame + 2, min(len(positions), search_end)):
+        if positions[i] >= landing_threshold:
+            landing_frame = i
+            break
 
-    # Find impact: maximum negative acceleration after peak (deceleration on impact)
-    # The impact creates a large upward force (negative acceleration in Y-down)
-    landing_accelerations = accelerations[landing_search_start:landing_search_end]
-    impact_idx = int(np.argmin(landing_accelerations))
-    landing_frame = landing_search_start + impact_idx
+    # If position-based detection fails, use end of search window
+    if landing_frame is None:
+        landing_frame = min(len(positions) - 1, search_end - 1)
+
+    # Refine with acceleration if there's a clear impact spike
+    # Look for significant acceleration in a small window around the position-based landing
+    refine_start = max(peak_frame + 2, landing_frame - int(fps * 0.1))
+    refine_end = min(len(accelerations), landing_frame + int(fps * 0.1))
+
+    if refine_end > refine_start:
+        window_accelerations = accelerations[refine_start:refine_end]
+        # Check if there's a significant acceleration spike (> 3x median)
+        median_acc = float(np.median(np.abs(window_accelerations)))
+        max_acc_idx = int(np.argmax(np.abs(window_accelerations)))
+        max_acc = float(np.abs(window_accelerations[max_acc_idx]))
+
+        if median_acc > 0 and max_acc > 3 * median_acc:
+            # Use acceleration-refined landing frame
+            landing_frame = refine_start + max_acc_idx
 
     return landing_frame
 
 
 def compute_average_foot_position(
     landmarks: dict[str, tuple[float, float, float]],
+    visibility_threshold: float = 0.5,
 ) -> tuple[float, float]:
     """
     Compute average foot position from ankle and foot landmarks.
 
+    Uses tiered visibility approach to avoid returning center (0.5, 0.5)
+    which can cause false phase transitions in contact detection.
+
     Args:
         landmarks: Dictionary of landmark positions
+        visibility_threshold: Minimum visibility to include landmark (default: 0.5)
 
     Returns:
         (x, y) average foot position in normalized coordinates
     """
-    foot_keys = [
-        "left_ankle",
-        "right_ankle",
-        "left_heel",
-        "right_heel",
-        "left_foot_index",
-        "right_foot_index",
-    ]
-
-    x_positions = []
-    y_positions = []
-
-    for key in foot_keys:
+    # Collect all foot landmarks with their visibility
+    foot_data: list[tuple[float, float, float]] = []
+    for key in FOOT_KEYS:
         if key in landmarks:
             x, y, visibility = landmarks[key]
-            if visibility > 0.5:  # Only use visible landmarks
-                x_positions.append(x)
-                y_positions.append(y)
+            foot_data.append((x, y, visibility))
 
-    if not x_positions:
-        return (0.5, 0.5)  # Default to center if no visible feet
+    if not foot_data:
+        # No foot landmarks at all - return center as last resort
+        return (0.5, 0.5)
 
-    return (float(np.mean(x_positions)), float(np.mean(y_positions)))
+    # Tier 1: Use landmarks above visibility threshold
+    high_vis = [(x, y) for x, y, v in foot_data if v > visibility_threshold]
+    if high_vis:
+        xs, ys = zip(*high_vis, strict=False)
+        return (float(np.mean(xs)), float(np.mean(ys)))
+
+    # Tier 2: Use landmarks with any reasonable visibility (> 0.1)
+    low_vis = [(x, y) for x, y, v in foot_data if v > 0.1]
+    if low_vis:
+        xs, ys = zip(*low_vis, strict=False)
+        return (float(np.mean(xs)), float(np.mean(ys)))
+
+    # Tier 3: Use highest visibility landmark regardless of threshold
+    best = max(foot_data, key=lambda t: t[2])
+    return (best[0], best[1])
 
 
 def _calculate_average_visibility(
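An illustrative sketch of the position-based landing rule on a synthetic trajectory (hypothetical numbers, not package code): landing is the first frame after the peak whose y-position has returned at least 95% of the way from the apex back to the takeoff level.

    import numpy as np

    takeoff_frame = 10
    ascent = np.linspace(0.90, 0.60, 12)   # y decreases toward the apex
    descent = np.linspace(0.60, 0.92, 14)  # y increases back toward the ground
    positions = np.concatenate([np.full(takeoff_frame, 0.90), ascent, descent])

    peak_frame = takeoff_frame + int(np.argmin(positions[takeoff_frame:]))
    takeoff_position = positions[takeoff_frame]
    landing_threshold = takeoff_position - 0.05 * (takeoff_position - positions[peak_frame])

    landing_frame = next(
        (i for i in range(peak_frame + 2, len(positions)) if positions[i] >= landing_threshold),
        len(positions) - 1,  # fallback: end of the search window
    )
    print(peak_frame, landing_frame)  # 21 34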
@@ -783,8 +895,13 @@ def _calculate_average_visibility(
     Returns:
         Average visibility of foot landmarks (0.0 if none visible)
     """
-    foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-    foot_vis = [frame_landmarks[key][2] for key in foot_keys if key in frame_landmarks]
+    # Only use ankles and heels for visibility (foot_index can be noisy)
+    visibility_keys = ("left_ankle", "right_ankle", "left_heel", "right_heel")
+    foot_vis = [
+        frame_landmarks[key][2]
+        for key in FOOT_KEYS
+        if key in frame_landmarks and key in visibility_keys
+    ]
     return float(np.mean(foot_vis)) if foot_vis else 0.0
 