kinemotion 0.6.4__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,289 @@
+"""Automatic parameter tuning based on video characteristics."""
+
+from dataclasses import dataclass
+from enum import Enum
+
+import numpy as np
+
+
+class QualityPreset(str, Enum):
+    """Quality presets for analysis."""
+
+    FAST = "fast"  # Quick analysis, lower precision
+    BALANCED = "balanced"  # Default: good balance of speed and accuracy
+    ACCURATE = "accurate"  # Research-grade analysis, slower
+
+
+@dataclass
+class VideoCharacteristics:
+    """Characteristics extracted from video analysis."""
+
+    fps: float
+    frame_count: int
+    avg_visibility: float  # Average landmark visibility (0-1)
+    position_variance: float  # Variance in foot positions
+    has_stable_period: bool  # Whether video has initial stationary period
+    tracking_quality: str  # "low", "medium", "high"
+
+
+@dataclass
+class AnalysisParameters:
+    """Auto-tuned parameters for drop jump analysis."""
+
+    smoothing_window: int
+    polyorder: int
+    velocity_threshold: float
+    min_contact_frames: int
+    visibility_threshold: float
+    detection_confidence: float
+    tracking_confidence: float
+    outlier_rejection: bool
+    bilateral_filter: bool
+    use_curvature: bool
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary."""
+        return {
+            "smoothing_window": self.smoothing_window,
+            "polyorder": self.polyorder,
+            "velocity_threshold": self.velocity_threshold,
+            "min_contact_frames": self.min_contact_frames,
+            "visibility_threshold": self.visibility_threshold,
+            "detection_confidence": self.detection_confidence,
+            "tracking_confidence": self.tracking_confidence,
+            "outlier_rejection": self.outlier_rejection,
+            "bilateral_filter": self.bilateral_filter,
+            "use_curvature": self.use_curvature,
+        }
+
+
+def analyze_tracking_quality(avg_visibility: float) -> str:
+    """
+    Classify tracking quality based on average landmark visibility.
+
+    Args:
+        avg_visibility: Average visibility score across all tracked landmarks
+
+    Returns:
+        Quality classification: "low", "medium", or "high"
+    """
+    if avg_visibility < 0.4:
+        return "low"
+    elif avg_visibility < 0.7:
+        return "medium"
+    else:
+        return "high"
+
+
+def auto_tune_parameters(
+    characteristics: VideoCharacteristics,
+    quality_preset: QualityPreset = QualityPreset.BALANCED,
+) -> AnalysisParameters:
+    """
+    Automatically tune analysis parameters based on video characteristics.
+
+    This function implements heuristics to select optimal parameters without
+    requiring user expertise in video analysis or kinematic tracking.
+
+    Key principles:
+    1. FPS-based scaling: Higher fps needs lower velocity thresholds
+    2. Quality-based smoothing: Noisy video needs more smoothing
+    3. Always enable proven features: outlier rejection, curvature analysis
+    4. Preset modifiers: fast/balanced/accurate adjust base parameters
+
+    Args:
+        characteristics: Analyzed video characteristics
+        quality_preset: Quality vs speed tradeoff
+
+    Returns:
+        AnalysisParameters with auto-tuned values
+    """
+    fps = characteristics.fps
+    quality = characteristics.tracking_quality
+
+    # =================================================================
+    # STEP 1: FPS-based baseline parameters
+    # These scale automatically with frame rate to maintain consistent
+    # temporal resolution and sensitivity
+    # =================================================================
+
+    # Velocity threshold: Scale inversely with fps
+    # At 30fps, feet move ~2% of frame per frame when "stationary"
+    # At 60fps, feet move ~1% of frame per frame when "stationary"
+    # Formula: threshold = 0.02 * (30 / fps)
+    base_velocity_threshold = 0.02 * (30.0 / fps)
+
+    # Min contact frames: Scale with fps to maintain same time duration
+    # Goal: ~100ms minimum contact (3 frames @ 30fps, 6 frames @ 60fps)
+    # Formula: frames = round(3 * (fps / 30))
+    base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
+
+    # Smoothing window: Decrease with higher fps for better temporal resolution
+    # Lower fps (30fps): 5-frame window = 167ms
123
+ # Higher fps (60fps): 3-frame window = 50ms (same temporal resolution)
124
+ if fps <= 30:
125
+ base_smoothing_window = 5
126
+ elif fps <= 60:
127
+ base_smoothing_window = 3
128
+ else:
129
+ base_smoothing_window = 3 # Even at 120fps, 3 is minimum for Savitzky-Golay
130
+
131
+ # =================================================================
132
+ # STEP 2: Quality-based adjustments
133
+ # Adapt smoothing and filtering based on tracking quality
134
+ # =================================================================
135
+
136
+ smoothing_adjustment = 0
137
+ enable_bilateral = False
138
+
139
+ if quality == "low":
140
+ # Poor tracking quality: aggressive smoothing and filtering
141
+ smoothing_adjustment = +2
142
+ enable_bilateral = True
143
+ elif quality == "medium":
144
+ # Moderate quality: slight smoothing increase
145
+ smoothing_adjustment = +1
146
+ enable_bilateral = True
147
+ else: # high quality
148
+ # Good tracking: preserve detail, minimal smoothing
149
+ smoothing_adjustment = 0
150
+ enable_bilateral = False
151
+
152
+ # =================================================================
153
+ # STEP 3: Apply quality preset modifiers
154
+ # User can choose speed vs accuracy tradeoff
155
+ # =================================================================
156
+
157
+ if quality_preset == QualityPreset.FAST:
158
+ # Fast: Trade accuracy for speed
159
+ velocity_threshold = base_velocity_threshold * 1.5 # Less sensitive
160
+ min_contact_frames = max(2, int(base_min_contact_frames * 0.67))
161
+ smoothing_window = max(3, base_smoothing_window - 2 + smoothing_adjustment)
162
+ bilateral_filter = False # Skip expensive filtering
163
+ detection_confidence = 0.3
164
+ tracking_confidence = 0.3
165
+
166
+ elif quality_preset == QualityPreset.ACCURATE:
167
+ # Accurate: Maximize accuracy, accept slower processing
168
+ velocity_threshold = base_velocity_threshold * 0.5 # More sensitive
169
+ min_contact_frames = (
170
+ base_min_contact_frames # Don't increase (would miss brief)
171
+ )
172
+ smoothing_window = min(11, base_smoothing_window + 2 + smoothing_adjustment)
173
+ bilateral_filter = True # Always use for best accuracy
174
+ detection_confidence = 0.6
175
+ tracking_confidence = 0.6
176
+
177
+ else: # QualityPreset.BALANCED (default)
178
+ # Balanced: Good accuracy, reasonable speed
179
+ velocity_threshold = base_velocity_threshold
180
+ min_contact_frames = base_min_contact_frames
181
+ smoothing_window = max(3, base_smoothing_window + smoothing_adjustment)
182
+ bilateral_filter = enable_bilateral
183
+ detection_confidence = 0.5
184
+ tracking_confidence = 0.5
185
+
186
+ # Ensure smoothing window is odd (required for Savitzky-Golay)
187
+ if smoothing_window % 2 == 0:
188
+ smoothing_window += 1
189
+
190
+ # =================================================================
191
+ # STEP 4: Set fixed optimal values
192
+ # These are always the same regardless of video characteristics
193
+ # =================================================================
194
+
195
+ # Polyorder: Always 2 (quadratic) - optimal for jump physics (parabolic motion)
196
+ polyorder = 2
197
+
198
+ # Visibility threshold: Standard MediaPipe threshold
199
+ visibility_threshold = 0.5
200
+
201
+ # Always enable proven accuracy features
202
+ outlier_rejection = True # Removes tracking glitches (minimal cost)
203
+ use_curvature = True # Trajectory curvature analysis (minimal cost)
204
+
205
+ return AnalysisParameters(
206
+ smoothing_window=smoothing_window,
207
+ polyorder=polyorder,
208
+ velocity_threshold=velocity_threshold,
209
+ min_contact_frames=min_contact_frames,
210
+ visibility_threshold=visibility_threshold,
211
+ detection_confidence=detection_confidence,
212
+ tracking_confidence=tracking_confidence,
213
+ outlier_rejection=outlier_rejection,
214
+ bilateral_filter=bilateral_filter,
215
+ use_curvature=use_curvature,
216
+ )
217
+
218
+
219
+ def analyze_video_sample(
220
+ landmarks_sequence: list[dict[str, tuple[float, float, float]] | None],
221
+ fps: float,
222
+ frame_count: int,
223
+ ) -> VideoCharacteristics:
224
+ """
225
+ Analyze video characteristics from a sample of frames.
226
+
227
+ This function should be called after tracking the first 30-60 frames
228
+ to understand video quality and characteristics.
229
+
230
+ Args:
231
+ landmarks_sequence: Tracked landmarks from sample frames
232
+ fps: Video frame rate
233
+ frame_count: Total number of frames in video
234
+
235
+ Returns:
236
+ VideoCharacteristics with analyzed properties
237
+ """
238
+ # Calculate average landmark visibility
239
+ visibilities = []
240
+ positions = []
241
+
242
+ for frame_landmarks in landmarks_sequence:
243
+ if frame_landmarks:
244
+ # Collect visibility scores from foot landmarks
245
+ foot_keys = [
246
+ "left_ankle",
247
+ "right_ankle",
248
+ "left_heel",
249
+ "right_heel",
250
+ "left_foot_index",
251
+ "right_foot_index",
252
+ ]
253
+
254
+ frame_vis = []
255
+ frame_y_positions = []
256
+
257
+ for key in foot_keys:
258
+ if key in frame_landmarks:
259
+ _, y, vis = frame_landmarks[key] # x not needed for analysis
260
+ frame_vis.append(vis)
261
+ frame_y_positions.append(y)
262
+
263
+ if frame_vis:
264
+ visibilities.append(float(np.mean(frame_vis)))
265
+ if frame_y_positions:
266
+ positions.append(float(np.mean(frame_y_positions)))
267
+
268
+ # Compute metrics
269
+ avg_visibility = float(np.mean(visibilities)) if visibilities else 0.5
270
+ position_variance = float(np.var(positions)) if len(positions) > 1 else 0.0
271
+
272
+ # Determine tracking quality
273
+ tracking_quality = analyze_tracking_quality(avg_visibility)
274
+
275
+ # Check for stable period (indicates drop jump from elevated platform)
276
+ # Simple check: do first 30 frames have low variance?
277
+ has_stable_period = False
278
+ if len(positions) >= 30:
279
+ first_30_std = float(np.std(positions[:30]))
280
+ has_stable_period = first_30_std < 0.01 # Very stable = on platform
281
+
282
+ return VideoCharacteristics(
283
+ fps=fps,
284
+ frame_count=frame_count,
285
+ avg_visibility=avg_visibility,
286
+ position_variance=position_variance,
287
+ has_stable_period=has_stable_period,
288
+ tracking_quality=tracking_quality,
289
+ )
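The auto-tuning above rests on two FPS-scaling rules (velocity threshold = 0.02 * 30/fps, minimum contact time held near 100 ms) plus a visibility-based quality adjustment. A minimal usage sketch of how the new functions might be combined, assuming they are importable from the package; the landmark sample is made up and only names visible in the hunk above are used:

    # Illustrative only -- landmark values are fabricated
    sample = [
        {"left_ankle": (0.48, 0.62, 0.95), "right_ankle": (0.52, 0.62, 0.93)}
        for _ in range(60)
    ]

    chars = analyze_video_sample(sample, fps=60.0, frame_count=600)
    params = auto_tune_parameters(chars, quality_preset=QualityPreset.BALANCED)

    # At 60 fps with high average visibility (0.94 -> "high" tracking quality):
    #   velocity_threshold = 0.02 * (30 / 60) = 0.01
    #   min_contact_frames = round(3 * 60 / 30) = 6   (~100 ms)
    #   smoothing_window   = 3 (no quality adjustment, already odd)
    print(params.to_dict())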
@@ -104,7 +104,7 @@ def detect_outliers_median(
  # Mark as outlier if deviation exceeds threshold
  is_outlier = deviations > threshold

- return is_outlier # type: ignore[no-any-return]
+ return is_outlier


 def remove_outliers(
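This hunk shows only the tail of detect_outliers_median (the return no longer carries a mypy suppression). For context, a small illustration of the median-deviation idea the visible lines imply; the real function's windowing and default threshold are not shown in the diff, so the helper below is a hypothetical sketch:

    import numpy as np

    def median_deviation_outliers(values: np.ndarray, threshold: float = 0.05) -> np.ndarray:
        """Hypothetical sketch: flag samples deviating from the median by more than threshold."""
        deviations = np.abs(values - np.median(values))
        # Mark as outlier if deviation exceeds threshold (mirrors the lines above)
        is_outlier = deviations > threshold
        return is_outlier

    y = np.array([0.50, 0.51, 0.50, 0.90, 0.52])  # one tracking glitch at index 3
    print(median_deviation_outliers(y))  # [False False False  True False]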
@@ -1,6 +1,5 @@
 """Landmark smoothing utilities to reduce jitter in pose tracking."""

-
 import numpy as np
 from scipy.signal import savgol_filter

@@ -79,12 +78,12 @@ def smooth_landmarks(
 smoothed_sequence[frame_idx] = {}

 if (
- landmark_name not in smoothed_sequence[frame_idx] # type: ignore[operator]
+ landmark_name not in smoothed_sequence[frame_idx]
 and landmark_sequence[frame_idx] is not None
 ):
 # Keep original visibility
- orig_vis = landmark_sequence[frame_idx][landmark_name][2] # type: ignore[index]
- smoothed_sequence[frame_idx][landmark_name] = ( # type: ignore[index]
+ orig_vis = landmark_sequence[frame_idx][landmark_name][2]
+ smoothed_sequence[frame_idx][landmark_name] = (
 float(x_smooth[idx]),
 float(y_smooth[idx]),
 orig_vis,
@@ -125,7 +124,7 @@ def compute_velocity(
 for dim in range(velocity.shape[1]):
 velocity[:, dim] = savgol_filter(velocity[:, dim], smooth_window, 1)

- return velocity # type: ignore[no-any-return]
+ return velocity


 def compute_velocity_from_derivative(
@@ -154,7 +153,7 @@ def compute_velocity_from_derivative(
 """
 if len(positions) < window_length:
 # Fallback to simple differences for short sequences
- return np.abs(np.diff(positions, prepend=positions[0])) # type: ignore[no-any-return]
+ return np.abs(np.diff(positions, prepend=positions[0]))

 # Ensure window_length is odd
 if window_length % 2 == 0:
@@ -174,7 +173,7 @@ def compute_velocity_from_derivative(
 )

 # Return absolute velocity (magnitude only)
- return np.abs(velocity) # type: ignore[no-any-return]
+ return np.abs(velocity)


 def compute_acceleration_from_derivative(
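These two hunks touch only the short-sequence fallback and the final np.abs of compute_velocity_from_derivative; the Savitzky-Golay derivative at its core lies outside the diff. A brief sketch of that technique using scipy's savgol_filter with deriv=1; the window, polyorder, and delta values here are illustrative, not the package's defaults:

    import numpy as np
    from scipy.signal import savgol_filter

    fps = 60.0
    t = np.arange(120) / fps
    positions = 0.5 + 0.1 * np.sin(2 * np.pi * t)  # toy vertical trajectory

    # First derivative of a local Savitzky-Golay fit; delta=1/fps gives units per second
    velocity = savgol_filter(positions, window_length=5, polyorder=2, deriv=1, delta=1.0 / fps)
    speed = np.abs(velocity)  # magnitude only, as in the hunk above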
@@ -225,7 +224,7 @@ def compute_acceleration_from_derivative(
 mode="interp",
 )

- return acceleration # type: ignore[no-any-return]
+ return acceleration


 def smooth_landmarks_advanced(
@@ -345,12 +344,12 @@ def smooth_landmarks_advanced(
 smoothed_sequence[frame_idx] = {}

 if (
- landmark_name not in smoothed_sequence[frame_idx] # type: ignore[operator]
+ landmark_name not in smoothed_sequence[frame_idx]
 and landmark_sequence[frame_idx] is not None
 ):
 # Keep original visibility
- orig_vis = landmark_sequence[frame_idx][landmark_name][2] # type: ignore[index]
- smoothed_sequence[frame_idx][landmark_name] = ( # type: ignore[index]
+ orig_vis = landmark_sequence[frame_idx][landmark_name][2]
+ smoothed_sequence[frame_idx][landmark_name] = (
 float(x_smooth[idx]),
 float(y_smooth[idx]),
 orig_vis,
@@ -45,20 +45,35 @@ class VideoProcessor:
 self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

+ # Extract rotation metadata from video (iPhones store rotation in side_data_list)
+ # OpenCV ignores rotation metadata, so we need to extract and apply it manually
+ self.rotation = 0 # Will be set by _extract_video_metadata()
+
 # Calculate display dimensions considering SAR (Sample Aspect Ratio)
 # Mobile videos often have non-square pixels encoded in SAR metadata
 # OpenCV doesn't directly expose SAR, but we need to handle display correctly
 self.display_width = self.width
 self.display_height = self.height
- self._calculate_display_dimensions()
+ self._extract_video_metadata()
+
+ # Apply rotation to dimensions if needed
+ if self.rotation in [90, -90, 270]:
+ # Swap dimensions for 90/-90 degree rotations
+ self.width, self.height = self.height, self.width
+ self.display_width, self.display_height = (
+ self.display_height,
+ self.display_width,
+ )

- def _calculate_display_dimensions(self) -> None:
+ def _extract_video_metadata(self) -> None:
 """
- Calculate display dimensions by reading SAR metadata from video file.
+ Extract video metadata including SAR and rotation using ffprobe.

- Many mobile videos use non-square pixels (SAR != 1:1), which means
- the encoded dimensions differ from how the video should be displayed.
- We use ffprobe to extract this metadata.
+ Many mobile videos (especially from iPhones) have:
+ - Non-square pixels (SAR != 1:1) affecting display dimensions
+ - Rotation metadata in side_data_list that OpenCV ignores
+
+ We extract both to ensure proper display and pose detection.
 """
 try:
 # Use ffprobe to get SAR metadata
@@ -83,6 +98,8 @@ class VideoProcessor:
 data = json.loads(result.stdout)
 if "streams" in data and len(data["streams"]) > 0:
 stream = data["streams"][0]
+
+ # Extract SAR (Sample Aspect Ratio)
 sar_str = stream.get("sample_aspect_ratio", "1:1")

 # Parse SAR (e.g., "270:473")
@@ -98,14 +115,41 @@ class VideoProcessor:
 self.width * sar_width / sar_height
 )
 self.display_height = self.height
+
+ # Extract rotation from side_data_list (common for iPhone videos)
+ side_data_list = stream.get("side_data_list", [])
+ for side_data in side_data_list:
+ if side_data.get("side_data_type") == "Display Matrix":
+ rotation = side_data.get("rotation", 0)
+ # Convert to int and normalize to 0, 90, -90, 180
+ self.rotation = int(rotation)
 except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
 # If ffprobe fails, keep original dimensions (square pixels)
 pass

 def read_frame(self) -> np.ndarray | None:
- """Read next frame from video."""
+ """
+ Read next frame from video and apply rotation if needed.
+
+ OpenCV ignores rotation metadata, so we manually apply rotation
+ based on the display matrix metadata extracted from the video.
+ """
 ret, frame = self.cap.read()
- return frame if ret else None
+ if not ret:
+ return None
+
+ # Apply rotation if video has rotation metadata
+ if self.rotation == -90 or self.rotation == 270:
+ # -90 degrees = rotate 90 degrees clockwise
+ frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+ elif self.rotation == 90 or self.rotation == -270:
+ # 90 degrees = rotate 90 degrees counter-clockwise
+ frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+ elif self.rotation == 180 or self.rotation == -180:
+ # 180 degrees rotation
+ frame = cv2.rotate(frame, cv2.ROTATE_180)
+
+ return frame

 def reset(self) -> None:
 """Reset video to beginning."""
@@ -89,6 +89,123 @@ def calculate_adaptive_threshold(
     return adaptive_threshold


+def detect_drop_start(
+    positions: np.ndarray,
+    fps: float,
+    min_stationary_duration: float = 1.0,
+    position_change_threshold: float = 0.02,
+    smoothing_window: int = 5,
+    debug: bool = False,
+) -> int:
+    """
+    Detect when the drop jump actually starts by finding stable period then detecting drop.
+
+    Strategy:
+    1. Scan forward to find first STABLE period (low variance over N frames)
+    2. Use that stable period as baseline
+    3. Detect when position starts changing significantly from baseline
+
+    This handles videos where athlete steps onto box at start (unstable beginning).
+
+    Args:
+        positions: Array of vertical positions (0-1 normalized, y increases downward)
+        fps: Video frame rate
+        min_stationary_duration: Minimum duration (seconds) of stable period (default: 1.0s)
+        position_change_threshold: Position change indicating start of drop
+            (default: 0.02 = 2% of frame)
+        smoothing_window: Window for computing position variance
+        debug: Print debug information (default: False)
+
+    Returns:
+        Frame index where drop starts (or 0 if no clear stable period found)
+
+    Example:
+        - Frames 0-14: Stepping onto box (noisy, unstable)
+        - Frames 15-119: Standing on box (stable, low variance)
+        - Frame 119: Drop begins (position changes significantly)
+        - Returns: 119
+    """
+    min_stable_frames = int(fps * min_stationary_duration)
+    if len(positions) < min_stable_frames + 30:  # Need some frames after stable period
+        if debug:
+            min_frames_needed = min_stable_frames + 30
+            print(
+                f"[detect_drop_start] Video too short: {len(positions)} < {min_frames_needed}"
+            )
+        return 0
+
+    # STEP 1: Find first stable period by scanning forward
+    # Look for window with low variance (< 1% of frame height)
+    stability_threshold = 0.01  # 1% of frame height
+    stable_window = min_stable_frames
+
+    baseline_start = -1
+    baseline_position = 0.0
+
+    # Scan from start, looking for stable window
+    for start_idx in range(0, len(positions) - stable_window, 5):  # Step by 5 frames
+        window = positions[start_idx : start_idx + stable_window]
+        window_std = float(np.std(window))
+
+        if window_std < stability_threshold:
+            # Found stable period!
+            baseline_start = start_idx
+            baseline_position = float(np.median(window))
+
+            if debug:
+                end_frame = baseline_start + stable_window - 1
+                print("[detect_drop_start] Found stable period:")
+                print(f" frames {baseline_start}-{end_frame}")
+                print(f" baseline_position: {baseline_position:.4f}")
+                print(f" baseline_std: {window_std:.4f} < {stability_threshold:.4f}")
+            break
+
+    if baseline_start < 0:
+        if debug:
+            msg = (
+                f"No stable period found (variance always > {stability_threshold:.4f})"
+            )
+            print(f"[detect_drop_start] {msg}")
+        return 0
+
+    # STEP 2: Find when position changes significantly from baseline
+    # Start searching after stable period ends
+    search_start = baseline_start + stable_window
+    window_size = max(3, smoothing_window)
+
+    for i in range(search_start, len(positions) - window_size):
+        # Average position over small window to reduce noise
+        window_positions = positions[i : i + window_size]
+        avg_position = float(np.mean(window_positions))
+
+        # Check if position has increased (dropped) significantly
+        position_change = avg_position - baseline_position
+
+        if position_change > position_change_threshold:
+            # Found start of drop - back up slightly to catch beginning
+            drop_frame_candidate = i - window_size
+            if drop_frame_candidate < baseline_start:
+                drop_frame = baseline_start
+            else:
+                drop_frame = drop_frame_candidate
+
+            if debug:
+                print(f"[detect_drop_start] Drop detected at frame {drop_frame}")
+                print(
+                    f" position_change: {position_change:.4f} > {position_change_threshold:.4f}"
+                )
+                print(
+                    f" avg_position: {avg_position:.4f} vs baseline: {baseline_position:.4f}"
+                )

+            return drop_frame
+
+    # No significant position change detected
+    if debug:
+        print("[detect_drop_start] No drop detected after stable period")
+    return 0
+
+
 def detect_ground_contact(
     foot_positions: np.ndarray,
     velocity_threshold: float = 0.02,
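A quick way to sanity-check the new drop-start detector is a synthetic trace with the three phases its docstring describes; the values below are assumptions for illustration, not taken from the package's tests:

    import numpy as np

    fps = 60.0
    rng = np.random.default_rng(0)
    # ~0.5 s of noisy stepping, ~1.5 s standing still on the box, then the drop
    # (y increases downward in normalized image coordinates).
    stepping = 0.40 + rng.normal(0, 0.02, 30)
    standing = 0.40 + rng.normal(0, 0.002, 90)
    dropping = 0.40 + np.linspace(0, 0.3, 60)
    positions = np.concatenate([stepping, standing, dropping])

    start = detect_drop_start(positions, fps=fps, debug=True)
    # Expected: a frame close to 120, where the synthetic drop begins.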
@@ -273,7 +390,9 @@ def find_interpolated_phase_transitions(

 # Interpolate start boundary (transition INTO this phase)
 if start_idx > 0 and start_idx < len(velocities):
- vel_before = velocities[start_idx - 1] if start_idx > 0 else velocities[start_idx]
+ vel_before = (
+ velocities[start_idx - 1] if start_idx > 0 else velocities[start_idx]
+ )
 vel_at = velocities[start_idx]

 # Check if we're crossing the threshold at this boundary
@@ -392,9 +511,7 @@ def refine_transition_with_curvature(
 # Blend with original estimate (don't stray too far)
 # 70% curvature-based, 30% velocity-based
 blend_factor = 0.7
- refined_frame = (
- blend_factor * refined_frame + (1 - blend_factor) * estimated_frame
- )
+ refined_frame = blend_factor * refined_frame + (1 - blend_factor) * estimated_frame

 return refined_frame
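As a worked example of the blend above: with a curvature-based estimate of frame 102.0 and a velocity-based estimate of frame 100.0 (made-up values), the result is 0.7 * 102.0 + 0.3 * 100.0 = 101.4.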