kinemotion 0.6.4__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,289 @@
+ """Automatic parameter tuning based on video characteristics."""
+
+ from dataclasses import dataclass
+ from enum import Enum
+
+ import numpy as np
+
+
+ class QualityPreset(str, Enum):
+     """Quality presets for analysis."""
+
+     FAST = "fast"  # Quick analysis, lower precision
+     BALANCED = "balanced"  # Default: good balance of speed and accuracy
+     ACCURATE = "accurate"  # Research-grade analysis, slower
+
+
+ @dataclass
+ class VideoCharacteristics:
+     """Characteristics extracted from video analysis."""
+
+     fps: float
+     frame_count: int
+     avg_visibility: float  # Average landmark visibility (0-1)
+     position_variance: float  # Variance in foot positions
+     has_stable_period: bool  # Whether video has initial stationary period
+     tracking_quality: str  # "low", "medium", "high"
+
+
+ @dataclass
+ class AnalysisParameters:
+     """Auto-tuned parameters for drop jump analysis."""
+
+     smoothing_window: int
+     polyorder: int
+     velocity_threshold: float
+     min_contact_frames: int
+     visibility_threshold: float
+     detection_confidence: float
+     tracking_confidence: float
+     outlier_rejection: bool
+     bilateral_filter: bool
+     use_curvature: bool
+
+     def to_dict(self) -> dict:
+         """Convert to dictionary."""
+         return {
+             "smoothing_window": self.smoothing_window,
+             "polyorder": self.polyorder,
+             "velocity_threshold": self.velocity_threshold,
+             "min_contact_frames": self.min_contact_frames,
+             "visibility_threshold": self.visibility_threshold,
+             "detection_confidence": self.detection_confidence,
+             "tracking_confidence": self.tracking_confidence,
+             "outlier_rejection": self.outlier_rejection,
+             "bilateral_filter": self.bilateral_filter,
+             "use_curvature": self.use_curvature,
+         }
+
+
+ def analyze_tracking_quality(avg_visibility: float) -> str:
+     """
+     Classify tracking quality based on average landmark visibility.
+
+     Args:
+         avg_visibility: Average visibility score across all tracked landmarks
+
+     Returns:
+         Quality classification: "low", "medium", or "high"
+     """
+     if avg_visibility < 0.4:
+         return "low"
+     elif avg_visibility < 0.7:
+         return "medium"
+     else:
+         return "high"
+
+
+ def auto_tune_parameters(
+     characteristics: VideoCharacteristics,
+     quality_preset: QualityPreset = QualityPreset.BALANCED,
+ ) -> AnalysisParameters:
+     """
+     Automatically tune analysis parameters based on video characteristics.
+
+     This function implements heuristics to select optimal parameters without
+     requiring user expertise in video analysis or kinematic tracking.
+
+     Key principles:
+     1. FPS-based scaling: Higher fps needs lower velocity thresholds
+     2. Quality-based smoothing: Noisy video needs more smoothing
+     3. Always enable proven features: outlier rejection, curvature analysis
+     4. Preset modifiers: fast/balanced/accurate adjust base parameters
+
+     Args:
+         characteristics: Analyzed video characteristics
+         quality_preset: Quality vs speed tradeoff
+
+     Returns:
+         AnalysisParameters with auto-tuned values
+     """
+     fps = characteristics.fps
+     quality = characteristics.tracking_quality
+
+     # =================================================================
+     # STEP 1: FPS-based baseline parameters
+     # These scale automatically with frame rate to maintain consistent
+     # temporal resolution and sensitivity
+     # =================================================================
+
+     # Velocity threshold: Scale inversely with fps
+     # At 30fps, feet move ~2% of frame per frame when "stationary"
+     # At 60fps, feet move ~1% of frame per frame when "stationary"
+     # Formula: threshold = 0.02 * (30 / fps)
+     base_velocity_threshold = 0.02 * (30.0 / fps)
+
+     # Min contact frames: Scale with fps to maintain the same time duration
+     # Goal: ~100ms minimum contact (3 frames @ 30fps, 6 frames @ 60fps)
+     # Formula: frames = round(3 * (fps / 30))
+     base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
+
+     # Smoothing window: Decrease with higher fps for better temporal resolution
+     # Lower fps (30fps): 5-frame window = 167ms
+     # Higher fps (60fps): 3-frame window = 50ms (shorter span, finer resolution)
+     if fps <= 30:
+         base_smoothing_window = 5
+     elif fps <= 60:
+         base_smoothing_window = 3
+     else:
+         base_smoothing_window = 3  # Even at 120fps, 3 is the minimum for Savitzky-Golay
+
+     # =================================================================
+     # STEP 2: Quality-based adjustments
+     # Adapt smoothing and filtering based on tracking quality
+     # =================================================================
+
+     smoothing_adjustment = 0
+     enable_bilateral = False
+
+     if quality == "low":
+         # Poor tracking quality: aggressive smoothing and filtering
+         smoothing_adjustment = +2
+         enable_bilateral = True
+     elif quality == "medium":
+         # Moderate quality: slight smoothing increase
+         smoothing_adjustment = +1
+         enable_bilateral = True
+     else:  # high quality
+         # Good tracking: preserve detail, minimal smoothing
+         smoothing_adjustment = 0
+         enable_bilateral = False
+
+     # =================================================================
+     # STEP 3: Apply quality preset modifiers
+     # User can choose speed vs accuracy tradeoff
+     # =================================================================
+
+     if quality_preset == QualityPreset.FAST:
+         # Fast: Trade accuracy for speed
+         velocity_threshold = base_velocity_threshold * 1.5  # Less sensitive
+         min_contact_frames = max(2, int(base_min_contact_frames * 0.67))
+         smoothing_window = max(3, base_smoothing_window - 2 + smoothing_adjustment)
+         bilateral_filter = False  # Skip expensive filtering
+         detection_confidence = 0.3
+         tracking_confidence = 0.3
+
+     elif quality_preset == QualityPreset.ACCURATE:
+         # Accurate: Maximize accuracy, accept slower processing
+         velocity_threshold = base_velocity_threshold * 0.5  # More sensitive
+         min_contact_frames = (
+             base_min_contact_frames  # Don't increase (would miss brief contacts)
+         )
+         smoothing_window = min(11, base_smoothing_window + 2 + smoothing_adjustment)
+         bilateral_filter = True  # Always use for best accuracy
+         detection_confidence = 0.6
+         tracking_confidence = 0.6
+
+     else:  # QualityPreset.BALANCED (default)
+         # Balanced: Good accuracy, reasonable speed
+         velocity_threshold = base_velocity_threshold
+         min_contact_frames = base_min_contact_frames
+         smoothing_window = max(3, base_smoothing_window + smoothing_adjustment)
+         bilateral_filter = enable_bilateral
+         detection_confidence = 0.5
+         tracking_confidence = 0.5
+
+     # Ensure smoothing window is odd (required for Savitzky-Golay)
+     if smoothing_window % 2 == 0:
+         smoothing_window += 1
+
+     # =================================================================
+     # STEP 4: Set fixed optimal values
+     # These are always the same regardless of video characteristics
+     # =================================================================
+
+     # Polyorder: Always 2 (quadratic) - optimal for jump physics (parabolic motion)
+     polyorder = 2
+
+     # Visibility threshold: Standard MediaPipe threshold
+     visibility_threshold = 0.5
+
+     # Always enable proven accuracy features
+     outlier_rejection = True  # Removes tracking glitches (minimal cost)
+     use_curvature = True  # Trajectory curvature analysis (minimal cost)
+
+     return AnalysisParameters(
+         smoothing_window=smoothing_window,
+         polyorder=polyorder,
+         velocity_threshold=velocity_threshold,
+         min_contact_frames=min_contact_frames,
+         visibility_threshold=visibility_threshold,
+         detection_confidence=detection_confidence,
+         tracking_confidence=tracking_confidence,
+         outlier_rejection=outlier_rejection,
+         bilateral_filter=bilateral_filter,
+         use_curvature=use_curvature,
+     )
+
+
+ def analyze_video_sample(
+     landmarks_sequence: list[dict[str, tuple[float, float, float]] | None],
+     fps: float,
+     frame_count: int,
+ ) -> VideoCharacteristics:
+     """
+     Analyze video characteristics from a sample of frames.
+
+     This function should be called after tracking the first 30-60 frames
+     to understand video quality and characteristics.
+
+     Args:
+         landmarks_sequence: Tracked landmarks from sample frames
+         fps: Video frame rate
+         frame_count: Total number of frames in video
+
+     Returns:
+         VideoCharacteristics with analyzed properties
+     """
+     # Calculate average landmark visibility
+     visibilities = []
+     positions = []
+
+     for frame_landmarks in landmarks_sequence:
+         if frame_landmarks:
+             # Collect visibility scores from foot landmarks
+             foot_keys = [
+                 "left_ankle",
+                 "right_ankle",
+                 "left_heel",
+                 "right_heel",
+                 "left_foot_index",
+                 "right_foot_index",
+             ]
+
+             frame_vis = []
+             frame_y_positions = []
+
+             for key in foot_keys:
+                 if key in frame_landmarks:
+                     _, y, vis = frame_landmarks[key]  # x not needed for analysis
+                     frame_vis.append(vis)
+                     frame_y_positions.append(y)
+
+             if frame_vis:
+                 visibilities.append(float(np.mean(frame_vis)))
+             if frame_y_positions:
+                 positions.append(float(np.mean(frame_y_positions)))
+
+     # Compute metrics
+     avg_visibility = float(np.mean(visibilities)) if visibilities else 0.5
+     position_variance = float(np.var(positions)) if len(positions) > 1 else 0.0
+
+     # Determine tracking quality
+     tracking_quality = analyze_tracking_quality(avg_visibility)
+
+     # Check for stable period (indicates drop jump from elevated platform)
+     # Simple check: do the first 30 frames have low variance?
+     has_stable_period = False
+     if len(positions) >= 30:
+         first_30_std = float(np.std(positions[:30]))
+         has_stable_period = first_30_std < 0.01  # Very stable = on platform
+
+     return VideoCharacteristics(
+         fps=fps,
+         frame_count=frame_count,
+         avg_visibility=avg_visibility,
+         position_variance=position_variance,
+         has_stable_period=has_stable_period,
+         tracking_quality=tracking_quality,
+     )
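
For orientation, here is a minimal, self-contained sketch of how the new module above is driven end to end. The import path and call signatures are taken from this diff; the synthetic 60-frame landmark sample and the printed expectations are illustrative assumptions, not package documentation.

import numpy as np

from kinemotion.core.auto_tuning import (
    QualityPreset,
    analyze_video_sample,
    auto_tune_parameters,
)

# Synthetic sample: each frame maps landmark name -> (x, y, visibility),
# matching the list[dict[str, tuple[float, float, float]] | None] hint above.
rng = np.random.default_rng(0)
sample = [
    {
        "left_ankle": (0.50, 0.80 + rng.normal(0.0, 0.002), 0.9),
        "right_ankle": (0.55, 0.80 + rng.normal(0.0, 0.002), 0.9),
    }
    for _ in range(60)
]

chars = analyze_video_sample(sample, fps=60.0, frame_count=600)
params = auto_tune_parameters(chars, QualityPreset.ACCURATE)

print(chars.tracking_quality)     # "high" (0.9 average visibility >= 0.7)
print(params.velocity_threshold)  # 0.02 * (30/60) * 0.5 = 0.005 under ACCURATE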
@@ -104,7 +104,7 @@ def detect_outliers_median(
      # Mark as outlier if deviation exceeds threshold
      is_outlier = deviations > threshold

-     return is_outlier  # type: ignore[no-any-return]
+     return is_outlier


  def remove_outliers(
@@ -1,6 +1,5 @@
  """Landmark smoothing utilities to reduce jitter in pose tracking."""

-
  import numpy as np
  from scipy.signal import savgol_filter

@@ -79,12 +78,12 @@ def smooth_landmarks(
              smoothed_sequence[frame_idx] = {}

              if (
-                 landmark_name not in smoothed_sequence[frame_idx]  # type: ignore[operator]
+                 landmark_name not in smoothed_sequence[frame_idx]
                  and landmark_sequence[frame_idx] is not None
              ):
                  # Keep original visibility
-                 orig_vis = landmark_sequence[frame_idx][landmark_name][2]  # type: ignore[index]
-                 smoothed_sequence[frame_idx][landmark_name] = (  # type: ignore[index]
+                 orig_vis = landmark_sequence[frame_idx][landmark_name][2]
+                 smoothed_sequence[frame_idx][landmark_name] = (
                      float(x_smooth[idx]),
                      float(y_smooth[idx]),
                      orig_vis,
@@ -125,7 +124,7 @@ def compute_velocity(
      for dim in range(velocity.shape[1]):
          velocity[:, dim] = savgol_filter(velocity[:, dim], smooth_window, 1)

-     return velocity  # type: ignore[no-any-return]
+     return velocity


  def compute_velocity_from_derivative(
@@ -154,7 +153,7 @@ def compute_velocity_from_derivative(
      """
      if len(positions) < window_length:
          # Fallback to simple differences for short sequences
-         return np.abs(np.diff(positions, prepend=positions[0]))  # type: ignore[no-any-return]
+         return np.abs(np.diff(positions, prepend=positions[0]))

      # Ensure window_length is odd
      if window_length % 2 == 0:
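
Worth noting for readers unfamiliar with the derivative-based velocity above: scipy.signal.savgol_filter can differentiate while it smooths. Below is a rough standalone sketch; the window, polyorder, and delta values are illustrative assumptions, and only mode="interp" is actually visible elsewhere in this diff.

import numpy as np
from scipy.signal import savgol_filter

fps = 60.0
# Noisy 1-D trajectory standing in for normalized foot positions
y = np.cumsum(np.random.default_rng(1).normal(0.0, 0.01, 200))

# deriv=1 returns the smoothed first derivative; delta scales it to units/second
velocity = savgol_filter(y, window_length=7, polyorder=2, deriv=1,
                         delta=1.0 / fps, mode="interp")
speed = np.abs(velocity)  # magnitude only, as compute_velocity_from_derivative does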
@@ -174,7 +173,7 @@ def compute_velocity_from_derivative(
      )

      # Return absolute velocity (magnitude only)
-     return np.abs(velocity)  # type: ignore[no-any-return]
+     return np.abs(velocity)


  def compute_acceleration_from_derivative(
@@ -225,7 +224,7 @@ def compute_acceleration_from_derivative(
          mode="interp",
      )

-     return acceleration  # type: ignore[no-any-return]
+     return acceleration


  def smooth_landmarks_advanced(
@@ -345,12 +344,12 @@ def smooth_landmarks_advanced(
              smoothed_sequence[frame_idx] = {}

              if (
-                 landmark_name not in smoothed_sequence[frame_idx]  # type: ignore[operator]
+                 landmark_name not in smoothed_sequence[frame_idx]
                  and landmark_sequence[frame_idx] is not None
              ):
                  # Keep original visibility
-                 orig_vis = landmark_sequence[frame_idx][landmark_name][2]  # type: ignore[index]
-                 smoothed_sequence[frame_idx][landmark_name] = (  # type: ignore[index]
+                 orig_vis = landmark_sequence[frame_idx][landmark_name][2]
+                 smoothed_sequence[frame_idx][landmark_name] = (
                      float(x_smooth[idx]),
                      float(y_smooth[idx]),
                      orig_vis,
@@ -45,20 +45,35 @@ class VideoProcessor:
          self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
          self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

+         # Extract rotation metadata from video (iPhones store rotation in side_data_list)
+         # OpenCV ignores rotation metadata, so we need to extract and apply it manually
+         self.rotation = 0  # Will be set by _extract_video_metadata()
+
          # Calculate display dimensions considering SAR (Sample Aspect Ratio)
          # Mobile videos often have non-square pixels encoded in SAR metadata
          # OpenCV doesn't directly expose SAR, but we need to handle display correctly
          self.display_width = self.width
          self.display_height = self.height
-         self._calculate_display_dimensions()
+         self._extract_video_metadata()
+
+         # Apply rotation to dimensions if needed
+         if self.rotation in [90, -90, 270]:
+             # Swap dimensions for 90/-90 degree rotations
+             self.width, self.height = self.height, self.width
+             self.display_width, self.display_height = (
+                 self.display_height,
+                 self.display_width,
+             )

-     def _calculate_display_dimensions(self) -> None:
+     def _extract_video_metadata(self) -> None:
          """
-         Calculate display dimensions by reading SAR metadata from video file.
+         Extract video metadata including SAR and rotation using ffprobe.

-         Many mobile videos use non-square pixels (SAR != 1:1), which means
-         the encoded dimensions differ from how the video should be displayed.
-         We use ffprobe to extract this metadata.
+         Many mobile videos (especially from iPhones) have:
+         - Non-square pixels (SAR != 1:1) affecting display dimensions
+         - Rotation metadata in side_data_list that OpenCV ignores
+
+         We extract both to ensure proper display and pose detection.
          """
          try:
              # Use ffprobe to get SAR metadata
@@ -83,6 +98,8 @@ class VideoProcessor:
              data = json.loads(result.stdout)
              if "streams" in data and len(data["streams"]) > 0:
                  stream = data["streams"][0]
+
+                 # Extract SAR (Sample Aspect Ratio)
                  sar_str = stream.get("sample_aspect_ratio", "1:1")

                  # Parse SAR (e.g., "270:473")
@@ -98,14 +115,41 @@ class VideoProcessor:
                          self.width * sar_width / sar_height
                      )
                      self.display_height = self.height
+
+                 # Extract rotation from side_data_list (common for iPhone videos)
+                 side_data_list = stream.get("side_data_list", [])
+                 for side_data in side_data_list:
+                     if side_data.get("side_data_type") == "Display Matrix":
+                         rotation = side_data.get("rotation", 0)
+                         # Convert to int and normalize to 0, 90, -90, 180
+                         self.rotation = int(rotation)
          except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
              # If ffprobe fails, keep original dimensions (square pixels)
              pass

      def read_frame(self) -> np.ndarray | None:
-         """Read next frame from video."""
+         """
+         Read next frame from video and apply rotation if needed.
+
+         OpenCV ignores rotation metadata, so we manually apply rotation
+         based on the display matrix metadata extracted from the video.
+         """
          ret, frame = self.cap.read()
-         return frame if ret else None
+         if not ret:
+             return None
+
+         # Apply rotation if video has rotation metadata
+         if self.rotation == -90 or self.rotation == 270:
+             # -90 degrees = rotate 90 degrees clockwise
+             frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+         elif self.rotation == 90 or self.rotation == -270:
+             # 90 degrees = rotate 90 degrees counter-clockwise
+             frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+         elif self.rotation == 180 or self.rotation == -180:
+             # 180 degrees rotation
+             frame = cv2.rotate(frame, cv2.ROTATE_180)
+
+         return frame

      def reset(self) -> None:
          """Reset video to beginning."""
@@ -89,6 +89,123 @@ def calculate_adaptive_threshold(
      return adaptive_threshold


+ def detect_drop_start(
+     positions: np.ndarray,
+     fps: float,
+     min_stationary_duration: float = 1.0,
+     position_change_threshold: float = 0.02,
+     smoothing_window: int = 5,
+     debug: bool = False,
+ ) -> int:
+     """
+     Detect when the drop jump actually starts by finding a stable period, then detecting the drop.
+
+     Strategy:
+     1. Scan forward to find the first STABLE period (low variance over N frames)
+     2. Use that stable period as the baseline
+     3. Detect when the position starts changing significantly from the baseline
+
+     This handles videos where the athlete steps onto the box at the start (unstable beginning).
+
+     Args:
+         positions: Array of vertical positions (0-1 normalized, y increases downward)
+         fps: Video frame rate
+         min_stationary_duration: Minimum duration (seconds) of stable period (default: 1.0s)
+         position_change_threshold: Position change indicating start of drop
+             (default: 0.02 = 2% of frame)
+         smoothing_window: Window for computing position variance
+         debug: Print debug information (default: False)
+
+     Returns:
+         Frame index where drop starts (or 0 if no clear stable period found)
+
+     Example:
+         - Frames 0-14: Stepping onto box (noisy, unstable)
+         - Frames 15-119: Standing on box (stable, low variance)
+         - Frame 119: Drop begins (position changes significantly)
+         - Returns: 119
+     """
+     min_stable_frames = int(fps * min_stationary_duration)
+     if len(positions) < min_stable_frames + 30:  # Need some frames after stable period
+         if debug:
+             min_frames_needed = min_stable_frames + 30
+             print(
+                 f"[detect_drop_start] Video too short: {len(positions)} < {min_frames_needed}"
+             )
+         return 0
+
+     # STEP 1: Find the first stable period by scanning forward
+     # Look for a window with low variance (< 1% of frame height)
+     stability_threshold = 0.01  # 1% of frame height
+     stable_window = min_stable_frames
+
+     baseline_start = -1
+     baseline_position = 0.0
+
+     # Scan from the start, looking for a stable window
+     for start_idx in range(0, len(positions) - stable_window, 5):  # Step by 5 frames
+         window = positions[start_idx : start_idx + stable_window]
+         window_std = float(np.std(window))
+
+         if window_std < stability_threshold:
+             # Found stable period!
+             baseline_start = start_idx
+             baseline_position = float(np.median(window))
+
+             if debug:
+                 end_frame = baseline_start + stable_window - 1
+                 print("[detect_drop_start] Found stable period:")
+                 print(f"  frames {baseline_start}-{end_frame}")
+                 print(f"  baseline_position: {baseline_position:.4f}")
+                 print(f"  baseline_std: {window_std:.4f} < {stability_threshold:.4f}")
+             break
+
+     if baseline_start < 0:
+         if debug:
+             msg = (
+                 f"No stable period found (variance always > {stability_threshold:.4f})"
+             )
+             print(f"[detect_drop_start] {msg}")
+         return 0
+
+     # STEP 2: Find when the position changes significantly from the baseline
+     # Start searching after the stable period ends
+     search_start = baseline_start + stable_window
+     window_size = max(3, smoothing_window)
+
+     for i in range(search_start, len(positions) - window_size):
+         # Average position over a small window to reduce noise
+         window_positions = positions[i : i + window_size]
+         avg_position = float(np.mean(window_positions))
+
+         # Check if the position has increased (dropped) significantly
+         position_change = avg_position - baseline_position
+
+         if position_change > position_change_threshold:
+             # Found start of drop - back up slightly to catch the beginning
+             drop_frame_candidate = i - window_size
+             if drop_frame_candidate < baseline_start:
+                 drop_frame = baseline_start
+             else:
+                 drop_frame = drop_frame_candidate
+
+             if debug:
+                 print(f"[detect_drop_start] Drop detected at frame {drop_frame}")
+                 print(
+                     f"  position_change: {position_change:.4f} > {position_change_threshold:.4f}"
+                 )
+                 print(
+                     f"  avg_position: {avg_position:.4f} vs baseline: {baseline_position:.4f}"
+                 )
+
+             return drop_frame
+
+     # No significant position change detected
+     if debug:
+         print("[detect_drop_start] No drop detected after stable period")
+     return 0
+
+
  def detect_ground_contact(
      foot_positions: np.ndarray,
      velocity_threshold: float = 0.02,
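
A quick way to sanity-check detect_drop_start is a synthetic trace shaped like the docstring example; the numbers below are invented for illustration.

import numpy as np

from kinemotion.dropjump.analysis import detect_drop_start

fps = 60.0
standing = np.full(120, 0.30)            # ~2 s standing on the box (y is stable)
dropping = np.linspace(0.30, 0.80, 30)   # y grows downward as the athlete drops
positions = np.concatenate([standing, dropping])

frame = detect_drop_start(positions, fps, debug=True)
# Expect a frame just under 120: the detector backs up by its averaging
# window after the mean position first exceeds baseline + 0.02.
print(frame)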
@@ -273,7 +390,9 @@ def find_interpolated_phase_transitions(

      # Interpolate start boundary (transition INTO this phase)
      if start_idx > 0 and start_idx < len(velocities):
-         vel_before = velocities[start_idx - 1] if start_idx > 0 else velocities[start_idx]
+         vel_before = (
+             velocities[start_idx - 1] if start_idx > 0 else velocities[start_idx]
+         )
          vel_at = velocities[start_idx]

          # Check if we're crossing the threshold at this boundary
@@ -392,9 +511,7 @@ def refine_transition_with_curvature(
      # Blend with original estimate (don't stray too far)
      # 70% curvature-based, 30% velocity-based
      blend_factor = 0.7
-     refined_frame = (
-         blend_factor * refined_frame + (1 - blend_factor) * estimated_frame
-     )
+     refined_frame = blend_factor * refined_frame + (1 - blend_factor) * estimated_frame

      return refined_frame

@@ -3,10 +3,16 @@
  import json
  import sys
  from pathlib import Path
+ from typing import Any

  import click
  import numpy as np

+ from ..core.auto_tuning import (
+     QualityPreset,
+     analyze_video_sample,
+     auto_tune_parameters,
+ )
  from ..core.pose import PoseTracker
  from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
  from ..core.video_io import VideoProcessor
@@ -33,140 +39,102 @@ from .kinematics import calculate_drop_jump_metrics
      help="Path for JSON metrics output (default: stdout)",
  )
  @click.option(
-     "--smoothing-window",
-     type=int,
-     default=5,
-     help="Smoothing window size (must be odd, >= 3)",
-     show_default=True,
+     "--drop-height",
+     type=float,
+     required=True,
+     help=(
+         "Height of drop box/platform in meters (e.g., 0.40 for 40cm box) - "
+         "REQUIRED for accurate calibration"
+     ),
  )
  @click.option(
-     "--polyorder",
-     type=int,
-     default=2,
+     "--quality",
+     type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
+     default="balanced",
      help=(
-         "Polynomial order for Savitzky-Golay smoothing "
-         "(2=quadratic, 3=cubic, must be < smoothing-window)"
+         "Analysis quality preset: "
+         "fast (quick, less precise), "
+         "balanced (default, good for most cases), "
+         "accurate (research-grade, slower)"
      ),
      show_default=True,
  )
  @click.option(
-     "--outlier-rejection/--no-outlier-rejection",
-     default=True,
-     help=(
-         "Apply RANSAC and median-based outlier rejection to remove tracking glitches "
-         "(default: enabled, +1-2%% accuracy)"
-     ),
+     "--verbose",
+     "-v",
+     is_flag=True,
+     help="Show auto-selected parameters and analysis details",
  )
+ # Expert parameters (hidden in help, but always available for advanced users)
  @click.option(
-     "--bilateral-filter/--no-bilateral-filter",
-     default=False,
-     help=(
-         "Use bilateral temporal filter for edge-preserving smoothing "
-         "(default: disabled, experimental)"
-     ),
+     "--drop-start-frame",
+     type=int,
+     default=None,
+     help="[EXPERT] Manually specify frame where drop begins (overrides auto-detection)",
+ )
+ @click.option(
+     "--smoothing-window",
+     type=int,
+     default=None,
+     help="[EXPERT] Override auto-tuned smoothing window size",
  )
  @click.option(
      "--velocity-threshold",
      type=float,
-     default=0.02,
-     help="Velocity threshold for contact detection (normalized units)",
-     show_default=True,
+     default=None,
+     help="[EXPERT] Override auto-tuned velocity threshold",
  )
  @click.option(
      "--min-contact-frames",
      type=int,
-     default=3,
-     help="Minimum frames for valid ground contact",
-     show_default=True,
+     default=None,
+     help="[EXPERT] Override auto-tuned minimum contact frames",
  )
  @click.option(
      "--visibility-threshold",
      type=float,
-     default=0.5,
-     help="Minimum landmark visibility score (0-1)",
-     show_default=True,
+     default=None,
+     help="[EXPERT] Override visibility threshold",
  )
  @click.option(
      "--detection-confidence",
      type=float,
-     default=0.5,
-     help="Pose detection confidence threshold (0-1)",
-     show_default=True,
+     default=None,
+     help="[EXPERT] Override pose detection confidence",
  )
  @click.option(
      "--tracking-confidence",
      type=float,
-     default=0.5,
-     help="Pose tracking confidence threshold (0-1)",
-     show_default=True,
- )
- @click.option(
-     "--drop-height",
-     type=float,
      default=None,
-     help="Height of drop box/platform in meters (e.g., 0.40 for 40cm) - used for calibration",
- )
- @click.option(
-     "--use-curvature/--no-curvature",
-     default=True,
-     help="Use trajectory curvature analysis for refining transitions (default: enabled)",
- )
- @click.option(
-     "--kinematic-correction-factor",
-     type=float,
-     default=1.0,
-     help=(
-         "Correction factor for kinematic jump height (default: 1.0 = no correction). "
-         "Historical testing suggested 1.35, but this is UNVALIDATED. "
-         "Use --drop-height for validated measurements."
-     ),
-     show_default=True,
+     help="[EXPERT] Override pose tracking confidence",
  )
  def dropjump_analyze(
      video_path: str,
      output: str | None,
      json_output: str | None,
-     smoothing_window: int,
-     polyorder: int,
-     outlier_rejection: bool,
-     bilateral_filter: bool,
-     velocity_threshold: float,
-     min_contact_frames: int,
-     visibility_threshold: float,
-     detection_confidence: float,
-     tracking_confidence: float,
-     drop_height: float | None,
-     use_curvature: bool,
-     kinematic_correction_factor: float,
+     drop_height: float,
+     quality: str,
+     verbose: bool,
+     drop_start_frame: int | None,
+     smoothing_window: int | None,
+     velocity_threshold: float | None,
+     min_contact_frames: int | None,
+     visibility_threshold: float | None,
+     detection_confidence: float | None,
+     tracking_confidence: float | None,
  ) -> None:
      """
      Analyze drop-jump video to estimate ground contact time, flight time, and jump height.

+     Uses intelligent auto-tuning to select optimal parameters based on video characteristics.
+     Parameters are automatically adjusted for frame rate, tracking quality, and analysis preset.
+
      VIDEO_PATH: Path to the input video file
      """
      click.echo(f"Analyzing video: {video_path}", err=True)

-     # Validate parameters
-     if smoothing_window < 3:
-         click.echo("Error: smoothing-window must be >= 3", err=True)
-         sys.exit(1)
-
-     if smoothing_window % 2 == 0:
-         smoothing_window += 1
-         click.echo(
-             f"Adjusting smoothing-window to {smoothing_window} (must be odd)", err=True
-         )
-
-     if polyorder < 1:
-         click.echo("Error: polyorder must be >= 1", err=True)
-         sys.exit(1)
-
-     if polyorder >= smoothing_window:
-         click.echo(
-             f"Error: polyorder ({polyorder}) must be < smoothing-window ({smoothing_window})",
-             err=True,
-         )
-         sys.exit(1)
+     # Convert quality string to enum
+     quality_preset = QualityPreset(quality.lower())

      try:
          # Initialize video processor
@@ -177,10 +145,32 @@ def dropjump_analyze(
              err=True,
          )

+         # ================================================================
+         # STEP 1: Auto-tune parameters based on video characteristics
+         # ================================================================
+
+         # Analyze video characteristics from a sample to determine optimal parameters
+         # We'll use detection/tracking confidence from quality preset for initial tracking
+         initial_detection_conf = 0.5
+         initial_tracking_conf = 0.5
+
+         if quality_preset == QualityPreset.FAST:
+             initial_detection_conf = 0.3
+             initial_tracking_conf = 0.3
+         elif quality_preset == QualityPreset.ACCURATE:
+             initial_detection_conf = 0.6
+             initial_tracking_conf = 0.6
+
+         # Override with expert values if provided
+         if detection_confidence is not None:
+             initial_detection_conf = detection_confidence
+         if tracking_confidence is not None:
+             initial_tracking_conf = tracking_confidence
+
          # Initialize pose tracker
          tracker = PoseTracker(
-             min_detection_confidence=detection_confidence,
-             min_tracking_confidence=tracking_confidence,
+             min_detection_confidence=initial_detection_conf,
+             min_tracking_confidence=initial_tracking_conf,
          )

          # Process all frames
@@ -189,6 +179,7 @@ def dropjump_analyze(
          frames = []

          frame_idx = 0
+         bar: Any
          with click.progressbar(
              length=video.frame_count, label="Processing frames"
          ) as bar:
@@ -210,28 +201,90 @@ def dropjump_analyze(
              click.echo("Error: No frames processed", err=True)
              sys.exit(1)

-         # Smooth landmarks
-         if outlier_rejection or bilateral_filter:
-             if outlier_rejection:
+         # ================================================================
+         # STEP 2: Analyze video characteristics and auto-tune parameters
+         # ================================================================
+
+         characteristics = analyze_video_sample(
+             landmarks_sequence, video.fps, video.frame_count
+         )
+
+         # Auto-tune parameters based on video characteristics
+         params = auto_tune_parameters(characteristics, quality_preset)
+
+         # Apply expert overrides if provided
+         if smoothing_window is not None:
+             params.smoothing_window = smoothing_window
+         if velocity_threshold is not None:
+             params.velocity_threshold = velocity_threshold
+         if min_contact_frames is not None:
+             params.min_contact_frames = min_contact_frames
+         if visibility_threshold is not None:
+             params.visibility_threshold = visibility_threshold
+
+         # Show selected parameters if verbose
+         if verbose:
+             click.echo("\n" + "=" * 60, err=True)
+             click.echo("AUTO-TUNED PARAMETERS", err=True)
+             click.echo("=" * 60, err=True)
+             click.echo(f"Video FPS: {video.fps:.2f}", err=True)
+             click.echo(
+                 f"Tracking quality: {characteristics.tracking_quality} "
+                 f"(avg visibility: {characteristics.avg_visibility:.2f})",
+                 err=True,
+             )
+             click.echo(f"Quality preset: {quality_preset.value}", err=True)
+             click.echo("\nSelected parameters:", err=True)
+             click.echo(f"  smoothing_window: {params.smoothing_window}", err=True)
+             click.echo(f"  polyorder: {params.polyorder}", err=True)
+             click.echo(
+                 f"  velocity_threshold: {params.velocity_threshold:.4f}", err=True
+             )
+             click.echo(
+                 f"  min_contact_frames: {params.min_contact_frames}", err=True
+             )
+             click.echo(
+                 f"  visibility_threshold: {params.visibility_threshold}", err=True
+             )
+             click.echo(
+                 f"  detection_confidence: {params.detection_confidence}", err=True
+             )
+             click.echo(
+                 f"  tracking_confidence: {params.tracking_confidence}", err=True
+             )
+             click.echo(f"  outlier_rejection: {params.outlier_rejection}", err=True)
+             click.echo(f"  bilateral_filter: {params.bilateral_filter}", err=True)
+             click.echo(f"  use_curvature: {params.use_curvature}", err=True)
+             click.echo("=" * 60 + "\n", err=True)
+
+         # ================================================================
+         # STEP 3: Apply smoothing with auto-tuned parameters
+         # ================================================================
+
+         # Smooth landmarks using auto-tuned parameters
+         if params.outlier_rejection or params.bilateral_filter:
+             if params.outlier_rejection:
                  click.echo(
                      "Smoothing landmarks with outlier rejection...", err=True
                  )
-             if bilateral_filter:
+             if params.bilateral_filter:
                  click.echo(
                      "Using bilateral temporal filter for edge-preserving smoothing...",
                      err=True,
                  )
              smoothed_landmarks = smooth_landmarks_advanced(
                  landmarks_sequence,
-                 window_length=smoothing_window,
-                 polyorder=polyorder,
-                 use_outlier_rejection=outlier_rejection,
-                 use_bilateral=bilateral_filter,
+                 window_length=params.smoothing_window,
+                 polyorder=params.polyorder,
+                 use_outlier_rejection=params.outlier_rejection,
+                 use_bilateral=params.bilateral_filter,
              )
          else:
              click.echo("Smoothing landmarks...", err=True)
              smoothed_landmarks = smooth_landmarks(
-                 landmarks_sequence, window_length=smoothing_window, polyorder=polyorder
+                 landmarks_sequence,
+                 window_length=params.smoothing_window,
+                 polyorder=params.polyorder,
              )

          # Extract vertical positions from feet
@@ -261,42 +314,40 @@ def dropjump_analyze(
                  )
              else:
                  # Use previous position if available, otherwise default
-                 position_list.append(
-                     position_list[-1] if position_list else 0.5
-                 )
+                 position_list.append(position_list[-1] if position_list else 0.5)
                  visibilities_list.append(0.0)

          vertical_positions: np.ndarray = np.array(position_list)
          visibilities: np.ndarray = np.array(visibilities_list)

-         # Detect ground contact
+         # Detect ground contact using auto-tuned parameters
          contact_states = detect_ground_contact(
              vertical_positions,
-             velocity_threshold=velocity_threshold,
-             min_contact_frames=min_contact_frames,
-             visibility_threshold=visibility_threshold,
+             velocity_threshold=params.velocity_threshold,
+             min_contact_frames=params.min_contact_frames,
+             visibility_threshold=params.visibility_threshold,
              visibilities=visibilities,
-             window_length=smoothing_window,
-             polyorder=polyorder,
+             window_length=params.smoothing_window,
+             polyorder=params.polyorder,
          )

          # Calculate metrics
          click.echo("Calculating metrics...", err=True)
-         if drop_height:
-             click.echo(
-                 f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
-                 err=True,
-             )
+         click.echo(
+             f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
+             err=True,
+         )
          metrics = calculate_drop_jump_metrics(
              contact_states,
              vertical_positions,
              video.fps,
              drop_height_m=drop_height,
-             velocity_threshold=velocity_threshold,
-             smoothing_window=smoothing_window,
-             polyorder=polyorder,
-             use_curvature=use_curvature,
-             kinematic_correction_factor=kinematic_correction_factor,
+             drop_start_frame=drop_start_frame,
+             velocity_threshold=params.velocity_threshold,
+             smoothing_window=params.smoothing_window,
+             polyorder=params.polyorder,
+             use_curvature=params.use_curvature,
+             kinematic_correction_factor=1.0,  # Always 1.0 now (no experimental correction)
          )

          # Output metrics as JSON
@@ -313,7 +364,10 @@ def dropjump_analyze(
          # Generate debug video if requested
          if output:
              click.echo(f"Generating debug video: {output}", err=True)
-             if video.display_width != video.width or video.display_height != video.height:
+             if (
+                 video.display_width != video.width
+                 or video.display_height != video.height
+             ):
                  click.echo(
                      f"Source video encoded: {video.width}x{video.height}",
                      err=True,
@@ -337,9 +391,10 @@ def dropjump_analyze(
                  video.display_height,
                  video.fps,
              ) as renderer:
+                 render_bar: Any
                  with click.progressbar(
                      length=len(frames), label="Rendering frames"
-                 ) as bar:
+                 ) as render_bar:
                      for i, frame in enumerate(frames):
                          annotated = renderer.render_frame(
                              frame,
@@ -350,7 +405,7 @@ def dropjump_analyze(
                              use_com=False,
                          )
                          renderer.write_frame(annotated)
-                         bar.update(1)
+                         render_bar.update(1)

          click.echo(f"Debug video saved: {output}", err=True)
 
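Because the option surface changed so heavily, a usage sketch may help. Assuming the decorated dropjump_analyze object is a regular click command (the decorators above imply it, though the @click.command/@click.argument lines fall outside this diff), it can be exercised with click's test runner:

from click.testing import CliRunner

from kinemotion.dropjump.cli import dropjump_analyze

runner = CliRunner()
# --drop-height is now required; --quality picks the auto-tuning preset;
# the remaining [EXPERT] flags default to None and only override auto-tuning.
result = runner.invoke(
    dropjump_analyze,
    ["jump.mp4", "--drop-height", "0.40", "--quality", "accurate", "--verbose"],
)
print(result.output)  # with --verbose, includes the AUTO-TUNED PARAMETERS block

Here jump.mp4 is a placeholder; a real video path is needed for the run to get past VideoProcessor.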
@@ -38,7 +38,7 @@ class DebugOverlayRenderer:
          self.needs_resize = (display_width != width) or (display_height != height)

          # Try H.264 codec first (better quality/compatibility), fallback to mp4v
-         fourcc = cv2.VideoWriter_fourcc(*"avc1")  # type: ignore[attr-defined]
+         fourcc = cv2.VideoWriter_fourcc(*"avc1")
          # IMPORTANT: cv2.VideoWriter expects (width, height) tuple - NOT (height, width)
          # Write at display dimensions so video displays correctly without SAR metadata
          self.writer = cv2.VideoWriter(
@@ -47,7 +47,7 @@ class DebugOverlayRenderer:

          # Check if writer opened successfully, fallback to mp4v if not
          if not self.writer.isOpened():
-             fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # type: ignore[attr-defined]
+             fourcc = cv2.VideoWriter_fourcc(*"mp4v")
              self.writer = cv2.VideoWriter(
                  output_path, fourcc, fps, (display_width, display_height)
              )
@@ -93,7 +93,9 @@ class DebugOverlayRenderer:

          # Draw CoM with larger circle
          color = (
-             (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+             (0, 255, 0)
+             if contact_state == ContactState.ON_GROUND
+             else (0, 0, 255)
          )
          cv2.circle(annotated, (px, py), 15, color, -1)
          cv2.circle(annotated, (px, py), 17, (255, 255, 255), 2)  # White border
@@ -105,7 +107,9 @@ class DebugOverlayRenderer:
              rh_x, rh_y, _ = landmarks["right_hip"]
              hip_x = int((lh_x + rh_x) / 2 * self.width)
              hip_y = int((lh_y + rh_y) / 2 * self.height)
-             cv2.circle(annotated, (hip_x, hip_y), 8, (255, 165, 0), -1)  # Orange
+             cv2.circle(
+                 annotated, (hip_x, hip_y), 8, (255, 165, 0), -1
+             )  # Orange
              # Draw line from hip to CoM
              cv2.line(annotated, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
          else:
@@ -116,7 +120,9 @@ class DebugOverlayRenderer:

          # Draw foot position circle
          color = (
-             (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+             (0, 255, 0)
+             if contact_state == ContactState.ON_GROUND
+             else (0, 0, 255)
          )
          cv2.circle(annotated, (px, py), 10, color, -1)

@@ -1,10 +1,10 @@
  """Kinematic calculations for drop-jump metrics."""

-
  import numpy as np

  from .analysis import (
      ContactState,
+     detect_drop_start,
      find_contact_phases,
      find_interpolated_phase_transitions_with_curvature,
  )
@@ -109,6 +109,7 @@ def calculate_drop_jump_metrics(
      foot_y_positions: np.ndarray,
      fps: float,
      drop_height_m: float | None = None,
+     drop_start_frame: int | None = None,
      velocity_threshold: float = 0.02,
      smoothing_window: int = 5,
      polyorder: int = 2,
@@ -135,6 +136,20 @@ def calculate_drop_jump_metrics(
          DropJumpMetrics object with calculated values
      """
      metrics = DropJumpMetrics()
+
+     # Detect or use manually specified drop jump start frame
+     if drop_start_frame is None:
+         # Auto-detect where drop jump actually starts (skip initial stationary period)
+         drop_start_frame = detect_drop_start(
+             foot_y_positions,
+             fps,
+             min_stationary_duration=0.5,  # 0.5s stable period (~30 frames @ 60fps)
+             position_change_threshold=0.005,  # 0.5% of frame height - sensitive to drop start
+             smoothing_window=smoothing_window,
+         )
+     # If manually specified or auto-detected, use it
+     drop_start_frame_value = drop_start_frame if drop_start_frame is not None else 0
+
      phases = find_contact_phases(contact_states)

      # Get interpolated phases with curvature-based refinement
@@ -148,6 +163,23 @@ def calculate_drop_jump_metrics(
          use_curvature,
      )

+     if not phases:
+         return metrics
+
+     # Filter phases to only include those after the drop start
+     # This removes the initial stationary period where the athlete is standing on the box
+     if drop_start_frame_value > 0:
+         phases = [
+             (start, end, state)
+             for start, end, state in phases
+             if end >= drop_start_frame_value
+         ]
+         interpolated_phases = [
+             (start, end, state)
+             for start, end, state in interpolated_phases
+             if end >= drop_start_frame_value
+         ]
+
      if not phases:
          return metrics

@@ -177,7 +209,9 @@ def calculate_drop_jump_metrics(

      # Find ground phase after first air phase
      ground_after_air = [
-         (start, end, idx) for start, end, idx in ground_phases if idx > first_air_idx
+         (start, end, idx)
+         for start, end, idx in ground_phases
+         if idx > first_air_idx
      ]

      if ground_after_air and first_ground_idx < first_air_idx:
@@ -241,7 +275,9 @@ def calculate_drop_jump_metrics(
      # Look back a few frames to get stable position on box
      lookback_start = max(0, first_air_start - 5)
      if lookback_start < first_air_start:
-         initial_position = float(np.mean(foot_y_positions[lookback_start:first_air_start]))
+         initial_position = float(
+             np.mean(foot_y_positions[lookback_start:first_air_start])
+         )
      else:
          initial_position = float(foot_y_positions[first_air_start])

@@ -337,13 +373,17 @@ def calculate_drop_jump_metrics(
          # For validated measurements, use:
          # - Calibrated measurement with --drop-height parameter
          # - Or compare against validated measurement systems
-         metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
+         metrics.jump_height = (
+             jump_height_kinematic * kinematic_correction_factor
+         )
          metrics.jump_height_kinematic = jump_height_kinematic
      else:
          # Fallback to kinematic if no position data
          if drop_height_m is None:
              # Apply kinematic correction factor (see detailed comment above)
-             metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
+             metrics.jump_height = (
+                 jump_height_kinematic * kinematic_correction_factor
+             )
          else:
              metrics.jump_height = jump_height_kinematic
          metrics.jump_height_kinematic = jump_height_kinematic
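
The drop-start filtering added in this file reduces to one rule: discard any phase that ends before the detected drop start. In isolation (the phase tuples here are hypothetical (start, end, state) triples, mirroring the list comprehensions above):

drop_start_frame_value = 119
phases = [(0, 118, "ground"), (119, 140, "air"), (141, 160, "ground")]

phases = [
    (start, end, state)
    for start, end, state in phases
    if end >= drop_start_frame_value
]
print(phases)  # [(119, 140, 'air'), (141, 160, 'ground')] - the on-box phase is gone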
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kinemotion
- Version: 0.6.4
+ Version: 0.7.0
  Summary: Video-based kinematic analysis for athletic performance
  Project-URL: Homepage, https://github.com/feniix/kinemotion
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -0,0 +1,18 @@
+ kinemotion/__init__.py,sha256=JhS0ZTgcTdcMH5WcIyWxEqZJPOoBUSKX8tT8hsG-xWk,98
+ kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
+ kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
+ kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQIdw,10508
+ kinemotion/core/filtering.py,sha256=5opOq0Fbot_AvsT8cT2kS9uyD_uqLW-jR6SXZbg672c,11235
+ kinemotion/core/pose.py,sha256=5Dhw3LqX3STR-eLb5JAQkxhS-dd0PqGytBWnaQ66nWc,8391
+ kinemotion/core/smoothing.py,sha256=VVv95auiuah_GPG3jxiQPyiYXF5i3B4fF9UGI5FLX-Q,12897
+ kinemotion/core/video_io.py,sha256=z8Z0qbNaKbcdB40KnbNOBMzab3BbgnhBxp-mUBYeXgM,6577
+ kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
+ kinemotion/dropjump/analysis.py,sha256=HfJt2t9IsMBiBUz7apIzdxbRH9QqzlFnDVVWcKhU3ow,23291
+ kinemotion/dropjump/cli.py,sha256=nhcqYClTx9R0XeTduJCNspltNgeaK4W8ZUT1ACB8GFI,15601
+ kinemotion/dropjump/debug_overlay.py,sha256=hmEtadqYP8K-kGr_Q03KDQyl1152-YSPeRJzEXMyuhs,8687
+ kinemotion/dropjump/kinematics.py,sha256=RceIH2HndpHQpcOQd56MmEdXQNEst-CWXfBKPJk2g3Y,17659
+ kinemotion-0.7.0.dist-info/METADATA,sha256=D9N1JQN9MQpSDch4TdC-cz_SaBYXaaaj8kiEuLsacAk,19321
+ kinemotion-0.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ kinemotion-0.7.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+ kinemotion-0.7.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+ kinemotion-0.7.0.dist-info/RECORD,,
@@ -1,17 +0,0 @@
- kinemotion/__init__.py,sha256=JhS0ZTgcTdcMH5WcIyWxEqZJPOoBUSKX8tT8hsG-xWk,98
- kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
- kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
- kinemotion/core/filtering.py,sha256=QtZRz8KlcLtR4dLRFH9sGqRQsUo_Dqcr1ZJIyWwPlcM,11266
- kinemotion/core/pose.py,sha256=5Dhw3LqX3STR-eLb5JAQkxhS-dd0PqGytBWnaQ66nWc,8391
- kinemotion/core/smoothing.py,sha256=z2qnpEGohDm6ZUrzqRXGLp189-NJL0ngKqYwXkU-iW0,13166
- kinemotion/core/video_io.py,sha256=LD7qmHIqUYomGxS1kxz6khugIbFo2y4tDSY7XqJQCOM,4581
- kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
- kinemotion/dropjump/analysis.py,sha256=5lyTJFiItqmSHw96m8HmFrl7N6nCVQZnERWU2prjn9Y,18719
- kinemotion/dropjump/cli.py,sha256=URQguQ6tmDofWagGydXzvc4NPXOCfOGX-yyFgvLV6lM,11954
- kinemotion/dropjump/debug_overlay.py,sha256=s7hwYLA2JenRYOPD2GNmx3kATFseeZT3pW8jxiVgys8,8621
- kinemotion/dropjump/kinematics.py,sha256=wcXaGUrb1kjSTus0KEwgdDzdkJRMy-umAzfStGq0_t4,16258
- kinemotion-0.6.4.dist-info/METADATA,sha256=3GyfOFp8WbHPjauu1gJ42dMNySoHfGUw5sFksrimOnY,19321
- kinemotion-0.6.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- kinemotion-0.6.4.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
- kinemotion-0.6.4.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
- kinemotion-0.6.4.dist-info/RECORD,,