kinemotion 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. Click here for more details.

@@ -1,131 +1,13 @@
1
- """Video I/O and debug overlay rendering."""
2
-
3
-
4
- import json
5
- import subprocess
1
+ """Debug overlay rendering for drop jump analysis."""
6
2
 
7
3
  import cv2
8
4
  import numpy as np
9
5
 
10
- from .contact_detection import ContactState, compute_average_foot_position
6
+ from ..core.pose import compute_center_of_mass
7
+ from .analysis import ContactState, compute_average_foot_position
11
8
  from .kinematics import DropJumpMetrics
12
9
 
13
10
 
14
- class VideoProcessor:
15
- """
16
- Handles video reading and processing.
17
-
18
- IMPORTANT: This class preserves the exact aspect ratio of the source video.
19
- No dimensions are hardcoded - all dimensions are extracted from actual frame data.
20
- """
21
-
22
- def __init__(self, video_path: str):
23
- """
24
- Initialize video processor.
25
-
26
- Args:
27
- video_path: Path to input video file
28
- """
29
- self.video_path = video_path
30
- self.cap = cv2.VideoCapture(video_path)
31
-
32
- if not self.cap.isOpened():
33
- raise ValueError(f"Could not open video: {video_path}")
34
-
35
- self.fps = self.cap.get(cv2.CAP_PROP_FPS)
36
- self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
37
-
38
- # Read first frame to get actual dimensions
39
- # This is critical for preserving aspect ratio, especially with mobile videos
40
- # that have rotation metadata. OpenCV properties (CAP_PROP_FRAME_WIDTH/HEIGHT)
41
- # may return incorrect dimensions, so we read the actual frame data.
42
- ret, first_frame = self.cap.read()
43
- if ret:
44
- # frame.shape is (height, width, channels) - extract actual dimensions
45
- self.height, self.width = first_frame.shape[:2]
46
- self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) # Reset to beginning
47
- else:
48
- # Fallback to video properties if can't read frame
49
- self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
50
- self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
51
-
52
- # Calculate display dimensions considering SAR (Sample Aspect Ratio)
53
- # Mobile videos often have non-square pixels encoded in SAR metadata
54
- # OpenCV doesn't directly expose SAR, but we need to handle display correctly
55
- self.display_width = self.width
56
- self.display_height = self.height
57
- self._calculate_display_dimensions()
58
-
59
- def _calculate_display_dimensions(self) -> None:
60
- """
61
- Calculate display dimensions by reading SAR metadata from video file.
62
-
63
- Many mobile videos use non-square pixels (SAR != 1:1), which means
64
- the encoded dimensions differ from how the video should be displayed.
65
- We use ffprobe to extract this metadata.
66
- """
67
- try:
68
- # Use ffprobe to get SAR metadata
69
- result = subprocess.run(
70
- [
71
- "ffprobe",
72
- "-v",
73
- "quiet",
74
- "-print_format",
75
- "json",
76
- "-show_streams",
77
- "-select_streams",
78
- "v:0",
79
- self.video_path,
80
- ],
81
- capture_output=True,
82
- text=True,
83
- timeout=5,
84
- )
85
-
86
- if result.returncode == 0:
87
- data = json.loads(result.stdout)
88
- if "streams" in data and len(data["streams"]) > 0:
89
- stream = data["streams"][0]
90
- sar_str = stream.get("sample_aspect_ratio", "1:1")
91
-
92
- # Parse SAR (e.g., "270:473")
93
- if sar_str and ":" in sar_str:
94
- sar_parts = sar_str.split(":")
95
- sar_width = int(sar_parts[0])
96
- sar_height = int(sar_parts[1])
97
-
98
- # Calculate display dimensions
99
- # DAR = (width * SAR_width) / (height * SAR_height)
100
- if sar_width != sar_height:
101
- self.display_width = int(
102
- self.width * sar_width / sar_height
103
- )
104
- self.display_height = self.height
105
- except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
106
- # If ffprobe fails, keep original dimensions (square pixels)
107
- pass
108
-
109
- def read_frame(self) -> np.ndarray | None:
110
- """Read next frame from video."""
111
- ret, frame = self.cap.read()
112
- return frame if ret else None
113
-
114
- def reset(self) -> None:
115
- """Reset video to beginning."""
116
- self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
117
-
118
- def close(self) -> None:
119
- """Release video capture."""
120
- self.cap.release()
121
-
122
- def __enter__(self) -> "VideoProcessor":
123
- return self
124
-
125
- def __exit__(self, exc_type, exc_val, exc_tb) -> None: # type: ignore[no-untyped-def]
126
- self.close()
127
-
128
-
129
11
  class DebugOverlayRenderer:
130
12
  """Renders debug information on video frames."""
131
13
 
@@ -183,6 +65,7 @@ class DebugOverlayRenderer:
183
65
  contact_state: ContactState,
184
66
  frame_idx: int,
185
67
  metrics: DropJumpMetrics | None = None,
68
+ use_com: bool = False,
186
69
  ) -> np.ndarray:
187
70
  """
188
71
  Render debug overlay on frame.
@@ -193,6 +76,7 @@ class DebugOverlayRenderer:
193
76
  contact_state: Ground contact state
194
77
  frame_idx: Current frame index
195
78
  metrics: Drop-jump metrics (optional)
79
+ use_com: Whether to visualize CoM instead of feet (optional)
196
80
 
197
81
  Returns:
198
82
  Frame with debug overlay
@@ -201,25 +85,50 @@ class DebugOverlayRenderer:
201
85
 
202
86
  # Draw landmarks if available
203
87
  if landmarks:
204
- foot_x, foot_y = compute_average_foot_position(landmarks)
205
- px = int(foot_x * self.width)
206
- py = int(foot_y * self.height)
207
-
208
- # Draw foot position circle
209
- color = (
210
- (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
211
- )
212
- cv2.circle(annotated, (px, py), 10, color, -1)
213
-
214
- # Draw individual foot landmarks
215
- foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
216
- for key in foot_keys:
217
- if key in landmarks:
218
- x, y, vis = landmarks[key]
219
- if vis > 0.5:
220
- lx = int(x * self.width)
221
- ly = int(y * self.height)
222
- cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
88
+ if use_com:
89
+ # Draw center of mass position
90
+ com_x, com_y, com_vis = compute_center_of_mass(landmarks)
91
+ px = int(com_x * self.width)
92
+ py = int(com_y * self.height)
93
+
94
+ # Draw CoM with larger circle
95
+ color = (
96
+ (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
97
+ )
98
+ cv2.circle(annotated, (px, py), 15, color, -1)
99
+ cv2.circle(annotated, (px, py), 17, (255, 255, 255), 2) # White border
100
+
101
+ # Draw body segments for reference
102
+ # Draw hip midpoint
103
+ if "left_hip" in landmarks and "right_hip" in landmarks:
104
+ lh_x, lh_y, _ = landmarks["left_hip"]
105
+ rh_x, rh_y, _ = landmarks["right_hip"]
106
+ hip_x = int((lh_x + rh_x) / 2 * self.width)
107
+ hip_y = int((lh_y + rh_y) / 2 * self.height)
108
+ cv2.circle(annotated, (hip_x, hip_y), 8, (255, 165, 0), -1) # Orange
109
+ # Draw line from hip to CoM
110
+ cv2.line(annotated, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
111
+ else:
112
+ # Draw foot position (original method)
113
+ foot_x, foot_y = compute_average_foot_position(landmarks)
114
+ px = int(foot_x * self.width)
115
+ py = int(foot_y * self.height)
116
+
117
+ # Draw foot position circle
118
+ color = (
119
+ (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
120
+ )
121
+ cv2.circle(annotated, (px, py), 10, color, -1)
122
+
123
+ # Draw individual foot landmarks
124
+ foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
125
+ for key in foot_keys:
126
+ if key in landmarks:
127
+ x, y, vis = landmarks[key]
128
+ if vis > 0.5:
129
+ lx = int(x * self.width)
130
+ ly = int(y * self.height)
131
+ cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
223
132
 
224
133
  # Draw contact state
225
134
  state_text = f"State: {contact_state.value}"
@@ -3,7 +3,7 @@
3
3
 
4
4
  import numpy as np
5
5
 
6
- from .contact_detection import (
6
+ from .analysis import (
7
7
  ContactState,
8
8
  find_contact_phases,
9
9
  find_interpolated_phase_transitions_with_curvature,
@@ -111,6 +111,7 @@ def calculate_drop_jump_metrics(
111
111
  drop_height_m: float | None = None,
112
112
  velocity_threshold: float = 0.02,
113
113
  smoothing_window: int = 5,
114
+ polyorder: int = 2,
114
115
  use_curvature: bool = True,
115
116
  ) -> DropJumpMetrics:
116
117
  """
@@ -123,6 +124,7 @@ def calculate_drop_jump_metrics(
123
124
  drop_height_m: Known drop box/platform height in meters for calibration (optional)
124
125
  velocity_threshold: Velocity threshold used for contact detection (for interpolation)
125
126
  smoothing_window: Window size for velocity/acceleration smoothing (must be odd)
127
+ polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
126
128
  use_curvature: Whether to use curvature analysis for refining transitions
127
129
 
128
130
  Returns:
@@ -138,6 +140,7 @@ def calculate_drop_jump_metrics(
138
140
  contact_states,
139
141
  velocity_threshold,
140
142
  smoothing_window,
143
+ polyorder,
141
144
  use_curvature,
142
145
  )
143
146
 
@@ -1,14 +1,14 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.1.0
3
+ Version: 0.2.0
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
- Project-URL: Homepage, https://github.com/feniix/kinemetry
6
- Project-URL: Repository, https://github.com/feniix/kinemetry
7
- Project-URL: Issues, https://github.com/feniix/kinemetry/issues
5
+ Project-URL: Homepage, https://github.com/feniix/kinemotion
6
+ Project-URL: Repository, https://github.com/feniix/kinemotion
7
+ Project-URL: Issues, https://github.com/feniix/kinemotion/issues
8
8
  Author-email: Sebastian Otaegui <feniix@gmail.com>
9
9
  License: MIT
10
10
  License-File: LICENSE
11
- Keywords: athletic-performance,drop-jump,kinemetry,mediapipe,pose-tracking,video-analysis
11
+ Keywords: athletic-performance,drop-jump,kinemetry,kinemotion,mediapipe,pose-tracking,video-analysis
12
12
  Classifier: Development Status :: 4 - Beta
13
13
  Classifier: Intended Audience :: Science/Research
14
14
  Classifier: License :: OSI Approved :: MIT License
@@ -26,14 +26,16 @@ Requires-Dist: opencv-python>=4.9.0
26
26
  Requires-Dist: scipy>=1.11.0
27
27
  Description-Content-Type: text/markdown
28
28
 
29
- # Kinemetry
29
+ # Kinemotion
30
30
 
31
31
  A video-based kinematic analysis tool for athletic performance. Analyzes side-view drop-jump videos to estimate key performance metrics: ground contact time, flight time, and jump height. Uses MediaPipe pose tracking and advanced kinematics.
32
32
 
33
33
  ## Features
34
34
 
35
35
  - **Automatic pose tracking** using MediaPipe Pose landmarks
36
- - **Ground contact detection** based on foot velocity and position
36
+ - **Center of mass (CoM) tracking** - biomechanical CoM estimation for 3-5% accuracy improvement
37
+ - **Adaptive velocity thresholding** - auto-calibrates from video baseline for 2-3% additional accuracy
38
+ - **Ground contact detection** based on velocity and position (feet or CoM)
37
39
  - **Derivative-based velocity** - smooth velocity calculation from position trajectory
38
40
  - **Trajectory curvature analysis** - acceleration patterns for refined event detection
39
41
  - **Sub-frame interpolation** - precise timing beyond frame boundaries for improved accuracy
@@ -43,6 +45,8 @@ A video-based kinematic analysis tool for athletic performance. Analyzes side-vi
43
45
  - Flight time (ms)
44
46
  - Jump height (m) - with optional calibration using drop box height
45
47
  - **Calibrated measurements** - use known drop height for ~88% accuracy (vs 71% uncalibrated)
48
+ - With CoM tracking: potential for 91-93% accuracy
49
+ - With adaptive thresholding + CoM: potential for 93-96% accuracy
46
50
  - **JSON output** for easy integration with other tools
47
51
  - **Optional debug video** with visual overlays showing contact states and landmarks
48
52
  - **Configurable parameters** for smoothing, thresholds, and detection
@@ -75,7 +79,7 @@ asdf install
75
79
  uv sync
76
80
  ```
77
81
 
78
- This will install all dependencies and make the `kinemetry` command available.
82
+ This will install all dependencies and make the `kinemotion` command available.
79
83
 
80
84
  ## Usage
81
85
 
@@ -84,13 +88,13 @@ This will install all dependencies and make the `kinemetry` command available.
84
88
  Analyze a video and output metrics to stdout as JSON:
85
89
 
86
90
  ```bash
87
- kinemetry dropjump-analyze video.mp4
91
+ kinemotion dropjump-analyze video.mp4
88
92
  ```
89
93
 
90
94
  ### Save Metrics to File
91
95
 
92
96
  ```bash
93
- kinemetry dropjump-analyze video.mp4 --json-output metrics.json
97
+ kinemotion dropjump-analyze video.mp4 --json-output metrics.json
94
98
  ```
95
99
 
96
100
  ### Generate Debug Video
@@ -98,7 +102,7 @@ kinemetry dropjump-analyze video.mp4 --json-output metrics.json
98
102
  Create an annotated video showing pose tracking and contact detection:
99
103
 
100
104
  ```bash
101
- kinemetry dropjump-analyze video.mp4 --output debug.mp4
105
+ kinemotion dropjump-analyze video.mp4 --output debug.mp4
102
106
  ```
103
107
 
104
108
  ### Calibrated Drop Jump Analysis
@@ -107,24 +111,71 @@ For most accurate measurements, provide the drop box height in meters:
107
111
 
108
112
  ```bash
109
113
  # 40cm drop box
110
- kinemetry dropjump-analyze drop-jump.mp4 --drop-height 0.40
114
+ kinemotion dropjump-analyze drop-jump.mp4 --drop-height 0.40
111
115
 
112
116
  # 60cm drop box with full outputs
113
- kinemetry dropjump-analyze drop-jump.mp4 \
117
+ kinemotion dropjump-analyze drop-jump.mp4 \
114
118
  --drop-height 0.60 \
115
119
  --json-output metrics.json \
116
120
  --output debug.mp4
117
121
  ```
118
122
 
119
- ### Full Example
123
+ ### Center of Mass Tracking (Improved Accuracy)
124
+
125
+ Use CoM tracking for 3-5% accuracy improvement:
120
126
 
121
127
  ```bash
122
- kinemetry dropjump-analyze jump.mp4 \
123
- --json-output results.json \
128
+ # Basic CoM tracking
129
+ kinemotion dropjump-analyze video.mp4 --use-com
130
+
131
+ # CoM tracking with calibration for maximum accuracy
132
+ kinemotion dropjump-analyze drop-jump.mp4 \
133
+ --use-com \
134
+ --drop-height 0.40 \
135
+ --output debug_com.mp4 \
136
+ --json-output metrics.json
137
+ ```
138
+
139
+ ### Adaptive Thresholding (Auto-Calibration)
140
+
141
+ Auto-calibrate velocity threshold from video baseline for 2-3% accuracy improvement:
142
+
143
+ ```bash
144
+ # Basic adaptive thresholding
145
+ kinemotion dropjump-analyze video.mp4 --adaptive-threshold
146
+
147
+ # Combined with CoM for maximum accuracy
148
+ kinemotion dropjump-analyze video.mp4 \
149
+ --adaptive-threshold \
150
+ --use-com \
151
+ --drop-height 0.40 \
124
152
  --output debug.mp4 \
153
+ --json-output metrics.json
154
+ ```
155
+
156
+ ### Full Example (Maximum Accuracy)
157
+
158
+ ```bash
159
+ # With all accuracy improvements enabled (~93-96% accuracy)
160
+ kinemotion dropjump-analyze jump.mp4 \
161
+ --adaptive-threshold \
162
+ --use-com \
163
+ --outlier-rejection \
125
164
  --drop-height 0.40 \
165
+ --output debug.mp4 \
166
+ --json-output results.json \
126
167
  --smoothing-window 7 \
127
- --velocity-threshold 0.015
168
+ --polyorder 3
169
+
170
+ # Alternative: With experimental bilateral filter
171
+ kinemotion dropjump-analyze jump.mp4 \
172
+ --adaptive-threshold \
173
+ --use-com \
174
+ --outlier-rejection \
175
+ --bilateral-filter \
176
+ --drop-height 0.40 \
177
+ --output debug.mp4 \
178
+ --json-output results.json
128
179
  ```
129
180
 
130
181
  ## Configuration Options
@@ -146,6 +197,43 @@ kinemetry dropjump-analyze jump.mp4 \
146
197
  - Larger values = smoother trajectories but less responsive
147
198
  - **Tip**: Increase for noisy videos, decrease for high-quality stable footage
148
199
 
200
+ - `--polyorder <int>` (default: 2)
201
+ - Polynomial order for Savitzky-Golay smoothing filter
202
+ - Must be < smoothing-window (typically 2 or 3)
203
+ - 2 = quadratic fit (good for parabolic motion like jumps)
204
+ - 3 = cubic fit (better for complex motion patterns)
205
+ - Higher order captures more motion complexity but more sensitive to noise
206
+ - **Tip**: Use 2 for most cases, try 3 for high-quality videos with complex motion
207
+ - **Accuracy improvement**: +1-2% for complex motion patterns
208
+
209
+ ### Advanced Filtering
210
+
211
+ - `--outlier-rejection / --no-outlier-rejection` (default: --outlier-rejection)
212
+ - Apply RANSAC and median-based outlier rejection to remove tracking glitches
213
+ - **With outlier rejection** (`--outlier-rejection`): Detects and removes MediaPipe tracking errors
214
+ - RANSAC-based polynomial fitting identifies positions that deviate from smooth trajectory
215
+ - Median filtering catches spikes in otherwise smooth motion
216
+ - Outliers replaced with interpolated values from neighboring valid points
217
+ - Removes jumps, jitter, and temporary tracking losses
218
+ - **Accuracy improvement**: +1-2% by eliminating tracking glitches
219
+ - **Without outlier rejection** (`--no-outlier-rejection`): Uses raw tracked positions
220
+ - Faster processing, relies entirely on MediaPipe quality
221
+ - **Tip**: Keep enabled (default) unless debugging or working with perfect tracking
222
+
223
+ - `--bilateral-filter / --no-bilateral-filter` (default: --no-bilateral-filter)
224
+ - Use bilateral temporal filter for edge-preserving smoothing
225
+ - **With bilateral filter** (`--bilateral-filter`): Preserves sharp transitions while smoothing noise
226
+ - Weights each frame by temporal distance AND position similarity
227
+ - Landing/takeoff transitions remain sharp (not smoothed away)
228
+ - Noise in smooth regions (flight, ground contact) is reduced
229
+ - Edge-preserving alternative to Savitzky-Golay smoothing
230
+ - **Accuracy improvement**: +1-2% by preserving event timing precision
231
+ - **Without bilateral filter** (`--no-bilateral-filter`): Uses standard Savitzky-Golay smoothing
232
+ - Uniform smoothing across all frames
233
+ - Well-tested baseline method
234
+ - **Tip**: Experimental feature; enable for videos with rapid transitions or variable motion
235
+ - **Note**: Cannot be used simultaneously with Savitzky-Golay; bilateral replaces it when enabled
236
+
149
237
  ### Contact Detection
150
238
 
151
239
  - `--velocity-threshold <float>` (default: 0.02)
@@ -186,6 +274,49 @@ kinemetry dropjump-analyze jump.mp4 \
186
274
  - Only applicable for drop jumps (box → drop → landing → jump)
187
275
  - **Tip**: Measure your box height accurately for best results
188
276
 
277
+ ### Tracking Method
278
+
279
+ - `--use-com / --use-feet` (default: --use-feet)
280
+ - Choose between center of mass (CoM) or foot-based tracking
281
+ - **CoM tracking** (`--use-com`): Uses biomechanical CoM estimation with Dempster's body segment parameters
282
+ - Head: 8%, Trunk: 50%, Thighs: 20%, Legs: 10%, Feet: 3% of body mass
283
+ - Tracks true body movement instead of foot position
284
+ - Reduces error from foot dorsiflexion/plantarflexion during flight
285
+ - **Accuracy improvement**: +3-5% over foot-based tracking
286
+ - **Foot tracking** (`--use-feet`): Traditional method using average ankle/heel positions
287
+ - Faster, simpler, well-tested baseline method
288
+ - **Tip**: Use `--use-com` for maximum accuracy, especially for drop jumps
289
+
290
+ ### Velocity Threshold Mode
291
+
292
+ - `--adaptive-threshold / --fixed-threshold` (default: --fixed-threshold)
293
+ - Choose between adaptive or fixed velocity threshold for contact detection
294
+ - **Adaptive threshold** (`--adaptive-threshold`): Auto-calibrates from video baseline
295
+ - Analyzes first 3 seconds of video (assumed relatively stationary)
296
+ - Computes noise floor as 95th percentile of baseline velocity
297
+ - Sets threshold as 1.5× noise floor (bounded: 0.005-0.05)
298
+ - Adapts to camera distance, lighting, frame rate, and compression artifacts
299
+ - **Accuracy improvement**: +2-3% by eliminating manual tuning
300
+ - **Fixed threshold** (`--fixed-threshold`): Uses `--velocity-threshold` value (default: 0.02)
301
+ - Consistent, predictable behavior
302
+ - Requires manual tuning for optimal results
303
+ - **Tip**: Use `--adaptive-threshold` for varying video conditions or when unsure of optimal threshold
304
+
305
+ ### Trajectory Analysis
306
+
307
+ - `--use-curvature / --no-curvature` (default: --use-curvature)
308
+ - Enable/disable trajectory curvature analysis for refining transitions
309
+ - **With curvature** (`--use-curvature`): Uses acceleration patterns to refine event timing
310
+ - Landing detection: Finds acceleration spike from impact deceleration
311
+ - Takeoff detection: Finds acceleration change as body transitions from static to upward motion
312
+ - Blends curvature-based refinement (70%) with velocity-based estimate (30%)
313
+ - Provides physics-based validation of velocity threshold crossings
314
+ - **Accuracy improvement**: More precise timing, especially for rapid transitions
315
+ - **Without curvature** (`--no-curvature`): Pure velocity-based detection with sub-frame interpolation
316
+ - Simpler, faster algorithm
317
+ - Still highly accurate with smooth velocity curves
318
+ - **Tip**: Keep enabled (default) for best results; disable only for debugging or comparison
319
+
189
320
  ## Output Format
190
321
 
191
322
  ### JSON Metrics
@@ -281,24 +412,29 @@ The debug video includes:
281
412
 
282
413
  ## How It Works
283
414
 
284
- 1. **Pose Tracking**: MediaPipe extracts 2D pose landmarks (ankles, heels, foot indices) from each frame
285
- 2. **Smoothing**: Savitzky-Golay filter reduces tracking jitter while preserving motion dynamics
286
- 3. **Contact Detection**: Analyzes vertical foot velocity to identify ground contact vs. flight phases
287
- 4. **Phase Identification**: Finds continuous ground contact and flight periods
415
+ 1. **Pose Tracking**: MediaPipe extracts 2D pose landmarks (13 points: feet, ankles, knees, hips, shoulders, nose) from each frame
416
+ 2. **Position Calculation**: Two methods available:
417
+ - **Foot-based** (default): Averages ankle, heel, and foot index positions
418
+ - **CoM-based** (--use-com): Biomechanical center of mass using Dempster's body segment parameters
419
+ - Head: 8%, Trunk: 50%, Thighs: 20%, Legs: 10%, Feet: 3% of body mass
420
+ - Weighted average reduces error from foot movement artifacts
421
+ 3. **Smoothing**: Savitzky-Golay filter reduces tracking jitter while preserving motion dynamics
422
+ 4. **Contact Detection**: Analyzes vertical position velocity to identify ground contact vs. flight phases
423
+ 5. **Phase Identification**: Finds continuous ground contact and flight periods
288
424
  - Automatically detects drop jumps vs regular jumps
289
425
  - For drop jumps: identifies box → drop → ground contact → jump sequence
290
- 5. **Sub-Frame Interpolation**: Estimates exact transition times between frames
426
+ 6. **Sub-Frame Interpolation**: Estimates exact transition times between frames
291
427
  - Uses Savitzky-Golay derivative for smooth velocity calculation
292
428
  - Linear interpolation of velocity to find threshold crossings
293
429
  - Achieves sub-millisecond timing precision (at 30fps: ±10ms vs ±33ms)
294
430
  - Reduces timing error by 60-70% for contact and flight measurements
295
431
  - Smoother velocity curves eliminate false threshold crossings
296
- 6. **Trajectory Curvature Analysis**: Refines transitions using acceleration patterns
432
+ 7. **Trajectory Curvature Analysis**: Refines transitions using acceleration patterns
297
433
  - Computes second derivative (acceleration) from position trajectory
298
434
  - Detects landing impact by acceleration spike
299
435
  - Identifies takeoff by acceleration change patterns
300
436
  - Provides independent validation and refinement of velocity-based detection
301
- 7. **Metric Calculation**:
437
+ 8. **Metric Calculation**:
302
438
  - Ground contact time = contact phase duration (using fractional frames)
303
439
  - Flight time = flight phase duration (using fractional frames)
304
440
  - Jump height = calibrated position-based measurement (if --drop-height provided)
@@ -312,13 +448,13 @@ This project enforces strict code quality standards:
312
448
  - **Type safety**: Full mypy strict mode compliance with complete type annotations
313
449
  - **Linting**: Comprehensive ruff checks (pycodestyle, pyflakes, isort, pep8-naming, etc.)
314
450
  - **Formatting**: Black code style
315
- - **Testing**: pytest with 9 unit tests
451
+ - **Testing**: pytest with 25 unit tests
316
452
 
317
453
  ### Development Commands
318
454
 
319
455
  ```bash
320
456
  # Run the tool
321
- uv run kinemetry dropjump-analyze <video_path>
457
+ uv run kinemotion dropjump-analyze <video_path>
322
458
 
323
459
  # Run all tests
324
460
  uv run pytest
@@ -0,0 +1,16 @@
1
+ kinemotion/__init__.py,sha256=JhS0ZTgcTdcMH5WcIyWxEqZJPOoBUSKX8tT8hsG-xWk,98
2
+ kinemotion/cli.py,sha256=afQiAWBQBbLM2SZnwFPZ0gr_jjVIQtkhYh5cHYWPeco,13532
3
+ kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
4
+ kinemotion/core/filtering.py,sha256=QtZRz8KlcLtR4dLRFH9sGqRQsUo_Dqcr1ZJIyWwPlcM,11266
5
+ kinemotion/core/pose.py,sha256=5Dhw3LqX3STR-eLb5JAQkxhS-dd0PqGytBWnaQ66nWc,8391
6
+ kinemotion/core/smoothing.py,sha256=z2qnpEGohDm6ZUrzqRXGLp189-NJL0ngKqYwXkU-iW0,13166
7
+ kinemotion/core/video_io.py,sha256=LD7qmHIqUYomGxS1kxz6khugIbFo2y4tDSY7XqJQCOM,4581
8
+ kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
9
+ kinemotion/dropjump/analysis.py,sha256=MsEnho8WeGFxStHpKVGbj7gzdb3MUfozksmlAReAkI0,18026
10
+ kinemotion/dropjump/debug_overlay.py,sha256=s7hwYLA2JenRYOPD2GNmx3kATFseeZT3pW8jxiVgys8,8621
11
+ kinemotion/dropjump/kinematics.py,sha256=bM1A6LGSDWbNOrRa_x2v9hXJOwxef69h3R_0naLZ4Zw,15092
12
+ kinemotion-0.2.0.dist-info/METADATA,sha256=pJ1KUIaG6F7xljGjjmwLALM2j9IHEAE5eJ4F6lWB4Lc,20616
13
+ kinemotion-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
14
+ kinemotion-0.2.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
15
+ kinemotion-0.2.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
16
+ kinemotion-0.2.0.dist-info/RECORD,,
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ kinemotion = kinemotion.cli:cli
dropjump/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- """Drop-jump video analysis tool."""
2
-
3
- __version__ = "0.1.0"
dropjump/pose_tracker.py DELETED
@@ -1,74 +0,0 @@
1
- """Pose tracking using MediaPipe Pose."""
2
-
3
-
4
- import cv2
5
- import mediapipe as mp
6
- import numpy as np
7
-
8
-
9
- class PoseTracker:
10
- """Tracks human pose landmarks in video frames using MediaPipe."""
11
-
12
- def __init__(
13
- self,
14
- min_detection_confidence: float = 0.5,
15
- min_tracking_confidence: float = 0.5,
16
- ):
17
- """
18
- Initialize the pose tracker.
19
-
20
- Args:
21
- min_detection_confidence: Minimum confidence for pose detection
22
- min_tracking_confidence: Minimum confidence for pose tracking
23
- """
24
- self.mp_pose = mp.solutions.pose
25
- self.pose = self.mp_pose.Pose(
26
- min_detection_confidence=min_detection_confidence,
27
- min_tracking_confidence=min_tracking_confidence,
28
- model_complexity=1,
29
- )
30
-
31
- def process_frame(
32
- self, frame: np.ndarray
33
- ) -> dict[str, tuple[float, float, float]] | None:
34
- """
35
- Process a single frame and extract pose landmarks.
36
-
37
- Args:
38
- frame: BGR image frame
39
-
40
- Returns:
41
- Dictionary mapping landmark names to (x, y, visibility) tuples,
42
- or None if no pose detected. Coordinates are normalized (0-1).
43
- """
44
- # Convert BGR to RGB
45
- rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
46
-
47
- # Process the frame
48
- results = self.pose.process(rgb_frame)
49
-
50
- if not results.pose_landmarks:
51
- return None
52
-
53
- # Extract key landmarks for feet tracking
54
- landmarks = {}
55
- landmark_names = {
56
- self.mp_pose.PoseLandmark.LEFT_ANKLE: "left_ankle",
57
- self.mp_pose.PoseLandmark.RIGHT_ANKLE: "right_ankle",
58
- self.mp_pose.PoseLandmark.LEFT_HEEL: "left_heel",
59
- self.mp_pose.PoseLandmark.RIGHT_HEEL: "right_heel",
60
- self.mp_pose.PoseLandmark.LEFT_FOOT_INDEX: "left_foot_index",
61
- self.mp_pose.PoseLandmark.RIGHT_FOOT_INDEX: "right_foot_index",
62
- self.mp_pose.PoseLandmark.LEFT_HIP: "left_hip",
63
- self.mp_pose.PoseLandmark.RIGHT_HIP: "right_hip",
64
- }
65
-
66
- for landmark_id, name in landmark_names.items():
67
- lm = results.pose_landmarks.landmark[landmark_id]
68
- landmarks[name] = (lm.x, lm.y, lm.visibility)
69
-
70
- return landmarks
71
-
72
- def close(self) -> None:
73
- """Release resources."""
74
- self.pose.close()
@@ -1,12 +0,0 @@
1
- dropjump/__init__.py,sha256=3os3CgjXm09srnAvsNIjA_KpHHfsu4ioRY0_oVDaX0w,60
2
- dropjump/cli.py,sha256=mW_wUaAb1mzQks8oFcZHre50U0pbTacJZg3x-VUdLtY,9563
3
- dropjump/contact_detection.py,sha256=cHqQ_nR9mbKyHeb90iuGjE8Hq34S01VURBMF8x5oHeM,15015
4
- dropjump/kinematics.py,sha256=x2SB_4Pj-kJUFCI-KSMjr5PypCmh9FkRR9dorvMq8zI,14983
5
- dropjump/pose_tracker.py,sha256=JDHK7di6-ObxwVR3TzERUmwCXeCFZWua1kUp1W9W55c,2367
6
- dropjump/smoothing.py,sha256=FCAk6PnMs7v8dzh98cteLaCvHOuYtfWDVI3X51MxEqs,7836
7
- dropjump/video_io.py,sha256=Cw_dYRgwybN1gIj4P88tedWAjwmQ4UxkRDztdpRD1hQ,11614
8
- kinemotion-0.1.0.dist-info/METADATA,sha256=VpBdG6PQcEdjskv-oQssggqltnuDgxvmfd0-1nTj5oA,13737
9
- kinemotion-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
10
- kinemotion-0.1.0.dist-info/entry_points.txt,sha256=yRp0CWopxkd5fjbnhQ9OjI9gELPmpcCSMCDvWidatD4,47
11
- kinemotion-0.1.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
12
- kinemotion-0.1.0.dist-info/RECORD,,