kinemotion 0.1.0__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kinemotion has been flagged as possibly problematic.
- kinemotion/__init__.py +3 -0
- kinemotion/cli.py +20 -0
- kinemotion/core/__init__.py +40 -0
- kinemotion/core/filtering.py +345 -0
- kinemotion/core/pose.py +221 -0
- {dropjump → kinemotion/core}/smoothing.py +144 -0
- kinemotion/core/video_io.py +122 -0
- kinemotion/dropjump/__init__.py +29 -0
- dropjump/contact_detection.py → kinemotion/dropjump/analysis.py +95 -4
- {dropjump → kinemotion/dropjump}/cli.py +98 -31
- dropjump/video_io.py → kinemotion/dropjump/debug_overlay.py +49 -140
- {dropjump → kinemotion/dropjump}/kinematics.py +27 -8
- {kinemotion-0.1.0.dist-info → kinemotion-0.4.0.dist-info}/METADATA +119 -33
- kinemotion-0.4.0.dist-info/RECORD +17 -0
- kinemotion-0.4.0.dist-info/entry_points.txt +2 -0
- dropjump/__init__.py +0 -3
- dropjump/pose_tracker.py +0 -74
- kinemotion-0.1.0.dist-info/RECORD +0 -12
- kinemotion-0.1.0.dist-info/entry_points.txt +0 -2
- {kinemotion-0.1.0.dist-info → kinemotion-0.4.0.dist-info}/WHEEL +0 -0
- {kinemotion-0.1.0.dist-info → kinemotion-0.4.0.dist-info}/licenses/LICENSE +0 -0
--- dropjump/cli.py
+++ kinemotion/dropjump/cli.py
@@ -1,4 +1,4 @@
-"""Command-line interface for
+"""Command-line interface for drop jump analysis."""
 
 import json
 import sys
@@ -7,24 +7,18 @@ from pathlib import Path
 import click
 import numpy as np
 
-from .
+from ..core.pose import PoseTracker
+from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
+from ..core.video_io import VideoProcessor
+from .analysis import (
     compute_average_foot_position,
     detect_ground_contact,
 )
+from .debug_overlay import DebugOverlayRenderer
 from .kinematics import calculate_drop_jump_metrics
-from .pose_tracker import PoseTracker
-from .smoothing import smooth_landmarks
-from .video_io import DebugOverlayRenderer, VideoProcessor
 
 
-@click.
-@click.version_option(package_name="dropjump-analyze")
-def cli() -> None:
-    """Kinemetry: Video-based kinematic analysis for athletic performance."""
-    pass
-
-
-@cli.command(name="dropjump-analyze")
+@click.command(name="dropjump-analyze")
 @click.argument("video_path", type=click.Path(exists=True))
 @click.option(
     "--output",
@@ -45,6 +39,32 @@ def cli() -> None:
     help="Smoothing window size (must be odd, >= 3)",
     show_default=True,
 )
+@click.option(
+    "--polyorder",
+    type=int,
+    default=2,
+    help=(
+        "Polynomial order for Savitzky-Golay smoothing "
+        "(2=quadratic, 3=cubic, must be < smoothing-window)"
+    ),
+    show_default=True,
+)
+@click.option(
+    "--outlier-rejection/--no-outlier-rejection",
+    default=True,
+    help=(
+        "Apply RANSAC and median-based outlier rejection to remove tracking glitches "
+        "(default: enabled, +1-2%% accuracy)"
+    ),
+)
+@click.option(
+    "--bilateral-filter/--no-bilateral-filter",
+    default=False,
+    help=(
+        "Use bilateral temporal filter for edge-preserving smoothing "
+        "(default: disabled, experimental)"
+    ),
+)
 @click.option(
     "--velocity-threshold",
     type=float,
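The new `--polyorder` flag exposes the polynomial order of the Savitzky-Golay filter behind the package's smoothing functions. As a minimal illustration only (not kinemotion's internal code, and with synthetic data), this is the filtering technique those options configure; note the same constraint the CLI validates, `polyorder < window_length`:

```python
# Illustration only -- not kinemotion's internal implementation.
# Savitzky-Golay fits a low-order polynomial in a sliding window, which
# smooths noise while preserving peaks better than a plain moving average.
import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(0)
t = np.linspace(0, 2, 60)                 # ~2 s of samples at 30 fps
y = 0.5 + 0.2 * np.sin(2 * np.pi * t) + rng.normal(0, 0.01, t.size)

window_length = 5                          # must be odd and > polyorder
polyorder = 2                              # 2 = quadratic, 3 = cubic
y_smooth = savgol_filter(y, window_length, polyorder)
```

Higher orders follow rapid motion more closely but suppress less noise, which is the trade-off the help text's quadratic/cubic choice refers to.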
@@ -91,11 +111,25 @@ def cli() -> None:
     default=True,
     help="Use trajectory curvature analysis for refining transitions (default: enabled)",
 )
+@click.option(
+    "--kinematic-correction-factor",
+    type=float,
+    default=1.0,
+    help=(
+        "Correction factor for kinematic jump height (default: 1.0 = no correction). "
+        "Historical testing suggested 1.35, but this is UNVALIDATED. "
+        "Use --drop-height for validated measurements."
+    ),
+    show_default=True,
+)
 def dropjump_analyze(
     video_path: str,
     output: str | None,
     json_output: str | None,
     smoothing_window: int,
+    polyorder: int,
+    outlier_rejection: bool,
+    bilateral_filter: bool,
     velocity_threshold: float,
     min_contact_frames: int,
     visibility_threshold: float,
@@ -103,6 +137,7 @@ def dropjump_analyze(
     tracking_confidence: float,
     drop_height: float | None,
     use_curvature: bool,
+    kinematic_correction_factor: float,
 ) -> None:
     """
     Analyze drop-jump video to estimate ground contact time, flight time, and jump height.
@@ -122,6 +157,17 @@ def dropjump_analyze(
             f"Adjusting smoothing-window to {smoothing_window} (must be odd)", err=True
         )
 
+    if polyorder < 1:
+        click.echo("Error: polyorder must be >= 1", err=True)
+        sys.exit(1)
+
+    if polyorder >= smoothing_window:
+        click.echo(
+            f"Error: polyorder ({polyorder}) must be < smoothing-window ({smoothing_window})",
+            err=True,
+        )
+        sys.exit(1)
+
     try:
         # Initialize video processor
         with VideoProcessor(video_path) as video:
@@ -165,20 +211,40 @@ def dropjump_analyze(
                 sys.exit(1)
 
             # Smooth landmarks
-
-
-
-
+            if outlier_rejection or bilateral_filter:
+                if outlier_rejection:
+                    click.echo(
+                        "Smoothing landmarks with outlier rejection...", err=True
+                    )
+                if bilateral_filter:
+                    click.echo(
+                        "Using bilateral temporal filter for edge-preserving smoothing...",
+                        err=True,
+                    )
+                smoothed_landmarks = smooth_landmarks_advanced(
+                    landmarks_sequence,
+                    window_length=smoothing_window,
+                    polyorder=polyorder,
+                    use_outlier_rejection=outlier_rejection,
+                    use_bilateral=bilateral_filter,
+                )
+            else:
+                click.echo("Smoothing landmarks...", err=True)
+                smoothed_landmarks = smooth_landmarks(
+                    landmarks_sequence, window_length=smoothing_window, polyorder=polyorder
+                )
+
+            # Extract vertical positions from feet
+            click.echo("Extracting foot positions...", err=True)
 
-
-            click.echo("Detecting ground contact...", err=True)
-            foot_positions_list: list[float] = []
+            position_list: list[float] = []
             visibilities_list: list[float] = []
 
             for frame_landmarks in smoothed_landmarks:
                 if frame_landmarks:
-
-
+                    # Use average foot position
+                    _, foot_y = compute_average_foot_position(frame_landmarks)
+                    position_list.append(foot_y)
 
                     # Average visibility of foot landmarks
                     foot_vis = []
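`smooth_landmarks_advanced` itself is not shown in this diff, so the following is only a hedged sketch of the median-based half of what the `--outlier-rejection` help text names ("RANSAC and median-based outlier rejection"): flag samples that deviate strongly from a rolling median, then interpolate across them. The function name and thresholds here are illustrative, not the package's.

```python
# Hedged sketch of median-based outlier rejection on a 1-D trajectory.
# NOT kinemotion's smooth_landmarks_advanced -- just the general idea.
import numpy as np
from scipy.signal import medfilt

def reject_outliers(y: np.ndarray, kernel: int = 5, k: float = 4.0) -> np.ndarray:
    baseline = medfilt(y, kernel_size=kernel)          # rolling median baseline
    resid = y - baseline
    mad = np.median(np.abs(resid - np.median(resid)))  # robust spread estimate
    mask = np.abs(resid) > k * (mad + 1e-9)            # flag tracking glitches
    cleaned = y.copy()
    idx = np.arange(y.size)
    # Linearly interpolate over the flagged samples
    cleaned[mask] = np.interp(idx[mask], idx[~mask], y[~mask])
    return cleaned
```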
@@ -195,21 +261,23 @@
                     )
                 else:
                     # Use previous position if available, otherwise default
-
-
+                    position_list.append(
+                        position_list[-1] if position_list else 0.5
+                    )
                     visibilities_list.append(0.0)
 
-
+            vertical_positions: np.ndarray = np.array(position_list)
             visibilities: np.ndarray = np.array(visibilities_list)
 
             # Detect ground contact
             contact_states = detect_ground_contact(
-
+                vertical_positions,
                 velocity_threshold=velocity_threshold,
                 min_contact_frames=min_contact_frames,
                 visibility_threshold=visibility_threshold,
                 visibilities=visibilities,
+                window_length=smoothing_window,
+                polyorder=polyorder,
             )
 
             # Calculate metrics
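`detect_ground_contact` now also receives `window_length` and `polyorder`, which suggests it differentiates the smoothed trajectory internally. Its implementation is not part of this diff; the sketch below (a hypothetical `naive_ground_contact`, assumed logic only) shows the basic velocity-threshold idea the option names imply: sustained near-zero vertical velocity for at least `min_contact_frames` frames counts as ground contact.

```python
# Assumed logic only; kinemotion's detect_ground_contact additionally
# uses landmark visibility scores, which this sketch omits.
import numpy as np
from scipy.signal import savgol_filter

def naive_ground_contact(
    y: np.ndarray,
    velocity_threshold: float = 0.02,
    min_contact_frames: int = 3,
    window_length: int = 5,
    polyorder: int = 2,
) -> np.ndarray:
    # First derivative of the smoothed normalized position (units: 1/frame)
    v = savgol_filter(y, window_length, polyorder, deriv=1)
    slow = np.abs(v) < velocity_threshold
    contact = np.zeros_like(slow)
    run_start = None
    for i, s in enumerate(np.append(slow, False)):   # sentinel closes final run
        if s and run_start is None:
            run_start = i
        elif not s and run_start is not None:
            if i - run_start >= min_contact_frames:  # keep only sustained runs
                contact[run_start:i] = True
            run_start = None
    return contact
```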
@@ -221,12 +289,14 @@
             )
             metrics = calculate_drop_jump_metrics(
                 contact_states,
-
+                vertical_positions,
                 video.fps,
                 drop_height_m=drop_height,
                 velocity_threshold=velocity_threshold,
                 smoothing_window=smoothing_window,
+                polyorder=polyorder,
                 use_curvature=use_curvature,
+                kinematic_correction_factor=kinematic_correction_factor,
             )
 
             # Output metrics as JSON
@@ -277,6 +347,7 @@
                         contact_states[i],
                         i,
                         metrics,
+                        use_com=False,
                     )
                     renderer.write_frame(annotated)
                     bar.update(1)
@@ -288,7 +359,3 @@
     except Exception as e:
         click.echo(f"Error: {str(e)}", err=True)
         sys.exit(1)
-
-
-if __name__ == "__main__":
-    cli()
--- dropjump/video_io.py
+++ kinemotion/dropjump/debug_overlay.py
@@ -1,131 +1,13 @@
-"""
-
-
-import json
-import subprocess
+"""Debug overlay rendering for drop jump analysis."""
 
 import cv2
 import numpy as np
 
-from .
+from ..core.pose import compute_center_of_mass
+from .analysis import ContactState, compute_average_foot_position
 from .kinematics import DropJumpMetrics
 
 
-class VideoProcessor:
-    """
-    Handles video reading and processing.
-
-    IMPORTANT: This class preserves the exact aspect ratio of the source video.
-    No dimensions are hardcoded - all dimensions are extracted from actual frame data.
-    """
-
-    def __init__(self, video_path: str):
-        """
-        Initialize video processor.
-
-        Args:
-            video_path: Path to input video file
-        """
-        self.video_path = video_path
-        self.cap = cv2.VideoCapture(video_path)
-
-        if not self.cap.isOpened():
-            raise ValueError(f"Could not open video: {video_path}")
-
-        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
-        self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
-
-        # Read first frame to get actual dimensions
-        # This is critical for preserving aspect ratio, especially with mobile videos
-        # that have rotation metadata. OpenCV properties (CAP_PROP_FRAME_WIDTH/HEIGHT)
-        # may return incorrect dimensions, so we read the actual frame data.
-        ret, first_frame = self.cap.read()
-        if ret:
-            # frame.shape is (height, width, channels) - extract actual dimensions
-            self.height, self.width = first_frame.shape[:2]
-            self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # Reset to beginning
-        else:
-            # Fallback to video properties if can't read frame
-            self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-            self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-        # Calculate display dimensions considering SAR (Sample Aspect Ratio)
-        # Mobile videos often have non-square pixels encoded in SAR metadata
-        # OpenCV doesn't directly expose SAR, but we need to handle display correctly
-        self.display_width = self.width
-        self.display_height = self.height
-        self._calculate_display_dimensions()
-
-    def _calculate_display_dimensions(self) -> None:
-        """
-        Calculate display dimensions by reading SAR metadata from video file.
-
-        Many mobile videos use non-square pixels (SAR != 1:1), which means
-        the encoded dimensions differ from how the video should be displayed.
-        We use ffprobe to extract this metadata.
-        """
-        try:
-            # Use ffprobe to get SAR metadata
-            result = subprocess.run(
-                [
-                    "ffprobe",
-                    "-v",
-                    "quiet",
-                    "-print_format",
-                    "json",
-                    "-show_streams",
-                    "-select_streams",
-                    "v:0",
-                    self.video_path,
-                ],
-                capture_output=True,
-                text=True,
-                timeout=5,
-            )
-
-            if result.returncode == 0:
-                data = json.loads(result.stdout)
-                if "streams" in data and len(data["streams"]) > 0:
-                    stream = data["streams"][0]
-                    sar_str = stream.get("sample_aspect_ratio", "1:1")
-
-                    # Parse SAR (e.g., "270:473")
-                    if sar_str and ":" in sar_str:
-                        sar_parts = sar_str.split(":")
-                        sar_width = int(sar_parts[0])
-                        sar_height = int(sar_parts[1])
-
-                        # Calculate display dimensions
-                        # DAR = (width * SAR_width) / (height * SAR_height)
-                        if sar_width != sar_height:
-                            self.display_width = int(
-                                self.width * sar_width / sar_height
-                            )
-                            self.display_height = self.height
-        except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
-            # If ffprobe fails, keep original dimensions (square pixels)
-            pass
-
-    def read_frame(self) -> np.ndarray | None:
-        """Read next frame from video."""
-        ret, frame = self.cap.read()
-        return frame if ret else None
-
-    def reset(self) -> None:
-        """Reset video to beginning."""
-        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
-
-    def close(self) -> None:
-        """Release video capture."""
-        self.cap.release()
-
-    def __enter__(self) -> "VideoProcessor":
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb) -> None:  # type: ignore[no-untyped-def]
-        self.close()
-
-
 class DebugOverlayRenderer:
     """Renders debug information on video frames."""
 
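`VideoProcessor` (including its ffprobe-based SAR handling) is not deleted outright: per the file list above it moved to `kinemotion/core/video_io.py`, and the CLI now imports it from there. A short usage sketch based on the import path and the context-manager protocol visible in this diff (the filename is a placeholder):

```python
# Sketch based on the imports shown in this diff; the VideoProcessor API
# (fps, frame_count, width/height, read_frame, context manager) is unchanged,
# only relocated from dropjump/video_io.py into the shared core package.
from kinemotion.core.video_io import VideoProcessor

with VideoProcessor("jump.mp4") as video:   # "jump.mp4" is a placeholder path
    print(video.fps, video.frame_count, video.width, video.height)
    frame = video.read_frame()              # returns np.ndarray | None
```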
@@ -183,6 +65,7 @@ class DebugOverlayRenderer:
         contact_state: ContactState,
         frame_idx: int,
         metrics: DropJumpMetrics | None = None,
+        use_com: bool = False,
     ) -> np.ndarray:
         """
         Render debug overlay on frame.
@@ -193,6 +76,7 @@ class DebugOverlayRenderer:
             contact_state: Ground contact state
             frame_idx: Current frame index
             metrics: Drop-jump metrics (optional)
+            use_com: Whether to visualize CoM instead of feet (optional)
 
         Returns:
             Frame with debug overlay
@@ -201,25 +85,50 @@ class DebugOverlayRenderer:
 
         # Draw landmarks if available
         if landmarks:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if use_com:
+                # Draw center of mass position
+                com_x, com_y, com_vis = compute_center_of_mass(landmarks)
+                px = int(com_x * self.width)
+                py = int(com_y * self.height)
+
+                # Draw CoM with larger circle
+                color = (
+                    (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+                )
+                cv2.circle(annotated, (px, py), 15, color, -1)
+                cv2.circle(annotated, (px, py), 17, (255, 255, 255), 2)  # White border
+
+                # Draw body segments for reference
+                # Draw hip midpoint
+                if "left_hip" in landmarks and "right_hip" in landmarks:
+                    lh_x, lh_y, _ = landmarks["left_hip"]
+                    rh_x, rh_y, _ = landmarks["right_hip"]
+                    hip_x = int((lh_x + rh_x) / 2 * self.width)
+                    hip_y = int((lh_y + rh_y) / 2 * self.height)
+                    cv2.circle(annotated, (hip_x, hip_y), 8, (255, 165, 0), -1)  # Orange
+                    # Draw line from hip to CoM
+                    cv2.line(annotated, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
+            else:
+                # Draw foot position (original method)
+                foot_x, foot_y = compute_average_foot_position(landmarks)
+                px = int(foot_x * self.width)
+                py = int(foot_y * self.height)
+
+                # Draw foot position circle
+                color = (
+                    (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+                )
+                cv2.circle(annotated, (px, py), 10, color, -1)
+
+                # Draw individual foot landmarks
+                foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
+                for key in foot_keys:
+                    if key in landmarks:
+                        x, y, vis = landmarks[key]
+                        if vis > 0.5:
+                            lx = int(x * self.width)
+                            ly = int(y * self.height)
+                            cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
 
         # Draw contact state
         state_text = f"State: {contact_state.value}"
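The overlay calls `compute_center_of_mass` from `kinemotion.core.pose`, whose implementation is not shown in this diff. Purely as an illustration of the general technique (the weights below are assumed, not kinemotion's), a landmark-based CoM estimate is typically a weighted average over major joints, in the same normalized `name -> (x, y, visibility)` format the overlay code unpacks:

```python
# Illustrative only: compute_center_of_mass's actual segment weights are
# not shown in this diff. ASSUMED_WEIGHTS is a made-up example.
ASSUMED_WEIGHTS = {"left_hip": 0.25, "right_hip": 0.25,
                   "left_shoulder": 0.15, "right_shoulder": 0.15,
                   "left_knee": 0.10, "right_knee": 0.10}

def approx_center_of_mass(landmarks: dict) -> tuple[float, float, float]:
    wx = wy = wv = total = 0.0
    for name, w in ASSUMED_WEIGHTS.items():
        if name in landmarks:
            x, y, vis = landmarks[name]
            wx += w * x
            wy += w * y
            wv += w * vis
            total += w
    if total == 0.0:
        return 0.5, 0.5, 0.0          # no usable landmarks
    return wx / total, wy / total, wv / total
```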
--- dropjump/kinematics.py
+++ kinemotion/dropjump/kinematics.py
@@ -3,7 +3,7 @@
 
 import numpy as np
 
-from .
+from .analysis import (
     ContactState,
     find_contact_phases,
     find_interpolated_phase_transitions_with_curvature,
@@ -111,7 +111,9 @@ def calculate_drop_jump_metrics(
     drop_height_m: float | None = None,
     velocity_threshold: float = 0.02,
     smoothing_window: int = 5,
+    polyorder: int = 2,
     use_curvature: bool = True,
+    kinematic_correction_factor: float = 1.0,
 ) -> DropJumpMetrics:
     """
     Calculate drop-jump metrics from contact states and positions.
@@ -123,7 +125,11 @@ def calculate_drop_jump_metrics(
         drop_height_m: Known drop box/platform height in meters for calibration (optional)
         velocity_threshold: Velocity threshold used for contact detection (for interpolation)
         smoothing_window: Window size for velocity/acceleration smoothing (must be odd)
+        polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
         use_curvature: Whether to use curvature analysis for refining transitions
+        kinematic_correction_factor: Correction factor for kinematic jump height calculation
+            (default: 1.0 = no correction). Historical testing suggested 1.35, but this is
+            unvalidated. Use calibrated measurement (--drop-height) for validated results.
 
     Returns:
         DropJumpMetrics object with calculated values
@@ -138,6 +144,7 @@ def calculate_drop_jump_metrics(
         contact_states,
         velocity_threshold,
         smoothing_window,
+        polyorder,
         use_curvature,
     )
 
@@ -312,18 +319,30 @@ def calculate_drop_jump_metrics(
             metrics.jump_height = height_normalized * scale_factor
             metrics.jump_height_kinematic = jump_height_kinematic
         else:
-            #
-            #
-            #
-            #
-            #
-
+            # Apply kinematic correction factor to kinematic method
+            # ⚠️ WARNING: Kinematic correction factor is EXPERIMENTAL and UNVALIDATED
+            #
+            # The kinematic method h = (g × t²) / 8 may underestimate jump height due to:
+            # 1. Contact detection timing (may detect landing slightly early/late)
+            # 2. Frame rate limitations (30 fps = 33ms intervals between samples)
+            # 3. Foot position vs center of mass difference (feet land before CoM peak)
+            #
+            # Default correction factor is 1.0 (no correction). Historical testing
+            # suggested 1.35 could improve accuracy, but:
+            # - This value has NOT been validated against gold standards
+            #   (force plates, motion capture)
+            # - The actual correction needed may vary by athlete, jump type, and video quality
+            # - Using a correction factor without validation is experimental
+            #
+            # For validated measurements, use:
+            # - Calibrated measurement with --drop-height parameter
+            # - Or compare against validated measurement systems
             metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
             metrics.jump_height_kinematic = jump_height_kinematic
     else:
         # Fallback to kinematic if no position data
         if drop_height_m is None:
-
+            # Apply kinematic correction factor (see detailed comment above)
            metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
         else:
             metrics.jump_height = jump_height_kinematic
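For reference, the flight-time method the comment block describes: a body airborne for time t spends t/2 rising, so takeoff velocity is v₀ = g·t/2 and jump height is h = v₀²/(2g) = g·t²/8. A quick worked example with illustrative numbers:

```python
# Worked example of the flight-time method referenced in the comment block.
g = 9.81                      # m/s^2
flight_time = 0.45            # s, i.e. ~13-14 frames at 30 fps
h = g * flight_time**2 / 8    # ≈ 0.248 m

# With the historical (and explicitly unvalidated) 1.35 correction factor:
h_corrected = h * 1.35        # ≈ 0.335 m
```

At 30 fps a one-frame error in the detected flight time (~33 ms) shifts h by roughly 0.03-0.04 m in this range, which illustrates why the comments steer users toward the calibrated `--drop-height` path instead of a fixed correction factor.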