kinemotion 0.6.4__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinemotion/core/auto_tuning.py +289 -0
- kinemotion/core/filtering.py +1 -1
- kinemotion/core/smoothing.py +10 -11
- kinemotion/core/video_io.py +52 -8
- kinemotion/dropjump/analysis.py +121 -4
- kinemotion/dropjump/cli.py +179 -124
- kinemotion/dropjump/debug_overlay.py +11 -5
- kinemotion/dropjump/kinematics.py +45 -5
- {kinemotion-0.6.4.dist-info → kinemotion-0.7.1.dist-info}/METADATA +89 -138
- kinemotion-0.7.1.dist-info/RECORD +18 -0
- kinemotion-0.6.4.dist-info/RECORD +0 -17
- {kinemotion-0.6.4.dist-info → kinemotion-0.7.1.dist-info}/WHEEL +0 -0
- {kinemotion-0.6.4.dist-info → kinemotion-0.7.1.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.6.4.dist-info → kinemotion-0.7.1.dist-info}/licenses/LICENSE +0 -0
kinemotion/dropjump/cli.py
CHANGED
@@ -3,10 +3,16 @@
 import json
 import sys
 from pathlib import Path
+from typing import Any
 
 import click
 import numpy as np
 
+from ..core.auto_tuning import (
+    QualityPreset,
+    analyze_video_sample,
+    auto_tune_parameters,
+)
 from ..core.pose import PoseTracker
 from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from ..core.video_io import VideoProcessor
@@ -33,140 +39,102 @@ from .kinematics import calculate_drop_jump_metrics
     help="Path for JSON metrics output (default: stdout)",
 )
 @click.option(
-    "--
-    type=
-
-    help=
-
+    "--drop-height",
+    type=float,
+    required=True,
+    help=(
+        "Height of drop box/platform in meters (e.g., 0.40 for 40cm box) - "
+        "REQUIRED for accurate calibration"
+    ),
 )
 @click.option(
-    "--
-    type=
-    default=
+    "--quality",
+    type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
+    default="balanced",
     help=(
-        "
-        "(
+        "Analysis quality preset: "
+        "fast (quick, less precise), "
+        "balanced (default, good for most cases), "
+        "accurate (research-grade, slower)"
     ),
     show_default=True,
 )
 @click.option(
-    "--
-
-
-
-        "(default: enabled, +1-2%% accuracy)"
-    ),
+    "--verbose",
+    "-v",
+    is_flag=True,
+    help="Show auto-selected parameters and analysis details",
 )
+# Expert parameters (hidden in help, but always available for advanced users)
 @click.option(
-    "--
-
-
-
-
-
+    "--drop-start-frame",
+    type=int,
+    default=None,
+    help="[EXPERT] Manually specify frame where drop begins (overrides auto-detection)",
+)
+@click.option(
+    "--smoothing-window",
+    type=int,
+    default=None,
+    help="[EXPERT] Override auto-tuned smoothing window size",
 )
 @click.option(
     "--velocity-threshold",
     type=float,
-    default=
-    help="
-    show_default=True,
+    default=None,
+    help="[EXPERT] Override auto-tuned velocity threshold",
 )
 @click.option(
     "--min-contact-frames",
     type=int,
-    default=
-    help="
-    show_default=True,
+    default=None,
+    help="[EXPERT] Override auto-tuned minimum contact frames",
 )
 @click.option(
     "--visibility-threshold",
     type=float,
-    default=
-    help="
-    show_default=True,
+    default=None,
+    help="[EXPERT] Override visibility threshold",
 )
 @click.option(
     "--detection-confidence",
    type=float,
-    default=
-    help="
-    show_default=True,
+    default=None,
+    help="[EXPERT] Override pose detection confidence",
 )
 @click.option(
     "--tracking-confidence",
     type=float,
-    default=0.5,
-    help="Pose tracking confidence threshold (0-1)",
-    show_default=True,
-)
-@click.option(
-    "--drop-height",
-    type=float,
     default=None,
-    help="
-)
-@click.option(
-    "--use-curvature/--no-curvature",
-    default=True,
-    help="Use trajectory curvature analysis for refining transitions (default: enabled)",
-)
-@click.option(
-    "--kinematic-correction-factor",
-    type=float,
-    default=1.0,
-    help=(
-        "Correction factor for kinematic jump height (default: 1.0 = no correction). "
-        "Historical testing suggested 1.35, but this is UNVALIDATED. "
-        "Use --drop-height for validated measurements."
-    ),
-    show_default=True,
+    help="[EXPERT] Override pose tracking confidence",
 )
 def dropjump_analyze(
     video_path: str,
     output: str | None,
     json_output: str | None,
-
-
-
-
-
-
-
-
-
-
-    use_curvature: bool,
-    kinematic_correction_factor: float,
+    drop_height: float,
+    quality: str,
+    verbose: bool,
+    drop_start_frame: int | None,
+    smoothing_window: int | None,
+    velocity_threshold: float | None,
+    min_contact_frames: int | None,
+    visibility_threshold: float | None,
+    detection_confidence: float | None,
+    tracking_confidence: float | None,
 ) -> None:
     """
     Analyze drop-jump video to estimate ground contact time, flight time, and jump height.
 
+    Uses intelligent auto-tuning to select optimal parameters based on video characteristics.
+    Parameters are automatically adjusted for frame rate, tracking quality, and analysis preset.
+
     VIDEO_PATH: Path to the input video file
     """
     click.echo(f"Analyzing video: {video_path}", err=True)
 
-    #
-
-        click.echo("Error: smoothing-window must be >= 3", err=True)
-        sys.exit(1)
-
-    if smoothing_window % 2 == 0:
-        smoothing_window += 1
-        click.echo(
-            f"Adjusting smoothing-window to {smoothing_window} (must be odd)", err=True
-        )
-
-    if polyorder < 1:
-        click.echo("Error: polyorder must be >= 1", err=True)
-        sys.exit(1)
-
-    if polyorder >= smoothing_window:
-        click.echo(
-            f"Error: polyorder ({polyorder}) must be < smoothing-window ({smoothing_window})",
-            err=True,
-        )
-        sys.exit(1)
+    # Convert quality string to enum
+    quality_preset = QualityPreset(quality.lower())
 
     try:
         # Initialize video processor
@@ -177,10 +145,32 @@ def dropjump_analyze(
             err=True,
         )
 
+        # ================================================================
+        # STEP 1: Auto-tune parameters based on video characteristics
+        # ================================================================
+
+        # Analyze video characteristics from a sample to determine optimal parameters
+        # We'll use detection/tracking confidence from quality preset for initial tracking
+        initial_detection_conf = 0.5
+        initial_tracking_conf = 0.5
+
+        if quality_preset == QualityPreset.FAST:
+            initial_detection_conf = 0.3
+            initial_tracking_conf = 0.3
+        elif quality_preset == QualityPreset.ACCURATE:
+            initial_detection_conf = 0.6
+            initial_tracking_conf = 0.6
+
+        # Override with expert values if provided
+        if detection_confidence is not None:
+            initial_detection_conf = detection_confidence
+        if tracking_confidence is not None:
+            initial_tracking_conf = tracking_confidence
+
         # Initialize pose tracker
         tracker = PoseTracker(
-            min_detection_confidence=
-            min_tracking_confidence=
+            min_detection_confidence=initial_detection_conf,
+            min_tracking_confidence=initial_tracking_conf,
         )
 
         # Process all frames
@@ -189,6 +179,7 @@ def dropjump_analyze(
         frames = []
 
         frame_idx = 0
+        bar: Any
         with click.progressbar(
             length=video.frame_count, label="Processing frames"
         ) as bar:
@@ -210,28 +201,90 @@ def dropjump_analyze(
             click.echo("Error: No frames processed", err=True)
             sys.exit(1)
 
-        #
-
-
+        # ================================================================
+        # STEP 2: Analyze video characteristics and auto-tune parameters
+        # ================================================================
+
+        characteristics = analyze_video_sample(
+            landmarks_sequence, video.fps, video.frame_count
+        )
+
+        # Auto-tune parameters based on video characteristics
+        params = auto_tune_parameters(characteristics, quality_preset)
+
+        # Apply expert overrides if provided
+        if smoothing_window is not None:
+            params.smoothing_window = smoothing_window
+        if velocity_threshold is not None:
+            params.velocity_threshold = velocity_threshold
+        if min_contact_frames is not None:
+            params.min_contact_frames = min_contact_frames
+        if visibility_threshold is not None:
+            params.visibility_threshold = visibility_threshold
+
+        # Show selected parameters if verbose
+        if verbose:
+            click.echo("\n" + "=" * 60, err=True)
+            click.echo("AUTO-TUNED PARAMETERS", err=True)
+            click.echo("=" * 60, err=True)
+            click.echo(f"Video FPS: {video.fps:.2f}", err=True)
+            click.echo(
+                f"Tracking quality: {characteristics.tracking_quality} "
+                f"(avg visibility: {characteristics.avg_visibility:.2f})",
+                err=True,
+            )
+            click.echo(f"Quality preset: {quality_preset.value}", err=True)
+            click.echo("\nSelected parameters:", err=True)
+            click.echo(f" smoothing_window: {params.smoothing_window}", err=True)
+            click.echo(f" polyorder: {params.polyorder}", err=True)
+            click.echo(
+                f" velocity_threshold: {params.velocity_threshold:.4f}", err=True
+            )
+            click.echo(
+                f" min_contact_frames: {params.min_contact_frames}", err=True
+            )
+            click.echo(
+                f" visibility_threshold: {params.visibility_threshold}", err=True
+            )
+            click.echo(
+                f" detection_confidence: {params.detection_confidence}", err=True
+            )
+            click.echo(
+                f" tracking_confidence: {params.tracking_confidence}", err=True
+            )
+            click.echo(f" outlier_rejection: {params.outlier_rejection}", err=True)
+            click.echo(f" bilateral_filter: {params.bilateral_filter}", err=True)
+            click.echo(f" use_curvature: {params.use_curvature}", err=True)
+            click.echo("=" * 60 + "\n", err=True)
+
+        # ================================================================
+        # STEP 3: Apply smoothing with auto-tuned parameters
+        # ================================================================
+
+        # Smooth landmarks using auto-tuned parameters
+        if params.outlier_rejection or params.bilateral_filter:
+            if params.outlier_rejection:
                 click.echo(
                     "Smoothing landmarks with outlier rejection...", err=True
                 )
-            if bilateral_filter:
+            if params.bilateral_filter:
                 click.echo(
                     "Using bilateral temporal filter for edge-preserving smoothing...",
                     err=True,
                 )
             smoothed_landmarks = smooth_landmarks_advanced(
                 landmarks_sequence,
-                window_length=smoothing_window,
-                polyorder=polyorder,
-                use_outlier_rejection=outlier_rejection,
-                use_bilateral=bilateral_filter,
+                window_length=params.smoothing_window,
+                polyorder=params.polyorder,
+                use_outlier_rejection=params.outlier_rejection,
+                use_bilateral=params.bilateral_filter,
             )
         else:
             click.echo("Smoothing landmarks...", err=True)
             smoothed_landmarks = smooth_landmarks(
-                landmarks_sequence,
+                landmarks_sequence,
+                window_length=params.smoothing_window,
+                polyorder=params.polyorder,
             )
 
         # Extract vertical positions from feet
@@ -261,42 +314,40 @@ def dropjump_analyze(
                 )
             else:
                 # Use previous position if available, otherwise default
-                position_list.append(
-                    position_list[-1] if position_list else 0.5
-                )
+                position_list.append(position_list[-1] if position_list else 0.5)
                 visibilities_list.append(0.0)
 
         vertical_positions: np.ndarray = np.array(position_list)
         visibilities: np.ndarray = np.array(visibilities_list)
 
-        # Detect ground contact
+        # Detect ground contact using auto-tuned parameters
         contact_states = detect_ground_contact(
             vertical_positions,
-            velocity_threshold=velocity_threshold,
-            min_contact_frames=min_contact_frames,
-            visibility_threshold=visibility_threshold,
+            velocity_threshold=params.velocity_threshold,
+            min_contact_frames=params.min_contact_frames,
+            visibility_threshold=params.visibility_threshold,
             visibilities=visibilities,
-            window_length=smoothing_window,
-            polyorder=polyorder,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
         )
 
         # Calculate metrics
         click.echo("Calculating metrics...", err=True)
-
-
-
-
-        )
+        click.echo(
+            f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
+            err=True,
+        )
         metrics = calculate_drop_jump_metrics(
             contact_states,
             vertical_positions,
             video.fps,
             drop_height_m=drop_height,
-
-
-
-
-
+            drop_start_frame=drop_start_frame,
+            velocity_threshold=params.velocity_threshold,
+            smoothing_window=params.smoothing_window,
+            polyorder=params.polyorder,
+            use_curvature=params.use_curvature,
+            kinematic_correction_factor=1.0,  # Always 1.0 now (no experimental correction)
         )
 
         # Output metrics as JSON
@@ -313,7 +364,10 @@ def dropjump_analyze(
         # Generate debug video if requested
         if output:
             click.echo(f"Generating debug video: {output}", err=True)
-            if
+            if (
+                video.display_width != video.width
+                or video.display_height != video.height
+            ):
                 click.echo(
                     f"Source video encoded: {video.width}x{video.height}",
                     err=True,
@@ -337,9 +391,10 @@ def dropjump_analyze(
                 video.display_height,
                 video.fps,
             ) as renderer:
+                render_bar: Any
                 with click.progressbar(
                     length=len(frames), label="Rendering frames"
-                ) as
+                ) as render_bar:
                     for i, frame in enumerate(frames):
                         annotated = renderer.render_frame(
                             frame,
@@ -350,7 +405,7 @@ def dropjump_analyze(
                             use_com=False,
                         )
                         renderer.write_frame(annotated)
-
+                        render_bar.update(1)
 
         click.echo(f"Debug video saved: {output}", err=True)
 
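
The substance of the cli.py change above is the auto-tuning flow: instead of many manually tuned flags, the command now samples the tracked landmarks, derives parameters from the video's characteristics and the chosen quality preset, and only then applies expert overrides. A minimal sketch of that flow, assuming landmarks_sequence is the per-frame landmark data produced by PoseTracker and that video exposes fps and frame_count as in video_io; the helper name tune_for_video is hypothetical:

from kinemotion.core.auto_tuning import (
    QualityPreset,
    analyze_video_sample,
    auto_tune_parameters,
)


def tune_for_video(landmarks_sequence, video, quality="balanced", smoothing_window=None):
    """Hypothetical helper mirroring the STEP 1/2 wiring shown in the diff."""
    quality_preset = QualityPreset(quality.lower())
    # Sample the tracked landmarks to estimate frame-rate effects and tracking quality
    characteristics = analyze_video_sample(
        landmarks_sequence, video.fps, video.frame_count
    )
    # Derive smoothing window, thresholds, confidences, etc. from the sample
    params = auto_tune_parameters(characteristics, quality_preset)
    # Expert flags, when given, override individual auto-tuned values
    if smoothing_window is not None:
        params.smoothing_window = smoothing_window
    return params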

kinemotion/dropjump/debug_overlay.py
CHANGED

@@ -38,7 +38,7 @@ class DebugOverlayRenderer:
         self.needs_resize = (display_width != width) or (display_height != height)
 
         # Try H.264 codec first (better quality/compatibility), fallback to mp4v
-        fourcc = cv2.VideoWriter_fourcc(*"avc1")
+        fourcc = cv2.VideoWriter_fourcc(*"avc1")
         # IMPORTANT: cv2.VideoWriter expects (width, height) tuple - NOT (height, width)
         # Write at display dimensions so video displays correctly without SAR metadata
         self.writer = cv2.VideoWriter(
@@ -47,7 +47,7 @@ class DebugOverlayRenderer:
 
         # Check if writer opened successfully, fallback to mp4v if not
         if not self.writer.isOpened():
-            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
             self.writer = cv2.VideoWriter(
                 output_path, fourcc, fps, (display_width, display_height)
             )
@@ -93,7 +93,9 @@ class DebugOverlayRenderer:
 
             # Draw CoM with larger circle
             color = (
-                (0, 255, 0)
+                (0, 255, 0)
+                if contact_state == ContactState.ON_GROUND
+                else (0, 0, 255)
             )
             cv2.circle(annotated, (px, py), 15, color, -1)
             cv2.circle(annotated, (px, py), 17, (255, 255, 255), 2)  # White border
@@ -105,7 +107,9 @@ class DebugOverlayRenderer:
                 rh_x, rh_y, _ = landmarks["right_hip"]
                 hip_x = int((lh_x + rh_x) / 2 * self.width)
                 hip_y = int((lh_y + rh_y) / 2 * self.height)
-                cv2.circle(
+                cv2.circle(
+                    annotated, (hip_x, hip_y), 8, (255, 165, 0), -1
+                )  # Orange
                 # Draw line from hip to CoM
                 cv2.line(annotated, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
             else:
@@ -116,7 +120,9 @@ class DebugOverlayRenderer:
 
             # Draw foot position circle
             color = (
-                (0, 255, 0)
+                (0, 255, 0)
+                if contact_state == ContactState.ON_GROUND
+                else (0, 0, 255)
             )
             cv2.circle(annotated, (px, py), 10, color, -1)
 
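
The debug_overlay.py edits above appear to be formatting-only; the H.264-first, mp4v-fallback writer logic is unchanged. For reference, the pattern it relies on looks roughly like this (a sketch using the standard OpenCV API, with hypothetical path and dimensions):

import cv2

output_path = "debug_overlay.mp4"  # hypothetical example path
fps, width, height = 60.0, 1080, 1920

# Prefer H.264 ("avc1") for quality/compatibility; note the (width, height) order
fourcc = cv2.VideoWriter_fourcc(*"avc1")
writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

# Fall back to mp4v if the H.264 encoder is unavailable on this system
if not writer.isOpened():
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))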

kinemotion/dropjump/kinematics.py
CHANGED

@@ -1,10 +1,10 @@
 """Kinematic calculations for drop-jump metrics."""
 
-
 import numpy as np
 
 from .analysis import (
     ContactState,
+    detect_drop_start,
     find_contact_phases,
     find_interpolated_phase_transitions_with_curvature,
 )
@@ -109,6 +109,7 @@ def calculate_drop_jump_metrics(
     foot_y_positions: np.ndarray,
     fps: float,
     drop_height_m: float | None = None,
+    drop_start_frame: int | None = None,
     velocity_threshold: float = 0.02,
     smoothing_window: int = 5,
     polyorder: int = 2,
@@ -135,6 +136,20 @@ def calculate_drop_jump_metrics(
         DropJumpMetrics object with calculated values
     """
     metrics = DropJumpMetrics()
+
+    # Detect or use manually specified drop jump start frame
+    if drop_start_frame is None:
+        # Auto-detect where drop jump actually starts (skip initial stationary period)
+        drop_start_frame = detect_drop_start(
+            foot_y_positions,
+            fps,
+            min_stationary_duration=0.5,  # 0.5s stable period (~30 frames @ 60fps)
+            position_change_threshold=0.005,  # 0.5% of frame height - sensitive to drop start
+            smoothing_window=smoothing_window,
+        )
+    # If manually specified or auto-detected, use it
+    drop_start_frame_value = drop_start_frame if drop_start_frame is not None else 0
+
     phases = find_contact_phases(contact_states)
 
     # Get interpolated phases with curvature-based refinement
@@ -148,6 +163,23 @@ def calculate_drop_jump_metrics(
         use_curvature,
     )
 
+    if not phases:
+        return metrics
+
+    # Filter phases to only include those after drop start
+    # This removes the initial stationary period where athlete is standing on box
+    if drop_start_frame_value > 0:
+        phases = [
+            (start, end, state)
+            for start, end, state in phases
+            if end >= drop_start_frame_value
+        ]
+        interpolated_phases = [
+            (start, end, state)
+            for start, end, state in interpolated_phases
+            if end >= drop_start_frame_value
+        ]
+
     if not phases:
         return metrics
 
@@ -177,7 +209,9 @@ def calculate_drop_jump_metrics(
 
     # Find ground phase after first air phase
     ground_after_air = [
-        (start, end, idx)
+        (start, end, idx)
+        for start, end, idx in ground_phases
+        if idx > first_air_idx
     ]
 
     if ground_after_air and first_ground_idx < first_air_idx:
@@ -241,7 +275,9 @@ def calculate_drop_jump_metrics(
         # Look back a few frames to get stable position on box
         lookback_start = max(0, first_air_start - 5)
         if lookback_start < first_air_start:
-            initial_position = float(
+            initial_position = float(
+                np.mean(foot_y_positions[lookback_start:first_air_start])
+            )
         else:
             initial_position = float(foot_y_positions[first_air_start])
 
@@ -337,13 +373,17 @@ def calculate_drop_jump_metrics(
             # For validated measurements, use:
             # - Calibrated measurement with --drop-height parameter
             # - Or compare against validated measurement systems
-            metrics.jump_height =
+            metrics.jump_height = (
+                jump_height_kinematic * kinematic_correction_factor
+            )
             metrics.jump_height_kinematic = jump_height_kinematic
         else:
             # Fallback to kinematic if no position data
             if drop_height_m is None:
                 # Apply kinematic correction factor (see detailed comment above)
-                metrics.jump_height =
+                metrics.jump_height = (
+                    jump_height_kinematic * kinematic_correction_factor
+                )
             else:
                 metrics.jump_height = jump_height_kinematic
         metrics.jump_height_kinematic = jump_height_kinematic