kinemotion 0.10.3__py3-none-any.whl → 0.10.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinemotion/api.py +330 -154
- kinemotion/dropjump/kinematics.py +318 -206
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/METADATA +1 -1
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/RECORD +7 -7
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/WHEEL +0 -0
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/licenses/LICENSE +0 -0
kinemotion/api.py
CHANGED
@@ -9,18 +9,315 @@ from pathlib import Path
 import numpy as np
 
 from .core.auto_tuning import (
+    AnalysisParameters,
     QualityPreset,
+    VideoCharacteristics,
     analyze_video_sample,
     auto_tune_parameters,
 )
 from .core.pose import PoseTracker
 from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.video_io import VideoProcessor
-from .dropjump.analysis import
+from .dropjump.analysis import (
+    ContactState,
+    compute_average_foot_position,
+    detect_ground_contact,
+)
 from .dropjump.debug_overlay import DebugOverlayRenderer
 from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 
 
+def _parse_quality_preset(quality: str) -> QualityPreset:
+    """Parse and validate quality preset string.
+
+    Args:
+        quality: Quality preset string ('fast', 'balanced', or 'accurate')
+
+    Returns:
+        QualityPreset enum value
+
+    Raises:
+        ValueError: If quality preset is invalid
+    """
+    try:
+        return QualityPreset(quality.lower())
+    except ValueError as e:
+        raise ValueError(
+            f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
+        ) from e
+
+
+def _determine_confidence_levels(
+    quality_preset: QualityPreset,
+    detection_confidence: float | None,
+    tracking_confidence: float | None,
+) -> tuple[float, float]:
+    """Determine detection and tracking confidence levels.
+
+    Confidence levels are set based on quality preset and can be overridden
+    by expert parameters.
+
+    Args:
+        quality_preset: Quality preset enum
+        detection_confidence: Optional expert override for detection confidence
+        tracking_confidence: Optional expert override for tracking confidence
+
+    Returns:
+        Tuple of (detection_confidence, tracking_confidence)
+    """
+    # Set initial confidence from quality preset
+    initial_detection_conf = 0.5
+    initial_tracking_conf = 0.5
+
+    if quality_preset == QualityPreset.FAST:
+        initial_detection_conf = 0.3
+        initial_tracking_conf = 0.3
+    elif quality_preset == QualityPreset.ACCURATE:
+        initial_detection_conf = 0.6
+        initial_tracking_conf = 0.6
+
+    # Override with expert values if provided
+    if detection_confidence is not None:
+        initial_detection_conf = detection_confidence
+    if tracking_confidence is not None:
+        initial_tracking_conf = tracking_confidence
+
+    return initial_detection_conf, initial_tracking_conf
+
+
+def _apply_expert_overrides(
+    params: AnalysisParameters,
+    smoothing_window: int | None,
+    velocity_threshold: float | None,
+    min_contact_frames: int | None,
+    visibility_threshold: float | None,
+) -> AnalysisParameters:
+    """Apply expert parameter overrides to auto-tuned parameters.
+
+    Args:
+        params: Auto-tuned parameters object
+        smoothing_window: Optional override for smoothing window
+        velocity_threshold: Optional override for velocity threshold
+        min_contact_frames: Optional override for minimum contact frames
+        visibility_threshold: Optional override for visibility threshold
+
+    Returns:
+        Modified params object (mutated in place)
+    """
+    if smoothing_window is not None:
+        params.smoothing_window = smoothing_window
+    if velocity_threshold is not None:
+        params.velocity_threshold = velocity_threshold
+    if min_contact_frames is not None:
+        params.min_contact_frames = min_contact_frames
+    if visibility_threshold is not None:
+        params.visibility_threshold = visibility_threshold
+    return params
+
+
+def _print_verbose_parameters(
+    video: VideoProcessor,
+    characteristics: VideoCharacteristics,
+    quality_preset: QualityPreset,
+    params: AnalysisParameters,
+) -> None:
+    """Print auto-tuned parameters in verbose mode.
+
+    Args:
+        video: Video processor with fps and dimensions
+        characteristics: Video analysis characteristics
+        quality_preset: Selected quality preset
+        params: Auto-tuned parameters
+    """
+    print("\n" + "=" * 60)
+    print("AUTO-TUNED PARAMETERS")
+    print("=" * 60)
+    print(f"Video FPS: {video.fps:.2f}")
+    print(
+        f"Tracking quality: {characteristics.tracking_quality} "
+        f"(avg visibility: {characteristics.avg_visibility:.2f})"
+    )
+    print(f"Quality preset: {quality_preset.value}")
+    print("\nSelected parameters:")
+    print(f"  smoothing_window: {params.smoothing_window}")
+    print(f"  polyorder: {params.polyorder}")
+    print(f"  velocity_threshold: {params.velocity_threshold:.4f}")
+    print(f"  min_contact_frames: {params.min_contact_frames}")
+    print(f"  visibility_threshold: {params.visibility_threshold}")
+    print(f"  detection_confidence: {params.detection_confidence}")
+    print(f"  tracking_confidence: {params.tracking_confidence}")
+    print(f"  outlier_rejection: {params.outlier_rejection}")
+    print(f"  bilateral_filter: {params.bilateral_filter}")
+    print(f"  use_curvature: {params.use_curvature}")
+    print("=" * 60 + "\n")
+
+
+def _process_all_frames(
+    video: VideoProcessor, tracker: PoseTracker, verbose: bool
+) -> tuple[list, list]:
+    """Process all frames from video and extract pose landmarks.
+
+    Args:
+        video: Video processor to read frames from
+        tracker: Pose tracker for landmark detection
+        verbose: Print progress messages
+
+    Returns:
+        Tuple of (frames, landmarks_sequence)
+
+    Raises:
+        ValueError: If no frames could be processed
+    """
+    if verbose:
+        print("Tracking pose landmarks...")
+
+    landmarks_sequence = []
+    frames = []
+
+    while True:
+        frame = video.read_frame()
+        if frame is None:
+            break
+
+        frames.append(frame)
+        landmarks = tracker.process_frame(frame)
+        landmarks_sequence.append(landmarks)
+
+    tracker.close()
+
+    if not landmarks_sequence:
+        raise ValueError("No frames could be processed from video")
+
+    return frames, landmarks_sequence
+
+
+def _apply_smoothing(
+    landmarks_sequence: list, params: AnalysisParameters, verbose: bool
+) -> list:
+    """Apply smoothing to landmark sequence with auto-tuned parameters.
+
+    Args:
+        landmarks_sequence: Sequence of landmarks from all frames
+        params: Auto-tuned parameters containing smoothing settings
+        verbose: Print progress messages
+
+    Returns:
+        Smoothed landmarks sequence
+    """
+    if params.outlier_rejection or params.bilateral_filter:
+        if verbose:
+            if params.outlier_rejection:
+                print("Smoothing landmarks with outlier rejection...")
+            if params.bilateral_filter:
+                print("Using bilateral temporal filter...")
+        return smooth_landmarks_advanced(
+            landmarks_sequence,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
+            use_outlier_rejection=params.outlier_rejection,
+            use_bilateral=params.bilateral_filter,
+        )
+    else:
+        if verbose:
+            print("Smoothing landmarks...")
+        return smooth_landmarks(
+            landmarks_sequence,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
+        )
+
+
+def _extract_vertical_positions(
+    smoothed_landmarks: list,
+) -> tuple[np.ndarray, np.ndarray]:
+    """Extract vertical foot positions and visibilities from smoothed landmarks.
+
+    Args:
+        smoothed_landmarks: Smoothed landmark sequence
+
+    Returns:
+        Tuple of (vertical_positions, visibilities) as numpy arrays
+    """
+    position_list: list[float] = []
+    visibilities_list: list[float] = []
+
+    for frame_landmarks in smoothed_landmarks:
+        if frame_landmarks:
+            _, foot_y = compute_average_foot_position(frame_landmarks)
+            position_list.append(foot_y)
+
+            # Average visibility of foot landmarks
+            foot_vis = []
+            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
+                if key in frame_landmarks:
+                    foot_vis.append(frame_landmarks[key][2])
+            visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
+        else:
+            position_list.append(position_list[-1] if position_list else 0.5)
+            visibilities_list.append(0.0)
+
+    return np.array(position_list), np.array(visibilities_list)
+
+
+def _generate_outputs(
+    metrics: DropJumpMetrics,
+    json_output: str | None,
+    output_video: str | None,
+    frames: list,
+    smoothed_landmarks: list,
+    contact_states: list[ContactState],
+    video: VideoProcessor,
+    verbose: bool,
+) -> None:
+    """Generate JSON and debug video outputs if requested.
+
+    Args:
+        metrics: Calculated drop jump metrics
+        json_output: Optional path for JSON output
+        output_video: Optional path for debug video
+        frames: List of video frames
+        smoothed_landmarks: Smoothed landmark sequence
+        contact_states: Ground contact state for each frame
+        video: Video processor with dimensions and fps
+        verbose: Print progress messages
+    """
+    # Save JSON if requested
+    if json_output:
+        import json
+
+        output_path = Path(json_output)
+        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
+        if verbose:
+            print(f"Metrics written to: {json_output}")
+
+    # Generate debug video if requested
+    if output_video:
+        if verbose:
+            print(f"Generating debug video: {output_video}")
+
+        with DebugOverlayRenderer(
+            output_video,
+            video.width,
+            video.height,
+            video.display_width,
+            video.display_height,
+            video.fps,
+        ) as renderer:
+            for i, frame in enumerate(frames):
+                annotated = renderer.render_frame(
+                    frame,
+                    smoothed_landmarks[i],
+                    contact_states[i],
+                    i,
+                    metrics,
+                    use_com=False,
+                )
+                renderer.write_frame(annotated)
+
+        if verbose:
+            print(f"Debug video saved: {output_video}")
+
+
 @dataclass
 class VideoResult:
     """Result of processing a single video."""
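The eight helpers added above are straight extractions from `process_video`, so behavior such as the preset/override precedence can now be checked in isolation. A minimal sketch of `_determine_confidence_levels` (a private helper; the values are read directly from the code above):

    from kinemotion.api import _determine_confidence_levels
    from kinemotion.core.auto_tuning import QualityPreset

    # Preset defaults: FAST -> (0.3, 0.3), ACCURATE -> (0.6, 0.6),
    # any other preset falls through to the (0.5, 0.5) initial values.
    assert _determine_confidence_levels(QualityPreset.FAST, None, None) == (0.3, 0.3)

    # Expert overrides win over the preset, independently per value.
    assert _determine_confidence_levels(QualityPreset.FAST, 0.7, None) == (0.7, 0.3)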
@@ -94,12 +391,7 @@ def process_video(
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
     # Convert quality string to enum
-    try:
-        quality_preset = QualityPreset(quality.lower())
-    except ValueError as e:
-        raise ValueError(
-            f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
-        ) from e
+    quality_preset = _parse_quality_preset(quality)
 
     # Initialize video processor
     with VideoProcessor(video_path) as video:
@@ -109,138 +401,46 @@ def process_video(
             f"{video.frame_count} frames"
         )
 
-        # Determine
-        initial_detection_conf = 0.5
-        initial_tracking_conf = 0.5
-
-        if quality_preset == QualityPreset.FAST:
-            initial_detection_conf = 0.3
-            initial_tracking_conf = 0.3
-        elif quality_preset == QualityPreset.ACCURATE:
-            initial_detection_conf = 0.6
-            initial_tracking_conf = 0.6
-
-        # Override with expert values if provided
-        if detection_confidence is not None:
-            initial_detection_conf = detection_confidence
-        if tracking_confidence is not None:
-            initial_tracking_conf = tracking_confidence
+        # Determine detection/tracking confidence levels
+        detection_conf, tracking_conf = _determine_confidence_levels(
+            quality_preset, detection_confidence, tracking_confidence
+        )
 
-        #
+        # Process all frames with pose tracking
         tracker = PoseTracker(
-            min_detection_confidence=initial_detection_conf,
-            min_tracking_confidence=initial_tracking_conf,
+            min_detection_confidence=detection_conf,
+            min_tracking_confidence=tracking_conf,
         )
-
-        # Process all frames
-        if verbose:
-            print("Tracking pose landmarks...")
-
-        landmarks_sequence = []
-        frames = []
-
-        while True:
-            frame = video.read_frame()
-            if frame is None:
-                break
-
-            frames.append(frame)
-            landmarks = tracker.process_frame(frame)
-            landmarks_sequence.append(landmarks)
-
-        tracker.close()
-
-        if not landmarks_sequence:
-            raise ValueError("No frames could be processed from video")
+        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)
 
         # Analyze video characteristics and auto-tune parameters
         characteristics = analyze_video_sample(
            landmarks_sequence, video.fps, video.frame_count
         )
-
         params = auto_tune_parameters(characteristics, quality_preset)
 
         # Apply expert overrides if provided
-        if smoothing_window is not None:
-            params.smoothing_window = smoothing_window
-        if velocity_threshold is not None:
-            params.velocity_threshold = velocity_threshold
-        if min_contact_frames is not None:
-            params.min_contact_frames = min_contact_frames
-        if visibility_threshold is not None:
-            params.visibility_threshold = visibility_threshold
+        params = _apply_expert_overrides(
+            params,
+            smoothing_window,
+            velocity_threshold,
+            min_contact_frames,
+            visibility_threshold,
+        )
 
         # Show selected parameters if verbose
         if verbose:
-            print("\n" + "=" * 60)
-            print("AUTO-TUNED PARAMETERS")
-            print("=" * 60)
-            print(f"Video FPS: {video.fps:.2f}")
-            print(
-                f"Tracking quality: {characteristics.tracking_quality} "
-                f"(avg visibility: {characteristics.avg_visibility:.2f})"
-            )
-            print(f"Quality preset: {quality_preset.value}")
-            print("\nSelected parameters:")
-            print(f"  smoothing_window: {params.smoothing_window}")
-            print(f"  polyorder: {params.polyorder}")
-            print(f"  velocity_threshold: {params.velocity_threshold:.4f}")
-            print(f"  min_contact_frames: {params.min_contact_frames}")
-            print(f"  visibility_threshold: {params.visibility_threshold}")
-            print(f"  detection_confidence: {params.detection_confidence}")
-            print(f"  tracking_confidence: {params.tracking_confidence}")
-            print(f"  outlier_rejection: {params.outlier_rejection}")
-            print(f"  bilateral_filter: {params.bilateral_filter}")
-            print(f"  use_curvature: {params.use_curvature}")
-            print("=" * 60 + "\n")
+            _print_verbose_parameters(video, characteristics, quality_preset, params)
 
         # Apply smoothing with auto-tuned parameters
-        if params.outlier_rejection or params.bilateral_filter:
-            if verbose:
-                if params.outlier_rejection:
-                    print("Smoothing landmarks with outlier rejection...")
-                if params.bilateral_filter:
-                    print("Using bilateral temporal filter...")
-            smoothed_landmarks = smooth_landmarks_advanced(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-                use_outlier_rejection=params.outlier_rejection,
-                use_bilateral=params.bilateral_filter,
-            )
-        else:
-            if verbose:
-                print("Smoothing landmarks...")
-            smoothed_landmarks = smooth_landmarks(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
+        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)
 
         # Extract vertical positions from feet
         if verbose:
             print("Extracting foot positions...")
-
-        position_list: list[float] = []
-        visibilities_list: list[float] = []
-
-        for frame_landmarks in smoothed_landmarks:
-            if frame_landmarks:
-                _, foot_y = compute_average_foot_position(frame_landmarks)
-                position_list.append(foot_y)
-
-                # Average visibility of foot landmarks
-                foot_vis = []
-                for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-                    if key in frame_landmarks:
-                        foot_vis.append(frame_landmarks[key][2])
-                visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
-            else:
-                position_list.append(position_list[-1] if position_list else 0.5)
-                visibilities_list.append(0.0)
-
-        vertical_positions: np.ndarray = np.array(position_list)
-        visibilities: np.ndarray = np.array(visibilities_list)
+        vertical_positions, visibilities = _extract_vertical_positions(
+            smoothed_landmarks
+        )
 
         # Detect ground contact
         contact_states = detect_ground_contact(
@@ -273,41 +473,17 @@ def process_video(
             kinematic_correction_factor=1.0,
         )
 
-        #
-        if json_output:
-            import json
-
-            output_path = Path(json_output)
-            output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
-            if verbose:
-                print(f"Metrics written to: {json_output}")
-
-        # Generate debug video if requested
-        if output_video:
-            if verbose:
-                print(f"Generating debug video: {output_video}")
-
-            with DebugOverlayRenderer(
-                output_video,
-                video.width,
-                video.height,
-                video.display_width,
-                video.display_height,
-                video.fps,
-            ) as renderer:
-                for i, frame in enumerate(frames):
-                    annotated = renderer.render_frame(
-                        frame,
-                        smoothed_landmarks[i],
-                        contact_states[i],
-                        i,
-                        metrics,
-                        use_com=False,
-                    )
-                    renderer.write_frame(annotated)
-
-            if verbose:
-                print(f"Debug video saved: {output_video}")
+        # Generate outputs (JSON and debug video)
+        _generate_outputs(
+            metrics,
+            json_output,
+            output_video,
+            frames,
+            smoothed_landmarks,
+            contact_states,
+            video,
+            verbose,
+        )
 
         if verbose:
             print("Analysis complete!")
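Net effect on `api.py`: `process_video` keeps its public behavior but now delegates each stage (preset parsing, tracking, auto-tuning, smoothing, extraction, output) to the helpers above. A hedged usage sketch follows; the keyword names are the ones visible in this diff, but the full signature and return type are not shown, so treat the call shape as an assumption:

    from kinemotion.api import process_video

    # Hypothetical invocation; the file paths are illustrative only.
    process_video(
        "drop_jump.mp4",
        quality="balanced",          # parsed via _parse_quality_preset
        smoothing_window=None,       # None keeps the auto-tuned value
        json_output="metrics.json",  # written by _generate_outputs
        output_video="debug.mp4",    # rendered via DebugOverlayRenderer
        verbose=True,
    )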
kinemotion/dropjump/kinematics.py
CHANGED

@@ -104,113 +104,89 @@ class DropJumpMetrics:
         }
 
 
-def calculate_drop_jump_metrics(
-    contact_states: list[ContactState],
+def _determine_drop_start_frame(
+    drop_start_frame: int | None,
     foot_y_positions: np.ndarray,
     fps: float,
-    drop_height_m: float | None = None,
-    drop_start_frame: int | None = None,
-    velocity_threshold: float = 0.02,
-    smoothing_window: int = 5,
-    polyorder: int = 2,
-    use_curvature: bool = True,
-    kinematic_correction_factor: float = 1.0,
-) -> DropJumpMetrics:
-    """
-    Calculate drop-jump metrics from contact states and positions.
+    smoothing_window: int,
+) -> int:
+    """Determine the drop start frame for analysis.
 
     Args:
-        contact_states: Contact state for each frame
-        foot_y_positions: Vertical positions
+        drop_start_frame: Manual drop start frame or None for auto-detection
+        foot_y_positions: Vertical positions array
         fps: Video frame rate
-        drop_height_m: Known drop box/platform height in meters for calibration (optional)
-        velocity_threshold: Velocity threshold used for contact detection (for interpolation)
-        smoothing_window: Window size for velocity/acceleration smoothing (must be odd)
-        polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
-        use_curvature: Whether to use curvature analysis for refining transitions
-        kinematic_correction_factor: Correction factor for kinematic jump height calculation
-            (default: 1.0 = no correction). Historical testing suggested 1.35, but this is
-            unvalidated. Use calibrated measurement (--drop-height) for validated results.
+        smoothing_window: Smoothing window size
 
     Returns:
-        DropJumpMetrics object with calculated values
+        Drop start frame (0 if not detected/provided)
     """
-    metrics = DropJumpMetrics()
-
-    # Detect or use manually specified drop jump start frame
     if drop_start_frame is None:
         # Auto-detect where drop jump actually starts (skip initial stationary period)
-        drop_start_frame = detect_drop_start(
+        detected_frame = detect_drop_start(
             foot_y_positions,
             fps,
-            min_stationary_duration=0.5,
-            position_change_threshold=0.005,
+            min_stationary_duration=0.5,
+            position_change_threshold=0.005,
             smoothing_window=smoothing_window,
         )
-
-
-        if drop_start_frame is None:  # pyright: ignore[reportUnnecessaryComparison]
-            drop_start_frame_value = 0
-        else:
-            drop_start_frame_value = drop_start_frame
+        return detected_frame if detected_frame is not None else 0
+    return drop_start_frame
 
-    phases = find_contact_phases(contact_states)
 
-
-
-    interpolated_phases = find_interpolated_phase_transitions_with_curvature(
-        foot_y_positions,
-        contact_states,
-        velocity_threshold,
-        smoothing_window,
-        polyorder,
-        use_curvature,
-    )
-
-    if not phases:
-        return metrics
+def _filter_phases_after_drop(
+    phases: list[tuple[int, int, ContactState]],
+    interpolated_phases: list[tuple[float, float, ContactState]],
+    drop_start_frame: int,
+) -> tuple[
+    list[tuple[int, int, ContactState]], list[tuple[float, float, ContactState]]
+]:
+    """Filter phases to only include those after drop start.
 
-
-
-
-    phases = [
-        (start, end, state)
-        for start, end, state in phases
-        if end >= drop_start_frame_value
-    ]
-    interpolated_phases = [
-        (start, end, state)
-        for start, end, state in interpolated_phases
-        if end >= drop_start_frame_value
-    ]
+    Args:
+        phases: Integer frame phases
+        interpolated_phases: Sub-frame precision phases
+        drop_start_frame: Frame where drop starts
 
-    if not phases:
-        return metrics
+    Returns:
+        Tuple of (filtered_phases, filtered_interpolated_phases)
+    """
+    if drop_start_frame <= 0:
+        return phases, interpolated_phases
 
-
-
-    # For regular jumps: use longest ON_GROUND phase
-    ground_phases = [
-        (start, end, i)
-        for i, (start, end, state) in enumerate(phases)
-        if state == ContactState.ON_GROUND
+    filtered_phases = [
+        (start, end, state) for start, end, state in phases if end >= drop_start_frame
     ]
-    air_phases_indexed = [
-        (start, end, i)
-        for i, (start, end, state) in enumerate(phases)
-        if state == ContactState.IN_AIR
+    filtered_interpolated = [
+        (start, end, state)
+        for start, end, state in interpolated_phases
+        if end >= drop_start_frame
     ]
+    return filtered_phases, filtered_interpolated
 
-    if not ground_phases:
-        return metrics
 
-
-
+def _identify_main_contact_phase(
+    phases: list[tuple[int, int, ContactState]],
+    ground_phases: list[tuple[int, int, int]],
+    air_phases_indexed: list[tuple[int, int, int]],
+    foot_y_positions: np.ndarray,
+) -> tuple[int, int, bool]:
+    """Identify the main contact phase and determine if it's a drop jump.
+
+    Args:
+        phases: All phase tuples
+        ground_phases: Ground phases with indices
+        air_phases_indexed: Air phases with indices
+        foot_y_positions: Vertical position array
+
+    Returns:
+        Tuple of (contact_start, contact_end, is_drop_jump)
+    """
+    # Initialize with first ground phase as fallback
     contact_start, contact_end = ground_phases[0][0], ground_phases[0][1]
+    is_drop_jump = False
 
     # Detect if this is a drop jump or regular jump
-    # Drop jump: first ground phase is elevated (lower y), followed by drop, then landing (higher y)
-    is_drop_jump = False
     if air_phases_indexed and len(ground_phases) >= 2:
         first_ground_start, first_ground_end, first_ground_idx = ground_phases[0]
         first_air_idx = air_phases_indexed[0][2]
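`_determine_drop_start_frame` and `_filter_phases_after_drop` together discard the athlete's initial stationary period: any phase that ends before the detected drop start is removed from both the integer and sub-frame phase lists, and a non-positive drop start leaves everything untouched. An illustrative sketch with made-up phase data (both imports are of private names from these modules, shown in the diff above):

    from kinemotion.dropjump.analysis import ContactState
    from kinemotion.dropjump.kinematics import _filter_phases_after_drop

    phases = [
        (0, 30, ContactState.ON_GROUND),   # standing on the box
        (31, 45, ContactState.IN_AIR),     # the drop
        (46, 60, ContactState.ON_GROUND),  # ground contact
    ]
    interp = [
        (0.0, 30.4, ContactState.ON_GROUND),
        (30.4, 45.2, ContactState.IN_AIR),
        (45.2, 60.0, ContactState.ON_GROUND),
    ]

    # With the drop start detected at frame 31, the stationary phase
    # (which ends at frame 30) is removed from both lists.
    f_phases, f_interp = _filter_phases_after_drop(phases, interp, 31)
    assert f_phases[0] == (31, 45, ContactState.IN_AIR)
    assert len(f_interp) == 2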
@@ -243,17 +219,29 @@ def calculate_drop_jump_metrics(
         [(s, e) for s, e, _ in ground_phases], key=lambda p: p[1] - p[0]
     )
 
-
-    metrics.contact_start_frame = contact_start
-    metrics.contact_end_frame = contact_end
+    return contact_start, contact_end, is_drop_jump
+
 
-
+def _find_precise_phase_timing(
+    contact_start: int,
+    contact_end: int,
+    interpolated_phases: list[tuple[float, float, ContactState]],
+) -> tuple[float, float]:
+    """Find precise sub-frame timing for contact phase.
+
+    Args:
+        contact_start: Integer contact start frame
+        contact_end: Integer contact end frame
+        interpolated_phases: Sub-frame precision phases
+
+    Returns:
+        Tuple of (contact_start_frac, contact_end_frac)
+    """
     contact_start_frac = float(contact_start)
     contact_end_frac = float(contact_end)
 
     # Find the matching ground phase in interpolated_phases
     for start_frac, end_frac, state in interpolated_phases:
-        # Match by checking if integer frames are within this phase
         if (
             state == ContactState.ON_GROUND
             and int(start_frac) <= contact_start <= int(end_frac) + 1
@@ -263,43 +251,82 @@ def calculate_drop_jump_metrics(
             contact_end_frac = end_frac
             break
 
-
-    contact_frames_precise = contact_end_frac - contact_start_frac
-    metrics.ground_contact_time = contact_frames_precise / fps
-    metrics.contact_start_frame_precise = contact_start_frac
-    metrics.contact_end_frame_precise = contact_end_frac
+    return contact_start_frac, contact_end_frac
+
 
-
+def _calculate_calibration_scale(
+    drop_height_m: float | None,
+    phases: list[tuple[int, int, ContactState]],
+    air_phases_indexed: list[tuple[int, int, int]],
+    foot_y_positions: np.ndarray,
+) -> float:
+    """Calculate calibration scale factor from known drop height.
+
+    Args:
+        drop_height_m: Known drop height in meters
+        phases: All phase tuples
+        air_phases_indexed: Air phases with indices
+        foot_y_positions: Vertical position array
+
+    Returns:
+        Scale factor (1.0 if no calibration possible)
+    """
     scale_factor = 1.0
-    if drop_height_m is not None and len(phases) >= 2:
-        # Find the initial drop by looking for first IN_AIR phase
-        # This represents the drop from the box
-
-        if air_phases_indexed and ground_phases:
-            # Get first air phase (the drop)
-            first_air_start, first_air_end, _ = air_phases_indexed[0]
-
-            # Initial position: at start of drop (on the box)
-            # Look back a few frames to get stable position on box
-            lookback_start = max(0, first_air_start - 5)
-            if lookback_start < first_air_start:
-                initial_position = float(
-                    np.mean(foot_y_positions[lookback_start:first_air_start])
-                )
-            else:
-                initial_position = float(foot_y_positions[first_air_start])
-
-            # Landing position: at the ground after drop
-            # Use position at end of first air phase
-            landing_position = float(foot_y_positions[first_air_end])
-
-            # Drop distance in normalized coordinates (y increases downward)
-            drop_normalized = landing_position - initial_position
-
-            if drop_normalized > 0.01:  # Sanity check (at least 1% of frame height)
-                # Calculate scale factor: real_meters / normalized_distance
-                scale_factor = drop_height_m / drop_normalized
 
+    if drop_height_m is None or len(phases) < 2:
+        return scale_factor
+
+    if not air_phases_indexed:
+        return scale_factor
+
+    # Get first air phase (the drop)
+    first_air_start, first_air_end, _ = air_phases_indexed[0]
+
+    # Initial position: at start of drop (on the box)
+    lookback_start = max(0, first_air_start - 5)
+    if lookback_start < first_air_start:
+        initial_position = float(
+            np.mean(foot_y_positions[lookback_start:first_air_start])
+        )
+    else:
+        initial_position = float(foot_y_positions[first_air_start])
+
+    # Landing position: at the ground after drop
+    landing_position = float(foot_y_positions[first_air_end])
+
+    # Drop distance in normalized coordinates (y increases downward)
+    drop_normalized = landing_position - initial_position
+
+    if drop_normalized > 0.01:  # Sanity check
+        scale_factor = drop_height_m / drop_normalized
+
+    return scale_factor
+
+
+def _analyze_flight_phase(
+    metrics: DropJumpMetrics,
+    phases: list[tuple[int, int, ContactState]],
+    interpolated_phases: list[tuple[float, float, ContactState]],
+    contact_end: int,
+    foot_y_positions: np.ndarray,
+    fps: float,
+    drop_height_m: float | None,
+    scale_factor: float,
+    kinematic_correction_factor: float,
+) -> None:
+    """Analyze flight phase and calculate jump height metrics.
+
+    Args:
+        metrics: DropJumpMetrics object to populate
+        phases: All phase tuples
+        interpolated_phases: Sub-frame precision phases
+        contact_end: End of contact phase
+        foot_y_positions: Vertical position array
+        fps: Video frame rate
+        drop_height_m: Known drop height (optional)
+        scale_factor: Calibration scale factor
+        kinematic_correction_factor: Correction for kinematic method
+    """
     # Find flight phase after ground contact
     flight_phases = [
         (start, end)
@@ -307,94 +334,179 @@ def calculate_drop_jump_metrics(
         if state == ContactState.IN_AIR and start > contact_end
     ]
 
-    if flight_phases:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        metrics.
-
-        # Choose measurement method based on calibration availability
-        if drop_height_m is not None and scale_factor > 1.0:
-            # Use calibrated trajectory measurement (most accurate)
-            metrics.jump_height = height_normalized * scale_factor
-            metrics.jump_height_kinematic = jump_height_kinematic
-        else:
-            # Apply kinematic correction factor to kinematic method
-            # ⚠️ WARNING: Kinematic correction factor is EXPERIMENTAL and UNVALIDATED
-            #
-            # The kinematic method h = (g × t²) / 8 may underestimate jump height due to:
-            # 1. Contact detection timing (may detect landing slightly early/late)
-            # 2. Frame rate limitations (30 fps = 33ms intervals between samples)
-            # 3. Foot position vs center of mass difference (feet land before CoM peak)
-            #
-            # Default correction factor is 1.0 (no correction). Historical testing
-            # suggested 1.35 could improve accuracy, but:
-            # - This value has NOT been validated against gold standards
-            #   (force plates, motion capture)
-            # - The actual correction needed may vary by athlete, jump type, and video quality
-            # - Using a correction factor without validation is experimental
-            #
-            # For validated measurements, use:
-            # - Calibrated measurement with --drop-height parameter
-            # - Or compare against validated measurement systems
-            metrics.jump_height = (
-                jump_height_kinematic * kinematic_correction_factor
-            )
-            metrics.jump_height_kinematic = jump_height_kinematic
+    if not flight_phases:
+        return
+
+    flight_start, flight_end = flight_phases[0]
+
+    # Store integer frame indices
+    metrics.flight_start_frame = flight_start
+    metrics.flight_end_frame = flight_end
+
+    # Find precise timing
+    flight_start_frac = float(flight_start)
+    flight_end_frac = float(flight_end)
+
+    for start_frac, end_frac, state in interpolated_phases:
+        if (
+            state == ContactState.IN_AIR
+            and int(start_frac) <= flight_start <= int(end_frac) + 1
+            and int(start_frac) <= flight_end <= int(end_frac) + 1
+        ):
+            flight_start_frac = start_frac
+            flight_end_frac = end_frac
+            break
+
+    # Calculate flight time
+    flight_frames_precise = flight_end_frac - flight_start_frac
+    metrics.flight_time = flight_frames_precise / fps
+    metrics.flight_start_frame_precise = flight_start_frac
+    metrics.flight_end_frame_precise = flight_end_frac
+
+    # Calculate jump height using kinematic method
+    g = 9.81  # m/s^2
+    jump_height_kinematic = (g * metrics.flight_time**2) / 8
+
+    # Calculate jump height from trajectory
+    takeoff_position = foot_y_positions[flight_start]
+    flight_positions = foot_y_positions[flight_start : flight_end + 1]
+
+    if len(flight_positions) > 0:
+        peak_idx = np.argmin(flight_positions)
+        metrics.peak_height_frame = int(flight_start + peak_idx)
+        peak_position = np.min(flight_positions)
+
+        height_normalized = float(takeoff_position - peak_position)
+        metrics.jump_height_trajectory = height_normalized
+
+        # Choose measurement method based on calibration availability
+        if drop_height_m is not None and scale_factor > 1.0:
+            metrics.jump_height = height_normalized * scale_factor
+            metrics.jump_height_kinematic = jump_height_kinematic
         else:
-
-            if drop_height_m is None:
-                # Apply kinematic correction factor (see detailed comment above)
-                metrics.jump_height = (
-                    jump_height_kinematic * kinematic_correction_factor
-                )
-            else:
-                metrics.jump_height = jump_height_kinematic
+            metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
             metrics.jump_height_kinematic = jump_height_kinematic
+    else:
+        # Fallback to kinematic if no position data
+        if drop_height_m is None:
+            metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
+        else:
+            metrics.jump_height = jump_height_kinematic
+        metrics.jump_height_kinematic = jump_height_kinematic
+
+
+def calculate_drop_jump_metrics(
+    contact_states: list[ContactState],
+    foot_y_positions: np.ndarray,
+    fps: float,
+    drop_height_m: float | None = None,
+    drop_start_frame: int | None = None,
+    velocity_threshold: float = 0.02,
+    smoothing_window: int = 5,
+    polyorder: int = 2,
+    use_curvature: bool = True,
+    kinematic_correction_factor: float = 1.0,
+) -> DropJumpMetrics:
+    """
+    Calculate drop-jump metrics from contact states and positions.
+
+    Args:
+        contact_states: Contact state for each frame
+        foot_y_positions: Vertical positions of feet (normalized 0-1)
+        fps: Video frame rate
+        drop_height_m: Known drop box/platform height in meters for calibration (optional)
+        velocity_threshold: Velocity threshold used for contact detection (for interpolation)
+        smoothing_window: Window size for velocity/acceleration smoothing (must be odd)
+        polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
+        use_curvature: Whether to use curvature analysis for refining transitions
+        kinematic_correction_factor: Correction factor for kinematic jump height calculation
+            (default: 1.0 = no correction). Historical testing suggested 1.35, but this is
+            unvalidated. Use calibrated measurement (--drop-height) for validated results.
+
+    Returns:
+        DropJumpMetrics object with calculated values
+    """
+    metrics = DropJumpMetrics()
+
+    # Determine drop start frame
+    drop_start_frame_value = _determine_drop_start_frame(
+        drop_start_frame, foot_y_positions, fps, smoothing_window
+    )
+
+    # Find contact phases
+    phases = find_contact_phases(contact_states)
+    interpolated_phases = find_interpolated_phase_transitions_with_curvature(
+        foot_y_positions,
+        contact_states,
+        velocity_threshold,
+        smoothing_window,
+        polyorder,
+        use_curvature,
+    )
+
+    if not phases:
+        return metrics
+
+    # Filter phases to only include those after drop start
+    phases, interpolated_phases = _filter_phases_after_drop(
+        phases, interpolated_phases, drop_start_frame_value
+    )
+
+    if not phases:
+        return metrics
+
+    # Separate ground and air phases
+    ground_phases = [
+        (start, end, i)
+        for i, (start, end, state) in enumerate(phases)
+        if state == ContactState.ON_GROUND
+    ]
+    air_phases_indexed = [
+        (start, end, i)
+        for i, (start, end, state) in enumerate(phases)
+        if state == ContactState.IN_AIR
+    ]
+
+    if not ground_phases:
+        return metrics
+
+    # Identify main contact phase
+    contact_start, contact_end, _ = _identify_main_contact_phase(
+        phases, ground_phases, air_phases_indexed, foot_y_positions
+    )
+
+    # Store integer frame indices
+    metrics.contact_start_frame = contact_start
+    metrics.contact_end_frame = contact_end
+
+    # Find precise timing for contact phase
+    contact_start_frac, contact_end_frac = _find_precise_phase_timing(
+        contact_start, contact_end, interpolated_phases
+    )
+
+    # Calculate ground contact time
+    contact_frames_precise = contact_end_frac - contact_start_frac
+    metrics.ground_contact_time = contact_frames_precise / fps
+    metrics.contact_start_frame_precise = contact_start_frac
+    metrics.contact_end_frame_precise = contact_end_frac
+
+    # Calculate calibration scale factor
+    scale_factor = _calculate_calibration_scale(
+        drop_height_m, phases, air_phases_indexed, foot_y_positions
+    )
+
+    # Analyze flight phase and calculate jump height
+    _analyze_flight_phase(
+        metrics,
+        phases,
+        interpolated_phases,
+        contact_end,
+        foot_y_positions,
+        fps,
+        drop_height_m,
+        scale_factor,
+        kinematic_correction_factor,
+    )
 
     return metrics
 
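The two height estimates survive the refactor unchanged: `_analyze_flight_phase` computes the kinematic estimate from flight time as h = g·t²/8, and the calibrated estimate multiplies the normalized takeoff-to-peak rise by the scale factor from `_calculate_calibration_scale`. A worked example with made-up numbers:

    g = 9.81  # m/s^2, as in _analyze_flight_phase

    # Kinematic method: a 0.50 s flight time implies
    flight_time = 0.50
    h_kinematic = (g * flight_time**2) / 8   # = 0.3066... ~ 0.31 m

    # Calibrated method: a known 0.30 m drop that spans 0.15 of the
    # normalized frame height gives 0.30 / 0.15 = 2.0 m per unit, so a
    # normalized rise of 0.12 maps to
    h_calibrated = 0.12 * (0.30 / 0.15)      # = 0.24 m

The kinematic path still multiplies by `kinematic_correction_factor`, which defaults to 1.0 and, per the docstring above, remains unvalidated; the calibrated path is taken only when `drop_height_m` is given and the computed scale factor exceeds 1.0.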
{kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.10.3
+Version: 0.10.5
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
{kinemotion-0.10.3.dist-info → kinemotion-0.10.5.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=Z85xg29NA-r4IjrSbAkJpMFigyxACFGUb-37AiMp6YY,350
-kinemotion/api.py,sha256=
+kinemotion/api.py,sha256=2MsiHsmmxfpvhHIbDXcZpvsCLROKi4MV8LQpKu2r_a8,20078
 kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
 kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
 kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQIdw,10508
@@ -11,10 +11,10 @@ kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj
 kinemotion/dropjump/analysis.py,sha256=HfJt2t9IsMBiBUz7apIzdxbRH9QqzlFnDVVWcKhU3ow,23291
 kinemotion/dropjump/cli.py,sha256=C6v6E3g1W-KNFc0xUzSjg4wKve1WsPxKvUBJV7LiMNI,26468
 kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
-kinemotion/dropjump/kinematics.py,sha256=
+kinemotion/dropjump/kinematics.py,sha256=RM_O8Kdc6aEiPIu_99N4cu-4EhYSQxtBGASJF_dmQaU,19081
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.10.
-kinemotion-0.10.
-kinemotion-0.10.
-kinemotion-0.10.
-kinemotion-0.10.
+kinemotion-0.10.5.dist-info/METADATA,sha256=I5dXmUcnNNtKS43uCbC_zbMLBMZAg_QpOMhwpnFSYcw,20333
+kinemotion-0.10.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kinemotion-0.10.5.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.10.5.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.10.5.dist-info/RECORD,,
|
File without changes
|
|
File without changes
|
|
File without changes
|