kinemotion 0.76.3__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kinemotion might be problematic.
- kinemotion/__init__.py +3 -18
- kinemotion/api.py +7 -27
- kinemotion/cli.py +2 -4
- kinemotion/{countermovement_jump → cmj}/analysis.py +158 -16
- kinemotion/{countermovement_jump → cmj}/api.py +18 -46
- kinemotion/{countermovement_jump → cmj}/cli.py +46 -6
- kinemotion/cmj/debug_overlay.py +457 -0
- kinemotion/{countermovement_jump → cmj}/joint_angles.py +31 -96
- kinemotion/{countermovement_jump → cmj}/metrics_validator.py +271 -176
- kinemotion/{countermovement_jump → cmj}/validation_bounds.py +18 -1
- kinemotion/core/__init__.py +2 -11
- kinemotion/core/auto_tuning.py +107 -149
- kinemotion/core/cli_utils.py +0 -74
- kinemotion/core/debug_overlay_utils.py +15 -142
- kinemotion/core/experimental.py +51 -55
- kinemotion/core/filtering.py +56 -116
- kinemotion/core/pipeline_utils.py +2 -2
- kinemotion/core/pose.py +98 -47
- kinemotion/core/quality.py +6 -4
- kinemotion/core/smoothing.py +51 -65
- kinemotion/core/types.py +0 -15
- kinemotion/core/validation.py +7 -76
- kinemotion/core/video_io.py +27 -41
- kinemotion/{drop_jump → dropjump}/__init__.py +8 -2
- kinemotion/{drop_jump → dropjump}/analysis.py +120 -282
- kinemotion/{drop_jump → dropjump}/api.py +33 -59
- kinemotion/{drop_jump → dropjump}/cli.py +136 -70
- kinemotion/dropjump/debug_overlay.py +182 -0
- kinemotion/{drop_jump → dropjump}/kinematics.py +65 -175
- kinemotion/{drop_jump → dropjump}/metrics_validator.py +51 -25
- kinemotion/{drop_jump → dropjump}/validation_bounds.py +1 -1
- kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx +3 -0
- kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx +3 -0
- {kinemotion-0.76.3.dist-info → kinemotion-2.0.0.dist-info}/METADATA +26 -75
- kinemotion-2.0.0.dist-info/RECORD +49 -0
- kinemotion/core/overlay_constants.py +0 -61
- kinemotion/core/video_analysis_base.py +0 -132
- kinemotion/countermovement_jump/debug_overlay.py +0 -325
- kinemotion/drop_jump/debug_overlay.py +0 -241
- kinemotion/squat_jump/__init__.py +0 -5
- kinemotion/squat_jump/analysis.py +0 -377
- kinemotion/squat_jump/api.py +0 -610
- kinemotion/squat_jump/cli.py +0 -309
- kinemotion/squat_jump/debug_overlay.py +0 -163
- kinemotion/squat_jump/kinematics.py +0 -342
- kinemotion/squat_jump/metrics_validator.py +0 -438
- kinemotion/squat_jump/validation_bounds.py +0 -221
- kinemotion-0.76.3.dist-info/RECORD +0 -57
- /kinemotion/{countermovement_jump → cmj}/__init__.py +0 -0
- /kinemotion/{countermovement_jump → cmj}/kinematics.py +0 -0
- {kinemotion-0.76.3.dist-info → kinemotion-2.0.0.dist-info}/WHEEL +0 -0
- {kinemotion-0.76.3.dist-info → kinemotion-2.0.0.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.76.3.dist-info → kinemotion-2.0.0.dist-info}/licenses/LICENSE +0 -0
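
The renames in the list above (countermovement_jump → cmj, drop_jump → dropjump, plus the removal of the squat_jump package) imply that import paths change across this major version. A minimal migration sketch, using only module paths visible in the file list; whether the objects inside those modules kept their names is not shown by this diff:

    # kinemotion 0.76.3 (module paths taken from the old file list)
    from kinemotion.countermovement_jump import api as cmj_api
    from kinemotion.drop_jump import api as dropjump_api
    from kinemotion.squat_jump import api as sj_api  # package removed entirely in 2.0.0

    # kinemotion 2.0.0 (module paths taken from the new file list)
    from kinemotion.cmj import api as cmj_api
    from kinemotion.dropjump import api as dropjump_api
    # No squat-jump module ships in 2.0.0; callers must drop or replace that import.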
kinemotion/drop_jump/debug_overlay.py
@@ -1,241 +0,0 @@
-"""Debug overlay rendering for drop jump analysis."""
-
-import cv2
-import numpy as np
-
-from ..core.debug_overlay_utils import BaseDebugOverlayRenderer
-from ..core.overlay_constants import (
-    BLACK,
-    COM_CIRCLE_RADIUS,
-    COM_OUTLINE_RADIUS,
-    CYAN,
-    FOOT_CIRCLE_RADIUS,
-    FOOT_LANDMARK_RADIUS,
-    FOOT_VISIBILITY_THRESHOLD,
-    GREEN,
-    HIP_MARKER_RADIUS,
-    METRICS_BOX_WIDTH,
-    ORANGE,
-    PHASE_LABEL_LINE_HEIGHT,
-    PHASE_LABEL_START_Y,
-    RED,
-    WHITE,
-    Color,
-    LandmarkDict,
-)
-from ..core.pose import compute_center_of_mass
-from .analysis import ContactState, compute_average_foot_position
-from .kinematics import DropJumpMetrics
-
-
-class DropJumpDebugOverlayRenderer(BaseDebugOverlayRenderer):
-    """Renders debug information on video frames."""
-
-    def _get_contact_state_color(self, contact_state: ContactState) -> Color:
-        """Get color based on ground contact state."""
-        return GREEN if contact_state == ContactState.ON_GROUND else RED
-
-    def _draw_com_visualization(
-        self,
-        frame: np.ndarray,
-        landmarks: LandmarkDict,
-        contact_state: ContactState,
-    ) -> None:
-        """Draw center of mass visualization on frame."""
-        com_x, com_y, _ = compute_center_of_mass(landmarks)
-        px, py = self._normalize_to_pixels(com_x, com_y)
-
-        color = self._get_contact_state_color(contact_state)
-        cv2.circle(frame, (px, py), COM_CIRCLE_RADIUS, color, -1)
-        cv2.circle(frame, (px, py), COM_OUTLINE_RADIUS, WHITE, 2)
-
-        # Draw hip midpoint reference
-        if "left_hip" in landmarks and "right_hip" in landmarks:
-            lh_x, lh_y, _ = landmarks["left_hip"]
-            rh_x, rh_y, _ = landmarks["right_hip"]
-            hip_x, hip_y = self._normalize_to_pixels((lh_x + rh_x) / 2, (lh_y + rh_y) / 2)
-            cv2.circle(frame, (hip_x, hip_y), HIP_MARKER_RADIUS, ORANGE, -1)
-            cv2.line(frame, (hip_x, hip_y), (px, py), ORANGE, 2)
-
-    def _draw_foot_visualization(
-        self,
-        frame: np.ndarray,
-        landmarks: LandmarkDict,
-        contact_state: ContactState,
-    ) -> None:
-        """Draw foot position visualization on frame."""
-        foot_x, foot_y = compute_average_foot_position(landmarks)
-        px, py = self._normalize_to_pixels(foot_x, foot_y)
-
-        color = self._get_contact_state_color(contact_state)
-        cv2.circle(frame, (px, py), FOOT_CIRCLE_RADIUS, color, -1)
-
-        # Draw individual foot landmarks
-        foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-        for key in foot_keys:
-            if key in landmarks:
-                x, y, vis = landmarks[key]
-                if vis > FOOT_VISIBILITY_THRESHOLD:
-                    lx, ly = self._normalize_to_pixels(x, y)
-                    cv2.circle(frame, (lx, ly), FOOT_LANDMARK_RADIUS, CYAN, -1)
-
-    def _draw_phase_labels(
-        self,
-        frame: np.ndarray,
-        frame_idx: int,
-        metrics: DropJumpMetrics,
-    ) -> None:
-        """Draw phase labels (ground contact, flight, peak) on frame."""
-        # Phase configurations: (start_frame, end_frame, label, color)
-        # For range-based phases (ground contact, flight)
-        range_phase_configs = [
-            (metrics.contact_start_frame, metrics.contact_end_frame, "GROUND CONTACT", GREEN),
-            (metrics.flight_start_frame, metrics.flight_end_frame, "FLIGHT PHASE", RED),
-        ]
-
-        y_offset = PHASE_LABEL_START_Y
-        for start_frame, end_frame, label, color in range_phase_configs:
-            if start_frame and end_frame and start_frame <= frame_idx <= end_frame:
-                cv2.putText(
-                    frame,
-                    label,
-                    (10, y_offset),
-                    cv2.FONT_HERSHEY_SIMPLEX,
-                    0.7,
-                    color,
-                    2,
-                )
-                y_offset += PHASE_LABEL_LINE_HEIGHT
-
-        # Single-frame indicator (peak height)
-        if metrics.peak_height_frame == frame_idx:
-            cv2.putText(
-                frame,
-                "PEAK HEIGHT",
-                (10, y_offset),
-                cv2.FONT_HERSHEY_SIMPLEX,
-                0.7,
-                (255, 0, 255),  # Magenta
-                2,
-            )
-
-    def _draw_info_box(
-        self,
-        frame: np.ndarray,
-        top_left: tuple[int, int],
-        bottom_right: tuple[int, int],
-        border_color: Color,
-    ) -> None:
-        """Draw a filled box with border for displaying information."""
-        cv2.rectangle(frame, top_left, bottom_right, BLACK, -1)
-        cv2.rectangle(frame, top_left, bottom_right, border_color, 2)
-
-    def _draw_metrics_summary(
-        self, frame: np.ndarray, frame_idx: int, metrics: DropJumpMetrics
-    ) -> None:
-        """Draw metrics summary in bottom right after flight phase ends."""
-        if metrics.flight_end_frame is None or frame_idx < metrics.flight_end_frame:
-            return
-
-        # Build metrics text list
-        metrics_text: list[str] = []
-
-        if metrics.ground_contact_time is not None:
-            metrics_text.append(f"Contact Time: {metrics.ground_contact_time * 1000:.0f}ms")
-
-        if metrics.flight_time is not None:
-            metrics_text.append(f"Flight Time: {metrics.flight_time * 1000:.0f}ms")
-
-        if metrics.jump_height is not None:
-            metrics_text.append(f"Jump Height: {metrics.jump_height:.3f}m")
-
-        # Calculate RSI (Reactive Strength Index)
-        if (
-            metrics.jump_height is not None
-            and metrics.ground_contact_time is not None
-            and metrics.ground_contact_time > 0
-        ):
-            rsi = metrics.jump_height / metrics.ground_contact_time
-            metrics_text.append(f"RSI: {rsi:.2f}")
-
-        if not metrics_text:
-            return
-
-        # Calculate box dimensions
-        box_height = len(metrics_text) * 30 + 20
-        top_left = (self.width - METRICS_BOX_WIDTH, self.height - box_height - 10)
-        bottom_right = (self.width - 10, self.height - 10)
-
-        self._draw_info_box(frame, top_left, bottom_right, GREEN)
-
-        # Draw metrics text
-        text_x = self.width - METRICS_BOX_WIDTH + 10
-        text_y = self.height - box_height + 10
-        for text in metrics_text:
-            cv2.putText(frame, text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, WHITE, 1)
-            text_y += 30
-
-    def render_frame(
-        self,
-        frame: np.ndarray,
-        landmarks: LandmarkDict | None,
-        contact_state: ContactState,
-        frame_idx: int,
-        metrics: DropJumpMetrics | None = None,
-        use_com: bool = False,
-    ) -> np.ndarray:
-        """
-        Render debug overlay on frame.
-
-        Args:
-            frame: Original video frame
-            landmarks: Pose landmarks for this frame
-            contact_state: Ground contact state
-            frame_idx: Current frame index
-            metrics: Drop-jump metrics (optional)
-            use_com: Whether to visualize CoM instead of feet (optional)
-
-        Returns:
-            Frame with debug overlay
-        """
-        with self.timer.measure("debug_video_copy"):
-            annotated = frame.copy()
-
-        with self.timer.measure("debug_video_draw"):
-            # Draw skeleton and landmarks
-            if landmarks:
-                self._draw_skeleton(annotated, landmarks)
-                if use_com:
-                    self._draw_com_visualization(annotated, landmarks, contact_state)
-                else:
-                    self._draw_foot_visualization(annotated, landmarks, contact_state)
-
-            # Draw contact state
-            state_color = self._get_contact_state_color(contact_state)
-            cv2.putText(
-                annotated,
-                f"State: {contact_state.value}",
-                (10, 30),
-                cv2.FONT_HERSHEY_SIMPLEX,
-                1,
-                state_color,
-                2,
-            )
-
-            # Draw frame number
-            cv2.putText(
-                annotated,
-                f"Frame: {frame_idx}",
-                (10, 70),
-                cv2.FONT_HERSHEY_SIMPLEX,
-                0.7,
-                WHITE,
-                2,
-            )
-
-            # Draw phase labels and metrics summary
-            if metrics:
-                self._draw_phase_labels(annotated, frame_idx, metrics)
-                self._draw_metrics_summary(annotated, frame_idx, metrics)
-
-        return annotated
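
The metrics box in the deleted overlay above prints an RSI value computed as jump_height / ground_contact_time. A minimal standalone sketch of that ratio; the flight-time-to-height conversion (h = g·t²/8) is a standard-kinematics assumption added here for context and does not appear anywhere in this diff:

    # Sketch only: mirrors the "RSI" line drawn by the deleted overlay.
    # The height-from-flight-time conversion below is an assumption
    # (standard projectile kinematics), not taken from this diff.
    G = 9.81  # gravitational acceleration, m/s^2

    def reactive_strength_index(flight_time_s: float, ground_contact_time_s: float) -> float:
        jump_height_m = G * flight_time_s**2 / 8.0  # h = g * t^2 / 8
        return jump_height_m / ground_contact_time_s  # same ratio as the overlay's RSI text

    print(f"RSI: {reactive_strength_index(0.45, 0.22):.2f}")  # -> RSI: 1.13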
kinemotion/squat_jump/analysis.py
@@ -1,377 +0,0 @@
-"""Phase detection logic for Squat Jump (SJ) analysis."""
-
-from enum import Enum
-
-import numpy as np
-from scipy.signal import savgol_filter
-
-from ..core.types import FloatArray
-
-
-class SJPhase(Enum):
-    """Phases of a squat jump."""
-
-    SQUAT_HOLD = "squat_hold"
-    CONCENTRIC = "concentric"
-    FLIGHT = "flight"
-    LANDING = "landing"
-    UNKNOWN = "unknown"
-
-
-def detect_sj_phases(
-    positions: FloatArray,
-    fps: float,
-    velocity_threshold: float = 0.1,
-    window_length: int = 5,
-    polyorder: int = 2,
-) -> tuple[int, int, int, int] | None:
-    """Detect phases in a squat jump video.
-
-    Squat Jump phase detection follows this progression:
-    1. Squat hold - static squat position
-    2. Concentric - upward movement from squat
-    3. Takeoff - feet leave ground
-    4. Flight - in the air
-    5. Landing - feet contact ground
-
-    Args:
-        positions: 1D array of vertical positions (normalized coordinates)
-        fps: Video frames per second
-        velocity_threshold: Threshold for detecting flight phase (m/s)
-        window_length: Window size for velocity smoothing
-        polyorder: Polynomial order for smoothing
-
-    Returns:
-        Tuple of (squat_hold_start, concentric_start, takeoff_frame, landing_frame)
-        or None if phases cannot be detected
-    """
-    if len(positions) < 20:  # Minimum viable sequence length
-        return None
-
-    # Ensure window length is appropriate for derivative calculations
-    if window_length % 2 == 0:
-        window_length += 1
-
-    # Step 1: Detect squat hold start
-    squat_hold_start = _detect_squat_start(
-        positions,
-        fps,
-        velocity_threshold=velocity_threshold,
-        window_length=window_length,
-        polyorder=polyorder,
-    )
-
-    if squat_hold_start is None:
-        # If no squat hold detected, start from reasonable beginning
-        squat_hold_start = len(positions) // 4
-
-    # Step 2: Compute signed velocities for phase detection
-    if len(positions) < window_length:
-        # Fallback for short sequences
-        velocities = np.diff(positions, prepend=positions[0])
-    else:
-        velocities = savgol_filter(
-            positions, window_length, polyorder, deriv=1, delta=1.0, mode="interp"
-        )
-
-    # Step 3: Detect takeoff (this marks the start of concentric phase)
-    takeoff_frame = _detect_takeoff(
-        positions, velocities, fps, velocity_threshold=velocity_threshold
-    )
-
-    if takeoff_frame is None:
-        return None
-
-    # Concentric start begins when upward movement starts after squat hold
-    # This is just before takeoff when velocity becomes significantly negative
-    concentric_start = takeoff_frame
-    # Look backward from takeoff to find where concentric phase truly begins
-    for i in range(takeoff_frame - 1, max(squat_hold_start, 0), -1):
-        if velocities[i] <= -velocity_threshold * 0.5:  # Start of upward movement
-            concentric_start = i
-            break
-
-    # Step 4: Detect landing
-    landing_frame = _detect_landing(
-        positions,
-        velocities,
-        fps,
-        velocity_threshold=velocity_threshold,
-        min_flight_frames=5,
-        landing_search_window_s=0.5,
-        window_length=window_length,
-        polyorder=polyorder,
-    )
-
-    if landing_frame is None:
-        # Fallback: estimate landing from peak height + typical flight time
-        # takeoff_frame is guaranteed to be an int here (we returned earlier if None)
-        peak_search_start = takeoff_frame
-        peak_search_end = min(len(positions), takeoff_frame + int(fps * 1.0))
-        if peak_search_end > peak_search_start:
-            flight_positions = positions[peak_search_start:peak_search_end]
-            peak_idx = int(np.argmin(flight_positions))
-            peak_frame = peak_search_start + peak_idx
-            # Estimate landing as 0.3s after peak (typical SJ flight time)
-            landing_frame = peak_frame + int(fps * 0.3)
-        else:
-            return None
-
-    # Validate the detected phases
-    if landing_frame <= takeoff_frame or takeoff_frame <= squat_hold_start:
-        # Invalid phase sequence
-        return None
-
-    # Return all detected phases
-    return (squat_hold_start, concentric_start, takeoff_frame, landing_frame)
-
-
-def _detect_squat_start(
-    positions: FloatArray,
-    fps: float,
-    velocity_threshold: float = 0.1,
-    min_hold_duration: float = 0.15,
-    min_search_frame: int = 20,
-    window_length: int = 5,
-    polyorder: int = 2,
-) -> int | None:
-    """Detect start of squat hold phase.
-
-    Squat hold is detected when the athlete maintains a relatively stable
-    position with low velocity for a minimum duration.
-
-    Args:
-        positions: 1D array of vertical positions (normalized coordinates)
-        fps: Video frames per second
-        velocity_threshold: Maximum velocity to consider stationary (m/s)
-        min_hold_duration: Minimum duration to maintain stable position (seconds)
-        min_search_frame: Minimum frame to start searching (avoid initial settling)
-        window_length: Window size for velocity smoothing
-        polyorder: Polynomial order for smoothing
-
-    Returns:
-        Frame index where squat hold begins, or None if not detected
-    """
-    if len(positions) < min_search_frame + 10:
-        return None
-
-    # Compute velocity to detect stationary periods
-    if len(positions) < window_length:
-        velocities = np.abs(np.diff(positions, prepend=positions[0]))
-    else:
-        if window_length % 2 == 0:
-            window_length += 1
-        velocities = np.abs(
-            savgol_filter(positions, window_length, polyorder, deriv=1, delta=1.0, mode="interp")
-        )
-
-    # Search for stable period with low velocity
-    min_hold_frames = int(min_hold_duration * fps)
-    start_search = min_search_frame
-    end_search = len(positions) - min_hold_frames
-
-    # Look for consecutive frames with velocity below threshold
-    for i in range(start_search, end_search):
-        # Check if we have a stable period of sufficient duration
-        stable_period = velocities[i : i + min_hold_frames]
-        if np.all(stable_period <= velocity_threshold):
-            # Found a stable period - return start of this period
-            return i
-
-    return None
-
-
-def _find_takeoff_threshold_crossing(
-    velocities: FloatArray,
-    search_start: int,
-    search_end: int,
-    velocity_threshold: float,
-    min_duration_frames: int,
-) -> int | None:
-    """Find first frame where velocity exceeds threshold for minimum duration."""
-    above_threshold = velocities[search_start:search_end] <= -velocity_threshold
-    if not np.any(above_threshold):
-        return None
-
-    threshold_indices = np.nonzero(above_threshold)[0]
-    for idx in threshold_indices:
-        if idx + min_duration_frames < len(above_threshold):
-            if np.all(above_threshold[idx : idx + min_duration_frames]):
-                return search_start + idx
-    return None
-
-
-def _detect_takeoff(
-    positions: FloatArray,
-    velocities: FloatArray,
-    fps: float,
-    velocity_threshold: float = 0.1,
-    min_duration_frames: int = 5,
-    search_window_s: float = 0.3,
-) -> int | None:
-    """Detect takeoff frame in squat jump.
-
-    Takeoff occurs at peak upward velocity during the concentric phase.
-    For SJ, this is simply the maximum negative velocity after squat hold
-    before the upward movement.
-
-    Args:
-        positions: 1D array of vertical positions (normalized coordinates)
-        velocities: 1D array of SIGNED vertical velocities (negative = upward)
-        fps: Video frames per second
-        velocity_threshold: Minimum velocity threshold for takeoff (m/s)
-        min_duration_frames: Minimum frames above threshold to confirm takeoff
-        search_window_s: Time window to search for takeoff after squat hold (seconds)
-
-    Returns:
-        Frame index where takeoff occurs, or None if not detected
-    """
-    if len(positions) == 0 or len(velocities) == 0:
-        return None
-
-    # Find squat start to determine where to begin search
-    squat_start = _detect_squat_start(positions, fps)
-    if squat_start is None:
-        # If no squat start detected, start from reasonable middle point
-        squat_start = len(positions) // 3
-
-    # Search for takeoff after squat start
-    search_start = squat_start
-    search_end = min(len(velocities), int(squat_start + search_window_s * fps))
-
-    if search_end <= search_start:
-        return None
-
-    # Focus on concentric phase where velocity becomes negative (upward)
-    concentric_velocities = velocities[search_start:search_end]
-
-    # Find the most negative velocity (peak upward velocity = takeoff)
-    takeoff_idx = int(np.argmin(concentric_velocities))
-    takeoff_frame = search_start + takeoff_idx
-
-    # Verify velocity exceeds threshold
-    if velocities[takeoff_frame] > -velocity_threshold:
-        # Velocity not high enough - actual takeoff may be later
-        # Look for frames where velocity exceeds threshold with duration filter
-        return _find_takeoff_threshold_crossing(
-            velocities, search_start, search_end, velocity_threshold, min_duration_frames
-        )
-
-    return takeoff_frame if velocities[takeoff_frame] <= -velocity_threshold else None
-
-
-def _detect_impact_landing(
-    accelerations: FloatArray,
-    search_start: int,
-    search_end: int,
-) -> int | None:
-    """Detect landing by finding the maximum acceleration spike."""
-    landing_accelerations = accelerations[search_start:search_end]
-    if len(landing_accelerations) == 0:
-        return None
-
-    # Find maximum acceleration spike (impact)
-    landing_idx = int(np.argmax(landing_accelerations))
-    return search_start + landing_idx
-
-
-def _refine_landing_by_velocity(
-    velocities: FloatArray,
-    landing_frame: int,
-) -> int:
-    """Refine landing frame by looking for positive (downward) velocity."""
-    if landing_frame < len(velocities) and velocities[landing_frame] < 0:
-        # Velocity still upward - landing might not be detected yet
-        # Look ahead for where velocity becomes positive
-        for i in range(landing_frame, min(landing_frame + 10, len(velocities))):
-            if velocities[i] >= 0:
-                return i
-    return landing_frame
-
-
-def _detect_landing(
-    positions: FloatArray,
-    velocities: FloatArray,
-    fps: float,
-    velocity_threshold: float = 0.1,
-    min_flight_frames: int = 5,
-    landing_search_window_s: float = 0.5,
-    window_length: int = 5,
-    polyorder: int = 2,
-) -> int | None:
-    """Detect landing frame in squat jump.
-
-    Landing occurs after peak height when feet contact the ground.
-    Similar to CMJ - find position peak, then detect maximum deceleration
-    which corresponds to impact.
-
-    Args:
-        positions: 1D array of vertical positions (normalized coordinates)
-        velocities: 1D array of SIGNED vertical velocities (negative = up, positive = down)
-        fps: Video frames per second
-        velocity_threshold: Maximum velocity threshold for landing detection
-        min_flight_frames: Minimum frames in flight before landing
-        landing_search_window_s: Time window to search for landing after peak (seconds)
-        window_length: Window size for velocity smoothing
-        polyorder: Polynomial order for smoothing
-
-    Returns:
-        Frame index where landing occurs, or None if not detected
-    """
-    if len(positions) == 0 or len(velocities) == 0:
-        return None
-
-    # Find takeoff first (needed to determine where to start peak search)
-    takeoff_frame = _detect_takeoff(
-        positions, velocities, fps, velocity_threshold, 5, landing_search_window_s
-    )
-    if takeoff_frame is None:
-        return None
-
-    # Find peak height after takeoff
-    search_start = takeoff_frame
-    search_duration = int(fps * 0.7)  # Search next 0.7 seconds for peak
-    search_end = min(len(positions), search_start + search_duration)
-
-    if search_end <= search_start:
-        return None
-
-    # Find peak height (minimum position value in normalized coords = highest point)
-    flight_positions = positions[search_start:search_end]
-    peak_frame = search_start + int(np.argmin(flight_positions))
-
-    # After peak, look for landing using impact detection
-    landing_search_start = peak_frame + min_flight_frames
-    landing_search_end = min(
-        len(positions),
-        landing_search_start + int(landing_search_window_s * fps),
-    )
-
-    if landing_search_end <= landing_search_start:
-        return None
-
-    # Compute accelerations for impact detection
-    if len(positions) >= 5:
-        landing_window = window_length
-        if landing_window % 2 == 0:
-            landing_window += 1
-        # Use polyorder for smoothing (must be at least 2 for deriv=2)
-        eff_polyorder = max(2, polyorder)
-        accelerations = np.abs(
-            savgol_filter(
-                positions, landing_window, eff_polyorder, deriv=2, delta=1.0, mode="interp"
-            )
-        )
-    else:
-        # Fallback for short sequences
-        velocities_abs = np.abs(velocities)
-        accelerations = np.abs(np.diff(velocities_abs, prepend=velocities_abs[0]))
-
-    # Find impact: maximum positive acceleration (deceleration spike)
-    landing_frame = _detect_impact_landing(accelerations, landing_search_start, landing_search_end)
-
-    if landing_frame is None:
-        return None
-
-    # Additional verification: velocity should be positive (downward) at landing
-    return _refine_landing_by_velocity(velocities, landing_frame)
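
For reference, the entry point of the deleted module above is detect_sj_phases, which takes a 1-D array of normalized vertical positions plus the video fps and returns the four phase frame indices (or None). A minimal usage sketch, assuming kinemotion 0.76.3 is installed and that a pose-estimation step has already produced the position trace; the synthetic trace below is illustrative only, so the detector may well return None for it:

    import numpy as np

    from kinemotion.squat_jump.analysis import detect_sj_phases  # exists only in 0.76.3

    fps = 30.0
    # Illustrative normalized hip-height trace (y grows downward in image coordinates):
    # stand -> squat hold -> concentric rise -> flight -> landing.
    positions = np.concatenate([
        np.full(20, 0.60),            # standing
        np.full(25, 0.70),            # squat hold
        np.linspace(0.70, 0.40, 10),  # concentric phase and takeoff
        np.linspace(0.40, 0.62, 15),  # flight and landing
        np.full(20, 0.62),            # stabilisation
    ])

    phases = detect_sj_phases(positions, fps)
    if phases is None:
        print("Phases could not be detected")
    else:
        squat_hold_start, concentric_start, takeoff_frame, landing_frame = phases
        print(f"takeoff at frame {takeoff_frame}, landing at frame {landing_frame}, "
              f"flight time ~{(landing_frame - takeoff_frame) / fps:.2f}s")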