kinemotion 0.10.3-py3-none-any.whl → 0.10.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kinemotion has been flagged as possibly problematic.
- kinemotion/api.py +330 -154
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/METADATA +1 -1
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/RECORD +6 -6
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/WHEEL +0 -0
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/licenses/LICENSE +0 -0
kinemotion/api.py
CHANGED
@@ -9,18 +9,315 @@ from pathlib import Path
 import numpy as np

 from .core.auto_tuning import (
+    AnalysisParameters,
     QualityPreset,
+    VideoCharacteristics,
     analyze_video_sample,
     auto_tune_parameters,
 )
 from .core.pose import PoseTracker
 from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.video_io import VideoProcessor
-from .dropjump.analysis import
+from .dropjump.analysis import (
+    ContactState,
+    compute_average_foot_position,
+    detect_ground_contact,
+)
 from .dropjump.debug_overlay import DebugOverlayRenderer
 from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics


+def _parse_quality_preset(quality: str) -> QualityPreset:
+    """Parse and validate quality preset string.
+
+    Args:
+        quality: Quality preset string ('fast', 'balanced', or 'accurate')
+
+    Returns:
+        QualityPreset enum value
+
+    Raises:
+        ValueError: If quality preset is invalid
+    """
+    try:
+        return QualityPreset(quality.lower())
+    except ValueError as e:
+        raise ValueError(
+            f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
+        ) from e
+
+
+def _determine_confidence_levels(
+    quality_preset: QualityPreset,
+    detection_confidence: float | None,
+    tracking_confidence: float | None,
+) -> tuple[float, float]:
+    """Determine detection and tracking confidence levels.
+
+    Confidence levels are set based on quality preset and can be overridden
+    by expert parameters.
+
+    Args:
+        quality_preset: Quality preset enum
+        detection_confidence: Optional expert override for detection confidence
+        tracking_confidence: Optional expert override for tracking confidence
+
+    Returns:
+        Tuple of (detection_confidence, tracking_confidence)
+    """
+    # Set initial confidence from quality preset
+    initial_detection_conf = 0.5
+    initial_tracking_conf = 0.5
+
+    if quality_preset == QualityPreset.FAST:
+        initial_detection_conf = 0.3
+        initial_tracking_conf = 0.3
+    elif quality_preset == QualityPreset.ACCURATE:
+        initial_detection_conf = 0.6
+        initial_tracking_conf = 0.6
+
+    # Override with expert values if provided
+    if detection_confidence is not None:
+        initial_detection_conf = detection_confidence
+    if tracking_confidence is not None:
+        initial_tracking_conf = tracking_confidence
+
+    return initial_detection_conf, initial_tracking_conf
+
+
+def _apply_expert_overrides(
+    params: AnalysisParameters,
+    smoothing_window: int | None,
+    velocity_threshold: float | None,
+    min_contact_frames: int | None,
+    visibility_threshold: float | None,
+) -> AnalysisParameters:
+    """Apply expert parameter overrides to auto-tuned parameters.
+
+    Args:
+        params: Auto-tuned parameters object
+        smoothing_window: Optional override for smoothing window
+        velocity_threshold: Optional override for velocity threshold
+        min_contact_frames: Optional override for minimum contact frames
+        visibility_threshold: Optional override for visibility threshold
+
+    Returns:
+        Modified params object (mutated in place)
+    """
+    if smoothing_window is not None:
+        params.smoothing_window = smoothing_window
+    if velocity_threshold is not None:
+        params.velocity_threshold = velocity_threshold
+    if min_contact_frames is not None:
+        params.min_contact_frames = min_contact_frames
+    if visibility_threshold is not None:
+        params.visibility_threshold = visibility_threshold
+    return params
+
+
+def _print_verbose_parameters(
+    video: VideoProcessor,
+    characteristics: VideoCharacteristics,
+    quality_preset: QualityPreset,
+    params: AnalysisParameters,
+) -> None:
+    """Print auto-tuned parameters in verbose mode.
+
+    Args:
+        video: Video processor with fps and dimensions
+        characteristics: Video analysis characteristics
+        quality_preset: Selected quality preset
+        params: Auto-tuned parameters
+    """
+    print("\n" + "=" * 60)
+    print("AUTO-TUNED PARAMETERS")
+    print("=" * 60)
+    print(f"Video FPS: {video.fps:.2f}")
+    print(
+        f"Tracking quality: {characteristics.tracking_quality} "
+        f"(avg visibility: {characteristics.avg_visibility:.2f})"
+    )
+    print(f"Quality preset: {quality_preset.value}")
+    print("\nSelected parameters:")
+    print(f" smoothing_window: {params.smoothing_window}")
+    print(f" polyorder: {params.polyorder}")
+    print(f" velocity_threshold: {params.velocity_threshold:.4f}")
+    print(f" min_contact_frames: {params.min_contact_frames}")
+    print(f" visibility_threshold: {params.visibility_threshold}")
+    print(f" detection_confidence: {params.detection_confidence}")
+    print(f" tracking_confidence: {params.tracking_confidence}")
+    print(f" outlier_rejection: {params.outlier_rejection}")
+    print(f" bilateral_filter: {params.bilateral_filter}")
+    print(f" use_curvature: {params.use_curvature}")
+    print("=" * 60 + "\n")
+
+
+def _process_all_frames(
+    video: VideoProcessor, tracker: PoseTracker, verbose: bool
+) -> tuple[list, list]:
+    """Process all frames from video and extract pose landmarks.
+
+    Args:
+        video: Video processor to read frames from
+        tracker: Pose tracker for landmark detection
+        verbose: Print progress messages
+
+    Returns:
+        Tuple of (frames, landmarks_sequence)
+
+    Raises:
+        ValueError: If no frames could be processed
+    """
+    if verbose:
+        print("Tracking pose landmarks...")
+
+    landmarks_sequence = []
+    frames = []
+
+    while True:
+        frame = video.read_frame()
+        if frame is None:
+            break
+
+        frames.append(frame)
+        landmarks = tracker.process_frame(frame)
+        landmarks_sequence.append(landmarks)
+
+    tracker.close()
+
+    if not landmarks_sequence:
+        raise ValueError("No frames could be processed from video")
+
+    return frames, landmarks_sequence
+
+
+def _apply_smoothing(
+    landmarks_sequence: list, params: AnalysisParameters, verbose: bool
+) -> list:
+    """Apply smoothing to landmark sequence with auto-tuned parameters.
+
+    Args:
+        landmarks_sequence: Sequence of landmarks from all frames
+        params: Auto-tuned parameters containing smoothing settings
+        verbose: Print progress messages
+
+    Returns:
+        Smoothed landmarks sequence
+    """
+    if params.outlier_rejection or params.bilateral_filter:
+        if verbose:
+            if params.outlier_rejection:
+                print("Smoothing landmarks with outlier rejection...")
+            if params.bilateral_filter:
+                print("Using bilateral temporal filter...")
+        return smooth_landmarks_advanced(
+            landmarks_sequence,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
+            use_outlier_rejection=params.outlier_rejection,
+            use_bilateral=params.bilateral_filter,
+        )
+    else:
+        if verbose:
+            print("Smoothing landmarks...")
+        return smooth_landmarks(
+            landmarks_sequence,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
+        )
+
+
+def _extract_vertical_positions(
+    smoothed_landmarks: list,
+) -> tuple[np.ndarray, np.ndarray]:
+    """Extract vertical foot positions and visibilities from smoothed landmarks.
+
+    Args:
+        smoothed_landmarks: Smoothed landmark sequence
+
+    Returns:
+        Tuple of (vertical_positions, visibilities) as numpy arrays
+    """
+    position_list: list[float] = []
+    visibilities_list: list[float] = []
+
+    for frame_landmarks in smoothed_landmarks:
+        if frame_landmarks:
+            _, foot_y = compute_average_foot_position(frame_landmarks)
+            position_list.append(foot_y)
+
+            # Average visibility of foot landmarks
+            foot_vis = []
+            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
+                if key in frame_landmarks:
+                    foot_vis.append(frame_landmarks[key][2])
+            visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
+        else:
+            position_list.append(position_list[-1] if position_list else 0.5)
+            visibilities_list.append(0.0)
+
+    return np.array(position_list), np.array(visibilities_list)
+
+
+def _generate_outputs(
+    metrics: DropJumpMetrics,
+    json_output: str | None,
+    output_video: str | None,
+    frames: list,
+    smoothed_landmarks: list,
+    contact_states: list[ContactState],
+    video: VideoProcessor,
+    verbose: bool,
+) -> None:
+    """Generate JSON and debug video outputs if requested.
+
+    Args:
+        metrics: Calculated drop jump metrics
+        json_output: Optional path for JSON output
+        output_video: Optional path for debug video
+        frames: List of video frames
+        smoothed_landmarks: Smoothed landmark sequence
+        contact_states: Ground contact state for each frame
+        video: Video processor with dimensions and fps
+        verbose: Print progress messages
+    """
+    # Save JSON if requested
+    if json_output:
+        import json
+
+        output_path = Path(json_output)
+        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
+        if verbose:
+            print(f"Metrics written to: {json_output}")
+
+    # Generate debug video if requested
+    if output_video:
+        if verbose:
+            print(f"Generating debug video: {output_video}")
+
+        with DebugOverlayRenderer(
+            output_video,
+            video.width,
+            video.height,
+            video.display_width,
+            video.display_height,
+            video.fps,
+        ) as renderer:
+            for i, frame in enumerate(frames):
+                annotated = renderer.render_frame(
+                    frame,
+                    smoothed_landmarks[i],
+                    contact_states[i],
+                    i,
+                    metrics,
+                    use_com=False,
+                )
+                renderer.write_frame(annotated)
+
+        if verbose:
+            print(f"Debug video saved: {output_video}")
+
+
 @dataclass
 class VideoResult:
     """Result of processing a single video."""
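The extracted helpers are small enough to exercise in isolation. A minimal sketch of how the first two compose, assuming they are imported from kinemotion.api (they are private helpers, so this is illustrative only, not a public API):

```python
# Illustrative only: _parse_quality_preset and _determine_confidence_levels
# are private helpers of kinemotion.api, not a supported public interface.
from kinemotion.api import _determine_confidence_levels, _parse_quality_preset

preset = _parse_quality_preset("ACCURATE")  # case-insensitive via quality.lower()

# Preset defaults apply when no expert override is given...
det, trk = _determine_confidence_levels(preset, None, None)
assert (det, trk) == (0.6, 0.6)

# ...and an override replaces only the value it targets.
det, trk = _determine_confidence_levels(preset, 0.8, None)
assert (det, trk) == (0.8, 0.6)
```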
@@ -94,12 +391,7 @@ def process_video(
         raise FileNotFoundError(f"Video file not found: {video_path}")

     # Convert quality string to enum
-    try:
-        quality_preset = QualityPreset(quality.lower())
-    except ValueError as e:
-        raise ValueError(
-            f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
-        ) from e
+    quality_preset = _parse_quality_preset(quality)

     # Initialize video processor
     with VideoProcessor(video_path) as video:
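The inline try/except is gone from process_video; the same validation now lives in _parse_quality_preset, including the exception chaining. A small sketch of the (unchanged) error path:

```python
# Sketch of the error path shown above; the chained enum error is preserved
# by the "raise ... from e" inside _parse_quality_preset.
from kinemotion.api import _parse_quality_preset

try:
    _parse_quality_preset("turbo")
except ValueError as err:
    print(err)            # Invalid quality preset: turbo. Must be 'fast', 'balanced', or 'accurate'
    print(err.__cause__)  # original ValueError raised by QualityPreset("turbo")
```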
@@ -109,138 +401,46 @@ def process_video(
             f"{video.frame_count} frames"
         )

-        # Determine
-        initial_detection_conf = 0.5
-        initial_tracking_conf = 0.5
-
-        if quality_preset == QualityPreset.FAST:
-            initial_detection_conf = 0.3
-            initial_tracking_conf = 0.3
-        elif quality_preset == QualityPreset.ACCURATE:
-            initial_detection_conf = 0.6
-            initial_tracking_conf = 0.6
-
-        # Override with expert values if provided
-        if detection_confidence is not None:
-            initial_detection_conf = detection_confidence
-        if tracking_confidence is not None:
-            initial_tracking_conf = tracking_confidence
+        # Determine detection/tracking confidence levels
+        detection_conf, tracking_conf = _determine_confidence_levels(
+            quality_preset, detection_confidence, tracking_confidence
+        )

-        #
+        # Process all frames with pose tracking
         tracker = PoseTracker(
-            min_detection_confidence=
-            min_tracking_confidence=
+            min_detection_confidence=detection_conf,
+            min_tracking_confidence=tracking_conf,
         )
-
-        # Process all frames
-        if verbose:
-            print("Tracking pose landmarks...")
-
-        landmarks_sequence = []
-        frames = []
-
-        while True:
-            frame = video.read_frame()
-            if frame is None:
-                break
-
-            frames.append(frame)
-            landmarks = tracker.process_frame(frame)
-            landmarks_sequence.append(landmarks)
-
-        tracker.close()
-
-        if not landmarks_sequence:
-            raise ValueError("No frames could be processed from video")
+        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)

         # Analyze video characteristics and auto-tune parameters
         characteristics = analyze_video_sample(
             landmarks_sequence, video.fps, video.frame_count
         )
-
         params = auto_tune_parameters(characteristics, quality_preset)

         # Apply expert overrides if provided
-        if smoothing_window is not None:
-            params.smoothing_window = smoothing_window
-        if velocity_threshold is not None:
-            params.velocity_threshold = velocity_threshold
-        if min_contact_frames is not None:
-            params.min_contact_frames = min_contact_frames
-        if visibility_threshold is not None:
-            params.visibility_threshold = visibility_threshold
+        params = _apply_expert_overrides(
+            params,
+            smoothing_window,
+            velocity_threshold,
+            min_contact_frames,
+            visibility_threshold,
+        )

         # Show selected parameters if verbose
         if verbose:
-            print("\n" + "=" * 60)
-            print("AUTO-TUNED PARAMETERS")
-            print("=" * 60)
-            print(f"Video FPS: {video.fps:.2f}")
-            print(
-                f"Tracking quality: {characteristics.tracking_quality} "
-                f"(avg visibility: {characteristics.avg_visibility:.2f})"
-            )
-            print(f"Quality preset: {quality_preset.value}")
-            print("\nSelected parameters:")
-            print(f" smoothing_window: {params.smoothing_window}")
-            print(f" polyorder: {params.polyorder}")
-            print(f" velocity_threshold: {params.velocity_threshold:.4f}")
-            print(f" min_contact_frames: {params.min_contact_frames}")
-            print(f" visibility_threshold: {params.visibility_threshold}")
-            print(f" detection_confidence: {params.detection_confidence}")
-            print(f" tracking_confidence: {params.tracking_confidence}")
-            print(f" outlier_rejection: {params.outlier_rejection}")
-            print(f" bilateral_filter: {params.bilateral_filter}")
-            print(f" use_curvature: {params.use_curvature}")
-            print("=" * 60 + "\n")
+            _print_verbose_parameters(video, characteristics, quality_preset, params)

         # Apply smoothing with auto-tuned parameters
-        if params.outlier_rejection or params.bilateral_filter:
-            if verbose:
-                if params.outlier_rejection:
-                    print("Smoothing landmarks with outlier rejection...")
-                if params.bilateral_filter:
-                    print("Using bilateral temporal filter...")
-            smoothed_landmarks = smooth_landmarks_advanced(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-                use_outlier_rejection=params.outlier_rejection,
-                use_bilateral=params.bilateral_filter,
-            )
-        else:
-            if verbose:
-                print("Smoothing landmarks...")
-            smoothed_landmarks = smooth_landmarks(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
+        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)

         # Extract vertical positions from feet
         if verbose:
             print("Extracting foot positions...")
-
-        position_list: list[float] = []
-        visibilities_list: list[float] = []
-
-        for frame_landmarks in smoothed_landmarks:
-            if frame_landmarks:
-                _, foot_y = compute_average_foot_position(frame_landmarks)
-                position_list.append(foot_y)
-
-                # Average visibility of foot landmarks
-                foot_vis = []
-                for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-                    if key in frame_landmarks:
-                        foot_vis.append(frame_landmarks[key][2])
-                visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
-            else:
-                position_list.append(position_list[-1] if position_list else 0.5)
-                visibilities_list.append(0.0)
-
-        vertical_positions: np.ndarray = np.array(position_list)
-        visibilities: np.ndarray = np.array(visibilities_list)
+        vertical_positions, visibilities = _extract_vertical_positions(
+            smoothed_landmarks
+        )

         # Detect ground contact
         contact_states = detect_ground_contact(
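One behavioral detail worth noting in the extracted _extract_vertical_positions: frames with no landmarks reuse the previous foot position (or 0.5 before any detection) and are marked with visibility 0.0. A sketch, assuming compute_average_foot_position accepts the same {name: (x, y, visibility)} mapping implied above:

```python
# Assumed landmark shape: {name: (x, y, visibility)}, as implied by the
# frame_landmarks[key][2] visibility lookup in the helper; illustrative only.
from kinemotion.api import _extract_vertical_positions

landmarks = [
    {"left_ankle": (0.48, 0.91, 0.99), "right_ankle": (0.52, 0.90, 0.97)},
    {},  # tracking lost on this frame
]
ys, vis = _extract_vertical_positions(landmarks)
# ys[1] == ys[0]  (last known foot position is carried forward)
# vis[1] == 0.0   (missing frames are marked invisible)
```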
@@ -273,41 +473,17 @@ def process_video(
             kinematic_correction_factor=1.0,
         )

-        # Save JSON if requested
-        if json_output:
-            import json
-
-            output_path = Path(json_output)
-            output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
-            if verbose:
-                print(f"Metrics written to: {json_output}")
-
-        # Generate debug video if requested
-        if output_video:
-            if verbose:
-                print(f"Generating debug video: {output_video}")
-
-            with DebugOverlayRenderer(
-                output_video,
-                video.width,
-                video.height,
-                video.display_width,
-                video.display_height,
-                video.fps,
-            ) as renderer:
-                for i, frame in enumerate(frames):
-                    annotated = renderer.render_frame(
-                        frame,
-                        smoothed_landmarks[i],
-                        contact_states[i],
-                        i,
-                        metrics,
-                        use_com=False,
-                    )
-                    renderer.write_frame(annotated)
-
-            if verbose:
-                print(f"Debug video saved: {output_video}")
+        # Generate outputs (JSON and debug video)
+        _generate_outputs(
+            metrics,
+            json_output,
+            output_video,
+            frames,
+            smoothed_landmarks,
+            contact_states,
+            video,
+            verbose,
+        )

         if verbose:
             print("Analysis complete!")
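With the helpers in place, process_video reads as a straight pipeline: parse preset, pick confidences, track, auto-tune, apply overrides, smooth, extract positions, detect contact, compute metrics, emit outputs. A hypothetical call to the public entry point; its exact signature is not part of this diff, but every parameter name below appears inside the function body above:

```python
from kinemotion.api import process_video

# Hypothetical invocation; the signature is assumed from the parameter names
# used inside process_video (quality, expert overrides, output paths, verbose).
process_video(
    "drop_jump.mp4",
    quality="balanced",         # routed through _parse_quality_preset
    detection_confidence=0.55,  # expert override; else preset default (0.3/0.5/0.6)
    smoothing_window=11,        # forwarded to _apply_expert_overrides
    json_output="metrics.json", # written by _generate_outputs
    output_video="debug.mp4",   # rendered via DebugOverlayRenderer
    verbose=True,
)
```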
{kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.10.3
+Version: 0.10.4
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
{kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=Z85xg29NA-r4IjrSbAkJpMFigyxACFGUb-37AiMp6YY,350
-kinemotion/api.py,sha256=
+kinemotion/api.py,sha256=2MsiHsmmxfpvhHIbDXcZpvsCLROKi4MV8LQpKu2r_a8,20078
 kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
 kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
 kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQIdw,10508
@@ -13,8 +13,8 @@ kinemotion/dropjump/cli.py,sha256=C6v6E3g1W-KNFc0xUzSjg4wKve1WsPxKvUBJV7LiMNI,26
 kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
 kinemotion/dropjump/kinematics.py,sha256=4G_7_KWnXiT09G9BduQNIgFtxvwjo2RyH1sP9SU3hSE,17949
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.10.3.dist-info/METADATA,sha256=
-kinemotion-0.10.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kinemotion-0.10.3.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.10.3.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.10.3.dist-info/RECORD,,
+kinemotion-0.10.4.dist-info/METADATA,sha256=ExH8OJR5HuQYGhZc7S7lKVJiOtQ03-gWdkkz-zf5Gh4,20333
+kinemotion-0.10.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kinemotion-0.10.4.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.10.4.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.10.4.dist-info/RECORD,,
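For anyone auditing the release, each RECORD row is path,sha256=<hash>,size, where the hash is the urlsafe-base64 SHA-256 digest of the file with padding stripped (the wheel RECORD convention). A sketch for recomputing an entry from an unpacked wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Recompute a wheel RECORD row: urlsafe-b64 sha256 digest, no padding."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# For 0.10.4 this should reproduce the row above:
# kinemotion/api.py,sha256=2MsiHsmmxfpvhHIbDXcZpvsCLROKi4MV8LQpKu2r_a8,20078
```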
{kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/WHEEL
File without changes
{kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/entry_points.txt
File without changes
{kinemotion-0.10.3.dist-info → kinemotion-0.10.4.dist-info}/licenses/LICENSE
File without changes