kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kinemotion might be problematic.
- kinemotion/__init__.py +31 -6
- kinemotion/api.py +39 -598
- kinemotion/cli.py +2 -0
- kinemotion/cmj/__init__.py +5 -0
- kinemotion/cmj/analysis.py +621 -0
- kinemotion/cmj/api.py +563 -0
- kinemotion/cmj/cli.py +324 -0
- kinemotion/cmj/debug_overlay.py +457 -0
- kinemotion/cmj/joint_angles.py +307 -0
- kinemotion/cmj/kinematics.py +360 -0
- kinemotion/cmj/metrics_validator.py +767 -0
- kinemotion/cmj/validation_bounds.py +341 -0
- kinemotion/core/__init__.py +28 -0
- kinemotion/core/auto_tuning.py +71 -37
- kinemotion/core/cli_utils.py +60 -0
- kinemotion/core/debug_overlay_utils.py +385 -0
- kinemotion/core/determinism.py +83 -0
- kinemotion/core/experimental.py +103 -0
- kinemotion/core/filtering.py +9 -6
- kinemotion/core/formatting.py +75 -0
- kinemotion/core/metadata.py +231 -0
- kinemotion/core/model_downloader.py +172 -0
- kinemotion/core/pipeline_utils.py +433 -0
- kinemotion/core/pose.py +298 -141
- kinemotion/core/pose_landmarks.py +67 -0
- kinemotion/core/quality.py +393 -0
- kinemotion/core/smoothing.py +250 -154
- kinemotion/core/timing.py +247 -0
- kinemotion/core/types.py +42 -0
- kinemotion/core/validation.py +201 -0
- kinemotion/core/video_io.py +135 -50
- kinemotion/dropjump/__init__.py +1 -1
- kinemotion/dropjump/analysis.py +367 -182
- kinemotion/dropjump/api.py +665 -0
- kinemotion/dropjump/cli.py +156 -466
- kinemotion/dropjump/debug_overlay.py +136 -206
- kinemotion/dropjump/kinematics.py +232 -255
- kinemotion/dropjump/metrics_validator.py +240 -0
- kinemotion/dropjump/validation_bounds.py +157 -0
- kinemotion/models/__init__.py +0 -0
- kinemotion/models/pose_landmarker_lite.task +0 -0
- kinemotion-0.67.0.dist-info/METADATA +726 -0
- kinemotion-0.67.0.dist-info/RECORD +47 -0
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
- kinemotion-0.10.6.dist-info/METADATA +0 -561
- kinemotion-0.10.6.dist-info/RECORD +0 -20
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
kinemotion/cmj/api.py
ADDED
@@ -0,0 +1,563 @@

"""Public API for CMJ (Counter Movement Jump) video analysis."""

import json
import time
from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path

import numpy as np
from numpy.typing import NDArray

from ..core.auto_tuning import (
    AnalysisParameters,
    QualityPreset,
    analyze_video_sample,
    auto_tune_parameters,
)
from ..core.filtering import reject_outliers
from ..core.metadata import (
    AlgorithmConfig,
    DetectionConfig,
    ProcessingInfo,
    ResultMetadata,
    SmoothingConfig,
    VideoInfo,
    create_timestamp,
    get_kinemotion_version,
)
from ..core.pipeline_utils import (
    apply_expert_overrides,
    apply_smoothing,
    convert_timer_to_stage_names,
    determine_confidence_levels,
    extract_vertical_positions,
    parse_quality_preset,
    print_verbose_parameters,
    process_all_frames,
    process_videos_bulk_generic,
)
from ..core.pose import PoseTracker
from ..core.quality import QualityAssessment, assess_jump_quality
from ..core.timing import PerformanceTimer, Timer
from ..core.validation import ValidationResult
from ..core.video_io import VideoProcessor
from .analysis import compute_signed_velocity, detect_cmj_phases
from .debug_overlay import CMJDebugOverlayRenderer
from .kinematics import CMJMetrics, calculate_cmj_metrics
from .metrics_validator import CMJMetricsValidator


@dataclass
class AnalysisOverrides:
    """Optional overrides for analysis parameters.

    Allows fine-tuning of specific analysis parameters beyond quality presets.
    If None, values will be determined by the quality preset.
    """

    smoothing_window: int | None = None
    velocity_threshold: float | None = None
    min_contact_frames: int | None = None
    visibility_threshold: float | None = None


def _generate_debug_video(
    output_video: str,
    frames: list[NDArray[np.uint8]],
    frame_indices: list[int],
    smoothed_landmarks: list,
    metrics: CMJMetrics,
    video_fps: float,
    timer: Timer,
    verbose: bool,
) -> None:
    """Generate debug video with CMJ analysis overlay."""
    if verbose:
        print(f"Generating debug video: {output_video}")

    debug_h, debug_w = frames[0].shape[:2]
    step = max(1, int(video_fps / 30.0))
    debug_fps = video_fps / step

    with timer.measure("debug_video_generation"):
        with CMJDebugOverlayRenderer(
            output_video,
            debug_w,
            debug_h,
            debug_w,
            debug_h,
            debug_fps,
            timer=timer,
        ) as renderer:
            for frame, idx in zip(frames, frame_indices, strict=True):
                annotated = renderer.render_frame(frame, smoothed_landmarks[idx], idx, metrics)
                renderer.write_frame(annotated)

    if verbose:
        print(f"Debug video saved: {output_video}")


def _save_metrics_to_json(
    metrics: CMJMetrics, json_output: str, timer: Timer, verbose: bool
) -> None:
    """Save metrics to JSON file."""
    with timer.measure("json_serialization"):
        output_path = Path(json_output)
        metrics_dict = metrics.to_dict()
        json_str = json.dumps(metrics_dict, indent=2)
        output_path.write_text(json_str)

    if verbose:
        print(f"Metrics written to: {json_output}")


def _print_timing_summary(start_time: float, timer: Timer, metrics: CMJMetrics) -> None:
    """Print verbose timing summary and metrics."""
    total_time = time.time() - start_time
    stage_times = convert_timer_to_stage_names(timer.get_metrics())

    print("\n=== Timing Summary ===")
    for stage, duration in stage_times.items():
        percentage = (duration / total_time) * 100
        dur_ms = duration * 1000
        print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
    total_ms = total_time * 1000
    print(f"{'Total':.<40} {total_ms:>6.0f}ms (100.0%)")
    print()

    print(f"\nJump height: {metrics.jump_height:.3f}m")
    print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
    print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")


def _print_quality_warnings(quality_result: QualityAssessment, verbose: bool) -> None:
    """Print quality warnings if present."""
    if verbose and quality_result.warnings:
        print("\n⚠️ Quality Warnings:")
        for warning in quality_result.warnings:
            print(f" - {warning}")
        print()


def _print_validation_results(validation_result: ValidationResult, verbose: bool) -> None:
    """Print validation issues if present."""
    if verbose and validation_result.issues:
        print("\n⚠️ Validation Results:")
        for issue in validation_result.issues:
            print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")


def _create_algorithm_config(params: AnalysisParameters) -> AlgorithmConfig:
    """Create algorithm configuration from parameters."""
    return AlgorithmConfig(
        detection_method="backward_search",
        tracking_method="mediapipe_pose",
        model_complexity=1,
        smoothing=SmoothingConfig(
            window_size=params.smoothing_window,
            polynomial_order=params.polyorder,
            use_bilateral_filter=params.bilateral_filter,
            use_outlier_rejection=params.outlier_rejection,
        ),
        detection=DetectionConfig(
            velocity_threshold=params.velocity_threshold,
            min_contact_frames=params.min_contact_frames,
            visibility_threshold=params.visibility_threshold,
            use_curvature_refinement=params.use_curvature,
        ),
        drop_detection=None,
    )


def _create_video_info(video_path: str, video: VideoProcessor) -> VideoInfo:
    """Create video information metadata."""
    return VideoInfo(
        source_path=video_path,
        fps=video.fps,
        width=video.width,
        height=video.height,
        duration_s=video.frame_count / video.fps,
        frame_count=video.frame_count,
        codec=video.codec,
    )


def _create_processing_info(
    start_time: float, quality_preset: QualityPreset, timer: Timer
) -> ProcessingInfo:
    """Create processing information metadata."""
    processing_time = time.time() - start_time
    stage_times = convert_timer_to_stage_names(timer.get_metrics())

    return ProcessingInfo(
        version=get_kinemotion_version(),
        timestamp=create_timestamp(),
        quality_preset=quality_preset.value,
        processing_time_s=processing_time,
        timing_breakdown=stage_times,
    )


def _create_result_metadata(
    quality_result: QualityAssessment,
    video_info: VideoInfo,
    processing_info: ProcessingInfo,
    algorithm_config: AlgorithmConfig,
) -> ResultMetadata:
    """Create result metadata from components."""
    return ResultMetadata(
        quality=quality_result,
        video=video_info,
        processing=processing_info,
        algorithm=algorithm_config,
    )


def _run_pose_tracking(
    video: VideoProcessor,
    quality_preset: QualityPreset,
    detection_confidence: float | None,
    tracking_confidence: float | None,
    pose_tracker: PoseTracker | None,
    verbose: bool,
    timer: Timer,
) -> tuple[list[NDArray[np.uint8]], list, list[int]]:
    """Initialize tracker and process all frames."""
    if verbose:
        print(
            f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
            f"{video.frame_count} frames"
        )

    det_conf, track_conf = determine_confidence_levels(
        quality_preset, detection_confidence, tracking_confidence
    )

    if verbose:
        print("Processing all frames with MediaPipe pose tracking...")

    tracker = pose_tracker or PoseTracker(
        min_detection_confidence=det_conf,
        min_tracking_confidence=track_conf,
        timer=timer,
    )
    should_close_tracker = pose_tracker is None

    return process_all_frames(video, tracker, verbose, timer, close_tracker=should_close_tracker)


def _get_tuned_parameters(
    video: VideoProcessor,
    landmarks_sequence: list,
    quality_preset: QualityPreset,
    overrides: AnalysisOverrides | None,
    verbose: bool,
    timer: Timer,
) -> AnalysisParameters:
    """Analyze sample and tune parameters with expert overrides."""
    with timer.measure("parameter_auto_tuning"):
        characteristics = analyze_video_sample(landmarks_sequence, video.fps, video.frame_count)
        params = auto_tune_parameters(characteristics, quality_preset)
        params = apply_expert_overrides(
            params,
            overrides.smoothing_window if overrides else None,
            overrides.velocity_threshold if overrides else None,
            overrides.min_contact_frames if overrides else None,
            overrides.visibility_threshold if overrides else None,
        )

    if verbose:
        print_verbose_parameters(video, characteristics, quality_preset, params)

    return params


def _run_kinematic_analysis(
    video: VideoProcessor,
    smoothed_landmarks: list,
    params: AnalysisParameters,
    verbose: bool,
    timer: Timer,
) -> tuple[CMJMetrics, NDArray[np.float64], NDArray[np.float64]]:
    """Extract positions, detect phases, and calculate metrics."""
    if verbose:
        print("Extracting vertical positions (Hip and Foot)...")
    with timer.measure("vertical_position_extraction"):
        vertical_positions, visibilities = extract_vertical_positions(
            smoothed_landmarks, target="hip"
        )
        foot_positions, _ = extract_vertical_positions(smoothed_landmarks, target="foot")

    if verbose:
        print("Detecting CMJ phases...")
    with timer.measure("phase_detection"):
        phases = detect_cmj_phases(
            vertical_positions,
            video.fps,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
            landing_positions=foot_positions,
            timer=timer,
        )

    if phases is None:
        raise ValueError("Could not detect CMJ phases in video")

    standing_end, lowest_point, takeoff_frame, landing_frame = phases

    if verbose:
        print("Calculating metrics...")
    with timer.measure("metrics_calculation"):
        velocities = compute_signed_velocity(
            vertical_positions,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )
        metrics = calculate_cmj_metrics(
            vertical_positions,
            velocities,
            standing_end,
            lowest_point,
            takeoff_frame,
            landing_frame,
            video.fps,
            tracking_method="hip_hybrid",
        )

    return metrics, vertical_positions, visibilities


def _finalize_analysis_results(
    metrics: CMJMetrics,
    video: VideoProcessor,
    video_path: str,
    vertical_positions: NDArray[np.float64],
    visibilities: NDArray[np.float64],
    params: AnalysisParameters,
    quality_preset: QualityPreset,
    start_time: float,
    timer: Timer,
    verbose: bool,
) -> None:
    """Assess quality, validate metrics, and attach metadata."""
    if verbose:
        print("Assessing tracking quality...")
    with timer.measure("quality_assessment"):
        _, outlier_mask = reject_outliers(
            vertical_positions,
            use_ransac=True,
            use_median=True,
            interpolate=False,
        )
        quality_result = assess_jump_quality(
            visibilities=visibilities,
            positions=vertical_positions,
            outlier_mask=outlier_mask,
            fps=video.fps,
            phases_detected=True,
            phase_count=4,
        )

    _print_quality_warnings(quality_result, verbose)

    with timer.measure("metrics_validation"):
        validator = CMJMetricsValidator()
        validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
        metrics.validation_result = validation_result

    algorithm_config = _create_algorithm_config(params)
    video_info = _create_video_info(video_path, video)
    processing_info = _create_processing_info(start_time, quality_preset, timer)
    result_metadata = _create_result_metadata(
        quality_result, video_info, processing_info, algorithm_config
    )
    metrics.result_metadata = result_metadata

    _print_validation_results(validation_result, verbose)


@dataclass
class CMJVideoConfig:
    """Configuration for processing a single CMJ video."""

    video_path: str
    quality: str = "balanced"
    output_video: str | None = None
    json_output: str | None = None
    overrides: AnalysisOverrides | None = None
    detection_confidence: float | None = None
    tracking_confidence: float | None = None


@dataclass
class CMJVideoResult:
    """Result of processing a single CMJ video."""

    video_path: str
    success: bool
    metrics: CMJMetrics | None = None
    error: str | None = None
    processing_time: float = 0.0


def process_cmj_video(
    video_path: str,
    quality: str = "balanced",
    output_video: str | None = None,
    json_output: str | None = None,
    overrides: AnalysisOverrides | None = None,
    detection_confidence: float | None = None,
    tracking_confidence: float | None = None,
    verbose: bool = False,
    timer: Timer | None = None,
    pose_tracker: PoseTracker | None = None,
) -> CMJMetrics:
    """
    Process a single CMJ video and return metrics.

    CMJ (Counter Movement Jump) is performed at floor level without a drop box.
    Athletes start standing, perform a countermovement (eccentric phase), then
    jump upward (concentric phase).

    Args:
        video_path: Path to the input video file
        quality: Analysis quality preset ("fast", "balanced", or "accurate")
        output_video: Optional path for debug video output
        json_output: Optional path for JSON metrics output
        overrides: Optional AnalysisOverrides with parameter fine-tuning
        detection_confidence: Optional override for pose detection confidence
        tracking_confidence: Optional override for pose tracking confidence
        verbose: Print processing details
        timer: Optional Timer for measuring operations
        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)

    Returns:
        CMJMetrics object containing analysis results

    Raises:
        ValueError: If video cannot be processed or parameters are invalid
        FileNotFoundError: If video file does not exist
    """
    if not Path(video_path).exists():
        raise FileNotFoundError(f"Video file not found: {video_path}")

    start_time = time.time()
    timer = timer or PerformanceTimer()
    quality_preset = parse_quality_preset(quality)

    with timer.measure("video_initialization"):
        with VideoProcessor(video_path, timer=timer) as video:
            # 1. Pose Tracking
            frames, landmarks_sequence, frame_indices = _run_pose_tracking(
                video,
                quality_preset,
                detection_confidence,
                tracking_confidence,
                pose_tracker,
                verbose,
                timer,
            )

            # 2. Parameters & Smoothing
            params = _get_tuned_parameters(
                video, landmarks_sequence, quality_preset, overrides, verbose, timer
            )
            smoothed_landmarks = apply_smoothing(landmarks_sequence, params, verbose, timer)

            # 3. Kinematic Analysis
            metrics, vertical_positions, visibilities = _run_kinematic_analysis(
                video, smoothed_landmarks, params, verbose, timer
            )

            # 4. Debug Video Generation (Optional)
            if output_video:
                _generate_debug_video(
                    output_video,
                    frames,
                    frame_indices,
                    smoothed_landmarks,
                    metrics,
                    video.fps,
                    timer,
                    verbose,
                )

            # 5. Finalization (Quality, Metadata, Validation)
            _finalize_analysis_results(
                metrics,
                video,
                video_path,
                vertical_positions,
                visibilities,
                params,
                quality_preset,
                start_time,
                timer,
                verbose,
            )

    if json_output:
        _save_metrics_to_json(metrics, json_output, timer, verbose)

    if verbose:
        _print_timing_summary(start_time, timer, metrics)

    return metrics


def process_cmj_videos_bulk(
    configs: list[CMJVideoConfig],
    max_workers: int = 4,
    progress_callback: Callable[[CMJVideoResult], None] | None = None,
) -> list[CMJVideoResult]:
    """
    Process multiple CMJ videos in parallel.
    """

    def error_factory(video_path: str, error_msg: str) -> CMJVideoResult:
        return CMJVideoResult(video_path=video_path, success=False, error=error_msg)

    return process_videos_bulk_generic(
        configs,
        _process_cmj_video_wrapper,
        error_factory,
        max_workers,
        progress_callback,
    )


def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
    """Wrapper function for parallel CMJ processing."""
    start_time = time.time()

    try:
        metrics = process_cmj_video(
            video_path=config.video_path,
            quality=config.quality,
            output_video=config.output_video,
            json_output=config.json_output,
            overrides=config.overrides,
            detection_confidence=config.detection_confidence,
            tracking_confidence=config.tracking_confidence,
            verbose=False,
        )

        processing_time = time.time() - start_time

        return CMJVideoResult(
            video_path=config.video_path,
            success=True,
            metrics=metrics,
            processing_time=processing_time,
        )

    except Exception as e:
        processing_time = time.time() - start_time

        return CMJVideoResult(
            video_path=config.video_path,
            success=False,
            error=str(e),
            processing_time=processing_time,
        )
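For orientation, a minimal usage sketch of this new module follows. It relies only on the names and signatures shown in the added file above (process_cmj_video, AnalysisOverrides, CMJVideoConfig, CMJVideoResult, process_cmj_videos_bulk); the video paths, preset choice, override value, and worker count are illustrative placeholders, not values taken from the package.

from kinemotion.cmj.api import (
    AnalysisOverrides,
    CMJVideoConfig,
    CMJVideoResult,
    process_cmj_video,
    process_cmj_videos_bulk,
)

# Single video: auto-tuned parameters plus one expert override (placeholder paths).
metrics = process_cmj_video(
    "athlete_cmj.mp4",
    quality="accurate",
    json_output="athlete_cmj_metrics.json",
    overrides=AnalysisOverrides(smoothing_window=9),
    verbose=True,
)
print(metrics.jump_height, metrics.flight_time)

# Bulk: process several videos in parallel with a progress callback.
def on_done(result: CMJVideoResult) -> None:
    status = "ok" if result.success else f"failed: {result.error}"
    print(f"{result.video_path}: {status} ({result.processing_time:.1f}s)")

configs = [CMJVideoConfig(video_path=p) for p in ("a.mp4", "b.mp4")]
results = process_cmj_videos_bulk(configs, max_workers=2, progress_callback=on_done)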