kinemotion 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kinemotion might be problematic. Click here for more details.
- kinemotion/__init__.py +14 -0
- kinemotion/api.py +428 -0
- kinemotion/cli.py +20 -0
- kinemotion/core/__init__.py +40 -0
- kinemotion/core/auto_tuning.py +289 -0
- kinemotion/core/filtering.py +345 -0
- kinemotion/core/pose.py +220 -0
- kinemotion/core/smoothing.py +366 -0
- kinemotion/core/video_io.py +166 -0
- kinemotion/dropjump/__init__.py +29 -0
- kinemotion/dropjump/analysis.py +639 -0
- kinemotion/dropjump/cli.py +738 -0
- kinemotion/dropjump/debug_overlay.py +252 -0
- kinemotion/dropjump/kinematics.py +439 -0
- kinemotion/py.typed +0 -0
- kinemotion-0.10.2.dist-info/METADATA +561 -0
- kinemotion-0.10.2.dist-info/RECORD +20 -0
- kinemotion-0.10.2.dist-info/WHEEL +4 -0
- kinemotion-0.10.2.dist-info/entry_points.txt +2 -0
- kinemotion-0.10.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,738 @@
|
|
|
1
|
+
"""Command-line interface for drop jump analysis."""
|
|
2
|
+
|
|
3
|
+
import csv
|
|
4
|
+
import glob
|
|
5
|
+
import json
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
import click
|
|
11
|
+
import numpy as np
|
|
12
|
+
|
|
13
|
+
from ..api import VideoConfig, VideoResult, process_videos_bulk
|
|
14
|
+
from ..core.auto_tuning import (
|
|
15
|
+
QualityPreset,
|
|
16
|
+
analyze_video_sample,
|
|
17
|
+
auto_tune_parameters,
|
|
18
|
+
)
|
|
19
|
+
from ..core.pose import PoseTracker
|
|
20
|
+
from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
|
|
21
|
+
from ..core.video_io import VideoProcessor
|
|
22
|
+
from .analysis import (
|
|
23
|
+
compute_average_foot_position,
|
|
24
|
+
detect_ground_contact,
|
|
25
|
+
)
|
|
26
|
+
from .debug_overlay import DebugOverlayRenderer
|
|
27
|
+
from .kinematics import calculate_drop_jump_metrics
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@click.command(name="dropjump-analyze")
@click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
@click.option(
    "--output",
    "-o",
    type=click.Path(),
    help="Path for debug video output (optional)",
)
@click.option(
    "--json-output",
    "-j",
    type=click.Path(),
    help="Path for JSON metrics output (default: stdout)",
)
@click.option(
    "--drop-height",
    type=float,
    required=True,
    help=(
        "Height of drop box/platform in meters (e.g., 0.40 for 40cm box) - "
        "REQUIRED for accurate calibration"
    ),
)
@click.option(
    "--quality",
    type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
    default="balanced",
    help=(
        "Analysis quality preset: "
        "fast (quick, less precise), "
        "balanced (default, good for most cases), "
        "accurate (research-grade, slower)"
    ),
    show_default=True,
)
@click.option(
    "--verbose",
    "-v",
    is_flag=True,
    help="Show auto-selected parameters and analysis details",
)
# Batch processing options
@click.option(
    "--batch",
    is_flag=True,
    help="Enable batch processing mode for multiple videos",
)
@click.option(
    "--workers",
    type=int,
    default=4,
    help="Number of parallel workers for batch processing (default: 4)",
    show_default=True,
)
@click.option(
    "--output-dir",
    type=click.Path(),
    help="Directory for debug video outputs (batch mode only)",
)
@click.option(
    "--json-output-dir",
    type=click.Path(),
    help="Directory for JSON metrics outputs (batch mode only)",
)
@click.option(
    "--csv-summary",
    type=click.Path(),
    help="Path for CSV summary export (batch mode only)",
)
# Expert parameters (hidden in help, but always available for advanced users)
@click.option(
    "--drop-start-frame",
    type=int,
    default=None,
    help="[EXPERT] Manually specify frame where drop begins (overrides auto-detection)",
)
@click.option(
    "--smoothing-window",
    type=int,
    default=None,
    help="[EXPERT] Override auto-tuned smoothing window size",
)
@click.option(
    "--velocity-threshold",
    type=float,
    default=None,
    help="[EXPERT] Override auto-tuned velocity threshold",
)
@click.option(
    "--min-contact-frames",
    type=int,
    default=None,
    help="[EXPERT] Override auto-tuned minimum contact frames",
)
@click.option(
    "--visibility-threshold",
    type=float,
    default=None,
    help="[EXPERT] Override visibility threshold",
)
@click.option(
    "--detection-confidence",
    type=float,
    default=None,
    help="[EXPERT] Override pose detection confidence",
)
@click.option(
    "--tracking-confidence",
    type=float,
    default=None,
    help="[EXPERT] Override pose tracking confidence",
)
def dropjump_analyze(
    video_path: tuple[str, ...],
    output: str | None,
    json_output: str | None,
    drop_height: float,
    quality: str,
    verbose: bool,
    batch: bool,
    workers: int,
    output_dir: str | None,
    json_output_dir: str | None,
    csv_summary: str | None,
    drop_start_frame: int | None,
    smoothing_window: int | None,
    velocity_threshold: float | None,
    min_contact_frames: int | None,
    visibility_threshold: float | None,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> None:
    """
    Analyze drop-jump video(s) to estimate ground contact time, flight time, and jump height.

    Uses intelligent auto-tuning to select optimal parameters based on video characteristics.
    Parameters are automatically adjusted for frame rate, tracking quality, and analysis preset.

    VIDEO_PATH: Path(s) to video file(s). Supports glob patterns in batch mode
    (e.g., "videos/*.mp4").

    Examples:

    \b
    # Single video
    kinemotion dropjump-analyze video.mp4 --drop-height 0.40

    \b
    # Batch mode with glob pattern
    kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 --workers 4

    \b
    # Batch with output directories
    kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 \\
        --json-output-dir results/ --csv-summary summary.csv
    """
    # Resolve each positional argument: glob patterns expand to their
    # matches, literal paths that exist pass through unchanged, and anything
    # matching nothing just produces a warning (analysis continues with the
    # rest).
    video_files: list[str] = []
    for pattern in video_path:
        matches = glob.glob(pattern)
        if matches:
            video_files.extend(matches)
            continue
        if Path(pattern).exists():
            # A plain file path, not a glob pattern.
            video_files.append(pattern)
            continue
        click.echo(f"Warning: No files found for pattern: {pattern}", err=True)

    # Nothing resolved at all is a hard error.
    if not video_files:
        click.echo("Error: No video files found", err=True)
        sys.exit(1)

    # Batch mode is used when requested explicitly, or implicitly whenever
    # more than one file was collected. Otherwise fall back to the original
    # single-video flow.
    if not (batch or len(video_files) > 1):
        _process_single(
            video_files[0],
            output,
            json_output,
            drop_height,
            quality,
            verbose,
            drop_start_frame,
            smoothing_window,
            velocity_threshold,
            min_contact_frames,
            visibility_threshold,
            detection_confidence,
            tracking_confidence,
        )
        return

    _process_batch(
        video_files,
        drop_height,
        quality,
        workers,
        output_dir,
        json_output_dir,
        csv_summary,
        drop_start_frame,
        smoothing_window,
        velocity_threshold,
        min_contact_frames,
        visibility_threshold,
        detection_confidence,
        tracking_confidence,
    )
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def _process_single(
    video_path: str,
    output: str | None,
    json_output: str | None,
    drop_height: float,
    quality: str,
    verbose: bool,
    drop_start_frame: int | None,
    smoothing_window: int | None,
    velocity_threshold: float | None,
    min_contact_frames: int | None,
    visibility_threshold: float | None,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> None:
    """Process a single video (original CLI behavior).

    Pipeline: open the video, track pose landmarks on every frame,
    auto-tune analysis parameters from the tracked sample, smooth the
    landmarks, detect ground-contact phases, compute drop-jump metrics,
    emit them as JSON (to ``json_output`` or stdout), and optionally
    render an annotated debug video to ``output``.

    Args:
        video_path: Path to the video file to analyze.
        output: Optional path for the annotated debug video.
        json_output: Optional path for the JSON metrics file; when None,
            the JSON is printed to stdout.
        drop_height: Drop box/platform height in meters (calibration).
        quality: Quality preset name ("fast", "balanced", or "accurate").
        verbose: When True, print the auto-tuned parameter set to stderr.
        drop_start_frame: Expert override for the drop start frame.
        smoothing_window: Expert override for the smoothing window size.
        velocity_threshold: Expert override for the velocity threshold.
        min_contact_frames: Expert override for minimum contact frames.
        visibility_threshold: Expert override for the visibility threshold.
        detection_confidence: Expert override for pose detection confidence.
        tracking_confidence: Expert override for pose tracking confidence.

    Exits the process with status 1 on any error (all progress and error
    messages go to stderr so stdout stays clean for the JSON metrics).
    """
    click.echo(f"Analyzing video: {video_path}", err=True)

    # Convert quality string to enum
    quality_preset = QualityPreset(quality.lower())

    try:
        # Initialize video processor
        with VideoProcessor(video_path) as video:
            click.echo(
                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
                f"{video.frame_count} frames",
                err=True,
            )

            # ================================================================
            # STEP 1: Auto-tune parameters based on video characteristics
            # ================================================================

            # Analyze video characteristics from a sample to determine optimal parameters
            # We'll use detection/tracking confidence from quality preset for initial tracking
            initial_detection_conf = 0.5
            initial_tracking_conf = 0.5

            # FAST trades tracking confidence for speed; ACCURATE demands more.
            if quality_preset == QualityPreset.FAST:
                initial_detection_conf = 0.3
                initial_tracking_conf = 0.3
            elif quality_preset == QualityPreset.ACCURATE:
                initial_detection_conf = 0.6
                initial_tracking_conf = 0.6

            # Override with expert values if provided
            if detection_confidence is not None:
                initial_detection_conf = detection_confidence
            if tracking_confidence is not None:
                initial_tracking_conf = tracking_confidence

            # Initialize pose tracker
            tracker = PoseTracker(
                min_detection_confidence=initial_detection_conf,
                min_tracking_confidence=initial_tracking_conf,
            )

            # Process all frames
            # NOTE: all frames are kept in memory so the optional debug video
            # can be rendered afterwards without re-reading the file.
            click.echo("Tracking pose landmarks...", err=True)
            landmarks_sequence = []
            frames = []

            bar: Any
            with click.progressbar(
                length=video.frame_count, label="Processing frames"
            ) as bar:
                while True:
                    frame = video.read_frame()
                    if frame is None:
                        break

                    frames.append(frame)
                    landmarks = tracker.process_frame(frame)
                    landmarks_sequence.append(landmarks)

                    bar.update(1)

            tracker.close()

            if not landmarks_sequence:
                click.echo("Error: No frames processed", err=True)
                sys.exit(1)

            # ================================================================
            # STEP 2: Analyze video characteristics and auto-tune parameters
            # ================================================================

            characteristics = analyze_video_sample(
                landmarks_sequence, video.fps, video.frame_count
            )

            # Auto-tune parameters based on video characteristics
            params = auto_tune_parameters(characteristics, quality_preset)

            # Apply expert overrides if provided
            if smoothing_window is not None:
                params.smoothing_window = smoothing_window
            if velocity_threshold is not None:
                params.velocity_threshold = velocity_threshold
            if min_contact_frames is not None:
                params.min_contact_frames = min_contact_frames
            if visibility_threshold is not None:
                params.visibility_threshold = visibility_threshold

            # Show selected parameters if verbose
            if verbose:
                click.echo("\n" + "=" * 60, err=True)
                click.echo("AUTO-TUNED PARAMETERS", err=True)
                click.echo("=" * 60, err=True)
                click.echo(f"Video FPS: {video.fps:.2f}", err=True)
                click.echo(
                    f"Tracking quality: {characteristics.tracking_quality} "
                    f"(avg visibility: {characteristics.avg_visibility:.2f})",
                    err=True,
                )
                click.echo(f"Quality preset: {quality_preset.value}", err=True)
                click.echo("\nSelected parameters:", err=True)
                click.echo(f"  smoothing_window: {params.smoothing_window}", err=True)
                click.echo(f"  polyorder: {params.polyorder}", err=True)
                click.echo(
                    f"  velocity_threshold: {params.velocity_threshold:.4f}", err=True
                )
                click.echo(
                    f"  min_contact_frames: {params.min_contact_frames}", err=True
                )
                click.echo(
                    f"  visibility_threshold: {params.visibility_threshold}", err=True
                )
                click.echo(
                    f"  detection_confidence: {params.detection_confidence}", err=True
                )
                click.echo(
                    f"  tracking_confidence: {params.tracking_confidence}", err=True
                )
                click.echo(f"  outlier_rejection: {params.outlier_rejection}", err=True)
                click.echo(f"  bilateral_filter: {params.bilateral_filter}", err=True)
                click.echo(f"  use_curvature: {params.use_curvature}", err=True)
                click.echo("=" * 60 + "\n", err=True)

            # ================================================================
            # STEP 3: Apply smoothing with auto-tuned parameters
            # ================================================================

            # Smooth landmarks using auto-tuned parameters
            if params.outlier_rejection or params.bilateral_filter:
                if params.outlier_rejection:
                    click.echo(
                        "Smoothing landmarks with outlier rejection...", err=True
                    )
                if params.bilateral_filter:
                    click.echo(
                        "Using bilateral temporal filter for edge-preserving smoothing...",
                        err=True,
                    )
                smoothed_landmarks = smooth_landmarks_advanced(
                    landmarks_sequence,
                    window_length=params.smoothing_window,
                    polyorder=params.polyorder,
                    use_outlier_rejection=params.outlier_rejection,
                    use_bilateral=params.bilateral_filter,
                )
            else:
                click.echo("Smoothing landmarks...", err=True)
                smoothed_landmarks = smooth_landmarks(
                    landmarks_sequence,
                    window_length=params.smoothing_window,
                    polyorder=params.polyorder,
                )

            # Extract vertical positions from feet
            click.echo("Extracting foot positions...", err=True)

            position_list: list[float] = []
            visibilities_list: list[float] = []

            for frame_landmarks in smoothed_landmarks:
                if frame_landmarks:
                    # Use average foot position
                    _, foot_y = compute_average_foot_position(frame_landmarks)
                    position_list.append(foot_y)

                    # Average visibility of foot landmarks
                    # (index [2] of each landmark tuple is its visibility score
                    # — presumably MediaPipe-style (x, y, visibility); confirm
                    # against core.pose)
                    foot_vis = []
                    for key in [
                        "left_ankle",
                        "right_ankle",
                        "left_heel",
                        "right_heel",
                    ]:
                        if key in frame_landmarks:
                            foot_vis.append(frame_landmarks[key][2])
                    visibilities_list.append(
                        float(np.mean(foot_vis)) if foot_vis else 0.0
                    )
                else:
                    # Use previous position if available, otherwise default
                    # (0.5 — assumes normalized image coordinates; mid-frame)
                    position_list.append(position_list[-1] if position_list else 0.5)
                    visibilities_list.append(0.0)

            vertical_positions: np.ndarray = np.array(position_list)
            visibilities: np.ndarray = np.array(visibilities_list)

            # Detect ground contact using auto-tuned parameters
            contact_states = detect_ground_contact(
                vertical_positions,
                velocity_threshold=params.velocity_threshold,
                min_contact_frames=params.min_contact_frames,
                visibility_threshold=params.visibility_threshold,
                visibilities=visibilities,
                window_length=params.smoothing_window,
                polyorder=params.polyorder,
            )

            # Calculate metrics
            click.echo("Calculating metrics...", err=True)
            click.echo(
                f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
                err=True,
            )
            metrics = calculate_drop_jump_metrics(
                contact_states,
                vertical_positions,
                video.fps,
                drop_height_m=drop_height,
                drop_start_frame=drop_start_frame,
                velocity_threshold=params.velocity_threshold,
                smoothing_window=params.smoothing_window,
                polyorder=params.polyorder,
                use_curvature=params.use_curvature,
                kinematic_correction_factor=1.0,  # Always 1.0 now (no experimental correction)
            )

            # Output metrics as JSON
            metrics_dict = metrics.to_dict()
            metrics_json = json.dumps(metrics_dict, indent=2)

            # Metrics go to the JSON file when requested, otherwise to stdout
            # (the only thing written to stdout in this command).
            if json_output:
                output_path = Path(json_output)
                output_path.write_text(metrics_json)
                click.echo(f"Metrics written to: {json_output}", err=True)
            else:
                click.echo(metrics_json)

            # Generate debug video if requested
            if output:
                click.echo(f"Generating debug video: {output}", err=True)
                # Anamorphic sources: encoded and display dimensions differ,
                # and the renderer outputs at display dimensions.
                if (
                    video.display_width != video.width
                    or video.display_height != video.height
                ):
                    click.echo(
                        f"Source video encoded: {video.width}x{video.height}",
                        err=True,
                    )
                    click.echo(
                        f"Output dimensions: {video.display_width}x{video.display_height} "
                        f"(respecting display aspect ratio)",
                        err=True,
                    )
                else:
                    click.echo(
                        f"Output dimensions: {video.width}x{video.height} "
                        f"(matching source video aspect ratio)",
                        err=True,
                    )
                with DebugOverlayRenderer(
                    output,
                    video.width,
                    video.height,
                    video.display_width,
                    video.display_height,
                    video.fps,
                ) as renderer:
                    render_bar: Any
                    with click.progressbar(
                        length=len(frames), label="Rendering frames"
                    ) as render_bar:
                        for i, frame in enumerate(frames):
                            annotated = renderer.render_frame(
                                frame,
                                smoothed_landmarks[i],
                                contact_states[i],
                                i,
                                metrics,
                                use_com=False,
                            )
                            renderer.write_frame(annotated)
                            render_bar.update(1)

                click.echo(f"Debug video saved: {output}", err=True)

        click.echo("Analysis complete!", err=True)

    except Exception as e:
        # Broad catch is deliberate at this CLI boundary: report and exit
        # non-zero instead of dumping a traceback at the user.
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)
|
|
538
|
+
|
|
539
|
+
|
|
540
|
+
def _process_batch(
    video_files: list[str],
    drop_height: float,
    quality: str,
    workers: int,
    output_dir: str | None,
    json_output_dir: str | None,
    csv_summary: str | None,
    drop_start_frame: int | None,
    smoothing_window: int | None,
    velocity_threshold: float | None,
    min_contact_frames: int | None,
    visibility_threshold: float | None,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> None:
    """Process multiple videos in batch mode using parallel processing.

    Builds one ``VideoConfig`` per input file, fans them out to
    ``process_videos_bulk`` with ``workers`` parallel workers, prints a
    per-video progress line and a final summary of average metrics, and
    optionally writes a per-video CSV summary.

    Args:
        video_files: Resolved list of video file paths to analyze.
        drop_height: Drop box/platform height in meters (calibration).
        quality: Quality preset name ("fast", "balanced", or "accurate").
        workers: Number of parallel workers.
        output_dir: Directory for debug video outputs, or None to skip them.
        json_output_dir: Directory for JSON metrics outputs, or None to skip.
        csv_summary: Path for the CSV summary export, or None to skip it.
        drop_start_frame: Expert override for the drop start frame.
        smoothing_window: Expert override for the smoothing window size.
        velocity_threshold: Expert override for the velocity threshold.
        min_contact_frames: Expert override for minimum contact frames.
        visibility_threshold: Expert override for the visibility threshold.
        detection_confidence: Expert override for pose detection confidence.
        tracking_confidence: Expert override for pose tracking confidence.

    All human-readable output goes to stderr.
    """
    click.echo(
        f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True
    )
    click.echo("=" * 70, err=True)

    # Create output directories if specified
    if output_dir:
        Path(output_dir).mkdir(parents=True, exist_ok=True)
        click.echo(f"Debug videos will be saved to: {output_dir}", err=True)

    if json_output_dir:
        Path(json_output_dir).mkdir(parents=True, exist_ok=True)
        click.echo(f"JSON metrics will be saved to: {json_output_dir}", err=True)

    # Build configurations for each video; per-video output paths are
    # derived from the video's stem name.
    configs: list[VideoConfig] = []
    for video_file in video_files:
        video_name = Path(video_file).stem

        debug_video = None
        if output_dir:
            debug_video = str(Path(output_dir) / f"{video_name}_debug.mp4")

        json_file = None
        if json_output_dir:
            json_file = str(Path(json_output_dir) / f"{video_name}.json")

        config = VideoConfig(
            video_path=video_file,
            drop_height=drop_height,
            quality=quality,
            output_video=debug_video,
            json_output=json_file,
            drop_start_frame=drop_start_frame,
            smoothing_window=smoothing_window,
            velocity_threshold=velocity_threshold,
            min_contact_frames=min_contact_frames,
            visibility_threshold=visibility_threshold,
            detection_confidence=detection_confidence,
            tracking_confidence=tracking_confidence,
        )
        configs.append(config)

    # Progress callback: invoked once per finished video (success or not).
    completed = 0

    def show_progress(result: VideoResult) -> None:
        nonlocal completed
        completed += 1
        status = "✓" if result.success else "✗"
        video_name = Path(result.video_path).name
        click.echo(
            f"[{completed}/{len(configs)}] {status} {video_name} "
            f"({result.processing_time:.1f}s)",
            err=True,
        )
        if not result.success:
            click.echo(f"  Error: {result.error}", err=True)

    # Process all videos
    click.echo("\nProcessing videos...", err=True)
    results = process_videos_bulk(
        configs, max_workers=workers, progress_callback=show_progress
    )

    # Generate summary
    click.echo("\n" + "=" * 70, err=True)
    click.echo("BATCH PROCESSING SUMMARY", err=True)
    click.echo("=" * 70, err=True)

    successful = [r for r in results if r.success]
    failed = [r for r in results if not r.success]

    click.echo(f"Total videos: {len(results)}", err=True)
    click.echo(f"Successful: {len(successful)}", err=True)
    click.echo(f"Failed: {len(failed)}", err=True)

    if successful:
        # Average each metric only over the videos that actually produced it.
        with_gct = [
            r
            for r in successful
            if r.metrics and r.metrics.ground_contact_time is not None
        ]
        with_flight = [
            r for r in successful if r.metrics and r.metrics.flight_time is not None
        ]
        with_jump = [
            r for r in successful if r.metrics and r.metrics.jump_height is not None
        ]

        if with_gct:
            # Type assertion: filtering ensures metrics and ground_contact_time are not None
            avg_gct = sum(
                r.metrics.ground_contact_time * 1000
                for r in with_gct
                if r.metrics and r.metrics.ground_contact_time is not None
            ) / len(with_gct)
            click.echo(f"\nAverage ground contact time: {avg_gct:.1f} ms", err=True)

        if with_flight:
            # Type assertion: filtering ensures metrics and flight_time are not None
            avg_flight = sum(
                r.metrics.flight_time * 1000
                for r in with_flight
                if r.metrics and r.metrics.flight_time is not None
            ) / len(with_flight)
            click.echo(f"Average flight time: {avg_flight:.1f} ms", err=True)

        if with_jump:
            # Type assertion: filtering ensures metrics and jump_height are not None
            avg_jump = sum(
                r.metrics.jump_height
                for r in with_jump
                if r.metrics and r.metrics.jump_height is not None
            ) / len(with_jump)
            click.echo(
                f"Average jump height: {avg_jump:.3f} m ({avg_jump * 100:.1f} cm)",
                err=True,
            )

    # Export CSV summary if requested
    if csv_summary and successful:
        click.echo(f"\nExporting CSV summary to: {csv_summary}", err=True)
        Path(csv_summary).parent.mkdir(parents=True, exist_ok=True)

        with open(csv_summary, "w", newline="") as f:
            writer = csv.writer(f)

            # Header
            writer.writerow(
                [
                    "Video",
                    "Ground Contact Time (ms)",
                    "Flight Time (ms)",
                    "Jump Height (m)",
                    "Processing Time (s)",
                    "Status",
                ]
            )

            # Data rows. BUGFIX: compare against None explicitly — the old
            # truthiness checks rendered a legitimate 0.0 metric as "N/A",
            # inconsistent with the `is not None` filtering used for the
            # averages above.
            for result in results:
                if result.success and result.metrics:
                    writer.writerow(
                        [
                            Path(result.video_path).name,
                            (
                                f"{result.metrics.ground_contact_time * 1000:.1f}"
                                if result.metrics.ground_contact_time is not None
                                else "N/A"
                            ),
                            (
                                f"{result.metrics.flight_time * 1000:.1f}"
                                if result.metrics.flight_time is not None
                                else "N/A"
                            ),
                            (
                                f"{result.metrics.jump_height:.3f}"
                                if result.metrics.jump_height is not None
                                else "N/A"
                            ),
                            f"{result.processing_time:.2f}",
                            "Success",
                        ]
                    )
                else:
                    writer.writerow(
                        [
                            Path(result.video_path).name,
                            "N/A",
                            "N/A",
                            "N/A",
                            f"{result.processing_time:.2f}",
                            f"Failed: {result.error}",
                        ]
                    )

        click.echo("CSV summary written successfully", err=True)

    click.echo("\nBatch processing complete!", err=True)
|