kinemotion 0.70.1__py3-none-any.whl → 0.71.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinemotion/__init__.py +4 -1
- kinemotion/cmj/analysis.py +79 -30
- kinemotion/cmj/api.py +16 -39
- kinemotion/cmj/cli.py +0 -21
- kinemotion/cmj/debug_overlay.py +154 -286
- kinemotion/cmj/joint_angles.py +96 -31
- kinemotion/cmj/metrics_validator.py +30 -51
- kinemotion/cmj/validation_bounds.py +1 -18
- kinemotion/core/__init__.py +0 -2
- kinemotion/core/auto_tuning.py +91 -99
- kinemotion/core/debug_overlay_utils.py +142 -15
- kinemotion/core/experimental.py +55 -51
- kinemotion/core/filtering.py +15 -11
- kinemotion/core/overlay_constants.py +61 -0
- kinemotion/core/pose.py +67 -499
- kinemotion/core/smoothing.py +65 -51
- kinemotion/core/types.py +15 -0
- kinemotion/core/validation.py +6 -7
- kinemotion/core/video_io.py +14 -9
- kinemotion/dropjump/__init__.py +2 -2
- kinemotion/dropjump/analysis.py +67 -44
- kinemotion/dropjump/api.py +12 -44
- kinemotion/dropjump/cli.py +63 -105
- kinemotion/dropjump/debug_overlay.py +124 -65
- kinemotion/dropjump/validation_bounds.py +1 -1
- kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx +0 -0
- kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx +0 -0
- {kinemotion-0.70.1.dist-info → kinemotion-0.71.1.dist-info}/METADATA +1 -5
- kinemotion-0.71.1.dist-info/RECORD +50 -0
- kinemotion/core/rtmpose_cpu.py +0 -626
- kinemotion/core/rtmpose_wrapper.py +0 -190
- kinemotion-0.70.1.dist-info/RECORD +0 -51
- {kinemotion-0.70.1.dist-info → kinemotion-0.71.1.dist-info}/WHEEL +0 -0
- {kinemotion-0.70.1.dist-info → kinemotion-0.71.1.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.70.1.dist-info → kinemotion-0.71.1.dist-info}/licenses/LICENSE +0 -0
kinemotion/dropjump/cli.py
CHANGED
@@ -5,6 +5,7 @@ import json
 import sys
 from dataclasses import dataclass
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import click
 
@@ -19,6 +20,9 @@ from .api import (
     process_dropjump_videos_bulk,
 )
 
+if TYPE_CHECKING:
+    from .api import AnalysisOverrides
+
 
 @dataclass
 class AnalysisParameters:
@@ -31,7 +35,6 @@ class AnalysisParameters:
     visibility_threshold: float | None = None
     detection_confidence: float | None = None
     tracking_confidence: float | None = None
-    pose_backend: str | None = None
 
 
 @click.command(name="dropjump-analyze")
@@ -66,23 +69,6 @@ class AnalysisParameters:
     is_flag=True,
     help="Show auto-selected parameters and analysis details",
 )
-@click.option(
-    "--pose-backend",
-    type=click.Choice(
-        ["auto", "mediapipe", "rtmpose-cpu", "rtmpose-cuda", "rtmpose-coreml"],
-        case_sensitive=False,
-    ),
-    default="auto",
-    help=(
-        "Pose tracking backend: "
-        "auto (detect best), "
-        "mediapipe (baseline), "
-        "rtmpose-cpu (optimized CPU), "
-        "rtmpose-cuda (NVIDIA GPU), "
-        "rtmpose-coreml (Apple Silicon)"
-    ),
-    show_default=True,
-)
 # Batch processing options
 @click.option(
     "--batch",
@@ -161,7 +147,6 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
     json_output: str | None,
     quality: str,
     verbose: bool,
-    pose_backend: str,
     batch: bool,
     workers: int,
     output_dir: str | None,
@@ -231,7 +216,6 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
             json_output_dir,
             csv_summary,
             expert_params,
-            pose_backend,
         )
     else:
         # Single video mode (original behavior)
@@ -242,10 +226,37 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
             quality,
             verbose,
             expert_params,
-            pose_backend,
         )
 
 
+def _create_overrides_if_needed(params: AnalysisParameters) -> "AnalysisOverrides | None":
+    """Create AnalysisOverrides if any override parameters are set.
+
+    Args:
+        params: Expert parameters from CLI
+
+    Returns:
+        AnalysisOverrides if any relevant parameters are non-None, else None
+    """
+    from .api import AnalysisOverrides
+
+    if any(
+        [
+            params.smoothing_window is not None,
+            params.velocity_threshold is not None,
+            params.min_contact_frames is not None,
+            params.visibility_threshold is not None,
+        ]
+    ):
+        return AnalysisOverrides(
+            smoothing_window=params.smoothing_window,
+            velocity_threshold=params.velocity_threshold,
+            min_contact_frames=params.min_contact_frames,
+            visibility_threshold=params.visibility_threshold,
+        )
+    return None
+
+
 def _process_single(
     video_path: str,
     output: str | None,
@@ -253,30 +264,12 @@ def _process_single(
     quality: str,
     verbose: bool,
     expert_params: AnalysisParameters,
-    pose_backend: str,
 ) -> None:
     """Process a single video by calling the API."""
     click.echo(f"Analyzing video: {video_path}", err=True)
 
     try:
-
-        from .api import AnalysisOverrides
-
-        overrides = None
-        if any(
-            [
-                expert_params.smoothing_window is not None,
-                expert_params.velocity_threshold is not None,
-                expert_params.min_contact_frames is not None,
-                expert_params.visibility_threshold is not None,
-            ]
-        ):
-            overrides = AnalysisOverrides(
-                smoothing_window=expert_params.smoothing_window,
-                velocity_threshold=expert_params.velocity_threshold,
-                min_contact_frames=expert_params.min_contact_frames,
-                visibility_threshold=expert_params.visibility_threshold,
-            )
+        overrides = _create_overrides_if_needed(expert_params)
 
         # Call the API function (handles all processing logic)
         metrics = process_dropjump_video(
@@ -288,7 +281,6 @@ def _process_single(
             overrides=overrides,
             detection_confidence=expert_params.detection_confidence,
             tracking_confidence=expert_params.tracking_confidence,
-            pose_backend=pose_backend,
             verbose=verbose,
         )
 
@@ -332,7 +324,6 @@ def _create_video_configs(
    output_dir: str | None,
    json_output_dir: str | None,
    expert_params: AnalysisParameters,
-    pose_backend: str,
 ) -> list[DropJumpVideoConfig]:
    """Build configuration objects for each video.
 
@@ -352,24 +343,7 @@ def _create_video_configs(
             video_file, output_dir, json_output_dir
         )
 
-
-        from .api import AnalysisOverrides
-
-        overrides = None
-        if any(
-            [
-                expert_params.smoothing_window is not None,
-                expert_params.velocity_threshold is not None,
-                expert_params.min_contact_frames is not None,
-                expert_params.visibility_threshold is not None,
-            ]
-        ):
-            overrides = AnalysisOverrides(
-                smoothing_window=expert_params.smoothing_window,
-                velocity_threshold=expert_params.velocity_threshold,
-                min_contact_frames=expert_params.min_contact_frames,
-                visibility_threshold=expert_params.visibility_threshold,
-            )
+        overrides = _create_overrides_if_needed(expert_params)
 
         config = DropJumpVideoConfig(
             video_path=video_file,
@@ -380,7 +354,6 @@ def _create_video_configs(
             overrides=overrides,
             detection_confidence=expert_params.detection_confidence,
             tracking_confidence=expert_params.tracking_confidence,
-            pose_backend=expert_params.pose_backend,
         )
         configs.append(config)
 
@@ -405,35 +378,33 @@ def _compute_batch_statistics(results: list[DropJumpVideoResult]) -> None:
     click.echo(f"Failed: {len(failed)}", err=True)
 
     if successful:
-        # Calculate average metrics
-        …
+        # Calculate average metrics from results with non-None values
+        gct_values = [
+            r.metrics.ground_contact_time * 1000
+            for r in successful
+            if r.metrics and r.metrics.ground_contact_time is not None
         ]
-        …
+        flight_values = [
+            r.metrics.flight_time * 1000
+            for r in successful
+            if r.metrics and r.metrics.flight_time is not None
+        ]
+        jump_values = [
+            r.metrics.jump_height
+            for r in successful
+            if r.metrics and r.metrics.jump_height is not None
+        ]
+
+        if gct_values:
+            avg_gct = sum(gct_values) / len(gct_values)
         click.echo(f"\nAverage ground contact time: {avg_gct:.1f} ms", err=True)
 
-        if with_flight:
-            avg_flight = sum(
-                r.metrics.flight_time * 1000
-                for r in with_flight
-                if r.metrics and r.metrics.flight_time is not None
-            ) / len(with_flight)
+        if flight_values:
+            avg_flight = sum(flight_values) / len(flight_values)
         click.echo(f"Average flight time: {avg_flight:.1f} ms", err=True)
 
-        if with_jump:
-            avg_jump = sum(
-                r.metrics.jump_height
-                for r in with_jump
-                if r.metrics and r.metrics.jump_height is not None
-            ) / len(with_jump)
+        if jump_values:
+            avg_jump = sum(jump_values) / len(jump_values)
         click.echo(
             f"Average jump height: {avg_jump:.3f} m ({avg_jump * 100:.1f} cm)",
             err=True,
@@ -478,38 +449,27 @@ def _create_csv_row_from_result(result: DropJumpVideoResult) -> list[str]:
     processing_time = f"{result.processing_time:.2f}"
 
     if result.success and result.metrics:
-        return [
-            video_name,
+        metrics_data = [
             _format_time_metric(result.metrics.ground_contact_time),
             _format_time_metric(result.metrics.flight_time),
             _format_distance_metric(result.metrics.jump_height),
-            processing_time,
-            "Success",
-        ]
-    else:
-        return [
-            video_name,
-            "N/A",
-            "N/A",
-            "N/A",
-            processing_time,
-            f"Failed: {result.error}",
         ]
+        return [video_name, *metrics_data, processing_time, "Success"]
+
+    return [video_name, "N/A", "N/A", "N/A", processing_time, f"Failed: {result.error}"]
 
 
 def _write_csv_summary(
     csv_summary: str | None,
     results: list[DropJumpVideoResult],
-    successful: list[DropJumpVideoResult],
 ) -> None:
     """Write CSV summary of batch processing results.
 
     Args:
         csv_summary: Path to CSV output file
         results: All processing results
-        successful: Successful processing results
     """
-    if not csv_summary…
+    if not csv_summary:
         return
 
     click.echo(f"\nExporting CSV summary to: {csv_summary}", err=True)
@@ -545,7 +505,6 @@ def _process_batch(
     json_output_dir: str | None,
     csv_summary: str | None,
     expert_params: AnalysisParameters,
-    pose_backend: str,
 ) -> None:
     """Process multiple videos in batch mode using parallel processing."""
     click.echo(f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True)
@@ -556,7 +515,7 @@
 
     # Create video configurations
     configs = _create_video_configs(
-        video_files, quality, output_dir, json_output_dir, expert_params, pose_backend
+        video_files, quality, output_dir, json_output_dir, expert_params
     )
 
     # Progress callback
@@ -584,7 +543,6 @@
     _compute_batch_statistics(results)
 
     # Export CSV summary if requested
-
-    _write_csv_summary(csv_summary, results, successful)
+    _write_csv_summary(csv_summary, results)
 
     click.echo("\nBatch processing complete!", err=True)
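
Both `_process_single` and `_create_video_configs` previously inlined the same override-construction block; the new `_create_overrides_if_needed` helper is the single shared version. A minimal, self-contained sketch of the pattern (the stand-in classes below are simplified, not kinemotion's actual `AnalysisParameters`/`AnalysisOverrides`):

    from dataclasses import dataclass


    @dataclass
    class Params:  # simplified stand-in for kinemotion's AnalysisParameters
        smoothing_window: int | None = None
        velocity_threshold: float | None = None
        min_contact_frames: int | None = None
        visibility_threshold: float | None = None


    @dataclass
    class Overrides:  # simplified stand-in for kinemotion's AnalysisOverrides
        smoothing_window: int | None = None
        velocity_threshold: float | None = None
        min_contact_frames: int | None = None
        visibility_threshold: float | None = None


    def create_overrides_if_needed(params: Params) -> Overrides | None:
        """Return an Overrides object only when at least one field was supplied."""
        values = (
            params.smoothing_window,
            params.velocity_threshold,
            params.min_contact_frames,
            params.visibility_threshold,
        )
        if any(v is not None for v in values):
            return Overrides(*values)
        return None


    # Both the single-video path and the batch path now share one call site:
    assert create_overrides_if_needed(Params()) is None
    assert create_overrides_if_needed(Params(smoothing_window=5)) is not None

Returning `None` when no expert flag was passed lets the downstream API distinguish "use auto-tuned defaults" from "apply explicit overrides".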
kinemotion/dropjump/debug_overlay.py
CHANGED

@@ -4,61 +4,80 @@ import cv2
 import numpy as np
 
 from ..core.debug_overlay_utils import BaseDebugOverlayRenderer
+from ..core.overlay_constants import (
+    BLACK,
+    COM_CIRCLE_RADIUS,
+    COM_OUTLINE_RADIUS,
+    CYAN,
+    FOOT_CIRCLE_RADIUS,
+    FOOT_LANDMARK_RADIUS,
+    FOOT_VISIBILITY_THRESHOLD,
+    GREEN,
+    HIP_MARKER_RADIUS,
+    METRICS_BOX_WIDTH,
+    ORANGE,
+    PHASE_LABEL_LINE_HEIGHT,
+    PHASE_LABEL_START_Y,
+    RED,
+    WHITE,
+    Color,
+    LandmarkDict,
+)
 from ..core.pose import compute_center_of_mass
 from .analysis import ContactState, compute_average_foot_position
 from .kinematics import DropJumpMetrics
 
 
-class DebugOverlayRenderer(BaseDebugOverlayRenderer):
+class DropJumpDebugOverlayRenderer(BaseDebugOverlayRenderer):
     """Renders debug information on video frames."""
 
+    def _get_contact_state_color(self, contact_state: ContactState) -> Color:
+        """Get color based on ground contact state."""
+        return GREEN if contact_state == ContactState.ON_GROUND else RED
+
     def _draw_com_visualization(
         self,
         frame: np.ndarray,
-        landmarks: …,
+        landmarks: LandmarkDict,
         contact_state: ContactState,
     ) -> None:
         """Draw center of mass visualization on frame."""
         com_x, com_y, _ = compute_center_of_mass(landmarks)
-        px = int(com_x * self.width)
-        py = int(com_y * self.height)
+        px, py = self._normalize_to_pixels(com_x, com_y)
 
-        color = (…
-        cv2.circle(frame, (px, py), …
-        cv2.circle(frame, (px, py), …
+        color = self._get_contact_state_color(contact_state)
+        cv2.circle(frame, (px, py), COM_CIRCLE_RADIUS, color, -1)
+        cv2.circle(frame, (px, py), COM_OUTLINE_RADIUS, WHITE, 2)
 
         # Draw hip midpoint reference
         if "left_hip" in landmarks and "right_hip" in landmarks:
             lh_x, lh_y, _ = landmarks["left_hip"]
             rh_x, rh_y, _ = landmarks["right_hip"]
-            hip_x = …
-            …
-            cv2.…
-            cv2.line(frame, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
+            hip_x, hip_y = self._normalize_to_pixels((lh_x + rh_x) / 2, (lh_y + rh_y) / 2)
+            cv2.circle(frame, (hip_x, hip_y), HIP_MARKER_RADIUS, ORANGE, -1)
+            cv2.line(frame, (hip_x, hip_y), (px, py), ORANGE, 2)
 
     def _draw_foot_visualization(
         self,
         frame: np.ndarray,
-        landmarks: …,
+        landmarks: LandmarkDict,
         contact_state: ContactState,
     ) -> None:
         """Draw foot position visualization on frame."""
         foot_x, foot_y = compute_average_foot_position(landmarks)
-        px = int(foot_x * self.width)
-        py = int(foot_y * self.height)
+        px, py = self._normalize_to_pixels(foot_x, foot_y)
 
-        color = (…
-        cv2.circle(frame, (px, py), …
+        color = self._get_contact_state_color(contact_state)
+        cv2.circle(frame, (px, py), FOOT_CIRCLE_RADIUS, color, -1)
 
         # Draw individual foot landmarks
         foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
         for key in foot_keys:
             if key in landmarks:
                 x, y, vis = landmarks[key]
-                if vis > …
-                    lx = int(x * self.width)
-                    ly = int(y * self.height)
-                    cv2.circle(frame, (lx, ly), 5, (255, 255, 0), -1)
+                if vis > FOOT_VISIBILITY_THRESHOLD:
+                    lx, ly = self._normalize_to_pixels(x, y)
+                    cv2.circle(frame, (lx, ly), FOOT_LANDMARK_RADIUS, CYAN, -1)
 
     def _draw_phase_labels(
         self,
@@ -67,58 +86,99 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
         metrics: DropJumpMetrics,
     ) -> None:
         """Draw phase labels (ground contact, flight, peak) on frame."""
-        …
+        # Phase configurations: (start_frame, end_frame, label, color)
+        # For range-based phases (ground contact, flight)
+        range_phase_configs = [
+            (metrics.contact_start_frame, metrics.contact_end_frame, "GROUND CONTACT", GREEN),
+            (metrics.flight_start_frame, metrics.flight_end_frame, "FLIGHT PHASE", RED),
+        ]
+
+        y_offset = PHASE_LABEL_START_Y
+        for start_frame, end_frame, label, color in range_phase_configs:
+            if start_frame and end_frame and start_frame <= frame_idx <= end_frame:
+                cv2.putText(
+                    frame,
+                    label,
+                    (10, y_offset),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    0.7,
+                    color,
+                    2,
+                )
+                y_offset += PHASE_LABEL_LINE_HEIGHT
+
+        # Single-frame indicator (peak height)
+        if metrics.peak_height_frame == frame_idx:
             cv2.putText(
                 frame,
-                "…
+                "PEAK HEIGHT",
                 (10, y_offset),
                 cv2.FONT_HERSHEY_SIMPLEX,
                 0.7,
-                (0, 255, …
+                (255, 0, 255),  # Magenta
                 2,
             )
-            y_offset += 40
 
-        …
+    def _draw_info_box(
+        self,
+        frame: np.ndarray,
+        top_left: tuple[int, int],
+        bottom_right: tuple[int, int],
+        border_color: Color,
+    ) -> None:
+        """Draw a filled box with border for displaying information."""
+        cv2.rectangle(frame, top_left, bottom_right, BLACK, -1)
+        cv2.rectangle(frame, top_left, bottom_right, border_color, 2)
+
+    def _draw_metrics_summary(
+        self, frame: np.ndarray, frame_idx: int, metrics: DropJumpMetrics
+    ) -> None:
+        """Draw metrics summary in bottom right after flight phase ends."""
+        if metrics.flight_end_frame is None or frame_idx < metrics.flight_end_frame:
+            return
+
+        # Build metrics text list
+        metrics_text: list[str] = []
+
+        if metrics.ground_contact_time is not None:
+            metrics_text.append(f"Contact Time: {metrics.ground_contact_time * 1000:.0f}ms")
+
+        if metrics.flight_time is not None:
+            metrics_text.append(f"Flight Time: {metrics.flight_time * 1000:.0f}ms")
+
+        if metrics.jump_height is not None:
+            metrics_text.append(f"Jump Height: {metrics.jump_height:.3f}m")
+
+        # Calculate RSI (Reactive Strength Index)
         if (
-            metrics.…
-            and metrics.…
-            and metrics.…
+            metrics.jump_height is not None
+            and metrics.ground_contact_time is not None
+            and metrics.ground_contact_time > 0
         ):
-            cv2.putText(
-                frame,
-                "FLIGHT PHASE",
-                (10, y_offset),
-                cv2.FONT_HERSHEY_SIMPLEX,
-                0.7,
-                (0, 0, 255),
-                2,
-            )
-            y_offset += 40
+            rsi = metrics.jump_height / metrics.ground_contact_time
+            metrics_text.append(f"RSI: {rsi:.2f}")
 
-        …
+        if not metrics_text:
+            return
+
+        # Calculate box dimensions
+        box_height = len(metrics_text) * 30 + 20
+        top_left = (self.width - METRICS_BOX_WIDTH, self.height - box_height - 10)
+        bottom_right = (self.width - 10, self.height - 10)
+
+        self._draw_info_box(frame, top_left, bottom_right, GREEN)
+
+        # Draw metrics text
+        text_x = self.width - METRICS_BOX_WIDTH + 10
+        text_y = self.height - box_height + 10
+        for text in metrics_text:
+            cv2.putText(frame, text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, WHITE, 1)
+            text_y += 30
 
     def render_frame(
         self,
         frame: np.ndarray,
-        landmarks: …,
+        landmarks: LandmarkDict | None,
         contact_state: ContactState,
         frame_idx: int,
         metrics: DropJumpMetrics | None = None,
@@ -141,16 +201,17 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
         with self.timer.measure("debug_video_copy"):
             annotated = frame.copy()
 
-        …
-        # Draw landmarks
+        with self.timer.measure("debug_video_draw"):
+            # Draw skeleton and landmarks
             if landmarks:
+                self._draw_skeleton(annotated, landmarks)
                 if use_com:
                     self._draw_com_visualization(annotated, landmarks, contact_state)
                 else:
                     self._draw_foot_visualization(annotated, landmarks, contact_state)
 
             # Draw contact state
-            state_color = (…
+            state_color = self._get_contact_state_color(contact_state)
             cv2.putText(
                 annotated,
                 f"State: {contact_state.value}",
@@ -168,15 +229,13 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
                 (10, 70),
                 cv2.FONT_HERSHEY_SIMPLEX,
                 0.7,
-                …
+                WHITE,
                 2,
             )
 
-        # Draw phase labels
+            # Draw phase labels and metrics summary
             if metrics:
                 self._draw_phase_labels(annotated, frame_idx, metrics)
-
-        with self.timer.measure("debug_video_draw"):
-            _draw_overlays()
+                self._draw_metrics_summary(annotated, frame_idx, metrics)
 
         return annotated
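
The rewritten overlay code calls a `_normalize_to_pixels` helper that never appears in this diff; given the inline conversions it replaces (`py = int(com_y * self.height)`), it presumably lives on `BaseDebugOverlayRenderer` in `kinemotion/core/debug_overlay_utils.py` and reduces to something like this sketch (an assumption, not the library's confirmed implementation):

    class BaseDebugOverlayRenderer:
        """Sketch of only the assumed helper; the real base class in
        kinemotion/core/debug_overlay_utils.py carries much more."""

        def __init__(self, width: int, height: int) -> None:
            self.width = width
            self.height = height

        def _normalize_to_pixels(self, x: float, y: float) -> tuple[int, int]:
            # Map normalized [0, 1] landmark coordinates to integer pixel
            # coordinates, mirroring the removed inline int(... * self.height) code.
            return int(x * self.width), int(y * self.height)


    # On a 1920x1080 frame, the normalized point (0.5, 0.25) lands at (960, 270).
    assert BaseDebugOverlayRenderer(1920, 1080)._normalize_to_pixels(0.5, 0.25) == (960, 270)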
kinemotion/dropjump/validation_bounds.py
CHANGED

@@ -124,7 +124,7 @@ def _classify_combined_score(combined_score: float) -> AthleteProfile:
         return AthleteProfile.ELITE
 
 
-def estimate_athlete_profile(metrics: MetricsDict, …
+def estimate_athlete_profile(metrics: MetricsDict, _gender: str | None = None) -> AthleteProfile:
     """Estimate athlete profile from drop jump metrics.
 
     Uses jump_height and contact_time to classify athlete level.
kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx
CHANGED

Binary file
{kinemotion-0.70.1.dist-info → kinemotion-0.71.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.70.1
+Version: 0.71.1
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -23,13 +23,9 @@ Requires-Python: <3.13,>=3.10
 Requires-Dist: click>=8.1.7
 Requires-Dist: mediapipe>=0.10.30
 Requires-Dist: numpy>=1.26.0
-Requires-Dist: onnxruntime-gpu>=1.23.2; sys_platform == 'linux' and platform_machine == 'x86_64'
-Requires-Dist: onnxruntime>=1.23.2; sys_platform != 'linux' or platform_machine != 'x86_64'
 Requires-Dist: opencv-python>=4.9.0
 Requires-Dist: platformdirs>=4.0.0
-Requires-Dist: rtmlib>=0.0.13
 Requires-Dist: scipy>=1.11.0
-Requires-Dist: torch>=2.0.0
 Requires-Dist: tqdm>=4.67.1
 Requires-Dist: typing-extensions>=4.15.0
 Description-Content-Type: text/markdown
kinemotion-0.71.1.dist-info/RECORD
ADDED

@@ -0,0 +1,50 @@
+kinemotion/__init__.py,sha256=HkD8habCcfxGobxZcACOStla-L1nYHMIZp0th00Q3E8,1061
+kinemotion/api.py,sha256=uG1e4bTnj2c-6cbZJEZ_LjMwFdaG32ba2KcK_XjE_NI,1040
+kinemotion/cli.py,sha256=_Us9krSce4GUKtlLIPrFUhKmPWURzeJ1-ydR_YU2VGw,626
+kinemotion/cmj/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
+kinemotion/cmj/analysis.py,sha256=EQydClIbNkIj-FmCZGaPQe-COVW8fbO3139i9z1vomA,23643
+kinemotion/cmj/api.py,sha256=P_lbqEqAKPO5n1Xn4IQZKNj9nLaO3ljkN2PgqvExGXU,18435
+kinemotion/cmj/cli.py,sha256=P2b77IIw6kqTSIkncxlShzhmjIwqMFBNd-pZxYP-TsI,9918
+kinemotion/cmj/debug_overlay.py,sha256=vF5Apiz8zDRpgrVzf52manLW99m1kHQAPSdUkar5rPs,11474
+kinemotion/cmj/joint_angles.py,sha256=by5M4LDtUfd2_Z9DmcgUl0nsvarsBYjgsE8KWWYcn08,11255
+kinemotion/cmj/kinematics.py,sha256=KwA8uSj3g1SeNf0NXMSHsp3gIw6Gfa-6QWIwdYdRXYw,13362
+kinemotion/cmj/metrics_validator.py,sha256=IQofafpwLCXER3ucZXNfiJKFFKPOVxXnC4BNLHOMnNY,30013
+kinemotion/cmj/validation_bounds.py,sha256=-0iXDhH-RntiGZi_Co22V6qtA5D-hLzkrPkVcfoNd2U,11343
+kinemotion/core/__init__.py,sha256=8hMvfNK7v_eqswuk_J5s5FRGvPtp2-R4kasVMGchFkM,1766
+kinemotion/core/auto_tuning.py,sha256=dF2opupuphbTd6sZIDyXX8hwedLaNlMiH-hT7PGqnfU,10251
+kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
+kinemotion/core/debug_overlay_utils.py,sha256=QaVkHuFZpXUrdiMlm8ylQn6baJOj8jcZeiV4kDqODt0,17441
+kinemotion/core/determinism.py,sha256=Frw-KAOvAxTL_XtxoWpXCjMbQPUKEAusK6JctlkeuRo,2509
+kinemotion/core/experimental.py,sha256=G1EpkmWQ8d-rPaN1n0P7mF6XUzrbW0Br3nVkIzJ1D9M,3694
+kinemotion/core/filtering.py,sha256=7KUeclXqZpNQA8WKNocDwhCxZpwwtizI3wvAEyq9SBo,11603
+kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
+kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
+kinemotion/core/model_downloader.py,sha256=mqhJBHGaNe0aN9qbcBqvcTk9FDd7xaHqEcwD-fyP89c,5205
+kinemotion/core/overlay_constants.py,sha256=zZreHHWe00p2XuCJsbRFqN6g-AAUAnx53LwKqHm1Bl8,1438
+kinemotion/core/pipeline_utils.py,sha256=B5jMXoiLaTh02uGA2MIe1uZLVSRGZ5nxbARuvdrjDrQ,15161
+kinemotion/core/pose.py,sha256=Z795p0EnaTUeWHO8FuApFcMGTLwZ47JOjs5f5TzRvdk,14224
+kinemotion/core/pose_landmarks.py,sha256=LcEbL5K5xKia6dCzWf6Ft18UIE1CLMMqCZ3KUjwUDzM,1558
+kinemotion/core/quality.py,sha256=VUkRL2N6B7lfIZ2pE9han_U68JwarmZz1U0ygHkgkhE,13022
+kinemotion/core/smoothing.py,sha256=F1DCsnvPBi62XJLygOJ5MkNlRa7BCLg_E9ORtCWcoKk,16562
+kinemotion/core/timing.py,sha256=ITX77q4hbtajRuWfgwYhws8nCvOeKFlEdKjCu8lD9_w,7938
+kinemotion/core/types.py,sha256=m141buSkEsqflt5VFaTHtRq_IcimjI3_T_EfaNpIVxY,1652
+kinemotion/core/validation.py,sha256=rrhpI24Iq8WGtNaMg0beTWMbEGccdKF-f-pk-FCKJzI,6749
+kinemotion/core/video_io.py,sha256=84IxC1n3HvYK28MSa5fqumdzlPDhP8k9IPB3OCvWku0,9198
+kinemotion/dropjump/__init__.py,sha256=yBbEbPdY6sqozWtTvfbvuUZnrVWSSjBp61xK34M29F4,878
+kinemotion/dropjump/analysis.py,sha256=Tc41jVctG4zJZOyYqM1SiM95mnF2xz4vcieGJ6vYi2M,29099
+kinemotion/dropjump/api.py,sha256=5qBj05e6Zo-H4-UjBOIt_CYyDqLVcPhwyyLG04eJYMU,20639
+kinemotion/dropjump/cli.py,sha256=FaBX637x7VcLcB8HupaZCkVS7sp8C0YuaKM0h-DBNIA,15906
+kinemotion/dropjump/debug_overlay.py,sha256=X4mvCi5Qi1gnvSZZAsUs-0ZRUx9mVBbEUznOFO21HO8,8470
+kinemotion/dropjump/kinematics.py,sha256=dx4PuXKfKMKcsc_HX6sXj8rHXf9ksiZIOAIkJ4vBlY4,19637
+kinemotion/dropjump/metrics_validator.py,sha256=lSfo4Lm5FHccl8ijUP6SA-kcSh50LS9hF8UIyWxcnW8,9243
+kinemotion/dropjump/validation_bounds.py,sha256=k31qy-kCXTiCTx0RPo2t8yZ-faLxqGO-AeF05QfBFb0,5125
+kinemotion/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kinemotion/models/pose_landmarker_lite.task,sha256=WZKeHR7pUodzXd2DOxnPSsRtKbx6_du_Z1PEWWkNV0o,5777746
+kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx,sha256=dfZTq8kbhv8RxWiXS0HUIJNCUpxYTBN45dFIorPflEs,133
+kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx,sha256=UsutHVQ6GP3X5pCcp52EN8q7o2J3d-TnxZqlF48kY6I,133
+kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kinemotion-0.71.1.dist-info/METADATA,sha256=D2ZS9pNSi3NAaSkYKoQ09PZaJ-B3qWDHPLChu3oxdbY,26125
+kinemotion-0.71.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.71.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.71.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.71.1.dist-info/RECORD,,