kinemotion 0.11.7__tar.gz → 0.12.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kinemotion might be problematic.
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.pre-commit-config.yaml +1 -1
- {kinemotion-0.11.7 → kinemotion-0.12.0}/CHANGELOG.md +29 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/CLAUDE.md +2 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/PKG-INFO +1 -1
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/CAMERA_SETUP.md +1 -1
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/CAMERA_SETUP_ES.md +1 -1
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/CMJ_GUIDE.md +6 -6
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/REAL_TIME_ANALYSIS.md +4 -4
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/TRIPLE_EXTENSION.md +10 -10
- {kinemotion-0.11.7 → kinemotion-0.12.0}/pyproject.toml +1 -1
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/api.py +6 -11
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cmj/debug_overlay.py +9 -12
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/cli_utils.py +7 -99
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/debug_overlay_utils.py +1 -24
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/video_io.py +1 -5
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/dropjump/analysis.py +69 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/dropjump/cli.py +5 -26
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/dropjump/kinematics.py +35 -74
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_api.py +15 -21
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_kinematics.py +7 -5
- {kinemotion-0.11.7 → kinemotion-0.12.0}/uv.lock +1 -1
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.dockerignore +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.github/ISSUE_TEMPLATE/config.yml +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.github/pull_request_template.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.github/workflows/release.yml +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.gitignore +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/.tool-versions +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/CODE_OF_CONDUCT.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/CONTRIBUTING.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/Dockerfile +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/GEMINI.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/LICENSE +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/README.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/SECURITY.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/BULK_PROCESSING.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/ERRORS_FINDINGS.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/FRAMERATE.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/IMU_METADATA_PRESERVATION.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/PARAMETERS.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/docs/VALIDATION_PLAN.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/examples/bulk/README.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/examples/bulk/bulk_processing.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/examples/bulk/simple_example.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/examples/programmatic_usage.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/samples/cmjs/README.md +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/__init__.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cli.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cmj/__init__.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cmj/analysis.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cmj/cli.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cmj/joint_angles.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/cmj/kinematics.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/__init__.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/auto_tuning.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/filtering.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/pose.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/core/smoothing.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/dropjump/__init__.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/dropjump/debug_overlay.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/src/kinemotion/py.typed +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/__init__.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_adaptive_threshold.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_aspect_ratio.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_cmj_analysis.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_cmj_kinematics.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_com_estimation.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_contact_detection.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_filtering.py +0 -0
- {kinemotion-0.11.7 → kinemotion-0.12.0}/tests/test_polyorder.py +0 -0
.pre-commit-config.yaml

@@ -39,7 +39,7 @@ repos:
       additional_dependencies:
         - mdformat-gfm>=0.3.5 # GitHub Flavored Markdown
         - mdformat-tables # Table formatting
-      exclude: ^CLAUDE\.md$
+      exclude: (^CLAUDE\.md$|^CHANGELOG\.md$)

   - repo: https://github.com/compilerla/conventional-pre-commit
     rev: v4.3.0
CHANGELOG.md

@@ -7,6 +7,35 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 <!-- version list -->

+## v0.12.0 (2025-11-06)
+
+### Documentation
+
+- Update claude.md
+  ([`b4d93d9`](https://github.com/feniix/kinemotion/commit/b4d93d94259fbfe86101c256910fcfc07c8dfcc2))
+
+### Features
+
+- **dropjump**: Calculate jump height from flight time like CMJ
+  ([`f7d96a2`](https://github.com/feniix/kinemotion/commit/f7d96a253b287d58215fd64bd1e598784cb098f4))
+
+- **dropjump**: Improve landing detection with position stabilization
+  ([`6d19938`](https://github.com/feniix/kinemotion/commit/6d199382485a80a975911c51444b2c18aa32c428))
+
+### Refactoring
+
+- **core**: Remove unused code and fix vulture warnings
+  ([`16328e2`](https://github.com/feniix/kinemotion/commit/16328e299a0e15f7f0f0e87d133e1f662dc59d0b))
+
+- **core**: Rename AutoTunedParams to AnalysisParameters for consistency
+  ([`2b6e59b`](https://github.com/feniix/kinemotion/commit/2b6e59b832769224b600e23bf4141af5d6159169))
+
+### Testing
+
+- Update tests for kinematic-based height calculation
+  ([`308469e`](https://github.com/feniix/kinemotion/commit/308469e978c53a971a4a20352cfffd72a3c9e6cd))
+
+
 ## v0.11.7 (2025-11-06)

 ### Bug Fixes
CLAUDE.md

@@ -254,6 +254,8 @@ chore(release): 0.11.0 [skip ci]
 feat!: change API signature for process_video
 ```

+**Important**: Commit messages must never reference Claude or AI assistance. Keep messages professional and focused on the technical changes.
+
 ## MCP Servers

 Configured in `.mcp.json`: web-search, sequential-thinking, context7, etc.
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.11.7
+Version: 0.12.0
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
docs/CAMERA_SETUP.md

@@ -12,7 +12,7 @@ Proper camera positioning is critical for accurate drop jump analysis. The curre

 ### Required Camera Position

-…
+Camera must be positioned at a side view angle, perpendicular to the sagittal plane (90°).

 #### Camera Positioning Diagram
docs/CAMERA_SETUP_ES.md

@@ -12,7 +12,7 @@ El posicionamiento adecuado de la cámara es crítico para un análisis preciso

 ### Posición Requerida de la Cámara

-…
+La cámara debe posicionarse en ángulo de vista lateral, perpendicular al plano sagital (90°).

 #### Diagrama de Posicionamiento de Cámara
docs/CMJ_GUIDE.md

@@ -61,7 +61,7 @@ print(f"Eccentric duration: {metrics.eccentric_duration*1000:.0f}ms")

 ### Movement Characteristics

-…
+1. **Countermovement Depth** (m) - Vertical distance during eccentric phase

    - Represents how deep the athlete squats
    - Typical range: 0.20-0.40m

@@ -90,7 +90,7 @@ print(f"Eccentric duration: {metrics.eccentric_duration*1000:.0f}ms")

 ### Velocity Profile

-…
+1. **Peak Eccentric Velocity** (m/s) - Maximum downward speed

    - Indicates countermovement speed
    - Typical range: 0.5-1.5 m/s

@@ -102,10 +102,10 @@ print(f"Eccentric duration: {metrics.eccentric_duration*1000:.0f}ms")

 ### Triple Extension (in debug video)

-…
-…
-…
-…
+1. **Ankle Angle** - Dorsiflexion/plantarflexion
+1. **Knee Angle** - Flexion/extension
+1. **Hip Angle** - Flexion/extension
+1. **Trunk Tilt** - Forward/backward lean

 **Note**: Ankle/knee angles have limited visibility in lateral view videos (~20-30% of frames). Trunk angle is available throughout. See docs/TRIPLE_EXTENSION.md for details.
docs/REAL_TIME_ANALYSIS.md

@@ -244,7 +244,7 @@ ______________________________________________________________________

 **Test video**: 236 frames @ 29.58fps

-```
+```text
 Processing time breakdown:
 - MediaPipe tracking: ~5-6 seconds
 - Smoothing: ~0.1 seconds

@@ -681,7 +681,7 @@ ______________________________________________________________________

 ## Recommendation Matrix

-### Choose Offline (Current) If
+### Choose Offline (Current) If

 - ✅ Maximum accuracy required (research, validation)
 - ✅ Processing pre-recorded videos

@@ -689,7 +689,7 @@ ______________________________________________________________________
 - ✅ Want triple extension with full coverage
 - ✅ Publication-quality data needed

-### Choose Near Real-Time If
+### Choose Near Real-Time If

 - ✅ Need quick results (1-2 sec acceptable)
 - ✅ Coaching/training applications

@@ -697,7 +697,7 @@ ______________________________________________________________________
 - ✅ Want to maintain accuracy
 - ✅ Building mobile/web app

-### Choose True Real-Time If
+### Choose True Real-Time If

 - ⚠️ Instant feedback critical (\<100ms)
 - ⚠️ Interactive applications (games, VR)
docs/TRIPLE_EXTENSION.md

@@ -49,7 +49,7 @@ The CMJ debug video now includes **triple extension tracking** - real-time visua

 **At Lowest Point (Countermovement Bottom):**

-```
+```text
 Ankle: 70-90° (neutral to slight dorsiflexion)
 Knee: 90-110° (moderate squat)
 Hip: 90-110° (hip flexion)

@@ -58,7 +58,7 @@ Trunk: 0-20° (slight forward lean)

 **At Takeoff (Leaving Ground):**

-```
+```text
 Ankle: 110-130° (strong plantarflexion)
 Knee: 160-180° (near full extension)
 Hip: 170-180° (full extension)

@@ -67,7 +67,7 @@ Trunk: 0-10° (nearly vertical)

 **During Flight:**

-```
+```text
 All joints: ~180° (full extension)
 ```

@@ -139,23 +139,23 @@ All joints: ~180° (full extension)

 ### Poor Extension Patterns

-…
+#### Problem 1: Incomplete knee extension

-```
+```text
 Takeoff: Ankle 120°, Knee 150°, Hip 175°
 → Power leak: Not fully utilizing leg strength
 ```

-…
+#### Problem 2: Sequential extension (not simultaneous)

-```
+```text
 Early concentric: Hip 170°, Knee 120°, Ankle 80°
 → Poor coordination: Extending in sequence instead of together
 ```

-…
+#### Problem 3: Excessive trunk lean

-```
+```text
 Takeoff: Trunk 30° forward
 → Sub-optimal: Reduces vertical force component
 ```

@@ -272,7 +272,7 @@ The triple extension feature has been tested with:

 **Debug video shows:**

-```
+```text
 Frame 140-155 (Concentric phase):
 ┌─────────────────────┐
 │ TRIPLE EXTENSION │
src/kinemotion/api.py

@@ -337,7 +337,6 @@ class VideoConfig:
     """Configuration for processing a single video."""

     video_path: str
-    drop_height: float
     quality: str = "balanced"
     output_video: str | None = None
     json_output: str | None = None

@@ -352,7 +351,6 @@ class VideoConfig:

 def process_video(
     video_path: str,
-    drop_height: float,
     quality: str = "balanced",
     output_video: str | None = None,
     json_output: str | None = None,

@@ -368,9 +366,10 @@ def process_video(
     """
     Process a single drop jump video and return metrics.

+    Jump height is calculated from flight time using kinematic formula (h = g*t²/8).
+
     Args:
         video_path: Path to the input video file
-        drop_height: Height of drop box/platform in meters (e.g., 0.40 for 40cm)
         quality: Analysis quality preset ("fast", "balanced", or "accurate")
         output_video: Optional path for debug video output
         json_output: Optional path for JSON metrics output

@@ -459,15 +458,12 @@ def process_video(
     # Calculate metrics
     if verbose:
         print("Calculating metrics...")
-        print(
-            f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)"
-        )

     metrics = calculate_drop_jump_metrics(
         contact_states,
         vertical_positions,
         video.fps,
-        drop_height_m=…
+        drop_height_m=None,
         drop_start_frame=drop_start_frame,
         velocity_threshold=params.velocity_threshold,
         smoothing_window=params.smoothing_window,

@@ -513,9 +509,9 @@ def process_videos_bulk(

     Example:
         >>> configs = [
-        ...     VideoConfig("video1.mp4"…
-        ...     VideoConfig("video2.mp4", …
-        ...     VideoConfig("video3.mp4", …
+        ...     VideoConfig("video1.mp4"),
+        ...     VideoConfig("video2.mp4", quality="accurate"),
+        ...     VideoConfig("video3.mp4", output_video="debug3.mp4"),
         ... ]
         >>> results = process_videos_bulk(configs, max_workers=4)
         >>> for result in results:

@@ -573,7 +569,6 @@ def _process_video_wrapper(config: VideoConfig) -> VideoResult:
     try:
         metrics = process_video(
             video_path=config.video_path,
-            drop_height=config.drop_height,
             quality=config.quality,
             output_video=config.output_video,
             json_output=config.json_output,
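Taken together, these api.py hunks remove `drop_height` from both `VideoConfig` and `process_video`. A minimal sketch of a post-0.12.0 call site, assuming `process_video` is importable from `kinemotion.api` as in the diff above; the video and JSON paths are placeholders, not files shipped with the package:

```python
# Sketch of the 0.12.0 programmatic API: no drop_height argument anywhere.
# "drop_jump.mp4" and "metrics.json" are placeholder paths.
from kinemotion.api import process_video

metrics = process_video(
    video_path="drop_jump.mp4",   # placeholder input video
    quality="balanced",           # "fast", "balanced", or "accurate"
    json_output="metrics.json",   # optional JSON metrics output
    verbose=False,
)

# Jump height now comes from flight time (h = g*t**2 / 8) rather than
# drop-box calibration, so no box height is needed up front.
print(f"Jump height: {metrics.jump_height:.3f} m")
```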
src/kinemotion/cmj/debug_overlay.py

@@ -242,18 +242,15 @@ class CMJDebugOverlayRenderer(BaseDebugOverlayRenderer):
         y_offset += 30

         # Draw angle arcs at joints for visual feedback (only if angle is available)
-        …
-        if …
-            self._draw_angle_arc(…
-        …
-        …
-            self._draw_angle_arc(
-                frame, landmarks, f"{side_used}_hip", angles["hip_angle"]
-            )
+        ankle_angle = angles.get("ankle_angle")
+        if ankle_angle is not None:
+            self._draw_angle_arc(frame, landmarks, f"{side_used}_ankle", ankle_angle)
+        knee_angle = angles.get("knee_angle")
+        if knee_angle is not None:
+            self._draw_angle_arc(frame, landmarks, f"{side_used}_knee", knee_angle)
+        hip_angle = angles.get("hip_angle")
+        if hip_angle is not None:
+            self._draw_angle_arc(frame, landmarks, f"{side_used}_hip", hip_angle)

     def _draw_angle_arc(
         self,
src/kinemotion/core/cli_utils.py

@@ -5,7 +5,7 @@ from typing import Any, Protocol

 import click

-from .auto_tuning import …
+from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
 from .pose import PoseTracker
 from .smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .video_io import VideoProcessor

@@ -85,8 +85,8 @@ def track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list,


 def apply_expert_param_overrides(
-    params: …
-) -> …
+    params: AnalysisParameters, expert_params: ExpertParameters
+) -> AnalysisParameters:
     """Apply expert parameter overrides to auto-tuned parameters.

     Args:

@@ -110,7 +110,7 @@ def apply_expert_param_overrides(
 def print_auto_tuned_params(
     video: VideoProcessor,
     quality_preset: QualityPreset,
-    params: …
+    params: AnalysisParameters,
     characteristics: VideoCharacteristics | None = None,
     extra_params: dict[str, Any] | None = None,
 ) -> None:

@@ -159,7 +159,9 @@ def print_auto_tuned_params(
     click.echo("=" * 60 + "\n", err=True)


-def smooth_landmark_sequence(…
+def smooth_landmark_sequence(
+    landmarks_sequence: list, params: AnalysisParameters
+) -> list:
     """Apply smoothing to landmark sequence.

     Args:

@@ -209,98 +211,4 @@ def common_output_options(func: Callable) -> Callable:  # type: ignore[type-arg]
     )(func)
     return func

-
-def common_quality_options(func: Callable) -> Callable:  # type: ignore[type-arg]
-    """Add quality and verbose options to CLI command."""
-    func = click.option(
-        "--quality",
-        type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
-        default="balanced",
-        help=(
-            "Analysis quality preset: "
-            "fast (quick, less precise), "
-            "balanced (default, good for most cases), "
-            "accurate (research-grade, slower)"
-        ),
-        show_default=True,
-    )(func)
-    func = click.option(
-        "--verbose",
-        "-v",
-        is_flag=True,
-        help="Show auto-selected parameters and analysis details",
-    )(func)
-    return func
-
-
-def common_batch_options(func: Callable) -> Callable:  # type: ignore[type-arg]
-    """Add batch processing options to CLI command."""
-    func = click.option(
-        "--batch",
-        is_flag=True,
-        help="Enable batch processing mode for multiple videos",
-    )(func)
-    func = click.option(
-        "--workers",
-        type=int,
-        default=4,
-        help="Number of parallel workers for batch processing (default: 4)",
-        show_default=True,
-    )(func)
-    func = click.option(
-        "--output-dir",
-        type=click.Path(),
-        help="Directory for debug video outputs (batch mode only)",
-    )(func)
-    func = click.option(
-        "--json-output-dir",
-        type=click.Path(),
-        help="Directory for JSON metrics outputs (batch mode only)",
-    )(func)
-    func = click.option(
-        "--csv-summary",
-        type=click.Path(),
-        help="Path for CSV summary export (batch mode only)",
-    )(func)
-    return func
-
-
-def common_expert_options(func: Callable) -> Callable:  # type: ignore[type-arg]
-    """Add expert parameter options to CLI command."""
-    func = click.option(
-        "--smoothing-window",
-        type=int,
-        default=None,
-        help="[EXPERT] Override auto-tuned smoothing window size",
-    )(func)
-    func = click.option(
-        "--velocity-threshold",
-        type=float,
-        default=None,
-        help="[EXPERT] Override auto-tuned velocity threshold",
-    )(func)
-    func = click.option(
-        "--min-contact-frames",
-        type=int,
-        default=None,
-        help="[EXPERT] Override auto-tuned minimum contact frames",
-    )(func)
-    func = click.option(
-        "--visibility-threshold",
-        type=float,
-        default=None,
-        help="[EXPERT] Override visibility threshold for landmarks",
-    )(func)
-    func = click.option(
-        "--detection-confidence",
-        type=float,
-        default=None,
-        help="[EXPERT] Override MediaPipe detection confidence (0.0-1.0)",
-    )(func)
-    func = click.option(
-        "--tracking-confidence",
-        type=float,
-        default=None,
-        help="[EXPERT] Override MediaPipe tracking confidence (0.0-1.0)",
-    )(func)
     return func
src/kinemotion/core/debug_overlay_utils.py

@@ -48,29 +48,6 @@ def create_video_writer(
     return writer, needs_resize


-def prepare_frame_for_overlay(
-    frame: np.ndarray, needs_resize: bool, display_width: int, display_height: int
-) -> np.ndarray:
-    """
-    Prepare frame for overlay rendering by resizing if needed.
-
-    Args:
-        frame: Original video frame
-        needs_resize: Whether frame needs resizing
-        display_width: Target display width
-        display_height: Target display height
-
-    Returns:
-        Prepared frame ready for overlay
-    """
-    # Apply SAR correction if needed
-    if needs_resize:
-        frame = cv2.resize(
-            frame, (display_width, display_height), interpolation=cv2.INTER_LINEAR
-        )
-    return frame
-
-
 def write_overlay_frame(
     writer: cv2.VideoWriter, frame: np.ndarray, width: int, height: int
 ) -> None:

@@ -162,5 +139,5 @@ class BaseDebugOverlayRenderer:
     def __enter__(self) -> "BaseDebugOverlayRenderer":
         return self

-    def __exit__(self, …
+    def __exit__(self, _exc_type, _exc_val, _exc_tb) -> None:  # type: ignore[no-untyped-def]
         self.close()
src/kinemotion/core/video_io.py

@@ -151,10 +151,6 @@ class VideoProcessor:

         return frame

-    def reset(self) -> None:
-        """Reset video to beginning."""
-        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
-
     def close(self) -> None:
         """Release video capture."""
         self.cap.release()

@@ -162,5 +158,5 @@ class VideoProcessor:
     def __enter__(self) -> "VideoProcessor":
         return self

-    def __exit__(self, …
+    def __exit__(self, _exc_type, _exc_val, _exc_tb) -> None:  # type: ignore[no-untyped-def]
         self.close()
src/kinemotion/dropjump/analysis.py

@@ -602,6 +602,75 @@ def find_interpolated_phase_transitions_with_curvature(
     return refined_phases


+def find_landing_from_acceleration(
+    positions: np.ndarray,
+    accelerations: np.ndarray,
+    takeoff_frame: int,
+    fps: float,
+    search_duration: float = 0.7,
+) -> int:
+    """
+    Find landing frame by detecting impact acceleration after takeoff.
+
+    Similar to CMJ landing detection, looks for maximum positive acceleration
+    (deceleration on ground impact) after the jump peak.
+
+    Args:
+        positions: Array of vertical positions (normalized 0-1)
+        accelerations: Array of accelerations (second derivative)
+        takeoff_frame: Frame at takeoff (end of ground contact)
+        fps: Video frame rate
+        search_duration: Duration in seconds to search for landing (default: 0.7s)
+
+    Returns:
+        Landing frame index (integer)
+    """
+    # Find peak height (minimum y value = highest point)
+    search_start = takeoff_frame
+    search_end = min(len(positions), takeoff_frame + int(fps * search_duration))
+
+    if search_end <= search_start:
+        return min(len(positions) - 1, takeoff_frame + int(fps * 0.3))
+
+    flight_positions = positions[search_start:search_end]
+    peak_idx = int(np.argmin(flight_positions))
+    peak_frame = search_start + peak_idx
+
+    # After peak, look for landing (impact with ground)
+    # Landing is detected by maximum positive acceleration (deceleration on impact)
+    landing_search_start = peak_frame + 2
+    landing_search_end = min(len(accelerations), landing_search_start + int(fps * 0.6))
+
+    if landing_search_end <= landing_search_start:
+        return min(len(positions) - 1, peak_frame + int(fps * 0.2))
+
+    # Find impact: maximum positive acceleration after peak
+    landing_accelerations = accelerations[landing_search_start:landing_search_end]
+    impact_idx = int(np.argmax(landing_accelerations))
+    impact_frame = landing_search_start + impact_idx
+
+    # After acceleration peak, look for position stabilization (full ground contact)
+    # Check where vertical position stops decreasing (athlete stops compressing)
+    stabilization_search_start = impact_frame
+    stabilization_search_end = min(len(positions), impact_frame + int(fps * 0.2))
+
+    landing_frame = impact_frame
+    if stabilization_search_end > stabilization_search_start + 3:
+        # Find where position reaches maximum (lowest point) and starts stabilizing
+        search_positions = positions[
+            stabilization_search_start:stabilization_search_end
+        ]
+
+        # Look for the frame where position reaches its maximum (deepest landing)
+        max_pos_idx = int(np.argmax(search_positions))
+
+        # Landing is just after max position (athlete at deepest landing compression)
+        landing_frame = stabilization_search_start + max_pos_idx
+        landing_frame = min(len(positions) - 1, landing_frame)
+
+    return landing_frame
+
+
 def compute_average_foot_position(
     landmarks: dict[str, tuple[float, float, float]],
 ) -> tuple[float, float]:
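The kinematics hunks further below pair this helper with `compute_acceleration_from_derivative` from `kinemotion.core.smoothing`. A rough standalone sketch of that pairing, using a synthetic position trace whose values are illustrative only:

```python
# Illustrative only: synthetic foot-y trace (normalized, y grows downward),
# smoothed acceleration, then the new landing detection from this release.
import numpy as np
from kinemotion.core.smoothing import compute_acceleration_from_derivative
from kinemotion.dropjump.analysis import find_landing_from_acceleration

fps = 30.0
t = np.arange(90) / fps
# Crude stand-in for a drop jump: on the ground at y=0.8, airborne between
# t=1.0s and t=1.5s following a shallow parabola, back on the ground after.
positions = np.where((t > 1.0) & (t < 1.5), 0.8 - 2.0 * (t - 1.0) * (1.5 - t), 0.8)

accelerations = compute_acceleration_from_derivative(
    positions, window_length=5, polyorder=2
)
takeoff_frame = 30  # end of ground contact in this synthetic trace
landing_frame = find_landing_from_acceleration(
    positions, accelerations, takeoff_frame, fps, search_duration=0.7
)
print(f"Estimated landing frame: {landing_frame}")
```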
src/kinemotion/dropjump/cli.py

@@ -62,15 +62,6 @@ class AnalysisParameters:
     type=click.Path(),
     help="Path for JSON metrics output (default: stdout)",
 )
-@click.option(
-    "--drop-height",
-    type=float,
-    required=True,
-    help=(
-        "Height of drop box/platform in meters (e.g., 0.40 for 40cm box) - "
-        "REQUIRED for accurate calibration"
-    ),
-)
 @click.option(
     "--quality",
     type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),

@@ -164,7 +155,6 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual parameter
     video_path: tuple[str, ...],
     output: str | None,
     json_output: str | None,
-    drop_height: float,
     quality: str,
     verbose: bool,
     batch: bool,

@@ -193,15 +183,15 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual parameter

     \b
     # Single video
-    kinemotion dropjump-analyze video.mp4 …
+    kinemotion dropjump-analyze video.mp4

     \b
     # Batch mode with glob pattern
-    kinemotion dropjump-analyze videos/*.mp4 --batch --…
+    kinemotion dropjump-analyze videos/*.mp4 --batch --workers 4

     \b
     # Batch with output directories
-    kinemotion dropjump-analyze videos/*.mp4 --batch …
+    kinemotion dropjump-analyze videos/*.mp4 --batch \\
         --json-output-dir results/ --csv-summary summary.csv
     """
     # Expand glob patterns and collect all video files

@@ -237,7 +227,6 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual parameter
     if use_batch:
         _process_batch(
             video_files,
-            drop_height,
             quality,
             workers,
             output_dir,

@@ -251,7 +240,6 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual parameter
             video_files[0],
             output,
             json_output,
-            drop_height,
             quality,
             verbose,
             expert_params,

@@ -356,7 +344,6 @@ def _process_single(
     video_path: str,
     output: str | None,
     json_output: str | None,
-    drop_height: float,
     quality: str,
     verbose: bool,
     expert_params: AnalysisParameters,

@@ -422,15 +409,11 @@ def _process_single(

     # Calculate metrics
     click.echo("Calculating metrics...", err=True)
-    click.echo(
-        f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
-        err=True,
-    )
     metrics = calculate_drop_jump_metrics(
         contact_states,
         vertical_positions,
         video.fps,
-        drop_height_m=…
+        drop_height_m=None,
         drop_start_frame=expert_params.drop_start_frame,
         velocity_threshold=params.velocity_threshold,
         smoothing_window=params.smoothing_window,

@@ -480,7 +463,6 @@ def _setup_batch_output_dirs(

 def _create_video_configs(
     video_files: list[str],
-    drop_height: float,
     quality: str,
     output_dir: str | None,
     json_output_dir: str | None,

@@ -490,7 +472,6 @@ def _create_video_configs(

     Args:
         video_files: List of video file paths
-        drop_height: Drop height in meters
         quality: Quality preset
         output_dir: Debug video output directory
         json_output_dir: JSON metrics output directory

@@ -513,7 +494,6 @@ def _create_video_configs(

         config = VideoConfig(
             video_path=video_file,
-            drop_height=drop_height,
             quality=quality,
             output_video=debug_video,
             json_output=json_file,

@@ -662,7 +642,6 @@ def _write_csv_summary(

 def _process_batch(
     video_files: list[str],
-    drop_height: float,
     quality: str,
     workers: int,
     output_dir: str | None,

@@ -681,7 +660,7 @@ def _process_batch(

     # Create video configurations
     configs = _create_video_configs(
-        video_files, …
+        video_files, quality, output_dir, json_output_dir, expert_params
     )

     # Progress callback
src/kinemotion/dropjump/kinematics.py

@@ -2,11 +2,13 @@

 import numpy as np

+from ..core.smoothing import compute_acceleration_from_derivative
 from .analysis import (
     ContactState,
     detect_drop_start,
     find_contact_phases,
     find_interpolated_phase_transitions_with_curvature,
+    find_landing_from_acceleration,
 )

@@ -123,14 +125,13 @@ def _determine_drop_start_frame(
     """
     if drop_start_frame is None:
         # Auto-detect where drop jump actually starts (skip initial stationary period)
-        …
+        return detect_drop_start(
             foot_y_positions,
             fps,
             min_stationary_duration=0.5,
             position_change_threshold=0.005,
             smoothing_window=smoothing_window,
         )
-        return detected_frame if detected_frame is not None else 0
     return drop_start_frame

@@ -313,9 +314,14 @@ def _analyze_flight_phase(
     drop_height_m: float | None,
     scale_factor: float,
     kinematic_correction_factor: float,
+    smoothing_window: int,
+    polyorder: int,
 ) -> None:
     """Analyze flight phase and calculate jump height metrics.

+    Uses acceleration-based landing detection (like CMJ) for accurate flight time,
+    then calculates jump height using kinematic formula h = g*t²/8.
+
     Args:
         metrics: DropJumpMetrics object to populate
         phases: All phase tuples

@@ -323,38 +329,41 @@ def _analyze_flight_phase(
         contact_end: End of contact phase
         foot_y_positions: Vertical position array
         fps: Video frame rate
-        drop_height_m: Known drop height (optional)
+        drop_height_m: Known drop height (optional, for RSI calculation)
         scale_factor: Calibration scale factor
         kinematic_correction_factor: Correction for kinematic method
+        smoothing_window: Window size for acceleration computation
+        polyorder: Polynomial order for Savitzky-Golay filter
     """
-    # Find …
-    …
-        (start, end)
-        for start, end, state in phases
-        if state == ContactState.IN_AIR and start > contact_end
-    ]
+    # Find takeoff frame (end of ground contact)
+    flight_start = contact_end

-    …
-    …
+    # Compute accelerations for landing detection
+    accelerations = compute_acceleration_from_derivative(
+        foot_y_positions, window_length=smoothing_window, polyorder=polyorder
+    )

-    …
+    # Use acceleration-based landing detection (like CMJ)
+    # This finds the actual ground impact, not just when velocity drops
+    flight_end = find_landing_from_acceleration(
+        foot_y_positions, accelerations, flight_start, fps, search_duration=0.7
+    )

     # Store integer frame indices
     metrics.flight_start_frame = flight_start
     metrics.flight_end_frame = flight_end

-    # Find precise timing
+    # Find precise sub-frame timing for takeoff
     flight_start_frac = float(flight_start)
     flight_end_frac = float(flight_end)

     for start_frac, end_frac, state in interpolated_phases:
         if (
-            state == ContactState.…
+            state == ContactState.ON_GROUND
             and int(start_frac) <= flight_start <= int(end_frac) + 1
-            and int(start_frac) <= flight_end <= int(end_frac) + 1
         ):
-            …
-            …
+            # Use end of ground contact as precise takeoff
+            flight_start_frac = end_frac
             break

     # Calculate flight time

@@ -363,11 +372,16 @@ def _analyze_flight_phase(
     metrics.flight_start_frame_precise = flight_start_frac
     metrics.flight_end_frame_precise = flight_end_frac

-    # Calculate jump height using kinematic method
+    # Calculate jump height using kinematic method (like CMJ)
+    # h = g * t² / 8
     g = 9.81  # m/s^2
     jump_height_kinematic = (g * metrics.flight_time**2) / 8

-    # …
+    # Always use kinematic method for jump height (like CMJ)
+    metrics.jump_height = jump_height_kinematic
+    metrics.jump_height_kinematic = jump_height_kinematic
+
+    # Calculate trajectory-based height for reference
     takeoff_position = foot_y_positions[flight_start]
     flight_positions = foot_y_positions[flight_start : flight_end + 1]

@@ -379,21 +393,6 @@ def _analyze_flight_phase(
     height_normalized = float(takeoff_position - peak_position)
     metrics.jump_height_trajectory = height_normalized

-    # Choose measurement method based on calibration availability
-    if drop_height_m is not None and scale_factor > 1.0:
-        metrics.jump_height = height_normalized * scale_factor
-        metrics.jump_height_kinematic = jump_height_kinematic
-    else:
-        metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
-        metrics.jump_height_kinematic = jump_height_kinematic
-    else:
-        # Fallback to kinematic if no position data
-        if drop_height_m is None:
-            metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
-        else:
-            metrics.jump_height = jump_height_kinematic
-        metrics.jump_height_kinematic = jump_height_kinematic
-

 def calculate_drop_jump_metrics(
     contact_states: list[ContactState],

@@ -506,46 +505,8 @@ def calculate_drop_jump_metrics(
         drop_height_m,
         scale_factor,
         kinematic_correction_factor,
+        smoothing_window,
+        polyorder,
     )

     return metrics
-
-
-def estimate_jump_height_from_trajectory(
-    foot_y_positions: np.ndarray,
-    flight_start: int,
-    flight_end: int,
-    pixel_to_meter_ratio: float | None = None,
-) -> float:
-    """
-    Estimate jump height from position trajectory.
-
-    Args:
-        foot_y_positions: Vertical positions of feet (normalized or pixels)
-        flight_start: Frame where flight begins
-        flight_end: Frame where flight ends
-        pixel_to_meter_ratio: Conversion factor from pixels to meters
-
-    Returns:
-        Estimated jump height in meters (or normalized units if no calibration)
-    """
-    if flight_end < flight_start:
-        return 0.0
-
-    # Get position at takeoff (end of contact) and peak (minimum y during flight)
-    takeoff_position = foot_y_positions[flight_start]
-    flight_positions = foot_y_positions[flight_start : flight_end + 1]
-
-    if len(flight_positions) == 0:
-        return 0.0
-
-    peak_position = np.min(flight_positions)
-
-    # Height difference (in normalized coordinates, y increases downward)
-    height_diff = takeoff_position - peak_position
-
-    # Convert to meters if calibration available
-    if pixel_to_meter_ratio is not None:
-        return float(height_diff * pixel_to_meter_ratio)
-
-    return float(height_diff)
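For context on the formula these hunks standardize on: with flight time t measured from takeoff to landing and uniform gravity, the body rises for t/2 and falls the same distance, so h = g·t²/8. A quick numeric check, purely illustrative:

```python
# Quick check of h = g * t**2 / 8 used above (illustrative values only).
G = 9.81  # m/s^2

def jump_height_from_flight_time(flight_time_s: float) -> float:
    """Height reached when airborne for flight_time_s seconds (symmetric flight)."""
    return G * flight_time_s**2 / 8

for t in (0.40, 0.50, 0.60):
    print(f"flight time {t:.2f} s -> jump height {jump_height_from_flight_time(t):.3f} m")
# 0.40 s -> ~0.196 m, 0.50 s -> ~0.307 m, 0.60 s -> ~0.441 m
```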
tests/test_api.py

@@ -19,7 +19,6 @@ def test_process_video_returns_metrics(sample_video_path: str) -> None:
     """Test that process_video returns DropJumpMetrics object."""
     metrics = process_video(
         video_path=sample_video_path,
-        drop_height=0.40,
         quality="fast",  # Use fast for quicker tests
         verbose=False,
     )

@@ -37,7 +36,6 @@ def test_process_video_with_json_output(sample_video_path: str) -> None:

     process_video(
         video_path=sample_video_path,
-        drop_height=0.40,
         json_output=str(json_path),
         quality="fast",
     )

@@ -64,7 +62,6 @@ def test_process_video_with_debug_output(sample_video_path: str) -> None:

     metrics = process_video(
         video_path=sample_video_path,
-        drop_height=0.40,
         output_video=str(output_path),
         quality="fast",
     )

@@ -86,7 +83,6 @@ def test_process_video_invalid_quality(tmp_path: Path) -> None:
     with pytest.raises(ValueError, match="Invalid quality preset"):
         process_video(
             video_path=str(dummy_video),
-            drop_height=0.40,
             quality="invalid",
         )

@@ -96,7 +92,6 @@ def test_process_video_file_not_found() -> None:
     with pytest.raises(FileNotFoundError, match="Video file not found"):
         process_video(
             video_path="nonexistent_video.mp4",
-            drop_height=0.40,
         )


@@ -107,7 +102,6 @@ def test_process_video_quality_presets(sample_video_path: str) -> None:
     for quality in qualities:
         metrics = process_video(
             video_path=sample_video_path,
-            drop_height=0.40,
             quality=quality,
             verbose=False,
         )

@@ -121,7 +115,6 @@ def test_process_video_with_expert_overrides(sample_video_path: str) -> None:
     """Test that expert parameter overrides work."""
     metrics = process_video(
         video_path=sample_video_path,
-        drop_height=0.40,
         smoothing_window=7,
         velocity_threshold=0.025,
         min_contact_frames=5,

@@ -136,12 +129,10 @@ def test_video_config_creation() -> None:
     """Test VideoConfig dataclass creation."""
     config = VideoConfig(
         video_path="test.mp4",
-        drop_height=0.40,
         quality="balanced",
     )

     assert config.video_path == "test.mp4"
-    assert config.drop_height == pytest.approx(0.40)
     assert config.quality == "balanced"
     assert config.output_video is None
     assert config.json_output is None

@@ -171,8 +162,8 @@ def test_video_result_creation() -> None:
 def test_process_videos_bulk_success(sample_video_path: str) -> None:
     """Test bulk processing of multiple videos."""
     configs = [
-        VideoConfig(video_path=sample_video_path, …
-        VideoConfig(video_path=sample_video_path, …
+        VideoConfig(video_path=sample_video_path, quality="fast"),
+        VideoConfig(video_path=sample_video_path, quality="fast"),
     ]

     results = process_videos_bulk(configs, max_workers=2)

@@ -191,8 +182,12 @@ def test_process_videos_bulk_success(sample_video_path: str) -> None:
 def test_process_videos_bulk_with_failure() -> None:
     """Test bulk processing handles failures gracefully."""
     configs = [
-        VideoConfig(…
-        …
+        VideoConfig(
+            video_path="nonexistent1.mp4",
+        ),
+        VideoConfig(
+            video_path="nonexistent2.mp4",
+        ),
     ]

     results = process_videos_bulk(configs, max_workers=2)

@@ -210,9 +205,11 @@ def test_process_videos_bulk_with_failure() -> None:
 def test_process_videos_bulk_mixed_results(sample_video_path: str) -> None:
     """Test bulk processing with mix of successful and failed videos."""
     configs = [
-        VideoConfig(video_path=sample_video_path, …
-        VideoConfig(…
-        …
+        VideoConfig(video_path=sample_video_path, quality="fast"),
+        VideoConfig(
+            video_path="nonexistent.mp4",
+        ),
+        VideoConfig(video_path=sample_video_path, quality="fast"),
     ]

     results = process_videos_bulk(configs, max_workers=2)

@@ -238,8 +235,8 @@ def test_process_videos_bulk_mixed_results(sample_video_path: str) -> None:
 def test_process_videos_bulk_progress_callback(sample_video_path: str) -> None:
     """Test that progress callback is called for each video."""
     configs = [
-        VideoConfig(video_path=sample_video_path, …
-        VideoConfig(video_path=sample_video_path, …
+        VideoConfig(video_path=sample_video_path, quality="fast"),
+        VideoConfig(video_path=sample_video_path, quality="fast"),
     ]

     callback_results = []

@@ -266,18 +263,15 @@ def test_process_videos_bulk_different_parameters(sample_video_path: str) -> Non
     configs = [
         VideoConfig(
             video_path=sample_video_path,
-            drop_height=0.40,
             quality="fast",
         ),
         VideoConfig(
             video_path=sample_video_path,
-            drop_height=0.40,
             quality="balanced",
             json_output=str(Path(tmpdir) / "video2.json"),
         ),
         VideoConfig(
             video_path=sample_video_path,
-            drop_height=0.40,
             quality="fast",
             smoothing_window=7,
         ),
tests/test_kinematics.py

@@ -32,9 +32,10 @@ def test_calculate_metrics_basic():
     assert metrics.ground_contact_time is not None
     assert 0.25 < metrics.ground_contact_time < 0.40  # Approximately 8-12 frames

-    # Flight time…
+    # Flight time: acceleration-based landing detection finds impact earlier
+    # than simple phase boundary, typically 13-17 frames instead of 20
     assert metrics.flight_time is not None
-    assert 0.…
+    assert 0.35 < metrics.flight_time < 0.65  # Approximately 10-20 frames

     # Jump height should be calculated from flight time
     assert metrics.jump_height is not None

@@ -46,9 +47,10 @@ def test_calculate_metrics_basic():
     assert metrics.flight_start_frame_precise is not None
     assert metrics.flight_end_frame_precise is not None

-    # Fractional frames should be close to integer frames
-    …
-    assert abs(metrics.…
+    # Fractional frames should be reasonably close to integer frames
+    # (within 2 frames due to sub-frame interpolation and phase detection)
+    assert abs(metrics.contact_start_frame_precise - metrics.contact_start_frame) < 2.0
+    assert abs(metrics.flight_start_frame_precise - metrics.flight_start_frame) < 2.0


 def test_metrics_to_dict():