kinemotion 0.6.4__tar.gz → 0.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (40)
  1. {kinemotion-0.6.4 → kinemotion-0.7.0}/.pre-commit-config.yaml +5 -4
  2. {kinemotion-0.6.4 → kinemotion-0.7.0}/CHANGELOG.md +8 -0
  3. {kinemotion-0.6.4 → kinemotion-0.7.0}/CLAUDE.md +236 -35
  4. {kinemotion-0.6.4 → kinemotion-0.7.0}/PKG-INFO +1 -1
  5. {kinemotion-0.6.4 → kinemotion-0.7.0}/examples/programmatic_usage.py +1 -2
  6. {kinemotion-0.6.4 → kinemotion-0.7.0}/pyproject.toml +3 -1
  7. kinemotion-0.7.0/src/kinemotion/core/auto_tuning.py +289 -0
  8. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/core/filtering.py +1 -1
  9. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/core/smoothing.py +10 -11
  10. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/core/video_io.py +52 -8
  11. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/dropjump/analysis.py +121 -4
  12. kinemotion-0.7.0/src/kinemotion/dropjump/cli.py +416 -0
  13. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/dropjump/debug_overlay.py +11 -5
  14. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/dropjump/kinematics.py +45 -5
  15. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_adaptive_threshold.py +6 -2
  16. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_aspect_ratio.py +6 -2
  17. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_com_estimation.py +3 -1
  18. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_filtering.py +1 -3
  19. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_polyorder.py +3 -1
  20. kinemotion-0.6.4/src/kinemotion/dropjump/cli.py +0 -361
  21. {kinemotion-0.6.4 → kinemotion-0.7.0}/.github/workflows/release.yml +0 -0
  22. {kinemotion-0.6.4 → kinemotion-0.7.0}/.gitignore +0 -0
  23. {kinemotion-0.6.4 → kinemotion-0.7.0}/.tool-versions +0 -0
  24. {kinemotion-0.6.4 → kinemotion-0.7.0}/GEMINI.md +0 -0
  25. {kinemotion-0.6.4 → kinemotion-0.7.0}/LICENSE +0 -0
  26. {kinemotion-0.6.4 → kinemotion-0.7.0}/README.md +0 -0
  27. {kinemotion-0.6.4 → kinemotion-0.7.0}/docs/ERRORS_FINDINGS.md +0 -0
  28. {kinemotion-0.6.4 → kinemotion-0.7.0}/docs/FRAMERATE.md +0 -0
  29. {kinemotion-0.6.4 → kinemotion-0.7.0}/docs/IMPLEMENTATION_PLAN.md +0 -0
  30. {kinemotion-0.6.4 → kinemotion-0.7.0}/docs/IMU_METADATA_PRESERVATION.md +0 -0
  31. {kinemotion-0.6.4 → kinemotion-0.7.0}/docs/PARAMETERS.md +0 -0
  32. {kinemotion-0.6.4 → kinemotion-0.7.0}/docs/VALIDATION_PLAN.md +0 -0
  33. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/__init__.py +0 -0
  34. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/cli.py +0 -0
  35. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/core/__init__.py +0 -0
  36. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/core/pose.py +0 -0
  37. {kinemotion-0.6.4 → kinemotion-0.7.0}/src/kinemotion/dropjump/__init__.py +0 -0
  38. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/__init__.py +0 -0
  39. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_contact_detection.py +0 -0
  40. {kinemotion-0.6.4 → kinemotion-0.7.0}/tests/test_kinematics.py +0 -0
@@ -15,19 +15,20 @@ repos:
  - id: mixed-line-ending

  - repo: https://github.com/psf/black
- rev: 23.12.1
+ rev: 25.9.0
  hooks:
  - id: black

  - repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.9
+ rev: v0.14.3
  hooks:
  - id: ruff
  args: [--fix, --exit-non-zero-on-fix]

  - repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.7.1
+ rev: v1.18.2
  hooks:
  - id: mypy
  args: [--ignore-missing-imports, --no-strict-optional]
- exclude: ^tests/
+ files: ^src/
+ exclude: ^(tests/|examples/)
@@ -7,6 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

  <!-- version list -->

+ ## v0.7.0 (2025-11-01)
+
+ ### Features
+
+ - Add intelligent auto-tuning and video rotation handling
+   ([`7b35f67`](https://github.com/feniix/kinemotion/commit/7b35f6790dd8b6714f3e42389555107a043d486c))
+
+
  ## v0.6.4 (2025-10-26)

  ### Bug Fixes
@@ -240,6 +240,48 @@ self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
  - Runtime validation in `write_frame()` ensures every frame matches expected encoded dimensions
  - Raises `ValueError` if aspect ratio would be corrupted

+ ### Video Rotation Handling (core/video_io.py)
+
+ **IMPORTANT**: The tool automatically handles video rotation metadata from mobile devices. OpenCV ignores rotation metadata, so we extract and apply it manually.
+
+ #### Rotation Metadata Extraction (`core/video_io.py:65-126`)
+
+ - **Display Matrix Metadata**: iPhones and other mobile devices store rotation in `side_data_list`
+   - Common rotation values: -90° (portrait right), 90° (portrait left), 180° (upside down)
+   - OpenCV's `VideoCapture.read()` ignores this metadata (known OpenCV issue #26876)
+   - Extracted using ffprobe from the same call that extracts SAR metadata
+ - **Automatic Frame Rotation**: Applied in `read_frame()` method using `cv2.rotate()`
+   - -90° / 270° → `cv2.ROTATE_90_CLOCKWISE`
+   - 90° / -270° → `cv2.ROTATE_90_COUNTERCLOCKWISE`
+   - ±180° → `cv2.ROTATE_180`
+ - **Dimension Updates**: Width and height are swapped after 90°/-90° rotations
+
+ ```python
+ # Rotation extraction from side_data_list
+ side_data_list = stream.get("side_data_list", [])
+ for side_data in side_data_list:
+     if side_data.get("side_data_type") == "Display Matrix":
+         self.rotation = int(side_data.get("rotation", 0))
+
+ # Automatic rotation in read_frame()
+ if self.rotation == -90 or self.rotation == 270:
+     frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+ ```
+
+ **Why this matters:**
+
+ - Without rotation handling, portrait videos are processed sideways
+ - MediaPipe would detect poses on rotated frames (person lying horizontally)
+ - Output videos would have incorrect orientation
+ - Jump analysis would fail due to incorrect gravity axis
+
+ **Example:**
+
+ - iPhone video encoded as 1920x1080 (landscape) with -90° rotation metadata
+ - Should be displayed as 1080x1920 (portrait)
+ - Tool automatically rotates frames and updates dimensions
+ - Output video correctly shows 1080x1920 portrait orientation
+
  ### Sub-Frame Interpolation (contact_detection.py:113-227)

  **IMPORTANT**: The tool uses sub-frame interpolation with derivative-based velocity to achieve timing precision beyond frame boundaries.
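The rotation mapping documented in the hunk above can be exercised in isolation. Below is a minimal sketch, assuming a decoded BGR frame held in a NumPy array; `apply_rotation` is a hypothetical helper written for illustration, not a function from the package:

```python
import cv2
import numpy as np


def apply_rotation(frame: np.ndarray, rotation: int) -> np.ndarray:
    """Apply a Display Matrix rotation value (degrees) to a decoded frame."""
    rotation %= 360  # normalize: -90 -> 270, -270 -> 90, -180 -> 180
    if rotation == 270:  # metadata -90°: rotate clockwise
        return cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    if rotation == 90:  # metadata 90° (or -270°): rotate counterclockwise
        return cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
    if rotation == 180:  # metadata ±180°: flip upside down
        return cv2.rotate(frame, cv2.ROTATE_180)
    return frame


# A 1920x1080 landscape frame with -90° metadata becomes 1080x1920 portrait,
# which is why width and height must be swapped after the rotation.
frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
assert apply_rotation(frame, -90).shape[:2] == (1920, 1080)
```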
@@ -462,30 +504,129 @@ Modify `smooth_landmarks()` in `core/smoothing.py:9`:
  - `window_length`: Controls smoothing strength (must be odd, default: 5)
  - `polyorder`: Polynomial order for Savitzky-Golay filter (default: 2)

- ### Parameter Tuning
+ ### Intelligent Auto-Tuning System

- **IMPORTANT**: See `docs/PARAMETERS.md` for comprehensive guide on all CLI parameters.
+ **NEW**: The tool now features intelligent auto-tuning that eliminates the need for manual parameter adjustment!

- Quick reference for `dropjump-analyze`:
+ #### How It Works (core/auto_tuning.py)

- - **smoothing-window**: Trajectory smoothness (↑ for noisy video)
- - **velocity-threshold**: Contact sensitivity (↓ to detect brief contacts)
- - **min-contact-frames**: Temporal filter (↑ to remove false contacts)
- - **visibility-threshold**: Landmark confidence (↓ for occluded landmarks)
- - **detection-confidence**: Pose detection strictness (MediaPipe)
- - **tracking-confidence**: Tracking persistence (MediaPipe)
- - **drop-height**: Drop box height in meters for calibration (e.g., 0.40 for 40cm)
- - **use-curvature**: Enable trajectory curvature analysis (default: enabled)
+ The auto-tuning system analyzes your video and automatically selects optimal parameters:

- **Note**: Drop jump analysis always uses foot-based tracking with fixed velocity thresholds because typical drop jump videos are ~3 seconds long without a stationary baseline period. The `--use-com` and `--adaptive-threshold` options (available in `core/` modules) require longer videos (~5+ seconds) with 3 seconds of standing baseline, making them suitable for future jump types like CMJ (countermovement jump) but not drop jumps.
+ **Phase 1: Video Analysis**
+ - Extracts frame rate from video metadata
+ - Analyzes landmark visibility quality (average MediaPipe confidence scores)
+ - Measures position variance (tracking stability)
+ - Detects drop jump pattern (stationary period on platform)

- The detailed guide includes:
+ **Phase 2: Automatic Parameter Selection**

- - How each parameter works internally
- - Frame rate considerations
- - Scenario-based recommended settings
- - Debugging workflow with visual indicators
- - Parameter interaction effects
+ **FPS-based scaling** (maintains consistent temporal resolution):
+ ```python
+ velocity_threshold = 0.02 * (30 / fps)
+ # 30fps → 0.020, 60fps → 0.010, 120fps → 0.005
+
+ min_contact_frames = round(3 * (fps / 30))
+ # 30fps → 3 frames (100ms), 60fps → 6 frames (100ms)
+
+ smoothing_window = 5 if fps <= 30 else 3
+ # Higher fps → less smoothing (better temporal resolution)
+ ```
+
+ **Quality-based adjustments** (adapts to tracking quality):
+ - High visibility (>0.7): Minimal smoothing, no bilateral filter
+ - Medium visibility (0.4-0.7): Moderate smoothing, enable bilateral filter
+ - Low visibility (<0.4): Aggressive smoothing, bilateral filter, lower confidence thresholds
+
+ **Always enabled** (proven beneficial, no downsides):
+ - Outlier rejection (removes tracking glitches)
+ - Trajectory curvature analysis (sub-frame precision)
+ - Drop start auto-detection (skips stationary period)
+ - Polyorder 2 (optimal for parabolic jump motion)
+
+ #### Quality Presets
+
+ **`--quality fast`** (50% faster, good for batch processing)
+ - Velocity threshold ×1.5 (less sensitive)
+ - Reduced smoothing (-2 frames)
+ - Skips bilateral filter
+ - Lower detection confidence (0.3)
+
+ **`--quality balanced`** (default, best for most cases)
+ - FPS-adjusted parameters
+ - Adaptive smoothing based on quality
+ - All accuracy features enabled
+
+ **`--quality accurate`** (research-grade, slower)
+ - Velocity threshold ×0.5 (more sensitive)
+ - Increased smoothing (+2 frames)
+ - Always enables bilateral filter
+ - Higher detection confidence (0.6)
+
+ #### User-Facing Parameters
+
+ **Reduced from 13 → 2 required + 2 optional:**
+
+ **Required:**
+ - `--drop-height`: Box height in meters (e.g., 0.40 for 40cm) - REQUIRED for accurate calibration
+
+ **Optional:**
+ - `--output`: Debug video path
+ - `--json-output`: Metrics JSON path
+ - `--quality`: fast/balanced/accurate (default: balanced)
+ - `--verbose`: Show auto-selected parameters
+
+ **Expert overrides** (rarely needed):
+ - `--drop-start-frame`: Manual drop start frame
+ - `--smoothing-window`: Override auto-tuned smoothing
+ - `--velocity-threshold`: Override auto-tuned threshold
+ - `--min-contact-frames`: Override auto-tuned minimum
+ - `--visibility-threshold`: Override visibility threshold
+ - `--detection-confidence`: Override MediaPipe detection
+ - `--tracking-confidence`: Override MediaPipe tracking
+
+ #### Migration from Manual Parameters
+
+ **Old way** (complex, error-prone):
+ ```bash
+ # User had to know these magic numbers for 60fps video
+ uv run kinemotion dropjump-analyze video.mp4 \
+ --smoothing-window 3 \
+ --velocity-threshold 0.01 \
+ --min-contact-frames 6 \
+ --outlier-rejection \
+ --use-curvature
+ ```
+
+ **New way** (simple, automatic):
+ ```bash
+ # Just works - auto-detects 60fps and adjusts all parameters
+ uv run kinemotion dropjump-analyze video.mp4
+ ```
+
+ #### Viewing Auto-Selected Parameters
+
+ Use `--verbose` to see what parameters were automatically selected:
+
+ ```bash
+ uv run kinemotion dropjump-analyze video.mp4 --verbose
+
+ # Output shows:
+ # ============================================================
+ # AUTO-TUNED PARAMETERS
+ # ============================================================
+ # Video FPS: 59.98
+ # Tracking quality: high (avg visibility: 0.79)
+ # Quality preset: balanced
+ #
+ # Selected parameters:
+ # smoothing_window: 3
+ # velocity_threshold: 0.0100
+ # min_contact_frames: 6
+ # ...
+ # ============================================================
+ ```
+
+ **Note**: See `docs/PARAMETERS.md` for detailed explanation of what each parameter does internally (useful for expert mode overrides).
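As a quick numerical check of the FPS-scaling rules documented above, here is a standalone sketch (not code from this diff); the `max(2, ...)` floor mirrors the new `auto_tuning.py` further down, and `fps_scaled_baselines` is an illustrative name:

```python
def fps_scaled_baselines(fps: float) -> dict[str, float | int]:
    """Baseline parameters per the documented FPS-scaling formulas."""
    return {
        "velocity_threshold": 0.02 * (30.0 / fps),
        "min_contact_frames": max(2, round(3.0 * (fps / 30.0))),
        "smoothing_window": 5 if fps <= 30 else 3,
    }


for fps in (30.0, 60.0, 120.0):
    print(fps, fps_scaled_baselines(fps))
# 30.0:  velocity_threshold 0.020, min_contact_frames 3,  smoothing_window 5
# 60.0:  velocity_threshold 0.010, min_contact_frames 6,  smoothing_window 3
# 120.0: velocity_threshold 0.005, min_contact_frames 12, smoothing_window 3
```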

  ### Working with Different Video Formats
 
@@ -576,6 +717,10 @@ If mypy reports errors:

  ## CLI Usage Examples

+ **NEW: The tool now features intelligent auto-tuning!** Parameters are automatically adjusted based on video frame rate, tracking quality, and analysis preset. No manual parameter tuning required.
+
+ ### Simple Usage (Recommended)
+
  ```bash
  # Show main command help
  uv run kinemotion --help
@@ -583,35 +728,91 @@ uv run kinemotion --help
  # Show subcommand help
  uv run kinemotion dropjump-analyze --help

- # Basic analysis (JSON to stdout)
- uv run kinemotion dropjump-analyze video.mp4
+ # Basic analysis (JSON to stdout) - JUST WORKS!
+ # Drop-height is REQUIRED - specify your box height in meters
+ # Auto-detects fps, tracking quality, and selects optimal parameters
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40

  # Save metrics to file
- uv run kinemotion dropjump-analyze video.mp4 --json-output results.json
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40 --json-output results.json

  # Generate debug video
- uv run kinemotion dropjump-analyze video.mp4 --output debug.mp4
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40 --output debug.mp4

- # Drop jump with calibration (40cm box)
- uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40
+ # Complete analysis with all outputs
+ uv run kinemotion dropjump-analyze video.mp4 \
+ --drop-height 0.40 \
+ --output debug.mp4 \
+ --json-output metrics.json
+
+ # See what parameters were auto-selected
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40 --verbose
+
+ # Different box heights (examples)
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.30 # 30cm box
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.60 # 60cm box
+ ```

- # Custom parameters for noisy video
+ ### Quality Presets
+
+ ```bash
+ # Fast analysis (quick, less precise)
+ # - 50% faster processing
+ # - Good for batch processing or initial assessment
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40 --quality fast
+
+ # Balanced analysis (default)
+ # - Good accuracy/speed tradeoff
+ # - Best for most use cases
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40 --quality balanced
+
+ # Accurate analysis (research-grade, slower)
+ # - Maximum accuracy
+ # - More aggressive smoothing and filtering
+ # - Best for publication-quality data
+ uv run kinemotion dropjump-analyze video.mp4 --drop-height 0.40 --quality accurate
+ ```
+
+ ### Expert Mode (Advanced Users Only)
+
+ ```bash
+ # Override specific auto-tuned parameters
  uv run kinemotion dropjump-analyze video.mp4 \
+ --drop-height 0.40 \
+ --expert \
  --smoothing-window 7 \
- --velocity-threshold 0.01 \
- --min-contact-frames 5
+ --velocity-threshold 0.015

- # Full analysis with calibration and all outputs
+ # Manual drop start frame (if auto-detection fails)
  uv run kinemotion dropjump-analyze video.mp4 \
- --output debug.mp4 \
- --json-output metrics.json \
  --drop-height 0.40 \
- --smoothing-window 7
+ --drop-start-frame 120
+ ```

- # Regular jump (no calibration, uses corrected kinematic method)
- uv run kinemotion dropjump-analyze jump.mp4 \
- --output debug.mp4 \
- --json-output metrics.json
+ ### Auto-Tuning Examples
+
+ ```bash
+ # 30fps video - auto-selects:
+ # velocity_threshold: 0.020
+ # min_contact_frames: 3
+ # smoothing_window: 5
+ uv run kinemotion dropjump-analyze video_30fps.mp4 --drop-height 0.40
+
+ # 60fps video - auto-selects:
+ # velocity_threshold: 0.010
+ # min_contact_frames: 6
+ # smoothing_window: 3
+ uv run kinemotion dropjump-analyze video_60fps.mp4 --drop-height 0.40
+
+ # Low quality video (avg visibility < 0.4) - auto-enables:
+ # bilateral_filter: True
+ # smoothing_window: +2 adjustment
+ uv run kinemotion dropjump-analyze low_quality.mp4 --drop-height 0.40
+
+ # High quality video (avg visibility > 0.7) - optimizes:
+ # bilateral_filter: False (not needed)
+ # smoothing_window: minimal (preserve detail)
+ uv run kinemotion dropjump-analyze high_quality.mp4 --drop-height 0.40
  ```

  ## MCP Server Configuration
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kinemotion
- Version: 0.6.4
+ Version: 0.7.0
  Summary: Video-based kinematic analysis for athletic performance
  Project-URL: Homepage, https://github.com/feniix/kinemotion
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,7 +1,6 @@
  """Example of using drop-jump analysis programmatically."""

  import numpy as np
-
  from dropjump.contact_detection import (
      compute_average_foot_position,
      detect_ground_contact,
@@ -12,7 +11,7 @@ from dropjump.smoothing import smooth_landmarks
  from dropjump.video_io import VideoProcessor


- def analyze_video(video_path: str):
+ def analyze_video(video_path: str) -> dict:
      """
      Analyze a drop-jump video and return metrics.

@@ -1,6 +1,6 @@
  [project]
  name = "kinemotion"
- version = "0.6.4"
+ version = "0.7.0"
  description = "Video-based kinematic analysis for athletic performance"
  readme = "README.md"
  requires-python = ">=3.10,<3.13"
@@ -84,6 +84,8 @@ warn_redundant_casts = true
  warn_unused_ignores = true
  warn_no_return = true
  strict_equality = true
+ files = ["src"]
+ exclude = ["tests", "examples"]

  [[tool.mypy.overrides]]
  module = [
@@ -0,0 +1,289 @@
+ """Automatic parameter tuning based on video characteristics."""
+
+ from dataclasses import dataclass
+ from enum import Enum
+
+ import numpy as np
+
+
+ class QualityPreset(str, Enum):
+     """Quality presets for analysis."""
+
+     FAST = "fast"  # Quick analysis, lower precision
+     BALANCED = "balanced"  # Default: good balance of speed and accuracy
+     ACCURATE = "accurate"  # Research-grade analysis, slower
+
+
+ @dataclass
+ class VideoCharacteristics:
+     """Characteristics extracted from video analysis."""
+
+     fps: float
+     frame_count: int
+     avg_visibility: float  # Average landmark visibility (0-1)
+     position_variance: float  # Variance in foot positions
+     has_stable_period: bool  # Whether video has initial stationary period
+     tracking_quality: str  # "low", "medium", "high"
+
+
+ @dataclass
+ class AnalysisParameters:
+     """Auto-tuned parameters for drop jump analysis."""
+
+     smoothing_window: int
+     polyorder: int
+     velocity_threshold: float
+     min_contact_frames: int
+     visibility_threshold: float
+     detection_confidence: float
+     tracking_confidence: float
+     outlier_rejection: bool
+     bilateral_filter: bool
+     use_curvature: bool
+
+     def to_dict(self) -> dict:
+         """Convert to dictionary."""
+         return {
+             "smoothing_window": self.smoothing_window,
+             "polyorder": self.polyorder,
+             "velocity_threshold": self.velocity_threshold,
+             "min_contact_frames": self.min_contact_frames,
+             "visibility_threshold": self.visibility_threshold,
+             "detection_confidence": self.detection_confidence,
+             "tracking_confidence": self.tracking_confidence,
+             "outlier_rejection": self.outlier_rejection,
+             "bilateral_filter": self.bilateral_filter,
+             "use_curvature": self.use_curvature,
+         }
+
+
+ def analyze_tracking_quality(avg_visibility: float) -> str:
+     """
+     Classify tracking quality based on average landmark visibility.
+
+     Args:
+         avg_visibility: Average visibility score across all tracked landmarks
+
+     Returns:
+         Quality classification: "low", "medium", or "high"
+     """
+     if avg_visibility < 0.4:
+         return "low"
+     elif avg_visibility < 0.7:
+         return "medium"
+     else:
+         return "high"
+
+
+ def auto_tune_parameters(
+     characteristics: VideoCharacteristics,
+     quality_preset: QualityPreset = QualityPreset.BALANCED,
+ ) -> AnalysisParameters:
+     """
+     Automatically tune analysis parameters based on video characteristics.
+
+     This function implements heuristics to select optimal parameters without
+     requiring user expertise in video analysis or kinematic tracking.
+
+     Key principles:
+     1. FPS-based scaling: Higher fps needs lower velocity thresholds
+     2. Quality-based smoothing: Noisy video needs more smoothing
+     3. Always enable proven features: outlier rejection, curvature analysis
+     4. Preset modifiers: fast/balanced/accurate adjust base parameters
+
+     Args:
+         characteristics: Analyzed video characteristics
+         quality_preset: Quality vs speed tradeoff
+
+     Returns:
+         AnalysisParameters with auto-tuned values
+     """
+     fps = characteristics.fps
+     quality = characteristics.tracking_quality
+
+     # =================================================================
+     # STEP 1: FPS-based baseline parameters
+     # These scale automatically with frame rate to maintain consistent
+     # temporal resolution and sensitivity
+     # =================================================================
+
+     # Velocity threshold: Scale inversely with fps
+     # At 30fps, feet move ~2% of frame per frame when "stationary"
+     # At 60fps, feet move ~1% of frame per frame when "stationary"
+     # Formula: threshold = 0.02 * (30 / fps)
+     base_velocity_threshold = 0.02 * (30.0 / fps)
+
+     # Min contact frames: Scale with fps to maintain same time duration
+     # Goal: ~100ms minimum contact (3 frames @ 30fps, 6 frames @ 60fps)
+     # Formula: frames = round(3 * (fps / 30))
+     base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
+
+     # Smoothing window: Decrease with higher fps for better temporal resolution
+     # Lower fps (30fps): 5-frame window = 167ms
+     # Higher fps (60fps): 3-frame window = 50ms (same temporal resolution)
+     if fps <= 30:
+         base_smoothing_window = 5
+     elif fps <= 60:
+         base_smoothing_window = 3
+     else:
+         base_smoothing_window = 3  # Even at 120fps, 3 is minimum for Savitzky-Golay
+
+     # =================================================================
+     # STEP 2: Quality-based adjustments
+     # Adapt smoothing and filtering based on tracking quality
+     # =================================================================
+
+     smoothing_adjustment = 0
+     enable_bilateral = False
+
+     if quality == "low":
+         # Poor tracking quality: aggressive smoothing and filtering
+         smoothing_adjustment = +2
+         enable_bilateral = True
+     elif quality == "medium":
+         # Moderate quality: slight smoothing increase
+         smoothing_adjustment = +1
+         enable_bilateral = True
+     else:  # high quality
+         # Good tracking: preserve detail, minimal smoothing
+         smoothing_adjustment = 0
+         enable_bilateral = False
+
+     # =================================================================
+     # STEP 3: Apply quality preset modifiers
+     # User can choose speed vs accuracy tradeoff
+     # =================================================================
+
+     if quality_preset == QualityPreset.FAST:
+         # Fast: Trade accuracy for speed
+         velocity_threshold = base_velocity_threshold * 1.5  # Less sensitive
+         min_contact_frames = max(2, int(base_min_contact_frames * 0.67))
+         smoothing_window = max(3, base_smoothing_window - 2 + smoothing_adjustment)
+         bilateral_filter = False  # Skip expensive filtering
+         detection_confidence = 0.3
+         tracking_confidence = 0.3
+
+     elif quality_preset == QualityPreset.ACCURATE:
+         # Accurate: Maximize accuracy, accept slower processing
+         velocity_threshold = base_velocity_threshold * 0.5  # More sensitive
+         min_contact_frames = (
+             base_min_contact_frames  # Don't increase (would miss brief)
+         )
+         smoothing_window = min(11, base_smoothing_window + 2 + smoothing_adjustment)
+         bilateral_filter = True  # Always use for best accuracy
+         detection_confidence = 0.6
+         tracking_confidence = 0.6
+
+     else:  # QualityPreset.BALANCED (default)
+         # Balanced: Good accuracy, reasonable speed
+         velocity_threshold = base_velocity_threshold
+         min_contact_frames = base_min_contact_frames
+         smoothing_window = max(3, base_smoothing_window + smoothing_adjustment)
+         bilateral_filter = enable_bilateral
+         detection_confidence = 0.5
+         tracking_confidence = 0.5
+
+     # Ensure smoothing window is odd (required for Savitzky-Golay)
+     if smoothing_window % 2 == 0:
+         smoothing_window += 1
+
+     # =================================================================
+     # STEP 4: Set fixed optimal values
+     # These are always the same regardless of video characteristics
+     # =================================================================
+
+     # Polyorder: Always 2 (quadratic) - optimal for jump physics (parabolic motion)
+     polyorder = 2
+
+     # Visibility threshold: Standard MediaPipe threshold
+     visibility_threshold = 0.5
+
+     # Always enable proven accuracy features
+     outlier_rejection = True  # Removes tracking glitches (minimal cost)
+     use_curvature = True  # Trajectory curvature analysis (minimal cost)
+
+     return AnalysisParameters(
+         smoothing_window=smoothing_window,
+         polyorder=polyorder,
+         velocity_threshold=velocity_threshold,
+         min_contact_frames=min_contact_frames,
+         visibility_threshold=visibility_threshold,
+         detection_confidence=detection_confidence,
+         tracking_confidence=tracking_confidence,
+         outlier_rejection=outlier_rejection,
+         bilateral_filter=bilateral_filter,
+         use_curvature=use_curvature,
+     )
+
+
+ def analyze_video_sample(
+     landmarks_sequence: list[dict[str, tuple[float, float, float]] | None],
+     fps: float,
+     frame_count: int,
+ ) -> VideoCharacteristics:
+     """
+     Analyze video characteristics from a sample of frames.
+
+     This function should be called after tracking the first 30-60 frames
+     to understand video quality and characteristics.
+
+     Args:
+         landmarks_sequence: Tracked landmarks from sample frames
+         fps: Video frame rate
+         frame_count: Total number of frames in video
+
+     Returns:
+         VideoCharacteristics with analyzed properties
+     """
+     # Calculate average landmark visibility
+     visibilities = []
+     positions = []
+
+     for frame_landmarks in landmarks_sequence:
+         if frame_landmarks:
+             # Collect visibility scores from foot landmarks
+             foot_keys = [
+                 "left_ankle",
+                 "right_ankle",
+                 "left_heel",
+                 "right_heel",
+                 "left_foot_index",
+                 "right_foot_index",
+             ]
+
+             frame_vis = []
+             frame_y_positions = []
+
+             for key in foot_keys:
+                 if key in frame_landmarks:
+                     _, y, vis = frame_landmarks[key]  # x not needed for analysis
+                     frame_vis.append(vis)
+                     frame_y_positions.append(y)
+
+             if frame_vis:
+                 visibilities.append(float(np.mean(frame_vis)))
+             if frame_y_positions:
+                 positions.append(float(np.mean(frame_y_positions)))
+
+     # Compute metrics
+     avg_visibility = float(np.mean(visibilities)) if visibilities else 0.5
+     position_variance = float(np.var(positions)) if len(positions) > 1 else 0.0
+
+     # Determine tracking quality
+     tracking_quality = analyze_tracking_quality(avg_visibility)
+
+     # Check for stable period (indicates drop jump from elevated platform)
+     # Simple check: do first 30 frames have low variance?
+     has_stable_period = False
+     if len(positions) >= 30:
+         first_30_std = float(np.std(positions[:30]))
+         has_stable_period = first_30_std < 0.01  # Very stable = on platform
+
+     return VideoCharacteristics(
+         fps=fps,
+         frame_count=frame_count,
+         avg_visibility=avg_visibility,
+         position_variance=position_variance,
+         has_stable_period=has_stable_period,
+         tracking_quality=tracking_quality,
+     )
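To show how the new module's pieces fit together, here is a hedged usage sketch; the synthetic landmark tuples and the expected values in the comments are derived by hand from the code above, not captured from a real run:

```python
from kinemotion.core.auto_tuning import (
    QualityPreset,
    analyze_video_sample,
    auto_tune_parameters,
)

# Fabricated sample: 60 frames of stable, well-tracked feet as (x, y, visibility).
foot_keys = [
    "left_ankle", "right_ankle", "left_heel",
    "right_heel", "left_foot_index", "right_foot_index",
]
landmarks = [{k: (0.5, 0.8, 0.9) for k in foot_keys} for _ in range(60)]

chars = analyze_video_sample(landmarks, fps=60.0, frame_count=180)
params = auto_tune_parameters(chars, quality_preset=QualityPreset.BALANCED)

print(chars.tracking_quality)  # "high" (average visibility 0.9)
print(params.to_dict())        # velocity_threshold 0.010, min_contact_frames 6, ...
```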