kinemotion 0.34.0-py3-none-any.whl → 0.35.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinemotion/api.py +8 -5
- kinemotion/cmj/analysis.py +70 -102
- kinemotion/{core/cmj_metrics_validator.py → cmj/metrics_validator.py} +27 -123
- kinemotion/{core/cmj_validation_bounds.py → cmj/validation_bounds.py} +1 -58
- kinemotion/core/auto_tuning.py +6 -4
- kinemotion/core/cli_utils.py +26 -0
- kinemotion/core/experimental.py +103 -0
- kinemotion/core/filtering.py +7 -0
- kinemotion/core/smoothing.py +34 -0
- kinemotion/core/validation.py +198 -0
- kinemotion/dropjump/__init__.py +1 -1
- kinemotion/dropjump/analysis.py +23 -34
- kinemotion/dropjump/kinematics.py +7 -1
- kinemotion/{core/dropjump_metrics_validator.py → dropjump/metrics_validator.py} +20 -114
- kinemotion/{core/dropjump_validation_bounds.py → dropjump/validation_bounds.py} +1 -58
- {kinemotion-0.34.0.dist-info → kinemotion-0.35.1.dist-info}/METADATA +1 -1
- kinemotion-0.35.1.dist-info/RECORD +37 -0
- kinemotion-0.34.0.dist-info/RECORD +0 -35
- {kinemotion-0.34.0.dist-info → kinemotion-0.35.1.dist-info}/WHEEL +0 -0
- {kinemotion-0.34.0.dist-info → kinemotion-0.35.1.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.34.0.dist-info → kinemotion-0.35.1.dist-info}/licenses/LICENSE +0 -0
kinemotion/core/cli_utils.py
CHANGED
@@ -6,6 +6,7 @@ from typing import Any, Protocol
 import click

 from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
+from .experimental import unused
 from .pose import PoseTracker
 from .smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .video_io import VideoProcessor
@@ -22,6 +23,11 @@ class ExpertParameters(Protocol):
     visibility_threshold: float | None


+@unused(
+    reason="Not called by analysis pipeline - remnant from CLI refactoring",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def determine_initial_confidence(
     quality_preset: QualityPreset,
     expert_params: ExpertParameters,
@@ -54,6 +60,11 @@ def determine_initial_confidence(
     return initial_detection_conf, initial_tracking_conf


+@unused(
+    reason="Not called by analysis pipeline - remnant from CLI refactoring",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
     """Track pose landmarks in all video frames.

@@ -84,6 +95,11 @@ def track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list,
     return frames, landmarks_sequence


+@unused(
+    reason="Not called by analysis pipeline - remnant from CLI refactoring",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def apply_expert_param_overrides(
     params: AnalysisParameters, expert_params: ExpertParameters
 ) -> AnalysisParameters:
@@ -107,6 +123,11 @@ def apply_expert_param_overrides(
     return params


+@unused(
+    reason="Not called by analysis pipeline - remnant from CLI refactoring",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def print_auto_tuned_params(
     video: VideoProcessor,
     quality_preset: QualityPreset,
@@ -161,6 +182,11 @@ def print_auto_tuned_params(
     click.echo("=" * 60 + "\n", err=True)


+@unused(
+    reason="Not called by analysis pipeline - remnant from CLI refactoring",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def smooth_landmark_sequence(
     landmarks_sequence: list, params: AnalysisParameters
 ) -> list:
kinemotion/core/experimental.py
ADDED

@@ -0,0 +1,103 @@
+"""Decorators for marking experimental and unused features.
+
+These decorators help identify code that is implemented but not yet
+integrated into the main pipeline, making it easier to track features
+for future enhancement or cleanup.
+"""
+
+import functools
+import warnings
+from collections.abc import Callable
+from typing import TypeVar
+
+F = TypeVar("F", bound=Callable)
+
+
+def experimental(
+    reason: str, *, issue: int | None = None, since: str | None = None
+) -> Callable[[F], F]:
+    """Mark a feature as experimental/not fully integrated.
+
+    Experimental features are working implementations that haven't been
+    fully integrated into the main pipeline. They emit warnings when called
+    to alert developers they're using untested/unstable APIs.
+
+    Args:
+        reason: Why this is experimental (e.g., "API unstable", "needs validation")
+        issue: Optional GitHub issue number for tracking integration
+        since: Optional version when this became experimental
+
+    Example:
+        >>> @experimental("API may change", issue=123, since="0.34.0")
+        ... def new_feature():
+        ...     pass
+
+    Returns:
+        Decorated function that warns on use
+    """
+
+    def decorator(func: F) -> F:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):  # type: ignore
+            msg = f"{func.__name__} is experimental: {reason}"
+            if issue:
+                msg += f" (GitHub issue #{issue})"
+            if since:
+                msg += f" [since v{since}]"
+            warnings.warn(msg, FutureWarning, stacklevel=2)
+            return func(*args, **kwargs)
+
+        # Add metadata for documentation/tooling
+        wrapper.__experimental__ = True  # type: ignore[attr-defined]
+        wrapper.__experimental_reason__ = reason  # type: ignore[attr-defined]
+        if issue:
+            wrapper.__experimental_issue__ = issue  # type: ignore[attr-defined]
+        if since:
+            wrapper.__experimental_since__ = since  # type: ignore[attr-defined]
+
+        return wrapper  # type: ignore[return-value]
+
+    return decorator
+
+
+def unused(
+    reason: str, *, remove_in: str | None = None, since: str | None = None
+) -> Callable[[F], F]:
+    """Mark a feature as implemented but not integrated into pipeline.
+
+    Unused features are fully working implementations that aren't called
+    by the main analysis pipeline. Unlike @experimental, these don't emit
+    warnings when called (they work fine), but are marked for tracking.
+
+    Use this for:
+    - Features awaiting CLI integration
+    - Alternative implementations not yet exposed
+    - Code kept for backward compatibility
+
+    Args:
+        reason: Why this is unused (e.g., "awaiting CLI parameter")
+        remove_in: Optional version when this might be removed if not integrated
+        since: Optional version when this became unused
+
+    Example:
+        >>> @unused("Not called by pipeline", remove_in="1.0.0", since="0.34.0")
+        ... def calculate_adaptive_threshold():
+        ...     pass
+
+    Returns:
+        Original function with metadata attached (no runtime behavior change)
+    """
+
+    def decorator(func: F) -> F:
+        # Don't wrap - we don't want warnings when calling it
+        # Just attach metadata for documentation/cleanup tools
+        func.__unused__ = True  # type: ignore[attr-defined]
+        func.__unused_reason__ = reason  # type: ignore[attr-defined]
+        if remove_in:
+            func.__unused_remove_in__ = remove_in  # type: ignore[attr-defined]
+        if since:
+            func.__unused_since__ = since  # type: ignore[attr-defined]
+
+        return func
+
+    return decorator
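Because @unused attaches metadata without wrapping the function (no warning is emitted at call time), a documentation or cleanup script can enumerate tagged helpers by inspecting module members. The sketch below is not part of the package; list_unused_functions is a hypothetical helper, and kinemotion.core.cli_utils is used only because the diff above applies the decorator there.

import inspect

from kinemotion.core import cli_utils


def list_unused_functions(module) -> list[str]:
    """Report functions tagged with @unused in a module (hypothetical tooling)."""
    report = []
    for name, obj in inspect.getmembers(module, inspect.isfunction):
        if getattr(obj, "__unused__", False):
            reason = getattr(obj, "__unused_reason__", "")
            remove_in = getattr(obj, "__unused_remove_in__", "n/a")
            report.append(f"{name}: {reason} (planned removal: {remove_in})")
    return report


print("\n".join(list_unused_functions(cli_utils)))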
kinemotion/core/filtering.py
CHANGED
@@ -3,6 +3,8 @@
 import numpy as np
 from scipy.signal import medfilt

+from .experimental import unused
+

 def detect_outliers_ransac(
     positions: np.ndarray,
@@ -226,6 +228,11 @@ def reject_outliers(
     return cleaned_positions, outlier_mask


+@unused(
+    reason="Not called by analysis pipeline - alternative adaptive smoothing approach",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def adaptive_smooth_window(
     positions: np.ndarray,
     base_window: int = 5,
kinemotion/core/smoothing.py
CHANGED
@@ -419,3 +419,37 @@ def smooth_landmarks_advanced(
     return _smooth_landmarks_core(
         landmark_sequence, window_length, polyorder, advanced_smoother
     )
+
+
+def interpolate_threshold_crossing(
+    vel_before: float,
+    vel_after: float,
+    velocity_threshold: float,
+) -> float:
+    """
+    Find fractional offset where velocity crosses threshold between two frames.
+
+    Uses linear interpolation assuming velocity changes linearly between frames.
+
+    Args:
+        vel_before: Velocity at frame boundary N (absolute value)
+        vel_after: Velocity at frame boundary N+1 (absolute value)
+        velocity_threshold: Threshold value
+
+    Returns:
+        Fractional offset from frame N (0.0 to 1.0)
+    """
+    # Handle edge cases
+    if abs(vel_after - vel_before) < 1e-9:  # Velocity not changing
+        return 0.5
+
+    # Linear interpolation: at what fraction t does velocity equal threshold?
+    # vel(t) = vel_before + t * (vel_after - vel_before)
+    # Solve for t when vel(t) = threshold:
+    # threshold = vel_before + t * (vel_after - vel_before)
+    # t = (threshold - vel_before) / (vel_after - vel_before)
+
+    t = (velocity_threshold - vel_before) / (vel_after - vel_before)
+
+    # Clamp to [0, 1] range
+    return float(max(0.0, min(1.0, t)))
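Relocating interpolate_threshold_crossing into core.smoothing makes the sub-frame interpolation reusable outside the drop-jump module. A minimal usage sketch follows; the frame index, velocities, and frame rate are made-up values, not taken from the diff.

from kinemotion.core.smoothing import interpolate_threshold_crossing

fps = 240.0
frame_n = 120                        # last frame below the threshold (hypothetical)
vel_before, vel_after = 0.02, 0.08   # |velocity| at frames N and N+1 (hypothetical)
threshold = 0.05

# Fraction of the inter-frame interval at which |velocity| crosses the threshold
t = interpolate_threshold_crossing(vel_before, vel_after, threshold)  # 0.5 here

# Convert to a sub-frame event time in seconds
event_time = (frame_n + t) / fps
print(f"crossing at frame {frame_n + t:.2f} ({event_time:.4f} s)")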
kinemotion/core/validation.py
ADDED

@@ -0,0 +1,198 @@
+"""Shared validation infrastructure for jump metrics.
+
+Provides base classes and enums for validating Counter Movement Jump (CMJ)
+and Drop Jump metrics against physiological bounds.
+
+Contains:
+- ValidationSeverity: Severity levels for issues (ERROR, WARNING, INFO)
+- ValidationIssue: Single validation issue dataclass
+- ValidationResult: Aggregated validation results
+- AthleteProfile: Athlete performance categories
+- MetricBounds: Physiological bounds for any metric
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from enum import Enum
+
+
+class ValidationSeverity(Enum):
+    """Severity level for validation issues."""
+
+    ERROR = "ERROR"  # Metrics invalid, likely data corruption
+    WARNING = "WARNING"  # Metrics valid but unusual, needs review
+    INFO = "INFO"  # Normal variation, informational only
+
+
+@dataclass
+class ValidationIssue:
+    """Single validation issue."""
+
+    severity: ValidationSeverity
+    metric: str
+    message: str
+    value: float | None = None
+    bounds: tuple[float, float] | None = None
+
+
+class AthleteProfile(Enum):
+    """Athlete performance categories for metric bounds."""
+
+    ELDERLY = "elderly"  # 70+, deconditioned
+    UNTRAINED = "untrained"  # Sedentary, no training
+    RECREATIONAL = "recreational"  # Fitness class, moderate activity
+    TRAINED = "trained"  # Regular athlete, 3-5 years training
+    ELITE = "elite"  # Competitive athlete, college/professional level
+
+
+@dataclass
+class MetricBounds:
+    """Physiological bounds for a single metric.
+
+    Attributes:
+        absolute_min: Absolute minimum value (error threshold)
+        practical_min: Practical minimum for weakest athletes
+        recreational_min: Minimum for recreational athletes
+        recreational_max: Maximum for recreational athletes
+        elite_min: Minimum for elite athletes
+        elite_max: Maximum for elite athletes
+        absolute_max: Absolute maximum value (error threshold)
+        unit: Unit of measurement (e.g., "m", "s", "m/s", "degrees")
+    """
+
+    absolute_min: float
+    practical_min: float
+    recreational_min: float
+    recreational_max: float
+    elite_min: float
+    elite_max: float
+    absolute_max: float
+    unit: str
+
+    def contains(self, value: float, profile: AthleteProfile) -> bool:
+        """Check if value is within bounds for athlete profile."""
+        if profile == AthleteProfile.ELDERLY:
+            return self.practical_min <= value <= self.recreational_max
+        elif profile == AthleteProfile.UNTRAINED:
+            return self.practical_min <= value <= self.recreational_max
+        elif profile == AthleteProfile.RECREATIONAL:
+            return self.recreational_min <= value <= self.recreational_max
+        elif profile == AthleteProfile.TRAINED:
+            # Trained athletes: midpoint between recreational and elite
+            trained_min = (self.recreational_min + self.elite_min) / 2
+            trained_max = (self.recreational_max + self.elite_max) / 2
+            return trained_min <= value <= trained_max
+        elif profile == AthleteProfile.ELITE:
+            return self.elite_min <= value <= self.elite_max
+        return False
+
+    def is_physically_possible(self, value: float) -> bool:
+        """Check if value is within absolute physiological limits."""
+        return self.absolute_min <= value <= self.absolute_max
+
+
+@dataclass
+class ValidationResult:
+    """Base validation result for jump metrics."""
+
+    issues: list[ValidationIssue] = field(default_factory=list)
+    status: str = "PASS"  # "PASS", "PASS_WITH_WARNINGS", "FAIL"
+    athlete_profile: AthleteProfile | None = None
+
+    def add_error(
+        self,
+        metric: str,
+        message: str,
+        value: float | None = None,
+        bounds: tuple[float, float] | None = None,
+    ) -> None:
+        """Add error-level issue."""
+        self.issues.append(
+            ValidationIssue(
+                severity=ValidationSeverity.ERROR,
+                metric=metric,
+                message=message,
+                value=value,
+                bounds=bounds,
+            )
+        )
+
+    def add_warning(
+        self,
+        metric: str,
+        message: str,
+        value: float | None = None,
+        bounds: tuple[float, float] | None = None,
+    ) -> None:
+        """Add warning-level issue."""
+        self.issues.append(
+            ValidationIssue(
+                severity=ValidationSeverity.WARNING,
+                metric=metric,
+                message=message,
+                value=value,
+                bounds=bounds,
+            )
+        )
+
+    def add_info(
+        self,
+        metric: str,
+        message: str,
+        value: float | None = None,
+    ) -> None:
+        """Add info-level issue."""
+        self.issues.append(
+            ValidationIssue(
+                severity=ValidationSeverity.INFO,
+                metric=metric,
+                message=message,
+                value=value,
+            )
+        )
+
+    def finalize_status(self) -> None:
+        """Determine final pass/fail status based on issues."""
+        has_errors = any(
+            issue.severity == ValidationSeverity.ERROR for issue in self.issues
+        )
+        has_warnings = any(
+            issue.severity == ValidationSeverity.WARNING for issue in self.issues
+        )
+
+        if has_errors:
+            self.status = "FAIL"
+        elif has_warnings:
+            self.status = "PASS_WITH_WARNINGS"
+        else:
+            self.status = "PASS"
+
+    @abstractmethod
+    def to_dict(self) -> dict:
+        """Convert validation result to JSON-serializable dictionary."""
+        pass
+
+
+class MetricsValidator(ABC):
+    """Base validator for jump metrics."""
+
+    def __init__(self, assumed_profile: AthleteProfile | None = None):
+        """Initialize validator.
+
+        Args:
+            assumed_profile: If provided, validate against this specific profile.
+                Otherwise, estimate from metrics.
+        """
+        self.assumed_profile = assumed_profile
+
+    @abstractmethod
+    def validate(self, metrics: dict) -> ValidationResult:
+        """Validate metrics comprehensively.
+
+        Args:
+            metrics: Dictionary with metric values
+
+        Returns:
+            ValidationResult with all issues and status
+        """
+        pass
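The new module defines only the shared scaffolding; the concrete CMJ and drop-jump validators and their physiological bounds live in the relocated cmj/ and dropjump/ modules listed above. The sketch below shows how a subclass could plug into this base API; the bound values, metric name, and class names are illustrative, not the package's real ones.

from dataclasses import dataclass

from kinemotion.core.validation import (
    AthleteProfile,
    MetricBounds,
    MetricsValidator,
    ValidationResult,
)

# Illustrative jump-height bounds in metres; the shipped bounds differ.
JUMP_HEIGHT_BOUNDS = MetricBounds(
    absolute_min=0.02, practical_min=0.05,
    recreational_min=0.15, recreational_max=0.45,
    elite_min=0.40, elite_max=0.80,
    absolute_max=1.20, unit="m",
)


@dataclass
class SimpleResult(ValidationResult):
    def to_dict(self) -> dict:
        return {"status": self.status, "issues": [i.message for i in self.issues]}


class SimpleJumpValidator(MetricsValidator):
    """Toy validator checking a single metric against physiological bounds."""

    def validate(self, metrics: dict) -> ValidationResult:
        result = SimpleResult(athlete_profile=self.assumed_profile)
        height = metrics.get("jump_height")
        if height is None:
            result.add_error("jump_height", "missing value")
        elif not JUMP_HEIGHT_BOUNDS.is_physically_possible(height):
            result.add_error("jump_height", "outside physiological limits", value=height)
        elif self.assumed_profile and not JUMP_HEIGHT_BOUNDS.contains(height, self.assumed_profile):
            result.add_warning("jump_height", "unusual for assumed profile", value=height)
        result.finalize_status()
        return result


print(SimpleJumpValidator(AthleteProfile.RECREATIONAL).validate({"jump_height": 0.52}).to_dict())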
kinemotion/dropjump/__init__.py
CHANGED
@@ -1,12 +1,12 @@
 """Drop jump analysis module."""

+from ..core.smoothing import interpolate_threshold_crossing
 from .analysis import (
     ContactState,
     calculate_adaptive_threshold,
     compute_average_foot_position,
     detect_ground_contact,
     find_interpolated_phase_transitions_with_curvature,
-    interpolate_threshold_crossing,
     refine_transition_with_curvature,
 )
 from .debug_overlay import DebugOverlayRenderer
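Re-exporting the helper from core.smoothing keeps the package-level import path stable for downstream code; a quick sanity check of that assumption:

from kinemotion.core.smoothing import interpolate_threshold_crossing as core_impl
from kinemotion.dropjump import interpolate_threshold_crossing

# Both names should resolve to the same function object after the move.
assert interpolate_threshold_crossing is core_impl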
kinemotion/dropjump/analysis.py
CHANGED
@@ -4,9 +4,11 @@ from enum import Enum

 import numpy as np

+from ..core.experimental import unused
 from ..core.smoothing import (
     compute_acceleration_from_derivative,
     compute_velocity_from_derivative,
+    interpolate_threshold_crossing,
 )


@@ -18,6 +20,11 @@ class ContactState(Enum):
     UNKNOWN = "unknown"


+@unused(
+    reason="Not called by analysis pipeline - awaiting CLI integration",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def calculate_adaptive_threshold(
     positions: np.ndarray,
     fps: float,
@@ -29,6 +36,18 @@ def calculate_adaptive_threshold(
     """
     Calculate adaptive velocity threshold based on baseline motion characteristics.

+    .. warning::
+        **Status: Implemented but Not Integrated**
+
+        This function is fully implemented and tested but not called by the
+        analysis pipeline. See ``docs/development/errors-findings.md`` for details.
+
+        **To integrate**: Add CLI parameter ``--use-adaptive-threshold`` and
+        call this function before contact detection.
+
+        **Roadmap**: Planned for Phase 2 if users report issues with varying
+        video conditions.
+
     Analyzes the first few seconds of video (assumed to be relatively stationary,
     e.g., athlete standing on box) to determine the noise floor, then sets threshold
     as a multiple of this baseline noise.
@@ -405,40 +424,6 @@ def find_contact_phases(
     return phases


-def interpolate_threshold_crossing(
-    vel_before: float,
-    vel_after: float,
-    velocity_threshold: float,
-) -> float:
-    """
-    Find fractional offset where velocity crosses threshold between two frames.
-
-    Uses linear interpolation assuming velocity changes linearly between frames.
-
-    Args:
-        vel_before: Velocity at frame boundary N (absolute value)
-        vel_after: Velocity at frame boundary N+1 (absolute value)
-        velocity_threshold: Threshold value
-
-    Returns:
-        Fractional offset from frame N (0.0 to 1.0)
-    """
-    # Handle edge cases
-    if abs(vel_after - vel_before) < 1e-9:  # Velocity not changing
-        return 0.5
-
-    # Linear interpolation: at what fraction t does velocity equal threshold?
-    # vel(t) = vel_before + t * (vel_after - vel_before)
-    # Solve for t when vel(t) = threshold:
-    # threshold = vel_before + t * (vel_after - vel_before)
-    # t = (threshold - vel_before) / (vel_after - vel_before)
-
-    t = (velocity_threshold - vel_before) / (vel_after - vel_before)
-
-    # Clamp to [0, 1] range
-    return float(max(0.0, min(1.0, t)))
-
-
 def _interpolate_phase_start(
     start_idx: int,
     state: ContactState,
@@ -831,6 +816,10 @@ def _calculate_average_visibility(
     return float(np.mean(foot_vis)) if foot_vis else 0.0


+@unused(
+    reason="Alternative implementation not called by pipeline",
+    since="0.34.0",
+)
 def extract_foot_positions_and_visibilities(
     smoothed_landmarks: list[dict[str, tuple[float, float, float]] | None],
 ) -> tuple[np.ndarray, np.ndarray]:
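For context, the approach that calculate_adaptive_threshold's docstring describes (estimate the noise floor from the assumed-stationary opening seconds, then scale it) can be sketched in a few lines. This is a conceptual illustration only, not the package's implementation; the window length, multiplier, and use of the median are assumptions.

import numpy as np


def adaptive_threshold_sketch(
    positions: np.ndarray,
    fps: float,
    baseline_duration_s: float = 2.0,  # assumed quiet-period length
    noise_multiplier: float = 3.0,     # assumed scaling factor
) -> float:
    """Estimate a velocity threshold as a multiple of the baseline noise floor."""
    n_baseline = max(2, int(baseline_duration_s * fps))
    baseline = positions[:n_baseline]
    # Frame-to-frame speed during the quiet period (athlete standing on the box)
    baseline_speed = np.abs(np.diff(baseline)) * fps
    noise_floor = float(np.median(baseline_speed))
    return noise_multiplier * noise_floor


rng = np.random.default_rng(0)
still = 0.60 + rng.normal(0.0, 0.001, 240)  # 1 s of near-stationary foot height at 240 fps
print(adaptive_threshold_sketch(still, fps=240.0))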
kinemotion/dropjump/kinematics.py
CHANGED

@@ -57,6 +57,7 @@ class DropJumpMetrics:
         self.jump_height: float | None = None
         self.jump_height_kinematic: float | None = None  # From flight time
         self.jump_height_trajectory: float | None = None  # From position tracking
+        self.drop_start_frame: int | None = None  # Frame when athlete leaves box
         self.contact_start_frame: int | None = None
         self.contact_end_frame: int | None = None
         self.flight_start_frame: int | None = None
@@ -164,7 +165,7 @@ def _determine_drop_start_frame(
         foot_y_positions,
         fps,
         min_stationary_duration=0.5,
-        position_change_threshold=0.005
+        position_change_threshold=0.01,  # Improved from 0.005 for better accuracy
         smoothing_window=smoothing_window,
     )
     return drop_start_frame
@@ -412,6 +413,11 @@ def calculate_drop_jump_metrics(
         drop_start_frame, foot_y_positions, fps, smoothing_window
     )

+    # Store drop start frame in metrics
+    metrics.drop_start_frame = (
+        drop_start_frame_value if drop_start_frame_value > 0 else None
+    )
+
     # Find contact phases
     phases = find_contact_phases(contact_states)
     interpolated_phases = find_interpolated_phase_transitions_with_curvature(