kinemotion 0.33.2 → 0.35.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of kinemotion might be problematic; see the registry's advisory for more details.

@@ -0,0 +1,103 @@
+"""Decorators for marking experimental and unused features.
+
+These decorators help identify code that is implemented but not yet
+integrated into the main pipeline, making it easier to track features
+for future enhancement or cleanup.
+"""
+
+import functools
+import warnings
+from collections.abc import Callable
+from typing import TypeVar
+
+F = TypeVar("F", bound=Callable)
+
+
+def experimental(
+    reason: str, *, issue: int | None = None, since: str | None = None
+) -> Callable[[F], F]:
+    """Mark a feature as experimental/not fully integrated.
+
+    Experimental features are working implementations that haven't been
+    fully integrated into the main pipeline. They emit warnings when called
+    to alert developers they're using untested/unstable APIs.
+
+    Args:
+        reason: Why this is experimental (e.g., "API unstable", "needs validation")
+        issue: Optional GitHub issue number for tracking integration
+        since: Optional version when this became experimental
+
+    Example:
+        >>> @experimental("API may change", issue=123, since="0.34.0")
+        ... def new_feature():
+        ...     pass
+
+    Returns:
+        Decorated function that warns on use
+    """
+
+    def decorator(func: F) -> F:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):  # type: ignore
+            msg = f"{func.__name__} is experimental: {reason}"
+            if issue:
+                msg += f" (GitHub issue #{issue})"
+            if since:
+                msg += f" [since v{since}]"
+            warnings.warn(msg, FutureWarning, stacklevel=2)
+            return func(*args, **kwargs)
+
+        # Add metadata for documentation/tooling
+        wrapper.__experimental__ = True  # type: ignore[attr-defined]
+        wrapper.__experimental_reason__ = reason  # type: ignore[attr-defined]
+        if issue:
+            wrapper.__experimental_issue__ = issue  # type: ignore[attr-defined]
+        if since:
+            wrapper.__experimental_since__ = since  # type: ignore[attr-defined]
+
+        return wrapper  # type: ignore[return-value]
+
+    return decorator
+
+
+def unused(
+    reason: str, *, remove_in: str | None = None, since: str | None = None
+) -> Callable[[F], F]:
+    """Mark a feature as implemented but not integrated into the pipeline.
+
+    Unused features are fully working implementations that aren't called
+    by the main analysis pipeline. Unlike @experimental, these don't emit
+    warnings when called (they work fine), but are marked for tracking.
+
+    Use this for:
+    - Features awaiting CLI integration
+    - Alternative implementations not yet exposed
+    - Code kept for backward compatibility
+
+    Args:
+        reason: Why this is unused (e.g., "awaiting CLI parameter")
+        remove_in: Optional version when this might be removed if not integrated
+        since: Optional version when this became unused
+
+    Example:
+        >>> @unused("Not called by pipeline", remove_in="1.0.0", since="0.34.0")
+        ... def calculate_adaptive_threshold():
+        ...     pass
+
+    Returns:
+        Original function with metadata attached (no runtime behavior change)
+    """

+    def decorator(func: F) -> F:
+        # Don't wrap - we don't want warnings when calling it
+        # Just attach metadata for documentation/cleanup tools
+        func.__unused__ = True  # type: ignore[attr-defined]
+        func.__unused_reason__ = reason  # type: ignore[attr-defined]
+        if remove_in:
+            func.__unused_remove_in__ = remove_in  # type: ignore[attr-defined]
+        if since:
+            func.__unused_since__ = since  # type: ignore[attr-defined]
+
+        return func
+
+    return decorator
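For orientation, here is a minimal usage sketch of the two decorators added above. It assumes the module installs as kinemotion.core.experimental (consistent with the relative imports in the later hunks); the decorated functions themselves are hypothetical placeholders. Calling an @experimental function emits a FutureWarning, while an @unused function runs unchanged but carries metadata that documentation or cleanup tooling can inspect.

import warnings

from kinemotion.core.experimental import experimental, unused


@experimental("API may change", issue=123, since="0.34.0")
def preview_feature() -> str:
    # Hypothetical function, for illustration only.
    return "ok"


@unused("awaiting CLI parameter", remove_in="1.0.0", since="0.34.0")
def dormant_feature() -> str:
    # Hypothetical function, for illustration only.
    return "ok"


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    preview_feature()   # emits FutureWarning("preview_feature is experimental: ...")
    dormant_feature()   # no warning; behavior is unchanged

print(len(caught))                           # 1
print(dormant_feature.__unused__)            # True
print(dormant_feature.__unused_remove_in__)  # 1.0.0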
@@ -3,6 +3,8 @@
 import numpy as np
 from scipy.signal import medfilt
 
+from .experimental import unused
+
 
 def detect_outliers_ransac(
     positions: np.ndarray,
@@ -226,6 +228,11 @@ def reject_outliers(
     return cleaned_positions, outlier_mask
 
 
+@unused(
+    reason="Not called by analysis pipeline - alternative adaptive smoothing approach",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def adaptive_smooth_window(
     positions: np.ndarray,
     base_window: int = 5,
@@ -419,3 +419,37 @@ def smooth_landmarks_advanced(
     return _smooth_landmarks_core(
         landmark_sequence, window_length, polyorder, advanced_smoother
     )
+
+
+def interpolate_threshold_crossing(
+    vel_before: float,
+    vel_after: float,
+    velocity_threshold: float,
+) -> float:
+    """
+    Find fractional offset where velocity crosses threshold between two frames.
+
+    Uses linear interpolation assuming velocity changes linearly between frames.
+
+    Args:
+        vel_before: Velocity at frame boundary N (absolute value)
+        vel_after: Velocity at frame boundary N+1 (absolute value)
+        velocity_threshold: Threshold value
+
+    Returns:
+        Fractional offset from frame N (0.0 to 1.0)
+    """
+    # Handle edge cases
+    if abs(vel_after - vel_before) < 1e-9:  # Velocity not changing
+        return 0.5
+
+    # Linear interpolation: at what fraction t does velocity equal threshold?
+    # vel(t) = vel_before + t * (vel_after - vel_before)
+    # Solve for t when vel(t) = threshold:
+    # threshold = vel_before + t * (vel_after - vel_before)
+    # t = (threshold - vel_before) / (vel_after - vel_before)
+
+    t = (velocity_threshold - vel_before) / (vel_after - vel_before)
+
+    # Clamp to [0, 1] range
+    return float(max(0.0, min(1.0, t)))
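As a quick worked example of the interpolation above (sample values, not real data): with vel_before = 0.02, vel_after = 0.10 and a threshold of 0.05, t = (0.05 - 0.02) / (0.10 - 0.02) = 0.375, so the crossing sits about 37.5% of the way from frame N to frame N+1.

from kinemotion.core.smoothing import interpolate_threshold_crossing

# Velocity rises through the threshold between two frames (arbitrary sample values).
t = interpolate_threshold_crossing(vel_before=0.02, vel_after=0.10, velocity_threshold=0.05)
print(t)  # 0.375

# Degenerate case: velocity is essentially flat, so the midpoint 0.5 is returned.
print(interpolate_threshold_crossing(0.05, 0.05, 0.05))  # 0.5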
@@ -0,0 +1,198 @@
+"""Shared validation infrastructure for jump metrics.
+
+Provides base classes and enums for validating Counter Movement Jump (CMJ)
+and Drop Jump metrics against physiological bounds.
+
+Contains:
+- ValidationSeverity: Severity levels for issues (ERROR, WARNING, INFO)
+- ValidationIssue: Single validation issue dataclass
+- ValidationResult: Aggregated validation results
+- AthleteProfile: Athlete performance categories
+- MetricBounds: Physiological bounds for any metric
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from enum import Enum
+
+
+class ValidationSeverity(Enum):
+    """Severity level for validation issues."""
+
+    ERROR = "ERROR"  # Metrics invalid, likely data corruption
+    WARNING = "WARNING"  # Metrics valid but unusual, needs review
+    INFO = "INFO"  # Normal variation, informational only
+
+
+@dataclass
+class ValidationIssue:
+    """Single validation issue."""
+
+    severity: ValidationSeverity
+    metric: str
+    message: str
+    value: float | None = None
+    bounds: tuple[float, float] | None = None
+
+
+class AthleteProfile(Enum):
+    """Athlete performance categories for metric bounds."""
+
+    ELDERLY = "elderly"  # 70+, deconditioned
+    UNTRAINED = "untrained"  # Sedentary, no training
+    RECREATIONAL = "recreational"  # Fitness class, moderate activity
+    TRAINED = "trained"  # Regular athlete, 3-5 years training
+    ELITE = "elite"  # Competitive athlete, college/professional level
+
+
+@dataclass
+class MetricBounds:
+    """Physiological bounds for a single metric.
+
+    Attributes:
+        absolute_min: Absolute minimum value (error threshold)
+        practical_min: Practical minimum for weakest athletes
+        recreational_min: Minimum for recreational athletes
+        recreational_max: Maximum for recreational athletes
+        elite_min: Minimum for elite athletes
+        elite_max: Maximum for elite athletes
+        absolute_max: Absolute maximum value (error threshold)
+        unit: Unit of measurement (e.g., "m", "s", "m/s", "degrees")
+    """
+
+    absolute_min: float
+    practical_min: float
+    recreational_min: float
+    recreational_max: float
+    elite_min: float
+    elite_max: float
+    absolute_max: float
+    unit: str
+
+    def contains(self, value: float, profile: AthleteProfile) -> bool:
+        """Check if value is within bounds for athlete profile."""
+        if profile == AthleteProfile.ELDERLY:
+            return self.practical_min <= value <= self.recreational_max
+        elif profile == AthleteProfile.UNTRAINED:
+            return self.practical_min <= value <= self.recreational_max
+        elif profile == AthleteProfile.RECREATIONAL:
+            return self.recreational_min <= value <= self.recreational_max
+        elif profile == AthleteProfile.TRAINED:
+            # Trained athletes: midpoint between recreational and elite
+            trained_min = (self.recreational_min + self.elite_min) / 2
+            trained_max = (self.recreational_max + self.elite_max) / 2
+            return trained_min <= value <= trained_max
+        elif profile == AthleteProfile.ELITE:
+            return self.elite_min <= value <= self.elite_max
+        return False
+
+    def is_physically_possible(self, value: float) -> bool:
+        """Check if value is within absolute physiological limits."""
+        return self.absolute_min <= value <= self.absolute_max
+
+
+@dataclass
+class ValidationResult:
+    """Base validation result for jump metrics."""
+
+    issues: list[ValidationIssue] = field(default_factory=list)
+    status: str = "PASS"  # "PASS", "PASS_WITH_WARNINGS", "FAIL"
+    athlete_profile: AthleteProfile | None = None
+
+    def add_error(
+        self,
+        metric: str,
+        message: str,
+        value: float | None = None,
+        bounds: tuple[float, float] | None = None,
+    ) -> None:
+        """Add error-level issue."""
+        self.issues.append(
+            ValidationIssue(
+                severity=ValidationSeverity.ERROR,
+                metric=metric,
+                message=message,
+                value=value,
+                bounds=bounds,
+            )
+        )
+
+    def add_warning(
+        self,
+        metric: str,
+        message: str,
+        value: float | None = None,
+        bounds: tuple[float, float] | None = None,
+    ) -> None:
+        """Add warning-level issue."""
+        self.issues.append(
+            ValidationIssue(
+                severity=ValidationSeverity.WARNING,
+                metric=metric,
+                message=message,
+                value=value,
+                bounds=bounds,
+            )
+        )
+
+    def add_info(
+        self,
+        metric: str,
+        message: str,
+        value: float | None = None,
+    ) -> None:
+        """Add info-level issue."""
+        self.issues.append(
+            ValidationIssue(
+                severity=ValidationSeverity.INFO,
+                metric=metric,
+                message=message,
+                value=value,
+            )
+        )
+
+    def finalize_status(self) -> None:
+        """Determine final pass/fail status based on issues."""
+        has_errors = any(
+            issue.severity == ValidationSeverity.ERROR for issue in self.issues
+        )
+        has_warnings = any(
+            issue.severity == ValidationSeverity.WARNING for issue in self.issues
+        )
+
+        if has_errors:
+            self.status = "FAIL"
+        elif has_warnings:
+            self.status = "PASS_WITH_WARNINGS"
+        else:
+            self.status = "PASS"
+
+    @abstractmethod
+    def to_dict(self) -> dict:
+        """Convert validation result to JSON-serializable dictionary."""
+        pass
+
+
+class MetricsValidator(ABC):
+    """Base validator for jump metrics."""
+
+    def __init__(self, assumed_profile: AthleteProfile | None = None):
+        """Initialize validator.
+
+        Args:
+            assumed_profile: If provided, validate against this specific profile.
+                Otherwise, estimate from metrics.
+        """
+        self.assumed_profile = assumed_profile
+
+    @abstractmethod
+    def validate(self, metrics: dict) -> ValidationResult:
+        """Validate metrics comprehensively.
+
+        Args:
+            metrics: Dictionary with metric values
+
+        Returns:
+            ValidationResult with all issues and status
+        """
+        pass
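To show how the new base classes fit together, here is a minimal sketch. The numeric bounds below are made-up placeholders, not values shipped in the package, and concrete code would use a subclass such as the DropJumpValidationResult introduced later in this diff (ValidationResult.to_dict is abstract).

from kinemotion.core.validation import AthleteProfile, MetricBounds

# Hypothetical bounds for jump height in metres (placeholder numbers).
jump_height = MetricBounds(
    absolute_min=0.02,
    practical_min=0.05,
    recreational_min=0.15,
    recreational_max=0.45,
    elite_min=0.40,
    elite_max=0.80,
    absolute_max=1.20,
    unit="m",
)

print(jump_height.is_physically_possible(0.35))                 # True: within absolute limits
print(jump_height.contains(0.35, AthleteProfile.RECREATIONAL))  # True: 0.15 <= 0.35 <= 0.45
print(jump_height.contains(0.35, AthleteProfile.ELITE))         # False: below elite_min of 0.40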
@@ -1,12 +1,12 @@
 """Drop jump analysis module."""
 
+from ..core.smoothing import interpolate_threshold_crossing
 from .analysis import (
     ContactState,
     calculate_adaptive_threshold,
     compute_average_foot_position,
     detect_ground_contact,
     find_interpolated_phase_transitions_with_curvature,
-    interpolate_threshold_crossing,
     refine_transition_with_curvature,
 )
 from .debug_overlay import DebugOverlayRenderer
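In practical terms, interpolate_threshold_crossing has moved from the drop jump analysis module to the shared smoothing module, and the package __init__ above re-exports it from the new location, so existing imports from kinemotion.dropjump should keep resolving (assuming no other re-export changes):

# Canonical location after the move:
from kinemotion.core.smoothing import interpolate_threshold_crossing

# Equivalent import via the drop jump package's re-export shown above:
# from kinemotion.dropjump import interpolate_threshold_crossing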
@@ -4,9 +4,11 @@ from enum import Enum
 
 import numpy as np
 
+from ..core.experimental import unused
 from ..core.smoothing import (
     compute_acceleration_from_derivative,
     compute_velocity_from_derivative,
+    interpolate_threshold_crossing,
 )
 
 
@@ -18,6 +20,11 @@ class ContactState(Enum):
     UNKNOWN = "unknown"
 
 
+@unused(
+    reason="Not called by analysis pipeline - awaiting CLI integration",
+    remove_in="1.0.0",
+    since="0.34.0",
+)
 def calculate_adaptive_threshold(
     positions: np.ndarray,
     fps: float,
@@ -29,6 +36,18 @@ def calculate_adaptive_threshold(
     """
     Calculate adaptive velocity threshold based on baseline motion characteristics.
 
+    .. warning::
+        **Status: Implemented but Not Integrated**
+
+        This function is fully implemented and tested but not called by the
+        analysis pipeline. See ``docs/development/errors-findings.md`` for details.
+
+        **To integrate**: Add CLI parameter ``--use-adaptive-threshold`` and
+        call this function before contact detection.
+
+        **Roadmap**: Planned for Phase 2 if users report issues with varying
+        video conditions.
+
     Analyzes the first few seconds of video (assumed to be relatively stationary,
     e.g., athlete standing on box) to determine the noise floor, then sets threshold
     as a multiple of this baseline noise.
@@ -405,40 +424,6 @@ def find_contact_phases(
     return phases
 
 
-def interpolate_threshold_crossing(
-    vel_before: float,
-    vel_after: float,
-    velocity_threshold: float,
-) -> float:
-    """
-    Find fractional offset where velocity crosses threshold between two frames.
-
-    Uses linear interpolation assuming velocity changes linearly between frames.
-
-    Args:
-        vel_before: Velocity at frame boundary N (absolute value)
-        vel_after: Velocity at frame boundary N+1 (absolute value)
-        velocity_threshold: Threshold value
-
-    Returns:
-        Fractional offset from frame N (0.0 to 1.0)
-    """
-    # Handle edge cases
-    if abs(vel_after - vel_before) < 1e-9:  # Velocity not changing
-        return 0.5
-
-    # Linear interpolation: at what fraction t does velocity equal threshold?
-    # vel(t) = vel_before + t * (vel_after - vel_before)
-    # Solve for t when vel(t) = threshold:
-    # threshold = vel_before + t * (vel_after - vel_before)
-    # t = (threshold - vel_before) / (vel_after - vel_before)
-
-    t = (velocity_threshold - vel_before) / (vel_after - vel_before)
-
-    # Clamp to [0, 1] range
-    return float(max(0.0, min(1.0, t)))
-
-
 def _interpolate_phase_start(
     start_idx: int,
     state: ContactState,
@@ -831,6 +816,10 @@ def _calculate_average_visibility(
     return float(np.mean(foot_vis)) if foot_vis else 0.0
 
 
+@unused(
+    reason="Alternative implementation not called by pipeline",
+    since="0.34.0",
+)
 def extract_foot_positions_and_visibilities(
     smoothed_landmarks: list[dict[str, tuple[float, float, float]] | None],
 ) -> tuple[np.ndarray, np.ndarray]:
@@ -7,114 +7,26 @@ Provides severity levels (ERROR, WARNING, INFO) for different categories
 of metric issues.
 """
 
-from dataclasses import dataclass, field
-from enum import Enum
+from dataclasses import dataclass
 
-from kinemotion.core.dropjump_validation_bounds import (
-    AthleteProfile,
+from kinemotion.core.validation import (
+    MetricsValidator,
+    ValidationResult,
+)
+from kinemotion.dropjump.validation_bounds import (
     DropJumpBounds,
     estimate_athlete_profile,
 )
 
 
-class ValidationSeverity(Enum):
-    """Severity level for validation issues."""
-
-    ERROR = "ERROR"  # Metrics invalid, likely data corruption
-    WARNING = "WARNING"  # Metrics valid but unusual, needs review
-    INFO = "INFO"  # Normal variation, informational only
-
-
 @dataclass
-class ValidationIssue:
-    """Single validation issue."""
+class DropJumpValidationResult(ValidationResult):
+    """Drop jump-specific validation result."""
 
-    severity: ValidationSeverity
-    metric: str
-    message: str
-    value: float | None = None
-    bounds: tuple[float, float] | None = None
-
-
-@dataclass
-class ValidationResult:
-    """Complete validation result for drop jump metrics."""
-
-    issues: list[ValidationIssue] = field(default_factory=list)
-    status: str = "PASS"  # "PASS", "PASS_WITH_WARNINGS", "FAIL"
-    athlete_profile: AthleteProfile | None = None
     rsi: float | None = None
     contact_flight_ratio: float | None = None
     height_kinematic_trajectory_consistency: float | None = None  # % error
 
-    def add_error(
-        self,
-        metric: str,
-        message: str,
-        value: float | None = None,
-        bounds: tuple[float, float] | None = None,
-    ) -> None:
-        """Add error-level issue."""
-        self.issues.append(
-            ValidationIssue(
-                severity=ValidationSeverity.ERROR,
-                metric=metric,
-                message=message,
-                value=value,
-                bounds=bounds,
-            )
-        )
-
-    def add_warning(
-        self,
-        metric: str,
-        message: str,
-        value: float | None = None,
-        bounds: tuple[float, float] | None = None,
-    ) -> None:
-        """Add warning-level issue."""
-        self.issues.append(
-            ValidationIssue(
-                severity=ValidationSeverity.WARNING,
-                metric=metric,
-                message=message,
-                value=value,
-                bounds=bounds,
-            )
-        )
-
-    def add_info(
-        self,
-        metric: str,
-        message: str,
-        value: float | None = None,
-    ) -> None:
-        """Add info-level issue."""
-        self.issues.append(
-            ValidationIssue(
-                severity=ValidationSeverity.INFO,
-                metric=metric,
-                message=message,
-                value=value,
-            )
-        )
-
-    def finalize_status(self) -> None:
-        """Determine final pass/fail status based on issues."""
-        has_errors = any(
-            issue.severity == ValidationSeverity.ERROR for issue in self.issues
-        )
-        has_warnings = any(
-            issue.severity == ValidationSeverity.WARNING for issue in self.issues
-        )
-
-        if has_errors:
-            self.status = "FAIL"
-        elif has_warnings:
-            self.status = "PASS_WITH_WARNINGS"
-        else:
-            self.status = "PASS"
-
     def to_dict(self) -> dict:
         """Convert validation result to JSON-serializable dictionary.
 
@@ -144,28 +56,19 @@ class ValidationResult:
         }
 
 
-class DropJumpMetricsValidator:
+class DropJumpMetricsValidator(MetricsValidator):
     """Comprehensive drop jump metrics validator."""
 
-    def __init__(self, assumed_profile: AthleteProfile | None = None):
-        """Initialize validator.
-
-        Args:
-            assumed_profile: If provided, validate against this specific profile.
-                Otherwise, estimate from metrics.
-        """
-        self.assumed_profile = assumed_profile
-
-    def validate(self, metrics: dict) -> ValidationResult:
+    def validate(self, metrics: dict) -> DropJumpValidationResult:
         """Validate drop jump metrics comprehensively.
 
         Args:
             metrics: Dictionary with drop jump metric values
 
         Returns:
-            ValidationResult with all issues and status
+            DropJumpValidationResult with all issues and status
         """
-        result = ValidationResult()
+        result = DropJumpValidationResult()
 
         # Estimate athlete profile if not provided
         if self.assumed_profile:
@@ -208,7 +111,7 @@ class DropJumpMetricsValidator:
         return result
 
     def _check_contact_time(
-        self, contact_time_ms: float, result: ValidationResult
+        self, contact_time_ms: float, result: DropJumpValidationResult
    ) -> None:
        """Validate contact time."""
        contact_time_s = contact_time_ms / 1000.0
@@ -233,7 +136,7 @@
         )
 
     def _check_flight_time(
-        self, flight_time_ms: float, result: ValidationResult
+        self, flight_time_ms: float, result: DropJumpValidationResult
     ) -> None:
         """Validate flight time."""
         flight_time_s = flight_time_ms / 1000.0
@@ -257,7 +160,7 @@
         )
 
     def _check_jump_height(
-        self, jump_height_m: float, result: ValidationResult
+        self, jump_height_m: float, result: DropJumpValidationResult
     ) -> None:
         """Validate jump height."""
         bounds = DropJumpBounds.JUMP_HEIGHT
@@ -280,7 +183,10 @@
         )
 
     def _check_rsi(
-        self, contact_time_ms: float, flight_time_ms: float, result: ValidationResult
+        self,
+        contact_time_ms: float,
+        flight_time_ms: float,
+        result: DropJumpValidationResult,
     ) -> None:
         """Validate RSI and cross-check consistency."""
         contact_time_s = contact_time_ms / 1000.0
@@ -313,7 +219,7 @@
         self,
         jump_height_kinematic_m: float,
         jump_height_trajectory_m: float,
-        result: ValidationResult,
+        result: DropJumpValidationResult,
     ) -> None:
         """Validate consistency between kinematic and trajectory-based heights.
 