kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. Click here for more details.

Files changed (48) hide show
  1. kinemotion/__init__.py +31 -6
  2. kinemotion/api.py +39 -598
  3. kinemotion/cli.py +2 -0
  4. kinemotion/cmj/__init__.py +5 -0
  5. kinemotion/cmj/analysis.py +621 -0
  6. kinemotion/cmj/api.py +563 -0
  7. kinemotion/cmj/cli.py +324 -0
  8. kinemotion/cmj/debug_overlay.py +457 -0
  9. kinemotion/cmj/joint_angles.py +307 -0
  10. kinemotion/cmj/kinematics.py +360 -0
  11. kinemotion/cmj/metrics_validator.py +767 -0
  12. kinemotion/cmj/validation_bounds.py +341 -0
  13. kinemotion/core/__init__.py +28 -0
  14. kinemotion/core/auto_tuning.py +71 -37
  15. kinemotion/core/cli_utils.py +60 -0
  16. kinemotion/core/debug_overlay_utils.py +385 -0
  17. kinemotion/core/determinism.py +83 -0
  18. kinemotion/core/experimental.py +103 -0
  19. kinemotion/core/filtering.py +9 -6
  20. kinemotion/core/formatting.py +75 -0
  21. kinemotion/core/metadata.py +231 -0
  22. kinemotion/core/model_downloader.py +172 -0
  23. kinemotion/core/pipeline_utils.py +433 -0
  24. kinemotion/core/pose.py +298 -141
  25. kinemotion/core/pose_landmarks.py +67 -0
  26. kinemotion/core/quality.py +393 -0
  27. kinemotion/core/smoothing.py +250 -154
  28. kinemotion/core/timing.py +247 -0
  29. kinemotion/core/types.py +42 -0
  30. kinemotion/core/validation.py +201 -0
  31. kinemotion/core/video_io.py +135 -50
  32. kinemotion/dropjump/__init__.py +1 -1
  33. kinemotion/dropjump/analysis.py +367 -182
  34. kinemotion/dropjump/api.py +665 -0
  35. kinemotion/dropjump/cli.py +156 -466
  36. kinemotion/dropjump/debug_overlay.py +136 -206
  37. kinemotion/dropjump/kinematics.py +232 -255
  38. kinemotion/dropjump/metrics_validator.py +240 -0
  39. kinemotion/dropjump/validation_bounds.py +157 -0
  40. kinemotion/models/__init__.py +0 -0
  41. kinemotion/models/pose_landmarker_lite.task +0 -0
  42. kinemotion-0.67.0.dist-info/METADATA +726 -0
  43. kinemotion-0.67.0.dist-info/RECORD +47 -0
  44. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
  45. kinemotion-0.10.6.dist-info/METADATA +0 -561
  46. kinemotion-0.10.6.dist-info/RECORD +0 -20
  47. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
  48. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,247 @@
1
+ """Timing utilities for performance profiling.
2
+
3
+ This module implements a hybrid instrumentation pattern combining:
4
+ 1. Protocol-based type safety (structural subtyping)
5
+ 2. Null Object Pattern (zero overhead when disabled)
6
+ 3. High-precision timing (time.perf_counter)
7
+ 4. Memory optimization (__slots__)
8
+ 5. Accumulation support (for loops and repeated measurements)
9
+
10
+ Performance Characteristics:
11
+ - PerformanceTimer overhead: ~200ns per measurement
12
+ - NullTimer overhead: ~20ns per measurement
13
+ - Memory: 32 bytes per timer instance
14
+ - Precision: ~1 microsecond (perf_counter)
15
+
16
+ Example:
17
+ # Active timing
18
+ timer = PerformanceTimer()
19
+ with timer.measure("video_processing"):
20
+ process_video(frames)
21
+ metrics = timer.get_metrics()
22
+
23
+ # Zero-overhead timing (disabled)
24
+ tracker = PoseTracker(timer=NULL_TIMER)
25
+ # No timing overhead, but maintains API compatibility
26
+ """
27
+
28
+ import time
29
+ from contextlib import AbstractContextManager
30
+ from typing import Protocol, runtime_checkable
31
+
32
+
33
@runtime_checkable
class Timer(Protocol):
    """Structural interface shared by all timer implementations.

    Because this is a runtime-checkable Protocol, any object exposing
    ``measure`` and ``get_metrics`` with matching signatures satisfies
    it -- no inheritance required. This is what lets callers swap a
    PerformanceTimer for a NullTimer transparently.
    """

    def measure(self, name: str) -> AbstractContextManager[None]:
        """Return a context manager that times the enclosed block.

        Args:
            name: Label for the step being timed (e.g., "pose_tracking")

        Returns:
            A context manager measuring the wrapped block's duration
        """
        ...

    def get_metrics(self) -> dict[str, float]:
        """Return all timing data collected so far.

        Returns:
            Mapping from operation name to elapsed seconds
        """
        ...
60
+
61
+
62
+ class _NullContext(AbstractContextManager[None]):
63
+ """Singleton null context manager with zero overhead.
64
+
65
+ Implements the context manager protocol but performs no operations.
66
+ Optimized away by the Python interpreter for minimal overhead.
67
+ """
68
+
69
+ __slots__ = ()
70
+
71
+ def __enter__(self) -> None:
72
+ """No-op entry - returns immediately."""
73
+ return None
74
+
75
+ def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> bool:
76
+ """No-op exit - returns immediately.
77
+
78
+ Args:
79
+ exc_type: Exception type (ignored)
80
+ exc_val: Exception value (ignored)
81
+ exc_tb: Exception traceback (ignored)
82
+
83
+ Returns:
84
+ False (does not suppress exceptions)
85
+ """
86
+ return False
87
+
88
+
89
class NullTimer:
    """Timer stand-in that records nothing (Null Object Pattern).

    Drop-in replacement for PerformanceTimer when profiling is switched
    off: the API is identical, but every call is a no-op costing only a
    few tens of nanoseconds -- negligible next to any real work.

    Typical uses:
        - production runs where profiling is disabled
        - performance-critical paths that cannot afford instrumentation
        - tests that should not depend on wall-clock timing

    Example:
        # Prefer the shared singleton to avoid per-call allocation
        tracker = PoseTracker(timer=NULL_TIMER)

        # Costs almost nothing at runtime
        with tracker.timer.measure("operation"):
            do_work()
    """

    __slots__ = ()

    def measure(self, name: str) -> AbstractContextManager[None]:
        """Hand back the shared no-op context manager.

        Args:
            name: Ignored; present only to satisfy the Timer protocol

        Returns:
            The module-level singleton null context
        """
        del name  # unused by design -- protocol compatibility only
        return _NULL_CONTEXT

    def get_metrics(self) -> dict[str, float]:
        """Return an empty metrics mapping -- nothing is ever recorded.

        Returns:
            A fresh empty dictionary
        """
        return {}
136
+
137
+
138
# Singleton instances for global reuse
# Use these instead of creating new instances to avoid allocation overhead
_NULL_CONTEXT = _NullContext()  # shared by every NullTimer.measure() call
NULL_TIMER: Timer = NullTimer()  # stateless, so safe to share module-wide
142
+
143
+
144
+ class _MeasureContext(AbstractContextManager[None]):
145
+ """Optimized context manager for active timing.
146
+
147
+ Uses __slots__ for memory efficiency and perf_counter for precision.
148
+ Accumulates durations for repeated measurements of the same operation.
149
+ """
150
+
151
+ __slots__ = ("_metrics", "_name", "_start")
152
+
153
+ def __init__(self, metrics: dict[str, float], name: str) -> None:
154
+ """Initialize measurement context.
155
+
156
+ Args:
157
+ metrics: Dictionary to store timing results
158
+ name: Name of the operation being measured
159
+ """
160
+ self._metrics = metrics
161
+ self._name = name
162
+ self._start = 0.0
163
+
164
+ def __enter__(self) -> None:
165
+ """Start timing measurement using high-precision counter."""
166
+ self._start = time.perf_counter()
167
+ return None
168
+
169
+ def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> bool:
170
+ """Complete timing measurement and accumulate duration.
171
+
172
+ Accumulates duration if the same operation is measured multiple times.
173
+ This is useful for measuring operations in loops.
174
+
175
+ Args:
176
+ exc_type: Exception type (if any)
177
+ exc_val: Exception value (if any)
178
+ exc_tb: Exception traceback (if any)
179
+
180
+ Returns:
181
+ False (does not suppress exceptions)
182
+ """
183
+ duration = time.perf_counter() - self._start
184
+ # Accumulate for repeated measurements (e.g., in loops)
185
+ self._metrics[self._name] = self._metrics.get(self._name, 0.0) + duration
186
+ return False
187
+
188
+
189
class PerformanceTimer:
    """Collects wall-clock durations for named steps at high precision.

    Built on time.perf_counter(), a monotonic clock unaffected by system
    clock adjustments. Measuring the same name repeatedly accumulates the
    durations, so loop bodies can be timed directly.

    Precision is roughly a microsecond on common platforms and each
    measurement adds on the order of 200 nanoseconds of overhead, which
    suits development profiling and performance analysis.

    Example:
        timer = PerformanceTimer()

        # One-off measurement
        with timer.measure("video_initialization"):
            initialize_video(path)

        # Loop measurement -- durations accumulate per name
        for frame in frames:
            with timer.measure("pose_tracking"):
                track_pose(frame)

        metrics = timer.get_metrics()
        print(f"Total pose tracking: {metrics['pose_tracking']:.3f}s")
    """

    __slots__ = ("metrics",)

    def __init__(self) -> None:
        """Start with no recorded measurements."""
        self.metrics: dict[str, float] = {}

    def measure(self, name: str) -> AbstractContextManager[None]:
        """Create a context manager that times the enclosed block.

        Uses the high-resolution monotonic perf_counter clock, which is
        more reliable than time.time() for performance measurement.

        Args:
            name: Label for the step being timed (e.g., "pose_tracking")

        Returns:
            Context manager recording elapsed seconds into ``metrics``

        Note:
            Repeated measurements under the same name accumulate rather
            than overwrite.
        """
        return _MeasureContext(self.metrics, name)

    def get_metrics(self) -> dict[str, float]:
        """Return the collected timings in seconds.

        Returns:
            A shallow copy, so callers cannot mutate internal state.
        """
        return self.metrics.copy()
@@ -0,0 +1,42 @@
1
"""Central type definitions for the kinemotion package.

This module provides all type aliases used throughout the codebase to ensure
consistent typing and better IDE support.
"""

from typing import Any, TypeAlias

import numpy as np
from numpy.typing import NDArray

# NumPy array types for various use cases
FloatArray: TypeAlias = NDArray[np.floating[Any]]  # any floating dtype (float32, float64, ...)
Float64Array: TypeAlias = NDArray[np.float64]  # specifically double precision
IntArray: TypeAlias = NDArray[np.integer[Any]]  # any integer dtype
UInt8Array: TypeAlias = NDArray[np.uint8]  # unsigned bytes, e.g. raw pixel data
BoolArray: TypeAlias = NDArray[np.bool_]  # boolean masks

# MediaPipe landmark types
# Using dict-based representation since MediaPipe lacks proper type stubs
LandmarkCoord: TypeAlias = tuple[float, float, float]  # (x, y, visibility)
LandmarkFrame: TypeAlias = dict[str, LandmarkCoord] | None  # keyed by landmark name; None presumably when no pose detected -- confirm against producers
LandmarkSequence: TypeAlias = list[LandmarkFrame]  # ordered sequence of per-frame landmark dicts

# Metrics dictionary type
# Uses Any because metrics can contain:
# - Simple values: float, int, str
# - Nested dicts: e.g. "triple_extension" contains angle data
# - Wrapper structures: e.g. {"data": {...actual metrics...}}
MetricsDict: TypeAlias = dict[str, Any]

# Public API of this module -- keep in sync with the aliases above.
__all__ = [
    "FloatArray",
    "Float64Array",
    "IntArray",
    "UInt8Array",
    "BoolArray",
    "LandmarkCoord",
    "LandmarkFrame",
    "LandmarkSequence",
    "MetricsDict",
]
@@ -0,0 +1,201 @@
1
+ """Shared validation infrastructure for jump metrics.
2
+
3
+ Provides base classes and enums for validating Counter Movement Jump (CMJ)
4
+ and Drop Jump metrics against physiological bounds.
5
+
6
+ Contains:
7
+ - ValidationSeverity: Severity levels for issues (ERROR, WARNING, INFO)
8
+ - ValidationIssue: Single validation issue dataclass
9
+ - ValidationResult: Aggregated validation results
10
+ - AthleteProfile: Athlete performance categories
11
+ - MetricBounds: Physiological bounds for any metric
12
+ """
13
+
14
+ from abc import ABC, abstractmethod
15
+ from dataclasses import dataclass, field
16
+ from enum import Enum
17
+
18
+
19
class ValidationSeverity(Enum):
    """How serious a validation issue is."""

    # Metrics invalid, likely data corruption
    ERROR = "ERROR"
    # Metrics valid but unusual, needs review
    WARNING = "WARNING"
    # Normal variation, informational only
    INFO = "INFO"
25
+
26
+
27
@dataclass
class ValidationIssue:
    """Single validation issue.

    One out-of-bounds or informational finding for a metric, optionally
    carrying the measured value and the bounds it was checked against.
    """

    severity: ValidationSeverity  # ERROR / WARNING / INFO
    metric: str  # name of the metric the issue refers to
    message: str  # human-readable explanation
    value: float | None = None  # measured value, when applicable
    bounds: tuple[float, float] | None = None  # (min, max) checked against, when applicable
36
+
37
+
38
class AthleteProfile(Enum):
    """Athlete performance categories for metric bounds."""

    # 70+, deconditioned
    ELDERLY = "elderly"
    # Sedentary, no training
    UNTRAINED = "untrained"
    # Fitness class, moderate activity
    RECREATIONAL = "recreational"
    # Regular athlete, 3-5 years training
    TRAINED = "trained"
    # Competitive athlete, college/professional level
    ELITE = "elite"
46
+
47
+
48
@dataclass
class MetricBounds:
    """Nested physiological ranges for one metric.

    The outermost range (absolute_min..absolute_max) marks values that are
    physically impossible and indicate data corruption; the inner ranges
    grade a value against athlete performance levels. Bounds are ordered:
    absolute_min < practical_min < recreational_min < elite_min, and
    elite_max < recreational_max < absolute_max (symmetric about typical
    values).

    Attributes:
        absolute_min: Hard lower limit; below this marks corrupt data
        practical_min: Lower limit for untrained/elderly athletes
        recreational_min: Lower limit for recreational athletes
        recreational_max: Upper limit for recreational athletes
        elite_min: Lower limit for elite (competitive) athletes
        elite_max: Upper limit for elite athletes
        absolute_max: Hard upper limit; above this marks corrupt data
        unit: Unit of measurement (e.g., "m", "s", "m/s", "degrees")
    """

    absolute_min: float
    practical_min: float
    recreational_min: float
    recreational_max: float
    elite_min: float
    elite_max: float
    absolute_max: float
    unit: str

    def contains(self, value: float, profile: AthleteProfile) -> bool:
        """Check whether value is typical for the given athlete profile."""
        if profile in (AthleteProfile.ELDERLY, AthleteProfile.UNTRAINED):
            # Both low-activity groups share the widest practical range.
            lower, upper = self.practical_min, self.recreational_max
        elif profile == AthleteProfile.RECREATIONAL:
            lower, upper = self.recreational_min, self.recreational_max
        elif profile == AthleteProfile.TRAINED:
            # Trained range sits midway between recreational and elite.
            lower = (self.recreational_min + self.elite_min) / 2
            upper = (self.recreational_max + self.elite_max) / 2
        elif profile == AthleteProfile.ELITE:
            lower, upper = self.elite_min, self.elite_max
        else:
            # Unknown profile: conservatively report out of bounds.
            return False
        return lower <= value <= upper

    def is_physically_possible(self, value: float) -> bool:
        """True when value lies within the absolute physiological limits."""
        return self.absolute_min <= value <= self.absolute_max
99
+
100
+
101
@dataclass
class ValidationResult:
    """Base validation result for jump metrics.

    Accumulates ValidationIssue records through the add_* helpers; call
    finalize_status() after all checks have run to derive the overall
    pass/fail status.
    """

    # All recorded issues, in the order they were added.
    issues: list[ValidationIssue] = field(default_factory=list)
    # Overall outcome: "PASS", "PASS_WITH_WARNINGS", or "FAIL".
    status: str = "PASS"
    # Profile the metrics were judged against, when known.
    athlete_profile: AthleteProfile | None = None

    def _add_issue(
        self,
        severity: ValidationSeverity,
        metric: str,
        message: str,
        value: float | None = None,
        bounds: tuple[float, float] | None = None,
    ) -> None:
        """Append one issue; shared by the severity-specific helpers."""
        self.issues.append(
            ValidationIssue(
                severity=severity,
                metric=metric,
                message=message,
                value=value,
                bounds=bounds,
            )
        )

    def add_error(
        self,
        metric: str,
        message: str,
        value: float | None = None,
        bounds: tuple[float, float] | None = None,
    ) -> None:
        """Add error-level issue (metrics invalid, likely data corruption)."""
        self._add_issue(ValidationSeverity.ERROR, metric, message, value, bounds)

    def add_warning(
        self,
        metric: str,
        message: str,
        value: float | None = None,
        bounds: tuple[float, float] | None = None,
    ) -> None:
        """Add warning-level issue (valid but unusual, needs review)."""
        self._add_issue(ValidationSeverity.WARNING, metric, message, value, bounds)

    def add_info(
        self,
        metric: str,
        message: str,
        value: float | None = None,
    ) -> None:
        """Add info-level issue (normal variation, informational only)."""
        # Info issues carry no bounds; ValidationIssue defaults bounds to None.
        self._add_issue(ValidationSeverity.INFO, metric, message, value)

    def finalize_status(self) -> None:
        """Derive the final status from recorded issues.

        Any ERROR yields "FAIL"; otherwise any WARNING yields
        "PASS_WITH_WARNINGS"; otherwise "PASS".
        """
        has_errors = any(issue.severity == ValidationSeverity.ERROR for issue in self.issues)
        has_warnings = any(issue.severity == ValidationSeverity.WARNING for issue in self.issues)

        if has_errors:
            self.status = "FAIL"
        elif has_warnings:
            self.status = "PASS_WITH_WARNINGS"
        else:
            self.status = "PASS"

    @abstractmethod
    def to_dict(self) -> dict:
        """Convert validation result to JSON-serializable dictionary.

        Must be overridden by subclasses. NOTE: this class is a plain
        dataclass, not an ABC, so @abstractmethod alone does not block
        instantiation or force an override; raising here makes a missing
        override fail loudly instead of silently returning None.
        """
        raise NotImplementedError(f"{type(self).__name__} must implement to_dict()")
177
+
178
+
179
class MetricsValidator(ABC):
    """Abstract base class for concrete jump-metric validators."""

    def __init__(self, assumed_profile: AthleteProfile | None = None):
        """Set up the validator.

        Args:
            assumed_profile: Profile to validate against. When None, the
                concrete validator estimates the profile from the metrics
                themselves.
        """
        self.assumed_profile = assumed_profile

    @abstractmethod
    def validate(self, metrics: dict) -> ValidationResult:
        """Run every check over the supplied metrics.

        Args:
            metrics: Dictionary with metric values

        Returns:
            ValidationResult with all issues and status
        """
        ...