kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kinemotion might be problematic.
- kinemotion/__init__.py +31 -6
- kinemotion/api.py +39 -598
- kinemotion/cli.py +2 -0
- kinemotion/cmj/__init__.py +5 -0
- kinemotion/cmj/analysis.py +621 -0
- kinemotion/cmj/api.py +563 -0
- kinemotion/cmj/cli.py +324 -0
- kinemotion/cmj/debug_overlay.py +457 -0
- kinemotion/cmj/joint_angles.py +307 -0
- kinemotion/cmj/kinematics.py +360 -0
- kinemotion/cmj/metrics_validator.py +767 -0
- kinemotion/cmj/validation_bounds.py +341 -0
- kinemotion/core/__init__.py +28 -0
- kinemotion/core/auto_tuning.py +71 -37
- kinemotion/core/cli_utils.py +60 -0
- kinemotion/core/debug_overlay_utils.py +385 -0
- kinemotion/core/determinism.py +83 -0
- kinemotion/core/experimental.py +103 -0
- kinemotion/core/filtering.py +9 -6
- kinemotion/core/formatting.py +75 -0
- kinemotion/core/metadata.py +231 -0
- kinemotion/core/model_downloader.py +172 -0
- kinemotion/core/pipeline_utils.py +433 -0
- kinemotion/core/pose.py +298 -141
- kinemotion/core/pose_landmarks.py +67 -0
- kinemotion/core/quality.py +393 -0
- kinemotion/core/smoothing.py +250 -154
- kinemotion/core/timing.py +247 -0
- kinemotion/core/types.py +42 -0
- kinemotion/core/validation.py +201 -0
- kinemotion/core/video_io.py +135 -50
- kinemotion/dropjump/__init__.py +1 -1
- kinemotion/dropjump/analysis.py +367 -182
- kinemotion/dropjump/api.py +665 -0
- kinemotion/dropjump/cli.py +156 -466
- kinemotion/dropjump/debug_overlay.py +136 -206
- kinemotion/dropjump/kinematics.py +232 -255
- kinemotion/dropjump/metrics_validator.py +240 -0
- kinemotion/dropjump/validation_bounds.py +157 -0
- kinemotion/models/__init__.py +0 -0
- kinemotion/models/pose_landmarker_lite.task +0 -0
- kinemotion-0.67.0.dist-info/METADATA +726 -0
- kinemotion-0.67.0.dist-info/RECORD +47 -0
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
- kinemotion-0.10.6.dist-info/METADATA +0 -561
- kinemotion-0.10.6.dist-info/RECORD +0 -20
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
kinemotion/core/metadata.py (new file)
@@ -0,0 +1,231 @@
"""Metadata structures for analysis results."""

from dataclasses import dataclass
from datetime import datetime, timezone

from .quality import QualityAssessment


@dataclass
class VideoInfo:
    """Information about the source video.

    Attributes:
        source_path: Path to the source video file
        fps: Actual frames per second (measured from video)
        width: Video width in pixels
        height: Video height in pixels
        duration_s: Total video duration in seconds
        frame_count: Total number of frames
        codec: Video codec (e.g., "h264", "hevc") or None if unknown
    """

    source_path: str
    fps: float
    width: int
    height: int
    duration_s: float
    frame_count: int
    codec: str | None = None

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        return {
            "source_path": self.source_path,
            "fps": round(self.fps, 2),
            "resolution": {"width": self.width, "height": self.height},
            "duration_s": round(self.duration_s, 2),
            "frame_count": self.frame_count,
            "codec": self.codec,
        }


@dataclass
class ProcessingInfo:
    """Information about processing context.

    Attributes:
        version: Kinemotion version string (e.g., "0.26.0")
        timestamp: ISO 8601 timestamp of when analysis was performed
        quality_preset: Quality preset used ("fast", "balanced", "accurate")
        processing_time_s: Time taken to process video in seconds
        timing_breakdown: Optional dict mapping stage names to duration in seconds
    """

    version: str
    timestamp: str
    quality_preset: str
    processing_time_s: float
    timing_breakdown: dict[str, float] | None = None

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        result: dict = {
            "version": self.version,
            "timestamp": self.timestamp,
            "quality_preset": self.quality_preset,
            "processing_time_s": round(self.processing_time_s, 3),
        }
        if self.timing_breakdown:
            result["timing_breakdown_ms"] = {
                stage: round(duration * 1000, 1)
                for stage, duration in self.timing_breakdown.items()
            }
        return result


@dataclass
class SmoothingConfig:
    """Smoothing algorithm configuration.

    Attributes:
        window_size: Savitzky-Golay window size
        polynomial_order: Polynomial degree for SG filter
        use_bilateral_filter: Whether bilateral temporal filtering was used
        use_outlier_rejection: Whether RANSAC/median outlier rejection was used
    """

    window_size: int
    polynomial_order: int
    use_bilateral_filter: bool
    use_outlier_rejection: bool

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        return {
            "window_size": self.window_size,
            "polynomial_order": self.polynomial_order,
            "use_bilateral_filter": self.use_bilateral_filter,
            "use_outlier_rejection": self.use_outlier_rejection,
        }


@dataclass
class DetectionConfig:
    """Detection algorithm configuration.

    Attributes:
        velocity_threshold: Velocity threshold for contact/flight detection
        min_contact_frames: Minimum consecutive frames to confirm contact
        visibility_threshold: Minimum landmark visibility to trust
        use_curvature_refinement: Whether acceleration-based refinement was used
    """

    velocity_threshold: float
    min_contact_frames: int
    visibility_threshold: float
    use_curvature_refinement: bool

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        return {
            "velocity_threshold": round(self.velocity_threshold, 4),
            "min_contact_frames": self.min_contact_frames,
            "visibility_threshold": round(self.visibility_threshold, 2),
            "use_curvature_refinement": self.use_curvature_refinement,
        }


@dataclass
class DropDetectionConfig:
    """Drop jump-specific detection configuration.

    Attributes:
        auto_detect_drop_start: Whether automatic drop start detection was used
        detected_drop_frame: Frame where drop was detected (None if manual)
        min_stationary_duration_s: Minimum standing time before drop
    """

    auto_detect_drop_start: bool
    detected_drop_frame: int | None
    min_stationary_duration_s: float

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        return {
            "auto_detect_drop_start": self.auto_detect_drop_start,
            "detected_drop_frame": self.detected_drop_frame,
            "min_stationary_duration_s": round(self.min_stationary_duration_s, 2),
        }


@dataclass
class AlgorithmConfig:
    """Complete algorithm configuration for reproducibility.

    Attributes:
        detection_method: Algorithm used ("backward_search" for CMJ,
            "forward_search" for drop)
        tracking_method: Pose tracking method ("mediapipe_pose")
        model_complexity: MediaPipe model complexity (0, 1, or 2)
        smoothing: Smoothing configuration
        detection: Detection configuration
        drop_detection: Drop detection config (drop jump only, None for CMJ)
    """

    detection_method: str
    tracking_method: str
    model_complexity: int
    smoothing: SmoothingConfig
    detection: DetectionConfig
    drop_detection: DropDetectionConfig | None = None

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        result = {
            "detection_method": self.detection_method,
            "tracking_method": self.tracking_method,
            "model_complexity": self.model_complexity,
            "smoothing": self.smoothing.to_dict(),
            "detection": self.detection.to_dict(),
        }

        if self.drop_detection is not None:
            result["drop_detection"] = self.drop_detection.to_dict()

        return result


@dataclass
class ResultMetadata:
    """Complete metadata for analysis results.

    Attributes:
        quality: Quality assessment with confidence and warnings
        video: Source video information
        processing: Processing context and timing
        algorithm: Algorithm configuration used
    """

    quality: QualityAssessment
    video: VideoInfo
    processing: ProcessingInfo
    algorithm: AlgorithmConfig

    def to_dict(self) -> dict:
        """Convert to JSON-serializable dictionary."""
        return {
            "quality": self.quality.to_dict(),
            "video": self.video.to_dict(),
            "processing": self.processing.to_dict(),
            "algorithm": self.algorithm.to_dict(),
        }


def create_timestamp() -> str:
    """Create ISO 8601 timestamp for current time in UTC."""
    return datetime.now(timezone.utc).isoformat()


def get_kinemotion_version() -> str:
    """Get current kinemotion version.

    Returns:
        Version string (e.g., "0.26.0")
    """
    try:
        from importlib.metadata import version

        return version("kinemotion")
    except Exception:
        return "unknown"
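To show how these structures are meant to compose, here is a minimal usage sketch. It assumes the module path kinemotion.core.metadata from this release; every field value below (paths, fps, preset, thresholds, timings) is invented for illustration, and ResultMetadata is omitted because its QualityAssessment input comes from core/quality.py, which is not expanded in this diff.

# Illustrative only: the values below are made up, not package defaults.
import json

from kinemotion.core.metadata import (
    AlgorithmConfig,
    DetectionConfig,
    ProcessingInfo,
    SmoothingConfig,
    VideoInfo,
    create_timestamp,
    get_kinemotion_version,
)

video = VideoInfo(
    source_path="session_01.mp4",  # hypothetical file
    fps=240.0,
    width=1920,
    height=1080,
    duration_s=4.2,
    frame_count=1008,
    codec="h264",
)
processing = ProcessingInfo(
    version=get_kinemotion_version(),
    timestamp=create_timestamp(),
    quality_preset="balanced",
    processing_time_s=12.345,
    timing_breakdown={"pose_tracking": 9.8, "smoothing": 0.7},  # invented stages
)
algorithm = AlgorithmConfig(
    detection_method="forward_search",
    tracking_method="mediapipe_pose",
    model_complexity=1,
    smoothing=SmoothingConfig(
        window_size=7,
        polynomial_order=2,
        use_bilateral_filter=True,
        use_outlier_rejection=True,
    ),
    detection=DetectionConfig(
        velocity_threshold=0.02,
        min_contact_frames=3,
        visibility_threshold=0.5,
        use_curvature_refinement=True,
    ),
)

# ResultMetadata would additionally need a QualityAssessment, so only the
# other sections are serialized here.
print(json.dumps(
    {
        "video": video.to_dict(),
        "processing": processing.to_dict(),
        "algorithm": algorithm.to_dict(),
    },
    indent=2,
))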
kinemotion/core/model_downloader.py (new file)
@@ -0,0 +1,172 @@
"""Model file loader for MediaPipe Tasks API.

The Tasks API requires model files (.task). This module handles:
1. Using bundled model files (included in package)
2. Fallback to downloading and caching if bundled file not found
"""

from __future__ import annotations

import hashlib
import urllib.request
from importlib.resources import as_file, files
from pathlib import Path

from platformdirs import user_cache_dir

# Model URLs from Google's MediaPipe model storage
MODEL_URLS: dict[str, str] = {
    "lite": "https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task",
    "full": "https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_full/float16/1/pose_landmarker_full.task",
    "heavy": "https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_heavy/float16/1/pose_landmarker_heavy.task",
}

# Expected SHA256 hashes for model verification (placeholder - should be filled in)
MODEL_HASHES: dict[str, str] = {
    "lite": "",
    "full": "",
    "heavy": "",
}


def _get_bundled_model_path(model_type: str) -> Path | None:
    """Get the path to a bundled model file in the package.

    For zip installs, copies the model to the cache directory first.

    Args:
        model_type: Model variant ("lite", "full", or "heavy")

    Returns:
        Path to model file, or None if not found
    """
    import shutil

    try:
        model_filename = f"pose_landmarker_{model_type}.task"
        package = files("kinemotion.models") / model_filename
        if package.is_file():
            # For editable installs, files are directly accessible
            # Check if we can use it directly (it's a real file path)
            try:
                # Try to get the path without extraction
                direct_path = Path(str(package))
                if direct_path.is_file():
                    return direct_path
            except (TypeError, ValueError):
                pass

            # For zip installs, extract to cache
            cache_dir = get_model_cache_dir()
            cached_model = cache_dir / model_filename

            # Only copy if not already cached
            if not cached_model.exists():
                with as_file(package) as extracted:
                    shutil.copy(extracted, cached_model)

            return cached_model
    except Exception:
        # Package data not available
        pass
    return None


def get_model_cache_dir() -> Path:
    """Get the cache directory for model files.

    Returns:
        Path to the cache directory (platform-specific)
    """
    cache_dir = Path(user_cache_dir("kinemotion", appauthor=False))
    models_dir = cache_dir / "models"
    models_dir.mkdir(parents=True, exist_ok=True)
    return models_dir


def get_model_path(model_type: str = "heavy") -> Path:
    """Get the path to a model file.

    Priority order:
    1. Bundled model in package (no download needed)
    2. Cached model from previous download
    3. Download from Google's storage

    Args:
        model_type: Model variant ("lite", "full", or "heavy")

    Returns:
        Path to the model file

    Raises:
        ValueError: If model_type is not recognized
    """
    if model_type not in MODEL_URLS:
        valid_types = ", ".join(MODEL_URLS.keys())
        raise ValueError(f"Unknown model type: {model_type}. Choose from: {valid_types}")

    # 1. Try bundled model first (fastest - no download)
    bundled_path = _get_bundled_model_path(model_type)
    if bundled_path is not None:
        return bundled_path

    # 2. Check cache
    cache_dir = get_model_cache_dir()
    model_filename = f"pose_landmarker_{model_type}.task"
    model_path = cache_dir / model_filename

    if model_path.exists():
        return model_path

    # 3. Download the model
    url = MODEL_URLS[model_type]
    _download_file(url, model_path)

    return model_path


def _download_file(url: str, destination: Path) -> None:
    """Download a file from URL to destination.

    Args:
        url: Source URL
        destination: Destination path

    Raises:
        urllib.error.URLError: If download fails
    """
    temp_path = destination.with_suffix(".tmp")

    try:
        with urllib.request.urlopen(url) as response:
            with temp_path.open("wb") as f:
                while chunk := response.read(8192):
                    f.write(chunk)
        temp_path.replace(destination)
    except Exception:
        # Clean up temp file on error
        if temp_path.exists():
            temp_path.unlink()
        raise


def verify_model_hash(model_path: Path, expected_hash: str) -> bool:
    """Verify a model file's SHA256 hash.

    Args:
        model_path: Path to the model file
        expected_hash: Expected SHA256 hash

    Returns:
        True if hash matches, False otherwise
    """
    sha256_hash = hashlib.sha256()

    with model_path.open("rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            sha256_hash.update(chunk)

    return sha256_hash.hexdigest() == expected_hash


__all__ = ["get_model_path", "get_model_cache_dir", "verify_model_hash", "MODEL_URLS"]
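A similarly hedged sketch of the resolution flow described in get_model_path (bundled file, then cache, then download). Per the file list above, this wheel bundles pose_landmarker_lite.task, so the "lite" variant should resolve without a network fetch; the empty expected hash mirrors the placeholder MODEL_HASHES shipped in this version, so a real digest would have to be supplied by the caller.

# Illustrative only: resolve a model file and optionally check its hash.
from kinemotion.core.model_downloader import (
    MODEL_URLS,
    get_model_cache_dir,
    get_model_path,
    verify_model_hash,
)

print("Known model variants:", ", ".join(MODEL_URLS))
print("Cache directory:", get_model_cache_dir())

# Resolution order: bundled .task file -> cached download -> fresh download.
model_path = get_model_path("lite")
print("Using model:", model_path)

expected = ""  # placeholder; MODEL_HASHES is empty in 0.67.0, supply a real SHA256 hex digest
if expected:
    print("Hash OK:", verify_model_hash(model_path, expected))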