kinemotion 0.24.0__py3-none-any.whl → 0.26.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry flags this version of kinemotion as potentially problematic.
@@ -0,0 +1,222 @@
+ """Metadata structures for analysis results."""
+
+ from dataclasses import dataclass
+ from datetime import datetime, timezone
+
+ from .quality import QualityAssessment
+
+
+ @dataclass
+ class VideoInfo:
+     """Information about the source video.
+
+     Attributes:
+         source_path: Path to the source video file
+         fps: Actual frames per second (measured from video)
+         width: Video width in pixels
+         height: Video height in pixels
+         duration_s: Total video duration in seconds
+         frame_count: Total number of frames
+         codec: Video codec (e.g., "h264", "hevc") or None if unknown
+     """
+
+     source_path: str
+     fps: float
+     width: int
+     height: int
+     duration_s: float
+     frame_count: int
+     codec: str | None = None
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "source_path": self.source_path,
+             "fps": round(self.fps, 2),
+             "resolution": {"width": self.width, "height": self.height},
+             "duration_s": round(self.duration_s, 2),
+             "frame_count": self.frame_count,
+             "codec": self.codec,
+         }
+
+
+ @dataclass
+ class ProcessingInfo:
+     """Information about processing context.
+
+     Attributes:
+         version: Kinemotion version string (e.g., "0.26.0")
+         timestamp: ISO 8601 timestamp of when analysis was performed
+         quality_preset: Quality preset used ("fast", "balanced", "accurate")
+         processing_time_s: Time taken to process video in seconds
+     """
+
+     version: str
+     timestamp: str
+     quality_preset: str
+     processing_time_s: float
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "version": self.version,
+             "timestamp": self.timestamp,
+             "quality_preset": self.quality_preset,
+             "processing_time_s": round(self.processing_time_s, 3),
+         }
+
+
+ @dataclass
+ class SmoothingConfig:
+     """Smoothing algorithm configuration.
+
+     Attributes:
+         window_size: Savitzky-Golay window size
+         polynomial_order: Polynomial degree for SG filter
+         use_bilateral_filter: Whether bilateral temporal filtering was used
+         use_outlier_rejection: Whether RANSAC/median outlier rejection was used
+     """
+
+     window_size: int
+     polynomial_order: int
+     use_bilateral_filter: bool
+     use_outlier_rejection: bool
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "window_size": self.window_size,
+             "polynomial_order": self.polynomial_order,
+             "use_bilateral_filter": self.use_bilateral_filter,
+             "use_outlier_rejection": self.use_outlier_rejection,
+         }
+
+
+ @dataclass
+ class DetectionConfig:
+     """Detection algorithm configuration.
+
+     Attributes:
+         velocity_threshold: Velocity threshold for contact/flight detection
+         min_contact_frames: Minimum consecutive frames to confirm contact
+         visibility_threshold: Minimum landmark visibility to trust
+         use_curvature_refinement: Whether acceleration-based refinement was used
+     """
+
+     velocity_threshold: float
+     min_contact_frames: int
+     visibility_threshold: float
+     use_curvature_refinement: bool
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "velocity_threshold": round(self.velocity_threshold, 4),
+             "min_contact_frames": self.min_contact_frames,
+             "visibility_threshold": round(self.visibility_threshold, 2),
+             "use_curvature_refinement": self.use_curvature_refinement,
+         }
+
+
+ @dataclass
+ class DropDetectionConfig:
+     """Drop jump-specific detection configuration.
+
+     Attributes:
+         auto_detect_drop_start: Whether automatic drop start detection was used
+         detected_drop_frame: Frame where drop was detected (None if manual)
+         min_stationary_duration_s: Minimum standing time before drop
+     """
+
+     auto_detect_drop_start: bool
+     detected_drop_frame: int | None
+     min_stationary_duration_s: float
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "auto_detect_drop_start": self.auto_detect_drop_start,
+             "detected_drop_frame": self.detected_drop_frame,
+             "min_stationary_duration_s": round(self.min_stationary_duration_s, 2),
+         }
+
+
+ @dataclass
+ class AlgorithmConfig:
+     """Complete algorithm configuration for reproducibility.
+
+     Attributes:
+         detection_method: Algorithm used ("backward_search" for CMJ, "forward_search" for drop)
+         tracking_method: Pose tracking method ("mediapipe_pose")
+         model_complexity: MediaPipe model complexity (0, 1, or 2)
+         smoothing: Smoothing configuration
+         detection: Detection configuration
+         drop_detection: Drop detection config (drop jump only, None for CMJ)
+     """
+
+     detection_method: str
+     tracking_method: str
+     model_complexity: int
+     smoothing: SmoothingConfig
+     detection: DetectionConfig
+     drop_detection: DropDetectionConfig | None = None
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         result = {
+             "detection_method": self.detection_method,
+             "tracking_method": self.tracking_method,
+             "model_complexity": self.model_complexity,
+             "smoothing": self.smoothing.to_dict(),
+             "detection": self.detection.to_dict(),
+         }
+
+         if self.drop_detection is not None:
+             result["drop_detection"] = self.drop_detection.to_dict()
+
+         return result
+
+
+ @dataclass
+ class ResultMetadata:
+     """Complete metadata for analysis results.
+
+     Attributes:
+         quality: Quality assessment with confidence and warnings
+         video: Source video information
+         processing: Processing context and timing
+         algorithm: Algorithm configuration used
+     """
+
+     quality: QualityAssessment
+     video: VideoInfo
+     processing: ProcessingInfo
+     algorithm: AlgorithmConfig
+
+     def to_dict(self) -> dict:
+         """Convert to JSON-serializable dictionary."""
+         return {
+             "quality": self.quality.to_dict(),
+             "video": self.video.to_dict(),
+             "processing": self.processing.to_dict(),
+             "algorithm": self.algorithm.to_dict(),
+         }
+
+
+ def create_timestamp() -> str:
+     """Create ISO 8601 timestamp for current time in UTC."""
+     return datetime.now(timezone.utc).isoformat()
+
+
+ def get_kinemotion_version() -> str:
+     """Get current kinemotion version.
+
+     Returns:
+         Version string (e.g., "0.26.0")
+     """
+     try:
+         from importlib.metadata import version
+
+         return version("kinemotion")
+     except Exception:
+         return "unknown"
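
A minimal usage sketch for the metadata structures added above. All concrete values are invented for illustration, and the module import path is not shown in this diff, so the class names are used directly:

```python
# Hypothetical example; values are fabricated and the import path is assumed.
import json

video = VideoInfo(
    source_path="cmj_athlete01.mp4",  # hypothetical file
    fps=59.94,
    width=1920,
    height=1080,
    duration_s=4.2,
    frame_count=252,
    codec="h264",
)

algorithm = AlgorithmConfig(
    detection_method="backward_search",
    tracking_method="mediapipe_pose",
    model_complexity=1,
    smoothing=SmoothingConfig(
        window_size=7,
        polynomial_order=2,
        use_bilateral_filter=True,
        use_outlier_rejection=True,
    ),
    detection=DetectionConfig(
        velocity_threshold=0.015,
        min_contact_frames=3,
        visibility_threshold=0.5,
        use_curvature_refinement=True,
    ),
    # drop_detection left as None for a CMJ-style analysis
)

processing = ProcessingInfo(
    version=get_kinemotion_version(),
    timestamp=create_timestamp(),
    quality_preset="balanced",
    processing_time_s=12.345,
)

# Each piece serializes independently; ResultMetadata.to_dict() would nest
# them together with a QualityAssessment from the quality module.
print(json.dumps(
    {
        "video": video.to_dict(),
        "processing": processing.to_dict(),
        "algorithm": algorithm.to_dict(),
    },
    indent=2,
))
```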
@@ -0,0 +1,396 @@
+ """Quality assessment and confidence scoring for pose tracking and analysis."""
+
+ from dataclasses import dataclass
+ from typing import Literal
+
+ import numpy as np
+ from numpy.typing import NDArray
+
+
+ @dataclass
+ class QualityIndicators:
+     """Detailed quality indicators for pose tracking and analysis.
+
+     Attributes:
+         avg_visibility: Mean visibility score across all key landmarks (0-1)
+         min_visibility: Minimum visibility score encountered (0-1)
+         tracking_stable: Whether landmark tracking was stable (low jitter)
+         phase_detection_clear: Whether phase transitions were clearly detected
+         outliers_detected: Number of outlier frames detected and corrected
+         outlier_percentage: Percentage of frames with outliers (0-100)
+         position_variance: Variance in position tracking (lower is more stable)
+         fps: Video frame rate (higher is better for accuracy)
+     """
+
+     avg_visibility: float
+     min_visibility: float
+     tracking_stable: bool
+     phase_detection_clear: bool
+     outliers_detected: int
+     outlier_percentage: float
+     position_variance: float
+     fps: float
+
+
+ @dataclass
+ class QualityAssessment:
+     """Overall quality assessment with confidence level and warnings.
+
+     Attributes:
+         confidence: Overall confidence level (high/medium/low)
+         quality_indicators: Detailed quality metrics
+         warnings: List of warning messages for user
+         quality_score: Numerical quality score (0-100)
+     """
+
+     confidence: Literal["high", "medium", "low"]
+     quality_indicators: QualityIndicators
+     warnings: list[str]
+     quality_score: float
+
+     def to_dict(self) -> dict:
+         """Convert quality assessment to JSON-serializable dictionary."""
+         return {
+             "confidence": self.confidence,
+             "quality_score": round(self.quality_score, 1),
+             "quality_indicators": {
+                 "avg_visibility": round(self.quality_indicators.avg_visibility, 3),
+                 "min_visibility": round(self.quality_indicators.min_visibility, 3),
+                 "tracking_stable": self.quality_indicators.tracking_stable,
+                 "phase_detection_clear": self.quality_indicators.phase_detection_clear,
+                 "outliers_detected": self.quality_indicators.outliers_detected,
+                 "outlier_percentage": round(
+                     self.quality_indicators.outlier_percentage, 1
+                 ),
+                 "position_variance": round(
+                     self.quality_indicators.position_variance, 6
+                 ),
+                 "fps": round(self.quality_indicators.fps, 1),
+             },
+             "warnings": self.warnings,
+         }
+
+
+ def calculate_position_stability(
+     positions: NDArray[np.float64],
+     window_size: int = 10,
+ ) -> float:
+     """
+     Calculate position tracking stability using rolling variance.
+
+     Lower variance indicates more stable tracking (less jitter).
+
+     Args:
+         positions: Array of position values (e.g., foot y-positions)
+         window_size: Window size for rolling variance calculation
+
+     Returns:
+         Mean rolling variance (lower is better)
+     """
+     if len(positions) < window_size:
+         return float(np.var(positions))
+
+     # Calculate rolling variance
+     rolling_vars = []
+     for i in range(len(positions) - window_size + 1):
+         window = positions[i : i + window_size]
+         rolling_vars.append(np.var(window))
+
+     return float(np.mean(rolling_vars))
+
+
+ def assess_tracking_quality(
+     visibilities: NDArray[np.float64],
+     positions: NDArray[np.float64],
+     outlier_mask: NDArray[np.bool_] | None,
+     fps: float,
+     phases_detected: bool = True,
+     phase_count: int = 0,
+ ) -> QualityAssessment:
+     """
+     Assess overall tracking quality and assign confidence level.
+
+     Evaluates multiple quality indicators to determine confidence:
+     - Landmark visibility (MediaPipe confidence scores)
+     - Tracking stability (position variance, jitter)
+     - Outlier detection (frames requiring correction)
+     - Phase detection success (clear transitions found)
+     - Frame rate (higher = better temporal resolution)
+
+     Args:
+         visibilities: Array of visibility scores for each frame (0-1)
+         positions: Array of tracked positions (normalized coordinates)
+         outlier_mask: Boolean array marking outlier frames (None if no outliers)
+         fps: Video frame rate
+         phases_detected: Whether jump phases were successfully detected
+         phase_count: Number of phases detected (0 if failed)
+
+     Returns:
+         QualityAssessment object with confidence level, indicators, and warnings
+     """
+     # Calculate visibility metrics
+     avg_visibility = float(np.mean(visibilities))
+     min_visibility = float(np.min(visibilities))
+
+     # Calculate tracking stability
+     position_variance = calculate_position_stability(positions)
+     tracking_stable = position_variance < 0.001  # Threshold for stable tracking
+
+     # Count outliers
+     outliers_detected = 0
+     outlier_percentage = 0.0
+     if outlier_mask is not None:
+         outliers_detected = int(np.sum(outlier_mask))
+         outlier_percentage = (outliers_detected / len(outlier_mask)) * 100
+
+     # Assess phase detection clarity
+     phase_detection_clear = phases_detected and phase_count >= 2
+
+     # Create quality indicators
+     indicators = QualityIndicators(
+         avg_visibility=avg_visibility,
+         min_visibility=min_visibility,
+         tracking_stable=tracking_stable,
+         phase_detection_clear=phase_detection_clear,
+         outliers_detected=outliers_detected,
+         outlier_percentage=outlier_percentage,
+         position_variance=position_variance,
+         fps=fps,
+     )
+
+     # Calculate overall quality score (0-100)
+     quality_score = _calculate_quality_score(indicators)
+
+     # Determine confidence level
+     confidence = _determine_confidence_level(quality_score)
+
+     # Generate warnings
+     warnings = _generate_warnings(indicators, confidence)
+
+     return QualityAssessment(
+         confidence=confidence,
+         quality_indicators=indicators,
+         warnings=warnings,
+         quality_score=quality_score,
+     )
+
+
+ def _calculate_quality_score(indicators: QualityIndicators) -> float:
+     """
+     Calculate numerical quality score (0-100) from quality indicators.
+
+     Weighted combination of different quality factors:
+     - Visibility: 40% weight (most critical)
+     - Tracking stability: 25% weight
+     - Outlier rate: 20% weight
+     - Phase detection: 10% weight
+     - Frame rate: 5% weight
+
+     Args:
+         indicators: Quality indicators object
+
+     Returns:
+         Quality score from 0 (worst) to 100 (best)
+     """
+     # Visibility score (40% weight)
+     # Perfect: avg_vis=1.0, min_vis>0.8
+     visibility_score = indicators.avg_visibility * 100
+     if indicators.min_visibility < 0.5:
+         visibility_score *= 0.7  # Penalty for low minimum visibility
+
+     # Tracking stability score (25% weight)
+     # Perfect: position_variance < 0.0005
+     # Good: position_variance < 0.001
+     # Medium: position_variance < 0.003
+     if indicators.position_variance < 0.0005:
+         stability_score = 100.0
+     elif indicators.position_variance < 0.001:
+         stability_score = 85.0
+     elif indicators.position_variance < 0.003:
+         stability_score = 65.0
+     else:
+         stability_score = max(0.0, 100 - indicators.position_variance * 10000)
+
+     # Outlier score (20% weight)
+     # Perfect: 0% outliers
+     # Good: <5% outliers
+     # Acceptable: <10% outliers
+     outlier_score = max(0.0, 100 - indicators.outlier_percentage * 10)
+
+     # Phase detection score (10% weight)
+     phase_score = 100.0 if indicators.phase_detection_clear else 50.0
+
+     # Frame rate score (5% weight)
+     # Perfect: 60fps+
+     # Good: 30-60fps
+     # Poor: <30fps
+     if indicators.fps >= 60:
+         fps_score = 100.0
+     elif indicators.fps >= 30:
+         fps_score = 80.0
+     elif indicators.fps >= 24:
+         fps_score = 60.0
+     else:
+         fps_score = 40.0
+
+     # Weighted combination
+     quality_score = (
+         visibility_score * 0.40
+         + stability_score * 0.25
+         + outlier_score * 0.20
+         + phase_score * 0.10
+         + fps_score * 0.05
+     )
+
+     return float(np.clip(quality_score, 0, 100))
+
+
+ def _determine_confidence_level(
+     quality_score: float,
+ ) -> Literal["high", "medium", "low"]:
+     """
+     Determine confidence level from quality score.
+
+     Thresholds:
+     - High: quality_score >= 75
+     - Medium: quality_score >= 50
+     - Low: quality_score < 50
+
+     Args:
+         quality_score: Numerical quality score (0-100)
+
+     Returns:
+         Confidence level: "high", "medium", or "low"
+     """
+     if quality_score >= 75:
+         return "high"
+     elif quality_score >= 50:
+         return "medium"
+     else:
+         return "low"
+
+
+ def _generate_warnings(
+     indicators: QualityIndicators,
+     confidence: Literal["high", "medium", "low"],
+ ) -> list[str]:
+     """
+     Generate user-facing warning messages based on quality indicators.
+
+     Args:
+         indicators: Quality indicators object
+         confidence: Overall confidence level
+
+     Returns:
+         List of warning messages (empty if no warnings)
+     """
+     warnings: list[str] = []
+
+     # Visibility warnings
+     if indicators.avg_visibility < 0.7:
+         warnings.append(
+             f"Poor landmark visibility (avg {indicators.avg_visibility:.2f}). "
+             "Check lighting, camera angle, and ensure full body is visible."
+         )
+     elif indicators.avg_visibility < 0.8:
+         warnings.append(
+             f"Moderate landmark visibility (avg {indicators.avg_visibility:.2f}). "
+             "Results may be less accurate."
+         )
+
+     if indicators.min_visibility < 0.5:
+         warnings.append(
+             f"Very low visibility detected ({indicators.min_visibility:.2f}). "
+             "Some frames may have occlusion or tracking loss."
+         )
+
+     # Tracking stability warnings
+     if not indicators.tracking_stable:
+         warnings.append(
+             f"Unstable landmark tracking detected (variance {indicators.position_variance:.4f}). "
+             "This may indicate jitter or occlusion. Consider better lighting or camera position."
+         )
+
+     # Outlier warnings
+     if indicators.outlier_percentage > 10:
+         warnings.append(
+             f"High outlier rate ({indicators.outlier_percentage:.1f}%). "
+             f"{indicators.outliers_detected} frames required correction. "
+             "This may reduce measurement accuracy."
+         )
+     elif indicators.outlier_percentage > 5:
+         warnings.append(
+             f"Moderate outlier rate ({indicators.outlier_percentage:.1f}%). "
+             f"{indicators.outliers_detected} frames were corrected."
+         )
+
+     # Phase detection warnings
+     if not indicators.phase_detection_clear:
+         warnings.append(
+             "Unclear phase transitions detected. "
+             "Jump phases may not be accurately identified. "
+             "Check if full jump is captured in video."
+         )
+
+     # Frame rate warnings
+     if indicators.fps < 30:
+         warnings.append(
+             f"Low frame rate ({indicators.fps:.0f} fps). "
+             "Recommend recording at 30fps or higher for better accuracy. "
+             "Validated apps use 120-240fps."
+         )
+     elif indicators.fps < 60:
+         warnings.append(
+             f"Frame rate is {indicators.fps:.0f} fps. "
+             "Consider 60fps or higher for improved temporal resolution. "
+             "Validated apps (MyJump) use 120-240fps."
+         )
+
+     # Overall confidence warning
+     if confidence == "low":
+         warnings.append(
+             "⚠️ LOW CONFIDENCE: Results may be unreliable. "
+             "Review quality indicators and consider re-recording with better conditions."
+         )
+     elif confidence == "medium":
+         warnings.append(
+             "⚠️ MEDIUM CONFIDENCE: Results should be interpreted with caution. "
+             "Check quality indicators for specific issues."
+         )
+
+     return warnings
+
+
+ def assess_jump_quality(
+     visibilities: NDArray[np.float64],
+     positions: NDArray[np.float64],
+     outlier_mask: NDArray[np.bool_] | None,
+     fps: float,
+     phases_detected: bool = True,
+     phase_count: int = 0,
+ ) -> QualityAssessment:
+     """
+     Convenience function for assessing jump analysis quality.
+
+     This is the main entry point for quality assessment, called from
+     dropjump and CMJ analysis modules.
+
+     Args:
+         visibilities: Array of visibility scores (0-1)
+         positions: Array of tracked positions
+         outlier_mask: Boolean array marking outliers (None if none detected)
+         fps: Video frame rate
+         phases_detected: Whether phases were successfully detected
+         phase_count: Number of phases detected
+
+     Returns:
+         QualityAssessment with confidence, indicators, and warnings
+     """
+     return assess_tracking_quality(
+         visibilities=visibilities,
+         positions=positions,
+         outlier_mask=outlier_mask,
+         fps=fps,
+         phases_detected=phases_detected,
+         phase_count=phase_count,
+     )
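
A minimal sketch of how the quality entry point might be called on synthetic tracking data (the arrays and numbers below are fabricated for illustration). With average visibility around 0.9, stable positions, roughly 1% outliers, clear phases, and 60 fps, the weighted score works out to about 0.40 × 90 + 0.25 × 100 + 0.20 × 89 + 0.10 × 100 + 0.05 × 100 ≈ 94, which lands in the "high" confidence band:

```python
# Hypothetical example with synthetic data; not taken from a real video.
import numpy as np

rng = np.random.default_rng(0)
n_frames = 180

# Visibility scores clustered around 0.9, clipped to the valid 0-1 range.
visibilities = np.clip(rng.normal(0.9, 0.05, n_frames), 0.0, 1.0)

# A smooth, low-amplitude vertical trajectory (normalized coordinates).
positions = 0.6 + 0.01 * np.sin(np.linspace(0.0, 6.0, n_frames))

# Pretend two frames were flagged and corrected as outliers.
outlier_mask = np.zeros(n_frames, dtype=bool)
outlier_mask[[40, 95]] = True

assessment = assess_jump_quality(
    visibilities=visibilities,
    positions=positions,
    outlier_mask=outlier_mask,
    fps=60.0,
    phases_detected=True,
    phase_count=4,
)

print(assessment.confidence)              # expected "high" for this synthetic data
print(round(assessment.quality_score, 1))
print(assessment.to_dict()["warnings"])   # likely empty for these values
```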