kinemotion 0.47.1__py3-none-any.whl → 0.47.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.

@@ -0,0 +1,541 @@
+ """Public API for drop jump video analysis."""
+
+ import json
+ import time
+ from collections.abc import Callable
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from numpy.typing import NDArray
+
+ from ..core.auto_tuning import (
+     AnalysisParameters,
+     QualityPreset,
+     analyze_video_sample,
+     auto_tune_parameters,
+ )
+ from ..core.filtering import reject_outliers
+ from ..core.metadata import (
+     AlgorithmConfig,
+     DetectionConfig,
+     DropDetectionConfig,
+     ProcessingInfo,
+     ResultMetadata,
+     SmoothingConfig,
+     VideoInfo,
+     create_timestamp,
+     get_kinemotion_version,
+ )
+ from ..core.pipeline_utils import (
+     apply_expert_overrides,
+     apply_smoothing,
+     convert_timer_to_stage_names,
+     determine_confidence_levels,
+     extract_vertical_positions,
+     parse_quality_preset,
+     print_verbose_parameters,
+     process_all_frames,
+     process_videos_bulk_generic,
+ )
+ from ..core.pose import PoseTracker
+ from ..core.quality import QualityAssessment, assess_jump_quality
+ from ..core.timing import NULL_TIMER, PerformanceTimer, Timer
+ from ..core.video_io import VideoProcessor
+ from .analysis import (
+     detect_ground_contact,
+     find_contact_phases,
+ )
+ from .debug_overlay import DebugOverlayRenderer
+ from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
+ from .metrics_validator import DropJumpMetricsValidator
+
+
+ @dataclass
+ class DropJumpVideoResult:
+     """Result of processing a single drop jump video."""
+
+     video_path: str
+     success: bool
+     metrics: DropJumpMetrics | None = None
+     error: str | None = None
+     processing_time: float = 0.0
+
+
+ @dataclass
+ class DropJumpVideoConfig:
+     """Configuration for processing a single drop jump video."""
+
+     video_path: str
+     quality: str = "balanced"
+     output_video: str | None = None
+     json_output: str | None = None
+     drop_start_frame: int | None = None
+     smoothing_window: int | None = None
+     velocity_threshold: float | None = None
+     min_contact_frames: int | None = None
+     visibility_threshold: float | None = None
+     detection_confidence: float | None = None
+     tracking_confidence: float | None = None
+
+
+ def _assess_dropjump_quality(
+     vertical_positions: "NDArray",
+     visibilities: "NDArray",
+     contact_states: list,
+     fps: float,
+ ) -> tuple:
+     """Assess tracking quality and detect phases.
+
+     Returns:
+         Tuple of (quality_result, outlier_mask, phases_detected, phase_count)
+     """
+     _, outlier_mask = reject_outliers(
+         vertical_positions,
+         use_ransac=True,
+         use_median=True,
+         interpolate=False,
+     )
+
+     phases = find_contact_phases(contact_states)
+     phases_detected = len(phases) > 0
+     phase_count = len(phases)
+
+     quality_result = assess_jump_quality(
+         visibilities=visibilities,
+         positions=vertical_positions,
+         outlier_mask=outlier_mask,
+         fps=fps,
+         phases_detected=phases_detected,
+         phase_count=phase_count,
+     )
+
+     return quality_result, outlier_mask, phases_detected, phase_count
+
+
+ def _build_dropjump_metadata(
+     video_path: str,
+     video: "VideoProcessor",
+     params: "AnalysisParameters",
+     quality_result: QualityAssessment,
+     drop_start_frame: int | None,
+     metrics: DropJumpMetrics,
+     processing_time: float,
+     quality_preset: "QualityPreset",
+     timer: Timer,
+ ) -> ResultMetadata:
+     """Build complete result metadata."""
+     drop_frame = None
+     if drop_start_frame is None and metrics.drop_start_frame is not None:
+         drop_frame = metrics.drop_start_frame
+     elif drop_start_frame is not None:
+         drop_frame = drop_start_frame
+
+     algorithm_config = AlgorithmConfig(
+         detection_method="forward_search",
+         tracking_method="mediapipe_pose",
+         model_complexity=1,
+         smoothing=SmoothingConfig(
+             window_size=params.smoothing_window,
+             polynomial_order=params.polyorder,
+             use_bilateral_filter=params.bilateral_filter,
+             use_outlier_rejection=params.outlier_rejection,
+         ),
+         detection=DetectionConfig(
+             velocity_threshold=params.velocity_threshold,
+             min_contact_frames=params.min_contact_frames,
+             visibility_threshold=params.visibility_threshold,
+             use_curvature_refinement=params.use_curvature,
+         ),
+         drop_detection=DropDetectionConfig(
+             auto_detect_drop_start=(drop_start_frame is None),
+             detected_drop_frame=drop_frame,
+             min_stationary_duration_s=0.5,
+         ),
+     )
+
+     video_info = VideoInfo(
+         source_path=video_path,
+         fps=video.fps,
+         width=video.width,
+         height=video.height,
+         duration_s=video.frame_count / video.fps,
+         frame_count=video.frame_count,
+         codec=video.codec,
+     )
+
+     stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+     processing_info = ProcessingInfo(
+         version=get_kinemotion_version(),
+         timestamp=create_timestamp(),
+         quality_preset=quality_preset.value,
+         processing_time_s=processing_time,
+         timing_breakdown=stage_times,
+     )
+
+     return ResultMetadata(
+         quality=quality_result,
+         video=video_info,
+         processing=processing_info,
+         algorithm=algorithm_config,
+     )
+
+
+ def _save_dropjump_json(
+     json_output: str,
+     metrics: DropJumpMetrics,
+     timer: Timer,
+     verbose: bool,
+ ) -> None:
+     """Save metrics to JSON file."""
+     with timer.measure("json_serialization"):
+         output_path = Path(json_output)
+         metrics_dict = metrics.to_dict()
+         json_str = json.dumps(metrics_dict, indent=2)
+         output_path.write_text(json_str)
+
+     if verbose:
+         print(f"Metrics written to: {json_output}")
+
+
+ def _print_dropjump_summary(
+     start_time: float,
+     timer: Timer,
+ ) -> None:
+     """Print verbose timing summary."""
+     total_time = time.time() - start_time
+     stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+     print("\n=== Timing Summary ===")
+     for stage, duration in stage_times.items():
+         percentage = (duration / total_time) * 100
+         dur_ms = duration * 1000
+         print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+     total_ms = total_time * 1000
+     print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
+     print()
+     print("Analysis complete!")
+
+
+ def _generate_debug_video(
+     output_video: str,
+     frames: list,
+     frame_indices: list[int],
+     video_fps: float,
+     smoothed_landmarks: list,
+     contact_states: list,
+     metrics: DropJumpMetrics,
+     timer: Timer | None,
+     verbose: bool,
+ ) -> None:
+     """Generate debug video with overlay."""
+     if verbose:
+         print(f"Generating debug video: {output_video}")
+
+     if not frames:
+         return
+
+     timer = timer or NULL_TIMER
+     debug_h, debug_w = frames[0].shape[:2]
+
+     if video_fps > 30:
+         debug_fps = video_fps / (video_fps / 30.0)
+     else:
+         debug_fps = video_fps
+
+     if len(frames) < len(smoothed_landmarks):
+         step = max(1, int(video_fps / 30.0))
+         debug_fps = video_fps / step
+
+     def _render_frames(renderer: DebugOverlayRenderer) -> None:
+         for frame, idx in zip(frames, frame_indices, strict=True):
+             annotated = renderer.render_frame(
+                 frame,
+                 smoothed_landmarks[idx],
+                 contact_states[idx],
+                 idx,
+                 metrics,
+                 use_com=False,
+             )
+             renderer.write_frame(annotated)
+
+     renderer_context = DebugOverlayRenderer(
+         output_video,
+         debug_w,
+         debug_h,
+         debug_w,
+         debug_h,
+         debug_fps,
+         timer=timer,
+     )
+
+     with timer.measure("debug_video_generation"):
+         with renderer_context as renderer:
+             _render_frames(renderer)
+
+     if verbose:
+         print(f"Debug video saved: {output_video}")
+
+
+ def process_dropjump_video(
+     video_path: str,
+     quality: str = "balanced",
+     output_video: str | None = None,
+     json_output: str | None = None,
+     drop_start_frame: int | None = None,
+     smoothing_window: int | None = None,
+     velocity_threshold: float | None = None,
+     min_contact_frames: int | None = None,
+     visibility_threshold: float | None = None,
+     detection_confidence: float | None = None,
+     tracking_confidence: float | None = None,
+     verbose: bool = False,
+     timer: Timer | None = None,
+     pose_tracker: "PoseTracker | None" = None,
+ ) -> DropJumpMetrics:
+     """
+     Process a single drop jump video and return metrics.
+
+     Jump height is calculated from flight time using the kinematic formula (h = g*t²/8).
+
+     Args:
+         video_path: Path to the input video file
+         quality: Analysis quality preset ("fast", "balanced", or "accurate")
+         output_video: Optional path for debug video output
+         json_output: Optional path for JSON metrics output
+         drop_start_frame: Optional manual drop start frame
+         smoothing_window: Optional override for smoothing window
+         velocity_threshold: Optional override for velocity threshold
+         min_contact_frames: Optional override for minimum contact frames
+         visibility_threshold: Optional override for visibility threshold
+         detection_confidence: Optional override for pose detection confidence
+         tracking_confidence: Optional override for pose tracking confidence
+         verbose: Print processing details
+         timer: Optional Timer for measuring operations
+         pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
+
+     Returns:
+         DropJumpMetrics object containing analysis results
+
+     Raises:
+         ValueError: If video cannot be processed or parameters are invalid
+         FileNotFoundError: If video file does not exist
+     """
+     if not Path(video_path).exists():
+         raise FileNotFoundError(f"Video file not found: {video_path}")
+
+     from ..core.determinism import set_deterministic_mode
+
+     set_deterministic_mode(seed=42)
+
+     start_time = time.time()
+     if timer is None:
+         timer = PerformanceTimer()
+
+     quality_preset = parse_quality_preset(quality)
+
+     with timer.measure("video_initialization"):
+         with VideoProcessor(video_path, timer=timer) as video:
+             detection_conf, tracking_conf = determine_confidence_levels(
+                 quality_preset, detection_confidence, tracking_confidence
+             )
+
+             if verbose:
+                 print("Processing all frames with MediaPipe pose tracking...")
+
+             tracker = pose_tracker
+             should_close_tracker = False
+
+             if tracker is None:
+                 tracker = PoseTracker(
+                     min_detection_confidence=detection_conf,
+                     min_tracking_confidence=tracking_conf,
+                     timer=timer,
+                 )
+                 should_close_tracker = True
+
+             frames, landmarks_sequence, frame_indices = process_all_frames(
+                 video, tracker, verbose, timer, close_tracker=should_close_tracker
+             )
+
+             with timer.measure("parameter_auto_tuning"):
+                 characteristics = analyze_video_sample(
+                     landmarks_sequence, video.fps, video.frame_count
+                 )
+                 params = auto_tune_parameters(characteristics, quality_preset)
+
+                 params = apply_expert_overrides(
+                     params,
+                     smoothing_window,
+                     velocity_threshold,
+                     min_contact_frames,
+                     visibility_threshold,
+                 )
+
+             if verbose:
+                 print_verbose_parameters(
+                     video, characteristics, quality_preset, params
+                 )
+
+             smoothed_landmarks = apply_smoothing(
+                 landmarks_sequence, params, verbose, timer
+             )
+
+             if verbose:
+                 print("Extracting foot positions...")
+             with timer.measure("vertical_position_extraction"):
+                 vertical_positions, visibilities = extract_vertical_positions(
+                     smoothed_landmarks
+                 )
+
+             if verbose:
+                 print("Detecting ground contact...")
+             with timer.measure("ground_contact_detection"):
+                 contact_states = detect_ground_contact(
+                     vertical_positions,
+                     velocity_threshold=params.velocity_threshold,
+                     min_contact_frames=params.min_contact_frames,
+                     visibility_threshold=params.visibility_threshold,
+                     visibilities=visibilities,
+                     window_length=params.smoothing_window,
+                     polyorder=params.polyorder,
+                     timer=timer,
+                 )
+
+             if verbose:
+                 print("Calculating metrics...")
+             with timer.measure("metrics_calculation"):
+                 metrics = calculate_drop_jump_metrics(
+                     contact_states,
+                     vertical_positions,
+                     video.fps,
+                     drop_start_frame=drop_start_frame,
+                     velocity_threshold=params.velocity_threshold,
+                     smoothing_window=params.smoothing_window,
+                     polyorder=params.polyorder,
+                     use_curvature=params.use_curvature,
+                     timer=timer,
+                 )
+
+             if verbose:
+                 print("Assessing tracking quality...")
+             with timer.measure("quality_assessment"):
+                 quality_result, _, _, _ = _assess_dropjump_quality(
+                     vertical_positions, visibilities, contact_states, video.fps
+                 )
+
+             if verbose and quality_result.warnings:
+                 print("\n⚠️ Quality Warnings:")
+                 for warning in quality_result.warnings:
+                     print(f"  - {warning}")
+                 print()
+
+             if output_video:
+                 _generate_debug_video(
+                     output_video,
+                     frames,
+                     frame_indices,
+                     video.fps,
+                     smoothed_landmarks,
+                     contact_states,
+                     metrics,
+                     timer,
+                     verbose,
+                 )
+
+             with timer.measure("metrics_validation"):
+                 validator = DropJumpMetricsValidator()
+                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
+                 metrics.validation_result = validation_result
+
+             if verbose and validation_result.issues:
+                 print("\n⚠️ Validation Results:")
+                 for issue in validation_result.issues:
+                     print(f"  [{issue.severity.value}] {issue.metric}: {issue.message}")
+
+             processing_time = time.time() - start_time
+             result_metadata = _build_dropjump_metadata(
+                 video_path,
+                 video,
+                 params,
+                 quality_result,
+                 drop_start_frame,
+                 metrics,
+                 processing_time,
+                 quality_preset,
+                 timer,
+             )
+             metrics.result_metadata = result_metadata
+
+             if json_output:
+                 _save_dropjump_json(json_output, metrics, timer, verbose)
+
+             if verbose:
+                 _print_dropjump_summary(start_time, timer)
+
+             return metrics
+
+
+ def process_dropjump_videos_bulk(
+     configs: list[DropJumpVideoConfig],
+     max_workers: int = 4,
+     progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
+ ) -> list[DropJumpVideoResult]:
+     """
+     Process multiple drop jump videos in parallel.
+     """
+
+     def error_factory(video_path: str, error_msg: str) -> DropJumpVideoResult:
+         return DropJumpVideoResult(
+             video_path=video_path, success=False, error=error_msg
+         )
+
+     return process_videos_bulk_generic(
+         configs,
+         _process_dropjump_video_wrapper,
+         error_factory,
+         max_workers,
+         progress_callback,
+     )
+
+
+ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
+     """Wrapper function for parallel processing."""
+     start_time = time.time()
+
+     try:
+         metrics = process_dropjump_video(
+             video_path=config.video_path,
+             quality=config.quality,
+             output_video=config.output_video,
+             json_output=config.json_output,
+             drop_start_frame=config.drop_start_frame,
+             smoothing_window=config.smoothing_window,
+             velocity_threshold=config.velocity_threshold,
+             min_contact_frames=config.min_contact_frames,
+             visibility_threshold=config.visibility_threshold,
+             detection_confidence=config.detection_confidence,
+             tracking_confidence=config.tracking_confidence,
+             verbose=False,
+         )
+
+         processing_time = time.time() - start_time
+
+         return DropJumpVideoResult(
+             video_path=config.video_path,
+             success=True,
+             metrics=metrics,
+             processing_time=processing_time,
+         )
+
+     except Exception as e:
+         processing_time = time.time() - start_time
+
+         return DropJumpVideoResult(
+             video_path=config.video_path,
+             success=False,
+             error=str(e),
+             processing_time=processing_time,
+         )
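
The docstring of `process_dropjump_video` above computes jump height purely from flight time: the body rises for half the flight, so h = ½·g·(t/2)² = g·t²/8. A standalone sketch of that relation and of how the new API could be called (not code from the package; the `kinemotion.dropjump.api` import path and file names are assumptions, while the function and dataclass signatures come from the diff above):

    # Sketch of the flight-time-to-height relation cited in the docstring of
    # process_dropjump_video; not part of the kinemotion package itself.
    G = 9.81  # gravitational acceleration, m/s^2

    def height_from_flight_time(flight_time_s: float) -> float:
        # Rise time is half the flight time: h = 0.5 * G * (t/2)**2 = G * t**2 / 8
        return G * flight_time_s**2 / 8.0

    print(f"{height_from_flight_time(0.50):.3f} m")  # 0.50 s of flight -> ~0.307 m

    # Hypothetical usage of the API added in this release; the import path is
    # an assumption inferred from the package layout shown in this diff.
    from kinemotion.dropjump.api import (
        DropJumpVideoConfig,
        process_dropjump_video,
        process_dropjump_videos_bulk,
    )

    # Single video: metrics returned, optional JSON output written alongside.
    metrics = process_dropjump_video(
        "athlete_01.mp4",
        quality="accurate",
        json_output="athlete_01.json",
        verbose=True,
    )

    # Batch: each video becomes a DropJumpVideoResult with success/error state.
    configs = [DropJumpVideoConfig(video_path=p) for p in ["a.mp4", "b.mp4"]]
    for result in process_dropjump_videos_bulk(configs, max_workers=2):
        print(result.video_path, "ok" if result.success else result.error)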
@@ -8,16 +8,16 @@ from pathlib import Path
 
  import click
 
- from ..api import (
+ from ..core.cli_utils import (
+     collect_video_files,
+     generate_batch_output_paths,
+ )
+ from .api import (
      DropJumpVideoConfig,
      DropJumpVideoResult,
      process_dropjump_video,
      process_dropjump_videos_bulk,
  )
- from ..core.cli_utils import (
-     collect_video_files,
-     generate_batch_output_paths,
- )
 
 
  @dataclass
@@ -70,6 +70,59 @@ class DropJumpBounds:
      )
 
 
+ def _score_jump_height(jump_height: float) -> float:
+     """Convert jump height to athlete profile score (0-4).
+
+     Args:
+         jump_height: Jump height in meters
+
+     Returns:
+         Score from 0 (elderly) to 4 (elite)
+     """
+     thresholds = [(0.25, 0), (0.35, 1), (0.50, 2), (0.70, 3)]
+     for threshold, score in thresholds:
+         if jump_height < threshold:
+             return float(score)
+     return 4.0  # Elite
+
+
+ def _score_contact_time(contact_time_s: float) -> float:
+     """Convert contact time to athlete profile score (0-4).
+
+     Args:
+         contact_time_s: Ground contact time in seconds
+
+     Returns:
+         Score from 0 (elderly) to 4 (elite)
+     """
+     thresholds = [(0.60, 0), (0.50, 1), (0.45, 2), (0.40, 3)]
+     for threshold, score in thresholds:
+         if contact_time_s > threshold:
+             return float(score)
+     return 4.0  # Elite
+
+
+ def _classify_combined_score(combined_score: float) -> AthleteProfile:
+     """Classify combined score into athlete profile.
+
+     Args:
+         combined_score: Weighted score from height and contact time
+
+     Returns:
+         Athlete profile classification
+     """
+     thresholds = [
+         (1.0, AthleteProfile.ELDERLY),
+         (1.7, AthleteProfile.UNTRAINED),
+         (2.7, AthleteProfile.RECREATIONAL),
+         (3.7, AthleteProfile.TRAINED),
+     ]
+     for threshold, profile in thresholds:
+         if combined_score < threshold:
+             return profile
+     return AthleteProfile.ELITE
+
+
  def estimate_athlete_profile(
      metrics: dict, gender: str | None = None
  ) -> AthleteProfile:
@@ -92,48 +145,14 @@ def estimate_athlete_profile(
      contact_time = metrics.get("data", {}).get("ground_contact_time_ms")
 
      if jump_height is None or contact_time is None:
-         return AthleteProfile.RECREATIONAL  # Default
+         return AthleteProfile.RECREATIONAL
 
-     # Convert contact_time from ms to seconds
      contact_time_s = contact_time / 1000.0
 
-     # Decision logic: Use weighted combination to avoid over-weighting single metrics
-     # Calculate profile scores based on each metric
-     height_score = 0.0
-     if jump_height < 0.25:
-         height_score = 0  # Elderly
-     elif jump_height < 0.35:
-         height_score = 1  # Untrained
-     elif jump_height < 0.50:
-         height_score = 2  # Recreational
-     elif jump_height < 0.70:
-         height_score = 3  # Trained
-     else:
-         height_score = 4  # Elite
-
-     contact_score = 0.0
-     if contact_time_s > 0.60:
-         contact_score = 0  # Elderly
-     elif contact_time_s > 0.50:
-         contact_score = 1  # Untrained
-     elif contact_time_s > 0.45:
-         contact_score = 2  # Recreational
-     elif contact_time_s > 0.40:
-         contact_score = 3  # Trained
-     else:
-         contact_score = 4  # Elite
-
-     # Weight height more heavily (70%) than contact time (30%)
+     # Calculate weighted combination: height (70%) + contact time (30%)
      # Height is a more reliable indicator across populations
+     height_score = _score_jump_height(jump_height)
+     contact_score = _score_contact_time(contact_time_s)
      combined_score = (height_score * 0.70) + (contact_score * 0.30)
 
-     if combined_score < 1.0:
-         return AthleteProfile.ELDERLY
-     elif combined_score < 1.7:
-         return AthleteProfile.UNTRAINED
-     elif combined_score < 2.7:
-         return AthleteProfile.RECREATIONAL
-     elif combined_score < 3.7:
-         return AthleteProfile.TRAINED
-     else:
-         return AthleteProfile.ELITE
+     return _classify_combined_score(combined_score)
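
Tracing the refactored scoring with concrete numbers makes the 70/30 weighting visible (the inputs are hypothetical; the thresholds are the ones defined above):

    # Hypothetical athlete: 0.42 m jump height, 430 ms ground contact.
    jump_height = 0.42      # < 0.50 threshold -> height_score = 2 (recreational band)
    contact_time_s = 0.430  # > 0.40 threshold -> contact_score = 3 (trained band)

    combined_score = (2 * 0.70) + (3 * 0.30)  # = 2.3
    # 1.7 <= 2.3 < 2.7, so _classify_combined_score returns AthleteProfile.RECREATIONAL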
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kinemotion
- Version: 0.47.1
+ Version: 0.47.3
  Summary: Video-based kinematic analysis for athletic performance
  Project-URL: Homepage, https://github.com/feniix/kinemotion
  Project-URL: Repository, https://github.com/feniix/kinemotion