kinemotion-0.24.0-py3-none-any.whl → kinemotion-0.25.0-py3-none-any.whl

kinemotion/api.py CHANGED
@@ -18,13 +18,27 @@ from .core.auto_tuning import (
     analyze_video_sample,
     auto_tune_parameters,
 )
+from .core.filtering import reject_outliers
+from .core.metadata import (
+    AlgorithmConfig,
+    DetectionConfig,
+    DropDetectionConfig,
+    ProcessingInfo,
+    ResultMetadata,
+    SmoothingConfig,
+    VideoInfo,
+    create_timestamp,
+    get_kinemotion_version,
+)
 from .core.pose import PoseTracker
+from .core.quality import assess_jump_quality
 from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
     ContactState,
     compute_average_foot_position,
     detect_ground_contact,
+    find_contact_phases,
 )
 from .dropjump.debug_overlay import DebugOverlayRenderer
 from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics
@@ -392,6 +406,9 @@ def process_dropjump_video(
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
+    # Start timing
+    start_time = time.time()
+
     # Convert quality string to enum
     quality_preset = _parse_quality_preset(quality)
 
@@ -470,6 +487,98 @@ def process_dropjump_video(
         use_curvature=params.use_curvature,
     )
 
+    # Assess quality and add confidence scores
+    if verbose:
+        print("Assessing tracking quality...")
+
+    # Detect outliers for quality scoring (doesn't affect results, just for assessment)
+    _, outlier_mask = reject_outliers(
+        vertical_positions,
+        use_ransac=True,
+        use_median=True,
+        interpolate=False,  # Don't modify, just detect
+    )
+
+    # Count phases for quality assessment
+    phases = find_contact_phases(contact_states)
+    phases_detected = len(phases) > 0
+    phase_count = len(phases)
+
+    # Perform quality assessment
+    quality_result = assess_jump_quality(
+        visibilities=visibilities,
+        positions=vertical_positions,
+        outlier_mask=outlier_mask,
+        fps=video.fps,
+        phases_detected=phases_detected,
+        phase_count=phase_count,
+    )
+
+    # Build complete metadata
+    processing_time = time.time() - start_time
+
+    video_info = VideoInfo(
+        source_path=video_path,
+        fps=video.fps,
+        width=video.width,
+        height=video.height,
+        duration_s=video.frame_count / video.fps,
+        frame_count=video.frame_count,
+        codec=None,
+    )
+
+    processing_info = ProcessingInfo(
+        version=get_kinemotion_version(),
+        timestamp=create_timestamp(),
+        quality_preset=quality_preset.value,
+        processing_time_s=processing_time,
+    )
+
+    # Check if drop start was auto-detected
+    drop_frame = None
+    if drop_start_frame is None and metrics.contact_start_frame is not None:
+        # Auto-detected
+        drop_frame = metrics.contact_start_frame
+
+    algorithm_config = AlgorithmConfig(
+        detection_method="forward_search",
+        tracking_method="mediapipe_pose",
+        model_complexity=1,
+        smoothing=SmoothingConfig(
+            window_size=params.smoothing_window,
+            polynomial_order=params.polyorder,
+            use_bilateral_filter=params.bilateral_filter,
+            use_outlier_rejection=params.outlier_rejection,
+        ),
+        detection=DetectionConfig(
+            velocity_threshold=params.velocity_threshold,
+            min_contact_frames=params.min_contact_frames,
+            visibility_threshold=params.visibility_threshold,
+            use_curvature_refinement=params.use_curvature,
+        ),
+        drop_detection=DropDetectionConfig(
+            auto_detect_drop_start=(drop_start_frame is None),
+            detected_drop_frame=drop_frame,
+            min_stationary_duration_s=0.5,
+        ),
+    )
+
+    result_metadata = ResultMetadata(
+        quality=quality_result,
+        video=video_info,
+        processing=processing_info,
+        algorithm=algorithm_config,
+    )
+
+    # Attach complete metadata to metrics
+    metrics.result_metadata = result_metadata
+
+    if verbose and quality_result.warnings:
+        print("\n⚠️ Quality Warnings:")
+        for warning in quality_result.warnings:
+            print(f" - {warning}")
+        print()
+
     # Generate outputs (JSON and debug video)
     _generate_outputs(
         metrics,
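
The outlier pass above is assessment-only: with `interpolate=False` the positions that feed the metrics are left untouched, and only the returned mask is handed to `assess_jump_quality`. A minimal standalone sketch of that pattern on synthetic data (it assumes, as the call above does, that the second return value is a per-frame mask that can be averaged):

    import numpy as np

    from kinemotion.core.filtering import reject_outliers

    positions = np.array([0.50, 0.51, 0.52, 0.90, 0.53])  # one implausible spike
    _, outlier_mask = reject_outliers(
        positions,
        use_ransac=True,
        use_median=True,
        interpolate=False,  # detect only; never rewrite the trajectory
    )
    # The mask only feeds quality scoring; `positions` itself is passed on unchanged.
    outlier_ratio = float(np.mean(outlier_mask))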
@@ -726,6 +835,9 @@ def process_cmj_video(
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
+    # Start timing
+    start_time = time.time()
+
     # Convert quality string to enum
     quality_preset = _parse_quality_preset(quality)
 
@@ -772,7 +884,9 @@ def process_cmj_video(
     # Extract foot positions
     if verbose:
         print("Extracting foot positions...")
-    vertical_positions, _ = _extract_vertical_positions(smoothed_landmarks)
+    vertical_positions, visibilities = _extract_vertical_positions(
+        smoothed_landmarks
+    )
     tracking_method = "foot"
 
     # Detect CMJ phases
@@ -815,6 +929,87 @@ def process_cmj_video(
         tracking_method=tracking_method,
     )
 
+    # Assess quality and add confidence scores
+    if verbose:
+        print("Assessing tracking quality...")
+
+    # Detect outliers for quality scoring (doesn't affect results, just for assessment)
+    _, outlier_mask = reject_outliers(
+        vertical_positions,
+        use_ransac=True,
+        use_median=True,
+        interpolate=False,  # Don't modify, just detect
+    )
+
+    # Phases detected successfully if we got here
+    phases_detected = True
+    phase_count = 4  # standing, eccentric, concentric, flight
+
+    # Perform quality assessment
+    quality_result = assess_jump_quality(
+        visibilities=visibilities,
+        positions=vertical_positions,
+        outlier_mask=outlier_mask,
+        fps=video.fps,
+        phases_detected=phases_detected,
+        phase_count=phase_count,
+    )
+
+    # Build complete metadata
+    processing_time = time.time() - start_time
+
+    video_info = VideoInfo(
+        source_path=video_path,
+        fps=video.fps,
+        width=video.width,
+        height=video.height,
+        duration_s=video.frame_count / video.fps,
+        frame_count=video.frame_count,
+        codec=None,  # TODO: Extract from video metadata if available
+    )
+
+    processing_info = ProcessingInfo(
+        version=get_kinemotion_version(),
+        timestamp=create_timestamp(),
+        quality_preset=quality_preset.value,
+        processing_time_s=processing_time,
+    )
+
+    algorithm_config = AlgorithmConfig(
+        detection_method="backward_search",
+        tracking_method="mediapipe_pose",
+        model_complexity=1,
+        smoothing=SmoothingConfig(
+            window_size=params.smoothing_window,
+            polynomial_order=params.polyorder,
+            use_bilateral_filter=params.bilateral_filter,
+            use_outlier_rejection=params.outlier_rejection,
+        ),
+        detection=DetectionConfig(
+            velocity_threshold=params.velocity_threshold,
+            min_contact_frames=params.min_contact_frames,
+            visibility_threshold=params.visibility_threshold,
+            use_curvature_refinement=params.use_curvature,
+        ),
+        drop_detection=None,  # CMJ doesn't have drop detection
+    )
+
+    result_metadata = ResultMetadata(
+        quality=quality_result,
+        video=video_info,
+        processing=processing_info,
+        algorithm=algorithm_config,
+    )
+
+    # Attach complete metadata to metrics
+    metrics.result_metadata = result_metadata
+
+    if verbose and quality_result.warnings:
+        print("\n⚠️ Quality Warnings:")
+        for warning in quality_result.warnings:
+            print(f" - {warning}")
+        print()
+
     # Generate outputs if requested
     _generate_cmj_outputs(
         output_video,
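
Taken together, process_dropjump_video and process_cmj_video now time their own run, attach a ResultMetadata object to the returned metrics, and surface quality warnings when verbose. A minimal caller-side sketch of reading that metadata (the file names and the "balanced" preset string are placeholders; only attributes that appear in this changeset are assumed):

    import json

    from kinemotion.api import process_cmj_video

    metrics = process_cmj_video(
        video_path="cmj.mp4",           # placeholder path
        quality="balanced",             # placeholder preset string
        json_output="cmj_result.json",  # placeholder output path
        verbose=True,
    )

    meta = metrics.result_metadata
    if meta is not None:
        for warning in meta.quality.warnings:
            print(f"quality warning: {warning}")

    # The same information is what to_dict() serializes: {"data": ..., "metadata": ...}
    print(json.dumps(metrics.to_dict(), indent=2, default=str))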
kinemotion/cmj/cli.py CHANGED
@@ -8,26 +8,10 @@ from pathlib import Path
 from typing import Any
 
 import click
-import numpy as np
 
-from ..core.auto_tuning import (
-    QualityPreset,
-    analyze_video_sample,
-    auto_tune_parameters,
-)
-from ..core.cli_utils import (
-    apply_expert_param_overrides,
-    common_output_options,
-    determine_initial_confidence,
-    print_auto_tuned_params,
-    smooth_landmark_sequence,
-    track_all_frames,
-)
-from ..core.pose import PoseTracker
-from ..core.video_io import VideoProcessor
-from .analysis import detect_cmj_phases
-from .debug_overlay import CMJDebugOverlayRenderer
-from .kinematics import CMJMetrics, calculate_cmj_metrics
+from ..api import process_cmj_video
+from ..core.auto_tuning import QualityPreset
+from ..core.cli_utils import common_output_options
 
 
 @dataclass
@@ -288,44 +272,6 @@ def cmj_analyze(  # NOSONAR(S107) - Click CLI requires individual parameters for
         sys.exit(1)
 
 
-def _get_foot_position(frame_landmarks: dict | None, last_position: float) -> float:
-    """Extract average foot position from frame landmarks."""
-    if not frame_landmarks:
-        return last_position
-
-    # Average foot position (ankles and heels)
-    foot_y_values = []
-    for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-        if key in frame_landmarks:
-            foot_y_values.append(frame_landmarks[key][1])
-
-    if foot_y_values:
-        return float(np.mean(foot_y_values))
-    return last_position
-
-
-def _extract_positions_from_landmarks(
-    smoothed_landmarks: list,
-) -> tuple[np.ndarray, str]:
-    """Extract vertical foot positions from landmarks.
-
-    Args:
-        smoothed_landmarks: Smoothed landmark sequence
-
-    Returns:
-        Tuple of (positions array, tracking method name)
-    """
-    click.echo("Extracting foot positions...", err=True)
-    position_list: list[float] = []
-
-    for frame_landmarks in smoothed_landmarks:
-        last_pos = position_list[-1] if position_list else 0.5
-        position = _get_foot_position(frame_landmarks, last_pos)
-        position_list.append(position)
-
-    return np.array(position_list), "foot"
-
-
 def _process_single(
     video_path: str,
     output: str | None,
@@ -334,169 +280,35 @@ def _process_single(
     verbose: bool,
     expert_params: AnalysisParameters,
 ) -> None:
-    """Process a single CMJ video."""
+    """Process a single CMJ video by calling the API."""
     try:
-        with VideoProcessor(video_path) as video:
-            click.echo(
-                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
-                f"{video.frame_count} frames",
-                err=True,
-            )
-
-            # Determine confidence levels
-            detection_conf, tracking_conf = determine_initial_confidence(
-                quality_preset, expert_params
-            )
-
-            # Track all frames
-            tracker = PoseTracker(
-                min_detection_confidence=detection_conf,
-                min_tracking_confidence=tracking_conf,
-            )
-            frames, landmarks_sequence = track_all_frames(video, tracker)
-
-            if not landmarks_sequence:
-                click.echo("Error: No frames processed", err=True)
-                sys.exit(1)
-
-            # Auto-tune parameters
-            characteristics = analyze_video_sample(
-                landmarks_sequence, video.fps, video.frame_count
-            )
-            params = auto_tune_parameters(characteristics, quality_preset)
-            params = apply_expert_param_overrides(params, expert_params)
-
-            # Calculate countermovement threshold (FPS-adjusted)
-            # Base: +0.015 at 30fps (POSITIVE for downward motion in normalized coords)
-            countermovement_threshold = 0.015 * (30.0 / video.fps)
-            if expert_params.countermovement_threshold is not None:
-                countermovement_threshold = expert_params.countermovement_threshold
-
-            # Show parameters if verbose
-            if verbose:
-                print_auto_tuned_params(
-                    video,
-                    quality_preset,
-                    params,
-                    extra_params={
-                        "countermovement_threshold": countermovement_threshold
-                    },
-                )
-
-            # Apply smoothing
-            smoothed_landmarks = smooth_landmark_sequence(landmarks_sequence, params)
-
-            # Extract foot positions
-            vertical_positions, tracking_method = _extract_positions_from_landmarks(
-                smoothed_landmarks
-            )
-
-            # Detect CMJ phases
-            click.echo("Detecting CMJ phases...", err=True)
-            phases = detect_cmj_phases(
-                vertical_positions,
-                video.fps,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
-
-            if phases is None:
-                click.echo("Error: Could not detect CMJ phases", err=True)
-                sys.exit(1)
-
-            standing_end, lowest_point, takeoff_frame, landing_frame = phases
-
-            # Calculate metrics
-            click.echo("Calculating metrics...", err=True)
-
-            # Compute SIGNED velocities for CMJ metrics (need direction info)
-            from .analysis import compute_signed_velocity
-
-            velocities = compute_signed_velocity(
-                vertical_positions,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
-
-            metrics = calculate_cmj_metrics(
-                vertical_positions,
-                velocities,
-                standing_end,
-                lowest_point,
-                takeoff_frame,
-                landing_frame,
-                video.fps,
-                tracking_method=tracking_method,
-            )
-
-            # Output results
-            _output_results(metrics, json_output)
+        # Call the API function (handles all processing logic)
+        metrics = process_cmj_video(
+            video_path=video_path,
+            quality=quality_preset.value,
+            output_video=output,
+            json_output=json_output,
+            smoothing_window=expert_params.smoothing_window,
+            velocity_threshold=expert_params.velocity_threshold,
+            min_contact_frames=expert_params.min_contact_frames,
+            visibility_threshold=expert_params.visibility_threshold,
+            detection_confidence=expert_params.detection_confidence,
+            tracking_confidence=expert_params.tracking_confidence,
+            verbose=verbose,
+        )
 
-            # Generate debug video if requested
-            if output:
-                _create_debug_video(output, video, frames, smoothed_landmarks, metrics)
+        # Print formatted summary to stdout
+        _output_results(metrics, json_output=None)  # Don't write JSON (API already did)
 
     except Exception as e:
         click.echo(f"Error processing video: {e}", err=True)
-        import traceback
+        if verbose:
+            import traceback
 
-        traceback.print_exc()
+            traceback.print_exc()
         sys.exit(1)
 
 
-def _create_debug_video(
-    output: str,
-    video: VideoProcessor,
-    frames: list,
-    smoothed_landmarks: list,
-    metrics: CMJMetrics,
-) -> None:
-    """Generate debug video with overlays.
-
-    Args:
-        output: Output video path
-        video: Video processor
-        frames: Video frames
-        smoothed_landmarks: Smoothed landmarks
-        metrics: Calculated metrics
-    """
-    click.echo(f"Generating debug video: {output}", err=True)
-    if video.display_width != video.width or video.display_height != video.height:
-        click.echo(f"Source video encoded: {video.width}x{video.height}", err=True)
-        click.echo(
-            f"Output dimensions: {video.display_width}x{video.display_height} "
-            f"(respecting display aspect ratio)",
-            err=True,
-        )
-    else:
-        click.echo(
-            f"Output dimensions: {video.width}x{video.height} "
-            f"(matching source video aspect ratio)",
-            err=True,
-        )
-
-    with CMJDebugOverlayRenderer(
-        output,
-        video.width,
-        video.height,
-        video.display_width,
-        video.display_height,
-        video.fps,
-    ) as renderer:
-        render_bar: Any
-        with click.progressbar(
-            length=len(frames), label="Rendering frames"
-        ) as render_bar:
-            for i, frame in enumerate(frames):
-                annotated = renderer.render_frame(
-                    frame, smoothed_landmarks[i], i, metrics
-                )
-                renderer.write_frame(annotated)
-                render_bar.update(1)
-
-    click.echo(f"Debug video saved: {output}", err=True)
-
-
 def _output_results(metrics: Any, json_output: str | None) -> None:
     """Output analysis results."""
     results = metrics.to_dict()
kinemotion/cmj/kinematics.py CHANGED
@@ -1,14 +1,18 @@
 """Counter Movement Jump (CMJ) metrics calculation."""
 
 from dataclasses import dataclass
-from typing import TypedDict
+from typing import TYPE_CHECKING, TypedDict
 
 import numpy as np
 from numpy.typing import NDArray
 
+if TYPE_CHECKING:
+    from ..core.metadata import ResultMetadata
+    from ..core.quality import QualityAssessment
 
-class CMJMetricsDict(TypedDict, total=False):
-    """Type-safe dictionary for CMJ metrics JSON output."""
+
+class CMJDataDict(TypedDict, total=False):
+    """Type-safe dictionary for CMJ measurement data."""
 
     jump_height_m: float
     flight_time_s: float
@@ -23,10 +27,16 @@ class CMJMetricsDict(TypedDict, total=False):
     lowest_point_frame: float
     takeoff_frame: float
     landing_frame: float
-    video_fps: float
     tracking_method: str
 
 
+class CMJResultDict(TypedDict):
+    """Type-safe dictionary for complete CMJ result with data and metadata."""
+
+    data: CMJDataDict
+    metadata: dict  # ResultMetadata.to_dict()
+
+
 @dataclass
 class CMJMetrics:
     """Metrics for a counter movement jump analysis.
@@ -47,6 +57,7 @@ class CMJMetrics:
         landing_frame: Frame where athlete lands
         video_fps: Frames per second of the analyzed video
         tracking_method: Method used for tracking ("foot" or "com")
+        quality_assessment: Optional quality assessment with confidence and warnings
     """
 
     jump_height: float
@@ -64,14 +75,16 @@ class CMJMetrics:
     landing_frame: float
     video_fps: float
     tracking_method: str
+    quality_assessment: "QualityAssessment | None" = None
+    result_metadata: "ResultMetadata | None" = None
 
-    def to_dict(self) -> CMJMetricsDict:
-        """Convert metrics to JSON-serializable dictionary.
+    def to_dict(self) -> CMJResultDict:
+        """Convert metrics to JSON-serializable dictionary with data/metadata structure.
 
         Returns:
-            Dictionary with all metrics, converting NumPy types to Python types.
+            Dictionary with nested data and metadata structure.
         """
-        return {
+        data: CMJDataDict = {
             "jump_height_m": float(self.jump_height),
             "flight_time_s": float(self.flight_time),
             "countermovement_depth_m": float(self.countermovement_depth),
@@ -93,10 +106,21 @@ class CMJMetrics:
             "lowest_point_frame": float(self.lowest_point_frame),
             "takeoff_frame": float(self.takeoff_frame),
             "landing_frame": float(self.landing_frame),
-            "video_fps": float(self.video_fps),
             "tracking_method": self.tracking_method,
         }
 
+        # Build metadata from ResultMetadata if available, otherwise use legacy quality
+        if self.result_metadata is not None:
+            metadata = self.result_metadata.to_dict()
+        elif self.quality_assessment is not None:
+            # Fallback for backwards compatibility during transition
+            metadata = {"quality": self.quality_assessment.to_dict()}
+        else:
+            # No metadata available
+            metadata = {}
+
+        return {"data": data, "metadata": metadata}
+
 
 def calculate_cmj_metrics(
     positions: NDArray[np.float64],
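
For downstream consumers, the shape of to_dict() therefore changes from a flat mapping to a nested one. A small sketch of reading it (the keys under "metadata" mirror the ResultMetadata fields built in api.py — quality, video, processing, algorithm — but their exact serialized names are assumptions about ResultMetadata.to_dict()):

    result = metrics.to_dict()

    # Measurement values now live under "data" (note that "video_fps" was dropped here).
    jump_height_m = result["data"]["jump_height_m"]
    flight_time_s = result["data"]["flight_time_s"]

    # Provenance and quality live under "metadata"; it is an empty dict if nothing was attached.
    quality_block = result["metadata"].get("quality", {})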
kinemotion/core/__init__.py CHANGED
@@ -9,6 +9,12 @@ from .filtering import (
     remove_outliers,
 )
 from .pose import PoseTracker, compute_center_of_mass
+from .quality import (
+    QualityAssessment,
+    QualityIndicators,
+    assess_jump_quality,
+    calculate_position_stability,
+)
 from .smoothing import (
     compute_acceleration_from_derivative,
     compute_velocity,
@@ -35,6 +41,11 @@ __all__ = [
     "reject_outliers",
     "adaptive_smooth_window",
     "bilateral_temporal_filter",
+    # Quality Assessment
+    "QualityAssessment",
+    "QualityIndicators",
+    "assess_jump_quality",
+    "calculate_position_stability",
     # Video I/O
     "VideoProcessor",
 ]
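
With these re-exports, the quality helpers can be pulled straight from kinemotion.core. A minimal sketch mirroring the call made in api.py (the input arrays are placeholders, and the warnings attribute on the returned object is assumed from the quality_result.warnings usage above):

    import numpy as np

    from kinemotion.core import assess_jump_quality, reject_outliers

    positions = np.linspace(0.5, 0.8, 120)   # placeholder vertical foot positions
    visibilities = np.full(120, 0.95)        # placeholder per-frame visibility scores
    _, outlier_mask = reject_outliers(
        positions, use_ransac=True, use_median=True, interpolate=False
    )

    assessment = assess_jump_quality(
        visibilities=visibilities,
        positions=positions,
        outlier_mask=outlier_mask,
        fps=30.0,
        phases_detected=True,
        phase_count=4,
    )
    for warning in assessment.warnings:
        print(warning)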