kinemotion 0.11.0__tar.gz → 0.11.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. {kinemotion-0.11.0 → kinemotion-0.11.2}/CHANGELOG.md +25 -0
  2. {kinemotion-0.11.0 → kinemotion-0.11.2}/PKG-INFO +1 -1
  3. {kinemotion-0.11.0 → kinemotion-0.11.2}/pyproject.toml +1 -1
  4. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/api.py +57 -39
  5. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cmj/analysis.py +73 -93
  6. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cmj/cli.py +86 -48
  7. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cmj/debug_overlay.py +200 -165
  8. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_cmj_analysis.py +4 -16
  9. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_cmj_kinematics.py +8 -7
  10. {kinemotion-0.11.0 → kinemotion-0.11.2}/uv.lock +1 -1
  11. {kinemotion-0.11.0 → kinemotion-0.11.2}/.dockerignore +0 -0
  12. {kinemotion-0.11.0 → kinemotion-0.11.2}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  13. {kinemotion-0.11.0 → kinemotion-0.11.2}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  14. {kinemotion-0.11.0 → kinemotion-0.11.2}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  15. {kinemotion-0.11.0 → kinemotion-0.11.2}/.github/pull_request_template.md +0 -0
  16. {kinemotion-0.11.0 → kinemotion-0.11.2}/.github/workflows/release.yml +0 -0
  17. {kinemotion-0.11.0 → kinemotion-0.11.2}/.gitignore +0 -0
  18. {kinemotion-0.11.0 → kinemotion-0.11.2}/.pre-commit-config.yaml +0 -0
  19. {kinemotion-0.11.0 → kinemotion-0.11.2}/.tool-versions +0 -0
  20. {kinemotion-0.11.0 → kinemotion-0.11.2}/CLAUDE.md +0 -0
  21. {kinemotion-0.11.0 → kinemotion-0.11.2}/CODE_OF_CONDUCT.md +0 -0
  22. {kinemotion-0.11.0 → kinemotion-0.11.2}/CONTRIBUTING.md +0 -0
  23. {kinemotion-0.11.0 → kinemotion-0.11.2}/Dockerfile +0 -0
  24. {kinemotion-0.11.0 → kinemotion-0.11.2}/GEMINI.md +0 -0
  25. {kinemotion-0.11.0 → kinemotion-0.11.2}/LICENSE +0 -0
  26. {kinemotion-0.11.0 → kinemotion-0.11.2}/README.md +0 -0
  27. {kinemotion-0.11.0 → kinemotion-0.11.2}/SECURITY.md +0 -0
  28. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/BULK_PROCESSING.md +0 -0
  29. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/CAMERA_SETUP.md +0 -0
  30. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/CAMERA_SETUP_ES.md +0 -0
  31. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/CMJ_GUIDE.md +0 -0
  32. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/ERRORS_FINDINGS.md +0 -0
  33. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/FRAMERATE.md +0 -0
  34. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/IMU_METADATA_PRESERVATION.md +0 -0
  35. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/PARAMETERS.md +0 -0
  36. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/REAL_TIME_ANALYSIS.md +0 -0
  37. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/TRIPLE_EXTENSION.md +0 -0
  38. {kinemotion-0.11.0 → kinemotion-0.11.2}/docs/VALIDATION_PLAN.md +0 -0
  39. {kinemotion-0.11.0 → kinemotion-0.11.2}/examples/bulk/README.md +0 -0
  40. {kinemotion-0.11.0 → kinemotion-0.11.2}/examples/bulk/bulk_processing.py +0 -0
  41. {kinemotion-0.11.0 → kinemotion-0.11.2}/examples/bulk/simple_example.py +0 -0
  42. {kinemotion-0.11.0 → kinemotion-0.11.2}/examples/programmatic_usage.py +0 -0
  43. {kinemotion-0.11.0 → kinemotion-0.11.2}/samples/cmjs/README.md +0 -0
  44. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/__init__.py +0 -0
  45. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cli.py +0 -0
  46. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cmj/__init__.py +0 -0
  47. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cmj/joint_angles.py +0 -0
  48. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/cmj/kinematics.py +0 -0
  49. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/core/__init__.py +0 -0
  50. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/core/auto_tuning.py +0 -0
  51. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/core/filtering.py +0 -0
  52. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/core/pose.py +0 -0
  53. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/core/smoothing.py +0 -0
  54. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/core/video_io.py +0 -0
  55. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/dropjump/__init__.py +0 -0
  56. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/dropjump/analysis.py +0 -0
  57. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/dropjump/cli.py +0 -0
  58. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/dropjump/debug_overlay.py +0 -0
  59. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/dropjump/kinematics.py +0 -0
  60. {kinemotion-0.11.0 → kinemotion-0.11.2}/src/kinemotion/py.typed +0 -0
  61. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/__init__.py +0 -0
  62. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_adaptive_threshold.py +0 -0
  63. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_api.py +0 -0
  64. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_aspect_ratio.py +0 -0
  65. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_com_estimation.py +0 -0
  66. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_contact_detection.py +0 -0
  67. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_filtering.py +0 -0
  68. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_kinematics.py +0 -0
  69. {kinemotion-0.11.0 → kinemotion-0.11.2}/tests/test_polyorder.py +0 -0
--- kinemotion-0.11.0/CHANGELOG.md
+++ kinemotion-0.11.2/CHANGELOG.md
@@ -7,6 +7,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 <!-- version list -->
 
+## v0.11.2 (2025-11-06)
+
+### Bug Fixes
+
+- **cmj**: Reduce cognitive complexity in _extract_positions_from_landmarks
+  ([`9772df6`](https://github.com/feniix/kinemotion/commit/9772df69ca8fb2a46726614dd0adda3795cf0ad1))
+
+- **cmj**: Reduce cognitive complexity in cmj_analyze CLI function
+  ([`e9c7200`](https://github.com/feniix/kinemotion/commit/e9c720081df171d2b18150a5b370c4471fdf9b19))
+
+- **cmj**: Reduce cognitive complexity in debug overlay rendering
+  ([`11f35c4`](https://github.com/feniix/kinemotion/commit/11f35c4cf675301bccfef376e12c0ed06470e259))
+
+- **cmj**: Remove unused variable and parameters in api and analysis
+  ([`e8ef607`](https://github.com/feniix/kinemotion/commit/e8ef60735711f4c715d53049477362284efca433))
+
+
+## v0.11.1 (2025-11-06)
+
+### Bug Fixes
+
+- **cmj**: Remove unused parameters and fix code quality issues
+  ([`72a1e43`](https://github.com/feniix/kinemotion/commit/72a1e43ec107e5b1c132efb10a08a09ea2864ae4))
+
+
 ## v0.11.0 (2025-11-06)
 
 ### Documentation
--- kinemotion-0.11.0/PKG-INFO
+++ kinemotion-0.11.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.11.0
+Version: 0.11.2
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
--- kinemotion-0.11.0/pyproject.toml
+++ kinemotion-0.11.2/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "kinemotion"
-version = "0.11.0"
+version = "0.11.2"
 description = "Video-based kinematic analysis for athletic performance"
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
--- kinemotion-0.11.0/src/kinemotion/api.py
+++ kinemotion-0.11.2/src/kinemotion/api.py
@@ -638,6 +638,50 @@ class CMJVideoResult:
     processing_time: float = 0.0
 
 
+def _generate_cmj_outputs(
+    output_video: str | None,
+    json_output: str | None,
+    metrics: CMJMetrics,
+    frames: list,
+    smoothed_landmarks: list,
+    video_width: int,
+    video_height: int,
+    video_display_width: int,
+    video_display_height: int,
+    video_fps: float,
+    verbose: bool,
+) -> None:
+    """Generate JSON and debug video outputs for CMJ analysis."""
+    if json_output:
+        import json
+
+        output_path = Path(json_output)
+        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
+        if verbose:
+            print(f"Metrics written to: {json_output}")
+
+    if output_video:
+        if verbose:
+            print(f"Generating debug video: {output_video}")
+
+        with CMJDebugOverlayRenderer(
+            output_video,
+            video_width,
+            video_height,
+            video_display_width,
+            video_display_height,
+            video_fps,
+        ) as renderer:
+            for i, frame in enumerate(frames):
+                annotated = renderer.render_frame(
+                    frame, smoothed_landmarks[i], i, metrics
+                )
+                renderer.write_frame(annotated)
+
+        if verbose:
+            print(f"Debug video saved: {output_video}")
+
+
 def process_cmj_video(
     video_path: str,
     quality: str = "balanced",
@@ -741,12 +785,6 @@ def process_cmj_video(
     vertical_positions, _ = _extract_vertical_positions(smoothed_landmarks)
     tracking_method = "foot"
 
-    # Calculate countermovement threshold (FPS-adjusted)
-    # POSITIVE threshold for downward motion (squatting) in normalized coordinates
-    cm_threshold = countermovement_threshold
-    if cm_threshold is None:
-        cm_threshold = 0.015 * (30.0 / video.fps)
-
     # Detect CMJ phases
     if verbose:
         print("Detecting CMJ phases...")
@@ -754,11 +792,6 @@ def process_cmj_video(
     phases = detect_cmj_phases(
         vertical_positions,
         video.fps,
-        velocity_threshold=params.velocity_threshold,
-        countermovement_threshold=cm_threshold,
-        min_contact_frames=params.min_contact_frames,
-        min_eccentric_frames=params.min_contact_frames,
-        use_curvature=params.use_curvature,
         window_length=params.smoothing_window,
         polyorder=params.polyorder,
     )
@@ -793,34 +826,19 @@ def process_cmj_video(
     )
 
     # Generate outputs if requested
-    if json_output:
-        import json
-
-        output_path = Path(json_output)
-        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
-        if verbose:
-            print(f"Metrics written to: {json_output}")
-
-    if output_video:
-        if verbose:
-            print(f"Generating debug video: {output_video}")
-
-        with CMJDebugOverlayRenderer(
-            output_video,
-            video.width,
-            video.height,
-            video.display_width,
-            video.display_height,
-            video.fps,
-        ) as renderer:
-            for i, frame in enumerate(frames):
-                annotated = renderer.render_frame(
-                    frame, smoothed_landmarks[i], i, metrics
-                )
-                renderer.write_frame(annotated)
-
-        if verbose:
-            print(f"Debug video saved: {output_video}")
+    _generate_cmj_outputs(
+        output_video,
+        json_output,
+        metrics,
+        frames,
+        smoothed_landmarks,
+        video.width,
+        video.height,
+        video.display_width,
+        video.display_height,
+        video.fps,
+        verbose,
+    )
 
     if verbose:
         print(f"\nJump height: {metrics.jump_height:.3f}m")
--- kinemotion-0.11.0/src/kinemotion/cmj/analysis.py
+++ kinemotion-0.11.2/src/kinemotion/cmj/analysis.py
@@ -102,7 +102,6 @@ def find_standing_phase(
 
 def find_countermovement_start(
     velocities: np.ndarray,
-    fps: float,
     countermovement_threshold: float = 0.015,
     min_eccentric_frames: int = 3,
     standing_start: int | None = None,
@@ -114,7 +113,6 @@
 
     Args:
         velocities: Array of SIGNED vertical velocities
-        fps: Video frame rate
        countermovement_threshold: Velocity threshold for detecting downward motion (POSITIVE)
        min_eccentric_frames: Minimum consecutive frames of downward motion
        standing_start: Optional frame where standing phase ended
@@ -143,7 +141,6 @@
 def find_lowest_point(
     positions: np.ndarray,
     velocities: np.ndarray,
-    eccentric_start: int | None = None,
     min_search_frame: int = 80,
 ) -> int:
     """
@@ -155,7 +152,6 @@
     Args:
         positions: Array of vertical positions (higher value = lower in video)
        velocities: Array of SIGNED vertical velocities (positive=down, negative=up)
-        eccentric_start: Optional frame where eccentric phase started
        min_search_frame: Minimum frame to start searching (default: frame 80)
 
     Returns:
@@ -283,8 +279,6 @@
     velocities: np.ndarray,
     lowest_point_frame: int,
     fps: float,
-    window_length: int = 5,
-    polyorder: int = 2,
 ) -> float:
     """
     Find CMJ takeoff frame as peak upward velocity during concentric phase.
@@ -297,8 +291,6 @@
         velocities: Array of SIGNED vertical velocities (negative = upward)
         lowest_point_frame: Frame at lowest point
         fps: Video frame rate
-        window_length: Window size for derivative calculations
-        polyorder: Polynomial order for Savitzky-Golay filter
 
     Returns:
         Takeoff frame with fractional precision.
@@ -381,9 +373,6 @@
     positions: np.ndarray,
     velocities: np.ndarray,
     lowest_point_frame: int,
-    velocity_threshold: float = 0.02,
-    min_flight_frames: int = 3,
-    use_curvature: bool = True,
     window_length: int = 5,
     polyorder: int = 2,
 ) -> tuple[float, float] | None:
@@ -397,9 +386,6 @@
         positions: Array of vertical positions
         velocities: Array of vertical velocities
         lowest_point_frame: Frame at lowest point
-        velocity_threshold: Velocity threshold (unused for CMJ, kept for API compatibility)
-        min_flight_frames: Minimum consecutive frames for valid flight phase
-        use_curvature: Whether to use trajectory curvature refinement
        window_length: Window size for derivative calculations
        polyorder: Polynomial order for Savitzky-Golay filter
 
@@ -417,7 +403,7 @@
 
     # Find takeoff using peak velocity method (CMJ-specific)
     takeoff_frame = find_cmj_takeoff_from_velocity_peak(
-        positions, velocities, lowest_point_frame, fps, window_length, polyorder
+        positions, velocities, lowest_point_frame, fps
     )
 
     # Find landing using position peak and impact detection
@@ -428,14 +414,76 @@
     return (takeoff_frame, landing_frame)
 
 
+def _find_takeoff_frame(
+    velocities: np.ndarray, peak_height_frame: int, fps: float
+) -> float:
+    """Find takeoff frame as peak upward velocity before peak height."""
+    takeoff_search_start = max(0, peak_height_frame - int(fps * 0.35))
+    takeoff_search_end = peak_height_frame - 2
+
+    takeoff_velocities = velocities[takeoff_search_start:takeoff_search_end]
+
+    if len(takeoff_velocities) > 0:
+        peak_vel_idx = int(np.argmin(takeoff_velocities))
+        return float(takeoff_search_start + peak_vel_idx)
+    else:
+        return float(peak_height_frame - int(fps * 0.3))
+
+
+def _find_lowest_frame(
+    velocities: np.ndarray, positions: np.ndarray, takeoff_frame: float, fps: float
+) -> float:
+    """Find lowest point frame before takeoff."""
+    lowest_search_start = max(0, int(takeoff_frame) - int(fps * 0.4))
+    lowest_search_end = int(takeoff_frame)
+
+    # Find where velocity crosses from positive to negative
+    for i in range(lowest_search_end - 1, lowest_search_start, -1):
+        if i > 0 and velocities[i] < 0 and velocities[i - 1] >= 0:
+            return float(i)
+
+    # Fallback: use maximum position
+    lowest_positions = positions[lowest_search_start:lowest_search_end]
+    if len(lowest_positions) > 0:
+        lowest_idx = int(np.argmax(lowest_positions))
+        return float(lowest_search_start + lowest_idx)
+    else:
+        return float(int(takeoff_frame) - int(fps * 0.2))
+
+
+def _find_landing_frame(
+    accelerations: np.ndarray, peak_height_frame: int, fps: float
+) -> float:
+    """Find landing frame after peak height."""
+    landing_search_start = peak_height_frame
+    landing_search_end = min(len(accelerations), peak_height_frame + int(fps * 0.5))
+    landing_accelerations = accelerations[landing_search_start:landing_search_end]
+
+    if len(landing_accelerations) > 0:
+        landing_idx = int(np.argmin(landing_accelerations))
+        return float(landing_search_start + landing_idx)
+    else:
+        return float(peak_height_frame + int(fps * 0.3))
+
+
+def _find_standing_end(velocities: np.ndarray, lowest_point: float) -> float | None:
+    """Find end of standing phase before lowest point."""
+    if lowest_point <= 20:
+        return None
+
+    standing_search = velocities[: int(lowest_point)]
+    low_vel = np.abs(standing_search) < 0.005
+    if np.any(low_vel):
+        standing_frames = np.nonzero(low_vel)[0]
+        if len(standing_frames) > 10:
+            return float(standing_frames[-1])
+
+    return None
+
+
 def detect_cmj_phases(
     positions: np.ndarray,
     fps: float,
-    velocity_threshold: float = 0.02,
-    countermovement_threshold: float = -0.015,
-    min_contact_frames: int = 3,
-    min_eccentric_frames: int = 3,
-    use_curvature: bool = True,
     window_length: int = 5,
     polyorder: int = 2,
 ) -> tuple[float | None, float, float, float] | None:
@@ -451,11 +499,6 @@
     Args:
         positions: Array of vertical positions (normalized 0-1)
         fps: Video frame rate
-        velocity_threshold: Velocity threshold (not used)
-        countermovement_threshold: Velocity threshold (not used)
-        min_contact_frames: Minimum frames for ground contact
-        min_eccentric_frames: Minimum frames for eccentric phase
-        use_curvature: Whether to use trajectory curvature refinement
        window_length: Window size for derivative calculations
        polyorder: Polynomial order for Savitzky-Golay filter
 
@@ -473,76 +516,13 @@
 
     # Step 1: Find peak height (global minimum y = highest point in frame)
     peak_height_frame = int(np.argmin(positions))
-
     if peak_height_frame < 10:
         return None  # Peak too early, invalid
 
-    # Step 2: Find takeoff as peak upward velocity
-    # Takeoff occurs at maximum upward velocity (most negative) before peak height
-    # Typical: 0.3 seconds before peak (9 frames at 30fps)
-    takeoff_search_start = max(0, peak_height_frame - int(fps * 0.35))
-    takeoff_search_end = peak_height_frame - 2  # Must be at least 2 frames before peak
-
-    takeoff_velocities = velocities[takeoff_search_start:takeoff_search_end]
-
-    if len(takeoff_velocities) > 0:
-        # Takeoff = peak upward velocity (most negative)
-        peak_vel_idx = int(np.argmin(takeoff_velocities))
-        takeoff_frame = float(takeoff_search_start + peak_vel_idx)
-    else:
-        # Fallback
-        takeoff_frame = float(peak_height_frame - int(fps * 0.3))
-
-    # Step 3: Find lowest point (countermovement bottom) before takeoff
-    # This is where velocity crosses from positive (squatting) to negative (jumping)
-    # Search backward from takeoff for where velocity was last positive/zero
-    lowest_search_start = max(0, int(takeoff_frame) - int(fps * 0.4))
-    lowest_search_end = int(takeoff_frame)
-
-    # Find where velocity crosses from positive to negative (transition point)
-    lowest_frame_found = None
-    for i in range(lowest_search_end - 1, lowest_search_start, -1):
-        if i > 0:
-            # Look for velocity crossing from positive/zero to negative
-            if velocities[i] < 0 and velocities[i - 1] >= 0:
-                lowest_frame_found = float(i)
-                break
-
-    # Fallback: use maximum position (lowest point in frame) if no velocity crossing
-    if lowest_frame_found is None:
-        lowest_positions = positions[lowest_search_start:lowest_search_end]
-        if len(lowest_positions) > 0:
-            lowest_idx = int(np.argmax(lowest_positions))
-            lowest_point = float(lowest_search_start + lowest_idx)
-        else:
-            lowest_point = float(int(takeoff_frame) - int(fps * 0.2))
-    else:
-        lowest_point = lowest_frame_found
-
-    # Step 4: Find landing (impact after peak height)
-    # Landing shows as large negative acceleration spike (impact deceleration)
-    landing_search_start = peak_height_frame
-    landing_search_end = min(len(accelerations), peak_height_frame + int(fps * 0.5))
-    landing_accelerations = accelerations[landing_search_start:landing_search_end]
-
-    if len(landing_accelerations) > 0:
-        # Find most negative acceleration (maximum impact deceleration)
-        # Landing acceleration should be around -0.008 to -0.010
-        landing_idx = int(np.argmin(landing_accelerations))  # Most negative = impact
-        landing_frame = float(landing_search_start + landing_idx)
-    else:
-        landing_frame = float(peak_height_frame + int(fps * 0.3))
-
-    # Optional: Find standing phase (not critical)
-    standing_end = None
-    if lowest_point > 20:
-        # Look for low-velocity period before lowest point
-        standing_search = velocities[: int(lowest_point)]
-        low_vel = np.abs(standing_search) < 0.005
-        if np.any(low_vel):
-            # Find last low-velocity frame before countermovement
-            standing_frames = np.where(low_vel)[0]
-            if len(standing_frames) > 10:
-                standing_end = float(standing_frames[-1])
+    # Step 2-4: Find all phases using helper functions
+    takeoff_frame = _find_takeoff_frame(velocities, peak_height_frame, fps)
+    lowest_point = _find_lowest_frame(velocities, positions, takeoff_frame, fps)
+    landing_frame = _find_landing_frame(accelerations, peak_height_frame, fps)
+    standing_end = _find_standing_end(velocities, lowest_point)
 
     return (standing_end, lowest_point, takeoff_frame, landing_frame)
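Aside (not part of the diff): detect_cmj_phases now accepts only positions, fps, window_length and polyorder, so callers still passing the removed keywords (velocity_threshold, countermovement_threshold, min_contact_frames, min_eccentric_frames, use_curvature) would fail with a TypeError. A minimal sketch against the 0.11.2 signature (foot_y_per_frame is a hypothetical placeholder for the normalized 0-1 vertical positions):

    import numpy as np
    from kinemotion.cmj.analysis import detect_cmj_phases

    positions = np.asarray(foot_y_per_frame, dtype=float)  # one value per frame
    phases = detect_cmj_phases(positions, fps=30.0, window_length=5, polyorder=2)
    if phases is not None:
        standing_end, lowest_point, takeoff_frame, landing_frame = phases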
--- kinemotion-0.11.0/src/kinemotion/cmj/cli.py
+++ kinemotion-0.11.2/src/kinemotion/cmj/cli.py
@@ -39,6 +39,64 @@ class AnalysisParameters:
     tracking_confidence: float | None = None
 
 
+def _collect_video_files(video_path: tuple[str, ...]) -> list[str]:
+    """Expand glob patterns and collect all video files."""
+    video_files: list[str] = []
+    for pattern in video_path:
+        expanded = glob.glob(pattern)
+        if expanded:
+            video_files.extend(expanded)
+        elif Path(pattern).exists():
+            video_files.append(pattern)
+        else:
+            click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
+    return video_files
+
+
+def _generate_output_paths(
+    video: str, output_dir: str | None, json_output_dir: str | None
+) -> tuple[str | None, str | None]:
+    """Generate output paths for debug video and JSON."""
+    out_path = None
+    json_path = None
+    if output_dir:
+        out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
+    if json_output_dir:
+        json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
+    return out_path, json_path
+
+
+def _process_batch_videos(
+    video_files: list[str],
+    output_dir: str | None,
+    json_output_dir: str | None,
+    quality_preset: QualityPreset,
+    verbose: bool,
+    expert_params: AnalysisParameters,
+    workers: int,
+) -> None:
+    """Process multiple videos in batch mode."""
+    click.echo(
+        f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
+        err=True,
+    )
+    click.echo("Note: Batch processing not yet fully implemented", err=True)
+    click.echo("Processing videos sequentially...", err=True)
+
+    for video in video_files:
+        try:
+            click.echo(f"\nProcessing: {video}", err=True)
+            out_path, json_path = _generate_output_paths(
+                video, output_dir, json_output_dir
+            )
+            _process_single(
+                video, out_path, json_path, quality_preset, verbose, expert_params
+            )
+        except Exception as e:
+            click.echo(f"Error processing {video}: {e}", err=True)
+            continue
+
+
 @click.command(name="cmj-analyze")
 @click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
 @click.option(
@@ -189,15 +247,7 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
         --json-output-dir results/ --csv-summary summary.csv
     """
     # Expand glob patterns and collect all video files
-    video_files: list[str] = []
-    for pattern in video_path:
-        expanded = glob.glob(pattern)
-        if expanded:
-            video_files.extend(expanded)
-        elif Path(pattern).exists():
-            video_files.append(pattern)
-        else:
-            click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
+    video_files = _collect_video_files(video_path)
 
     if not video_files:
         click.echo("Error: No video files found", err=True)
@@ -220,27 +270,15 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
     )
 
     if use_batch:
-        click.echo(
-            f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
-            err=True,
+        _process_batch_videos(
+            video_files,
+            output_dir,
+            json_output_dir,
+            quality_preset,
+            verbose,
+            expert_params,
+            workers,
         )
-        click.echo("Note: Batch processing not yet fully implemented", err=True)
-        click.echo("Processing videos sequentially...", err=True)
-        for video in video_files:
-            try:
-                click.echo(f"\nProcessing: {video}", err=True)
-                out_path = None
-                json_path = None
-                if output_dir:
-                    out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
-                if json_output_dir:
-                    json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
-                _process_single(
-                    video, out_path, json_path, quality_preset, verbose, expert_params
-                )
-            except Exception as e:
-                click.echo(f"Error processing {video}: {e}", err=True)
-                continue
 
     else:
         # Single video mode
@@ -376,6 +414,22 @@ def _smooth_landmark_sequence(
     )
 
 
+def _get_foot_position(frame_landmarks: dict | None, last_position: float) -> float:
+    """Extract average foot position from frame landmarks."""
+    if not frame_landmarks:
+        return last_position
+
+    # Average foot position (ankles and heels)
+    foot_y_values = []
+    for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
+        if key in frame_landmarks:
+            foot_y_values.append(frame_landmarks[key][1])
+
+    if foot_y_values:
+        return float(np.mean(foot_y_values))
+    return last_position
+
+
 def _extract_positions_from_landmarks(
     smoothed_landmarks: list,
 ) -> tuple[np.ndarray, str]:
@@ -391,20 +445,9 @@ def _extract_positions_from_landmarks(
     position_list: list[float] = []
 
     for frame_landmarks in smoothed_landmarks:
-        if frame_landmarks:
-            # Average foot position (ankles and heels)
-            foot_y_values = []
-            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-                if key in frame_landmarks:
-                    foot_y_values.append(frame_landmarks[key][1])
-
-            if foot_y_values:
-                avg_y = float(np.mean(foot_y_values))
-                position_list.append(avg_y)
-            else:
-                position_list.append(position_list[-1] if position_list else 0.5)
-        else:
-            position_list.append(position_list[-1] if position_list else 0.5)
+        last_pos = position_list[-1] if position_list else 0.5
+        position = _get_foot_position(frame_landmarks, last_pos)
+        position_list.append(position)
 
     return np.array(position_list), "foot"
 
@@ -474,11 +517,6 @@ def _process_single(
     phases = detect_cmj_phases(
         vertical_positions,
         video.fps,
-        velocity_threshold=params.velocity_threshold,
-        countermovement_threshold=countermovement_threshold,
-        min_contact_frames=params.min_contact_frames,
-        min_eccentric_frames=params.min_contact_frames,
-        use_curvature=params.use_curvature,
         window_length=params.smoothing_window,
         polyorder=params.polyorder,
     )
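Aside (not part of the diff): a minimal sketch of exercising the refactored cmj-analyze command in-process with Click's test runner. Apart from --json-output-dir and --csv-summary, which are quoted in the hunks above, option spellings are deliberately omitted rather than guessed:

    from click.testing import CliRunner
    from kinemotion.cmj.cli import cmj_analyze

    runner = CliRunner()
    # Glob patterns are expanded by the new _collect_video_files() helper.
    result = runner.invoke(
        cmj_analyze,
        ["videos/*.mp4", "--json-output-dir", "results/", "--csv-summary", "summary.csv"],
    )
    print(result.output)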