kinemotion 0.11.0__py3-none-any.whl → 0.11.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. See the registry's advisory page for more details.

kinemotion/api.py CHANGED
@@ -638,6 +638,50 @@ class CMJVideoResult:
638
638
  processing_time: float = 0.0
639
639
 
640
640
 
641
+ def _generate_cmj_outputs(
642
+ output_video: str | None,
643
+ json_output: str | None,
644
+ metrics: CMJMetrics,
645
+ frames: list,
646
+ smoothed_landmarks: list,
647
+ video_width: int,
648
+ video_height: int,
649
+ video_display_width: int,
650
+ video_display_height: int,
651
+ video_fps: float,
652
+ verbose: bool,
653
+ ) -> None:
654
+ """Generate JSON and debug video outputs for CMJ analysis."""
655
+ if json_output:
656
+ import json
657
+
658
+ output_path = Path(json_output)
659
+ output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
660
+ if verbose:
661
+ print(f"Metrics written to: {json_output}")
662
+
663
+ if output_video:
664
+ if verbose:
665
+ print(f"Generating debug video: {output_video}")
666
+
667
+ with CMJDebugOverlayRenderer(
668
+ output_video,
669
+ video_width,
670
+ video_height,
671
+ video_display_width,
672
+ video_display_height,
673
+ video_fps,
674
+ ) as renderer:
675
+ for i, frame in enumerate(frames):
676
+ annotated = renderer.render_frame(
677
+ frame, smoothed_landmarks[i], i, metrics
678
+ )
679
+ renderer.write_frame(annotated)
680
+
681
+ if verbose:
682
+ print(f"Debug video saved: {output_video}")
683
+
684
+
641
685
  def process_cmj_video(
642
686
  video_path: str,
643
687
  quality: str = "balanced",
@@ -741,12 +785,6 @@ def process_cmj_video(
741
785
  vertical_positions, _ = _extract_vertical_positions(smoothed_landmarks)
742
786
  tracking_method = "foot"
743
787
 
744
- # Calculate countermovement threshold (FPS-adjusted)
745
- # POSITIVE threshold for downward motion (squatting) in normalized coordinates
746
- cm_threshold = countermovement_threshold
747
- if cm_threshold is None:
748
- cm_threshold = 0.015 * (30.0 / video.fps)
749
-
750
788
  # Detect CMJ phases
751
789
  if verbose:
752
790
  print("Detecting CMJ phases...")
@@ -754,11 +792,6 @@ def process_cmj_video(
754
792
  phases = detect_cmj_phases(
755
793
  vertical_positions,
756
794
  video.fps,
757
- velocity_threshold=params.velocity_threshold,
758
- countermovement_threshold=cm_threshold,
759
- min_contact_frames=params.min_contact_frames,
760
- min_eccentric_frames=params.min_contact_frames,
761
- use_curvature=params.use_curvature,
762
795
  window_length=params.smoothing_window,
763
796
  polyorder=params.polyorder,
764
797
  )
@@ -793,34 +826,19 @@ def process_cmj_video(
793
826
  )
794
827
 
795
828
  # Generate outputs if requested
796
- if json_output:
797
- import json
798
-
799
- output_path = Path(json_output)
800
- output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
801
- if verbose:
802
- print(f"Metrics written to: {json_output}")
803
-
804
- if output_video:
805
- if verbose:
806
- print(f"Generating debug video: {output_video}")
807
-
808
- with CMJDebugOverlayRenderer(
809
- output_video,
810
- video.width,
811
- video.height,
812
- video.display_width,
813
- video.display_height,
814
- video.fps,
815
- ) as renderer:
816
- for i, frame in enumerate(frames):
817
- annotated = renderer.render_frame(
818
- frame, smoothed_landmarks[i], i, metrics
819
- )
820
- renderer.write_frame(annotated)
821
-
822
- if verbose:
823
- print(f"Debug video saved: {output_video}")
829
+ _generate_cmj_outputs(
830
+ output_video,
831
+ json_output,
832
+ metrics,
833
+ frames,
834
+ smoothed_landmarks,
835
+ video.width,
836
+ video.height,
837
+ video.display_width,
838
+ video.display_height,
839
+ video.fps,
840
+ verbose,
841
+ )
824
842
 
825
843
  if verbose:
826
844
  print(f"\nJump height: {metrics.jump_height:.3f}m")
@@ -102,7 +102,6 @@ def find_standing_phase(
102
102
 
103
103
  def find_countermovement_start(
104
104
  velocities: np.ndarray,
105
- fps: float,
106
105
  countermovement_threshold: float = 0.015,
107
106
  min_eccentric_frames: int = 3,
108
107
  standing_start: int | None = None,
@@ -114,7 +113,6 @@ def find_countermovement_start(
114
113
 
115
114
  Args:
116
115
  velocities: Array of SIGNED vertical velocities
117
- fps: Video frame rate
118
116
  countermovement_threshold: Velocity threshold for detecting downward motion (POSITIVE)
119
117
  min_eccentric_frames: Minimum consecutive frames of downward motion
120
118
  standing_start: Optional frame where standing phase ended
@@ -143,7 +141,6 @@ def find_countermovement_start(
143
141
  def find_lowest_point(
144
142
  positions: np.ndarray,
145
143
  velocities: np.ndarray,
146
- eccentric_start: int | None = None,
147
144
  min_search_frame: int = 80,
148
145
  ) -> int:
149
146
  """
@@ -155,7 +152,6 @@ def find_lowest_point(
155
152
  Args:
156
153
  positions: Array of vertical positions (higher value = lower in video)
157
154
  velocities: Array of SIGNED vertical velocities (positive=down, negative=up)
158
- eccentric_start: Optional frame where eccentric phase started
159
155
  min_search_frame: Minimum frame to start searching (default: frame 80)
160
156
 
161
157
  Returns:
@@ -283,8 +279,6 @@ def find_cmj_takeoff_from_velocity_peak(
283
279
  velocities: np.ndarray,
284
280
  lowest_point_frame: int,
285
281
  fps: float,
286
- window_length: int = 5,
287
- polyorder: int = 2,
288
282
  ) -> float:
289
283
  """
290
284
  Find CMJ takeoff frame as peak upward velocity during concentric phase.
@@ -297,8 +291,6 @@ def find_cmj_takeoff_from_velocity_peak(
297
291
  velocities: Array of SIGNED vertical velocities (negative = upward)
298
292
  lowest_point_frame: Frame at lowest point
299
293
  fps: Video frame rate
300
- window_length: Window size for derivative calculations
301
- polyorder: Polynomial order for Savitzky-Golay filter
302
294
 
303
295
  Returns:
304
296
  Takeoff frame with fractional precision.
@@ -381,9 +373,6 @@ def find_interpolated_takeoff_landing(
381
373
  positions: np.ndarray,
382
374
  velocities: np.ndarray,
383
375
  lowest_point_frame: int,
384
- velocity_threshold: float = 0.02,
385
- min_flight_frames: int = 3,
386
- use_curvature: bool = True,
387
376
  window_length: int = 5,
388
377
  polyorder: int = 2,
389
378
  ) -> tuple[float, float] | None:
@@ -397,9 +386,6 @@ def find_interpolated_takeoff_landing(
397
386
  positions: Array of vertical positions
398
387
  velocities: Array of vertical velocities
399
388
  lowest_point_frame: Frame at lowest point
400
- velocity_threshold: Velocity threshold (unused for CMJ, kept for API compatibility)
401
- min_flight_frames: Minimum consecutive frames for valid flight phase
402
- use_curvature: Whether to use trajectory curvature refinement
403
389
  window_length: Window size for derivative calculations
404
390
  polyorder: Polynomial order for Savitzky-Golay filter
405
391
 
@@ -417,7 +403,7 @@ def find_interpolated_takeoff_landing(
417
403
 
418
404
  # Find takeoff using peak velocity method (CMJ-specific)
419
405
  takeoff_frame = find_cmj_takeoff_from_velocity_peak(
420
- positions, velocities, lowest_point_frame, fps, window_length, polyorder
406
+ positions, velocities, lowest_point_frame, fps
421
407
  )
422
408
 
423
409
  # Find landing using position peak and impact detection
@@ -428,14 +414,76 @@ def find_interpolated_takeoff_landing(
428
414
  return (takeoff_frame, landing_frame)
429
415
 
430
416
 
417
+ def _find_takeoff_frame(
418
+ velocities: np.ndarray, peak_height_frame: int, fps: float
419
+ ) -> float:
420
+ """Find takeoff frame as peak upward velocity before peak height."""
421
+ takeoff_search_start = max(0, peak_height_frame - int(fps * 0.35))
422
+ takeoff_search_end = peak_height_frame - 2
423
+
424
+ takeoff_velocities = velocities[takeoff_search_start:takeoff_search_end]
425
+
426
+ if len(takeoff_velocities) > 0:
427
+ peak_vel_idx = int(np.argmin(takeoff_velocities))
428
+ return float(takeoff_search_start + peak_vel_idx)
429
+ else:
430
+ return float(peak_height_frame - int(fps * 0.3))
431
+
432
+
433
+ def _find_lowest_frame(
434
+ velocities: np.ndarray, positions: np.ndarray, takeoff_frame: float, fps: float
435
+ ) -> float:
436
+ """Find lowest point frame before takeoff."""
437
+ lowest_search_start = max(0, int(takeoff_frame) - int(fps * 0.4))
438
+ lowest_search_end = int(takeoff_frame)
439
+
440
+ # Find where velocity crosses from positive to negative
441
+ for i in range(lowest_search_end - 1, lowest_search_start, -1):
442
+ if i > 0 and velocities[i] < 0 and velocities[i - 1] >= 0:
443
+ return float(i)
444
+
445
+ # Fallback: use maximum position
446
+ lowest_positions = positions[lowest_search_start:lowest_search_end]
447
+ if len(lowest_positions) > 0:
448
+ lowest_idx = int(np.argmax(lowest_positions))
449
+ return float(lowest_search_start + lowest_idx)
450
+ else:
451
+ return float(int(takeoff_frame) - int(fps * 0.2))
452
+
453
+
454
+ def _find_landing_frame(
455
+ accelerations: np.ndarray, peak_height_frame: int, fps: float
456
+ ) -> float:
457
+ """Find landing frame after peak height."""
458
+ landing_search_start = peak_height_frame
459
+ landing_search_end = min(len(accelerations), peak_height_frame + int(fps * 0.5))
460
+ landing_accelerations = accelerations[landing_search_start:landing_search_end]
461
+
462
+ if len(landing_accelerations) > 0:
463
+ landing_idx = int(np.argmin(landing_accelerations))
464
+ return float(landing_search_start + landing_idx)
465
+ else:
466
+ return float(peak_height_frame + int(fps * 0.3))
467
+
468
+
469
+ def _find_standing_end(velocities: np.ndarray, lowest_point: float) -> float | None:
470
+ """Find end of standing phase before lowest point."""
471
+ if lowest_point <= 20:
472
+ return None
473
+
474
+ standing_search = velocities[: int(lowest_point)]
475
+ low_vel = np.abs(standing_search) < 0.005
476
+ if np.any(low_vel):
477
+ standing_frames = np.nonzero(low_vel)[0]
478
+ if len(standing_frames) > 10:
479
+ return float(standing_frames[-1])
480
+
481
+ return None
482
+
483
+
431
484
  def detect_cmj_phases(
432
485
  positions: np.ndarray,
433
486
  fps: float,
434
- velocity_threshold: float = 0.02,
435
- countermovement_threshold: float = -0.015,
436
- min_contact_frames: int = 3,
437
- min_eccentric_frames: int = 3,
438
- use_curvature: bool = True,
439
487
  window_length: int = 5,
440
488
  polyorder: int = 2,
441
489
  ) -> tuple[float | None, float, float, float] | None:
@@ -451,11 +499,6 @@ def detect_cmj_phases(
451
499
  Args:
452
500
  positions: Array of vertical positions (normalized 0-1)
453
501
  fps: Video frame rate
454
- velocity_threshold: Velocity threshold (not used)
455
- countermovement_threshold: Velocity threshold (not used)
456
- min_contact_frames: Minimum frames for ground contact
457
- min_eccentric_frames: Minimum frames for eccentric phase
458
- use_curvature: Whether to use trajectory curvature refinement
459
502
  window_length: Window size for derivative calculations
460
503
  polyorder: Polynomial order for Savitzky-Golay filter
461
504
 
@@ -473,76 +516,13 @@ def detect_cmj_phases(
473
516
 
474
517
  # Step 1: Find peak height (global minimum y = highest point in frame)
475
518
  peak_height_frame = int(np.argmin(positions))
476
-
477
519
  if peak_height_frame < 10:
478
520
  return None # Peak too early, invalid
479
521
 
480
- # Step 2: Find takeoff as peak upward velocity
481
- # Takeoff occurs at maximum upward velocity (most negative) before peak height
482
- # Typical: 0.3 seconds before peak (9 frames at 30fps)
483
- takeoff_search_start = max(0, peak_height_frame - int(fps * 0.35))
484
- takeoff_search_end = peak_height_frame - 2 # Must be at least 2 frames before peak
485
-
486
- takeoff_velocities = velocities[takeoff_search_start:takeoff_search_end]
487
-
488
- if len(takeoff_velocities) > 0:
489
- # Takeoff = peak upward velocity (most negative)
490
- peak_vel_idx = int(np.argmin(takeoff_velocities))
491
- takeoff_frame = float(takeoff_search_start + peak_vel_idx)
492
- else:
493
- # Fallback
494
- takeoff_frame = float(peak_height_frame - int(fps * 0.3))
495
-
496
- # Step 3: Find lowest point (countermovement bottom) before takeoff
497
- # This is where velocity crosses from positive (squatting) to negative (jumping)
498
- # Search backward from takeoff for where velocity was last positive/zero
499
- lowest_search_start = max(0, int(takeoff_frame) - int(fps * 0.4))
500
- lowest_search_end = int(takeoff_frame)
501
-
502
- # Find where velocity crosses from positive to negative (transition point)
503
- lowest_frame_found = None
504
- for i in range(lowest_search_end - 1, lowest_search_start, -1):
505
- if i > 0:
506
- # Look for velocity crossing from positive/zero to negative
507
- if velocities[i] < 0 and velocities[i - 1] >= 0:
508
- lowest_frame_found = float(i)
509
- break
510
-
511
- # Fallback: use maximum position (lowest point in frame) if no velocity crossing
512
- if lowest_frame_found is None:
513
- lowest_positions = positions[lowest_search_start:lowest_search_end]
514
- if len(lowest_positions) > 0:
515
- lowest_idx = int(np.argmax(lowest_positions))
516
- lowest_point = float(lowest_search_start + lowest_idx)
517
- else:
518
- lowest_point = float(int(takeoff_frame) - int(fps * 0.2))
519
- else:
520
- lowest_point = lowest_frame_found
521
-
522
- # Step 4: Find landing (impact after peak height)
523
- # Landing shows as large negative acceleration spike (impact deceleration)
524
- landing_search_start = peak_height_frame
525
- landing_search_end = min(len(accelerations), peak_height_frame + int(fps * 0.5))
526
- landing_accelerations = accelerations[landing_search_start:landing_search_end]
527
-
528
- if len(landing_accelerations) > 0:
529
- # Find most negative acceleration (maximum impact deceleration)
530
- # Landing acceleration should be around -0.008 to -0.010
531
- landing_idx = int(np.argmin(landing_accelerations)) # Most negative = impact
532
- landing_frame = float(landing_search_start + landing_idx)
533
- else:
534
- landing_frame = float(peak_height_frame + int(fps * 0.3))
535
-
536
- # Optional: Find standing phase (not critical)
537
- standing_end = None
538
- if lowest_point > 20:
539
- # Look for low-velocity period before lowest point
540
- standing_search = velocities[: int(lowest_point)]
541
- low_vel = np.abs(standing_search) < 0.005
542
- if np.any(low_vel):
543
- # Find last low-velocity frame before countermovement
544
- standing_frames = np.where(low_vel)[0]
545
- if len(standing_frames) > 10:
546
- standing_end = float(standing_frames[-1])
522
+ # Step 2-4: Find all phases using helper functions
523
+ takeoff_frame = _find_takeoff_frame(velocities, peak_height_frame, fps)
524
+ lowest_point = _find_lowest_frame(velocities, positions, takeoff_frame, fps)
525
+ landing_frame = _find_landing_frame(accelerations, peak_height_frame, fps)
526
+ standing_end = _find_standing_end(velocities, lowest_point)
547
527
 
548
528
  return (standing_end, lowest_point, takeoff_frame, landing_frame)
kinemotion/cmj/cli.py CHANGED
@@ -39,6 +39,64 @@ class AnalysisParameters:
39
39
  tracking_confidence: float | None = None
40
40
 
41
41
 
42
+ def _collect_video_files(video_path: tuple[str, ...]) -> list[str]:
43
+ """Expand glob patterns and collect all video files."""
44
+ video_files: list[str] = []
45
+ for pattern in video_path:
46
+ expanded = glob.glob(pattern)
47
+ if expanded:
48
+ video_files.extend(expanded)
49
+ elif Path(pattern).exists():
50
+ video_files.append(pattern)
51
+ else:
52
+ click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
53
+ return video_files
54
+
55
+
56
+ def _generate_output_paths(
57
+ video: str, output_dir: str | None, json_output_dir: str | None
58
+ ) -> tuple[str | None, str | None]:
59
+ """Generate output paths for debug video and JSON."""
60
+ out_path = None
61
+ json_path = None
62
+ if output_dir:
63
+ out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
64
+ if json_output_dir:
65
+ json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
66
+ return out_path, json_path
67
+
68
+
69
+ def _process_batch_videos(
70
+ video_files: list[str],
71
+ output_dir: str | None,
72
+ json_output_dir: str | None,
73
+ quality_preset: QualityPreset,
74
+ verbose: bool,
75
+ expert_params: AnalysisParameters,
76
+ workers: int,
77
+ ) -> None:
78
+ """Process multiple videos in batch mode."""
79
+ click.echo(
80
+ f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
81
+ err=True,
82
+ )
83
+ click.echo("Note: Batch processing not yet fully implemented", err=True)
84
+ click.echo("Processing videos sequentially...", err=True)
85
+
86
+ for video in video_files:
87
+ try:
88
+ click.echo(f"\nProcessing: {video}", err=True)
89
+ out_path, json_path = _generate_output_paths(
90
+ video, output_dir, json_output_dir
91
+ )
92
+ _process_single(
93
+ video, out_path, json_path, quality_preset, verbose, expert_params
94
+ )
95
+ except Exception as e:
96
+ click.echo(f"Error processing {video}: {e}", err=True)
97
+ continue
98
+
99
+
42
100
  @click.command(name="cmj-analyze")
43
101
  @click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
44
102
  @click.option(
@@ -189,15 +247,7 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
189
247
  --json-output-dir results/ --csv-summary summary.csv
190
248
  """
191
249
  # Expand glob patterns and collect all video files
192
- video_files: list[str] = []
193
- for pattern in video_path:
194
- expanded = glob.glob(pattern)
195
- if expanded:
196
- video_files.extend(expanded)
197
- elif Path(pattern).exists():
198
- video_files.append(pattern)
199
- else:
200
- click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
250
+ video_files = _collect_video_files(video_path)
201
251
 
202
252
  if not video_files:
203
253
  click.echo("Error: No video files found", err=True)
@@ -220,27 +270,15 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
220
270
  )
221
271
 
222
272
  if use_batch:
223
- click.echo(
224
- f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
225
- err=True,
273
+ _process_batch_videos(
274
+ video_files,
275
+ output_dir,
276
+ json_output_dir,
277
+ quality_preset,
278
+ verbose,
279
+ expert_params,
280
+ workers,
226
281
  )
227
- click.echo("Note: Batch processing not yet fully implemented", err=True)
228
- click.echo("Processing videos sequentially...", err=True)
229
- for video in video_files:
230
- try:
231
- click.echo(f"\nProcessing: {video}", err=True)
232
- out_path = None
233
- json_path = None
234
- if output_dir:
235
- out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
236
- if json_output_dir:
237
- json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
238
- _process_single(
239
- video, out_path, json_path, quality_preset, verbose, expert_params
240
- )
241
- except Exception as e:
242
- click.echo(f"Error processing {video}: {e}", err=True)
243
- continue
244
282
  else:
245
283
  # Single video mode
246
284
  try:
@@ -376,6 +414,22 @@ def _smooth_landmark_sequence(
376
414
  )
377
415
 
378
416
 
417
+ def _get_foot_position(frame_landmarks: dict | None, last_position: float) -> float:
418
+ """Extract average foot position from frame landmarks."""
419
+ if not frame_landmarks:
420
+ return last_position
421
+
422
+ # Average foot position (ankles and heels)
423
+ foot_y_values = []
424
+ for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
425
+ if key in frame_landmarks:
426
+ foot_y_values.append(frame_landmarks[key][1])
427
+
428
+ if foot_y_values:
429
+ return float(np.mean(foot_y_values))
430
+ return last_position
431
+
432
+
379
433
  def _extract_positions_from_landmarks(
380
434
  smoothed_landmarks: list,
381
435
  ) -> tuple[np.ndarray, str]:
@@ -391,20 +445,9 @@ def _extract_positions_from_landmarks(
391
445
  position_list: list[float] = []
392
446
 
393
447
  for frame_landmarks in smoothed_landmarks:
394
- if frame_landmarks:
395
- # Average foot position (ankles and heels)
396
- foot_y_values = []
397
- for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
398
- if key in frame_landmarks:
399
- foot_y_values.append(frame_landmarks[key][1])
400
-
401
- if foot_y_values:
402
- avg_y = float(np.mean(foot_y_values))
403
- position_list.append(avg_y)
404
- else:
405
- position_list.append(position_list[-1] if position_list else 0.5)
406
- else:
407
- position_list.append(position_list[-1] if position_list else 0.5)
448
+ last_pos = position_list[-1] if position_list else 0.5
449
+ position = _get_foot_position(frame_landmarks, last_pos)
450
+ position_list.append(position)
408
451
 
409
452
  return np.array(position_list), "foot"
410
453
 
@@ -474,11 +517,6 @@ def _process_single(
474
517
  phases = detect_cmj_phases(
475
518
  vertical_positions,
476
519
  video.fps,
477
- velocity_threshold=params.velocity_threshold,
478
- countermovement_threshold=countermovement_threshold,
479
- min_contact_frames=params.min_contact_frames,
480
- min_eccentric_frames=params.min_contact_frames,
481
- use_curvature=params.use_curvature,
482
520
  window_length=params.smoothing_window,
483
521
  polyorder=params.polyorder,
484
522
  )
@@ -98,6 +98,76 @@ class CMJDebugOverlayRenderer:
98
98
  }
99
99
  return colors.get(phase, (128, 128, 128))
100
100
 
101
+ def _get_skeleton_segments(
102
+ self, side_prefix: str
103
+ ) -> list[tuple[str, str, tuple[int, int, int], int]]:
104
+ """Get skeleton segments for one side of the body."""
105
+ return [
106
+ (f"{side_prefix}heel", f"{side_prefix}ankle", (0, 255, 255), 3), # Foot
107
+ (
108
+ f"{side_prefix}heel",
109
+ f"{side_prefix}foot_index",
110
+ (0, 255, 255),
111
+ 2,
112
+ ), # Alt foot
113
+ (f"{side_prefix}ankle", f"{side_prefix}knee", (255, 100, 100), 4), # Shin
114
+ (f"{side_prefix}knee", f"{side_prefix}hip", (100, 255, 100), 4), # Femur
115
+ (
116
+ f"{side_prefix}hip",
117
+ f"{side_prefix}shoulder",
118
+ (100, 100, 255),
119
+ 4,
120
+ ), # Trunk
121
+ (f"{side_prefix}shoulder", "nose", (150, 150, 255), 2), # Neck
122
+ ]
123
+
124
+ def _draw_segment(
125
+ self,
126
+ frame: np.ndarray,
127
+ landmarks: dict[str, tuple[float, float, float]],
128
+ start_key: str,
129
+ end_key: str,
130
+ color: tuple[int, int, int],
131
+ thickness: int,
132
+ ) -> None:
133
+ """Draw a single skeleton segment if both endpoints are visible."""
134
+ if start_key not in landmarks or end_key not in landmarks:
135
+ return
136
+
137
+ start_vis = landmarks[start_key][2]
138
+ end_vis = landmarks[end_key][2]
139
+
140
+ # Very low threshold to show as much as possible
141
+ if start_vis > 0.2 and end_vis > 0.2:
142
+ start_x = int(landmarks[start_key][0] * self.width)
143
+ start_y = int(landmarks[start_key][1] * self.height)
144
+ end_x = int(landmarks[end_key][0] * self.width)
145
+ end_y = int(landmarks[end_key][1] * self.height)
146
+
147
+ cv2.line(frame, (start_x, start_y), (end_x, end_y), color, thickness)
148
+
149
+ def _draw_joints(
150
+ self,
151
+ frame: np.ndarray,
152
+ landmarks: dict[str, tuple[float, float, float]],
153
+ side_prefix: str,
154
+ ) -> None:
155
+ """Draw joint circles for one side of the body."""
156
+ joint_keys = [
157
+ f"{side_prefix}heel",
158
+ f"{side_prefix}foot_index",
159
+ f"{side_prefix}ankle",
160
+ f"{side_prefix}knee",
161
+ f"{side_prefix}hip",
162
+ f"{side_prefix}shoulder",
163
+ ]
164
+ for key in joint_keys:
165
+ if key in landmarks and landmarks[key][2] > 0.2:
166
+ jx = int(landmarks[key][0] * self.width)
167
+ jy = int(landmarks[key][1] * self.height)
168
+ cv2.circle(frame, (jx, jy), 6, (255, 255, 255), -1)
169
+ cv2.circle(frame, (jx, jy), 8, (0, 0, 0), 2)
170
+
101
171
  def _draw_skeleton(
102
172
  self, frame: np.ndarray, landmarks: dict[str, tuple[float, float, float]]
103
173
  ) -> None:
@@ -112,68 +182,16 @@ class CMJDebugOverlayRenderer:
112
182
  """
113
183
  # Try both sides and draw all visible segments
114
184
  for side_prefix in ["right_", "left_"]:
115
- segments = [
116
- (f"{side_prefix}heel", f"{side_prefix}ankle", (0, 255, 255), 3), # Foot
117
- (
118
- f"{side_prefix}heel",
119
- f"{side_prefix}foot_index",
120
- (0, 255, 255),
121
- 2,
122
- ), # Alt foot
123
- (
124
- f"{side_prefix}ankle",
125
- f"{side_prefix}knee",
126
- (255, 100, 100),
127
- 4,
128
- ), # Shin
129
- (
130
- f"{side_prefix}knee",
131
- f"{side_prefix}hip",
132
- (100, 255, 100),
133
- 4,
134
- ), # Femur
135
- (
136
- f"{side_prefix}hip",
137
- f"{side_prefix}shoulder",
138
- (100, 100, 255),
139
- 4,
140
- ), # Trunk
141
- # Additional segments for better visualization
142
- (f"{side_prefix}shoulder", "nose", (150, 150, 255), 2), # Neck
143
- ]
185
+ segments = self._get_skeleton_segments(side_prefix)
144
186
 
145
187
  # Draw ALL visible segments (not just one side)
146
188
  for start_key, end_key, color, thickness in segments:
147
- if start_key in landmarks and end_key in landmarks:
148
- start_vis = landmarks[start_key][2]
149
- end_vis = landmarks[end_key][2]
150
-
151
- # Very low threshold to show as much as possible
152
- if start_vis > 0.2 and end_vis > 0.2:
153
- start_x = int(landmarks[start_key][0] * self.width)
154
- start_y = int(landmarks[start_key][1] * self.height)
155
- end_x = int(landmarks[end_key][0] * self.width)
156
- end_y = int(landmarks[end_key][1] * self.height)
157
-
158
- cv2.line(
159
- frame, (start_x, start_y), (end_x, end_y), color, thickness
160
- )
189
+ self._draw_segment(
190
+ frame, landmarks, start_key, end_key, color, thickness
191
+ )
161
192
 
162
193
  # Draw joints as circles for this side
163
- joint_keys = [
164
- f"{side_prefix}heel",
165
- f"{side_prefix}foot_index",
166
- f"{side_prefix}ankle",
167
- f"{side_prefix}knee",
168
- f"{side_prefix}hip",
169
- f"{side_prefix}shoulder",
170
- ]
171
- for key in joint_keys:
172
- if key in landmarks and landmarks[key][2] > 0.2:
173
- jx = int(landmarks[key][0] * self.width)
174
- jy = int(landmarks[key][1] * self.height)
175
- cv2.circle(frame, (jx, jy), 6, (255, 255, 255), -1)
176
- cv2.circle(frame, (jx, jy), 8, (0, 0, 0), 2)
194
+ self._draw_joints(frame, landmarks, side_prefix)
177
195
 
178
196
  # Always draw nose (head position) if visible
179
197
  if "nose" in landmarks and landmarks["nose"][2] > 0.2:
@@ -316,6 +334,126 @@ class CMJDebugOverlayRenderer:
316
334
  # Draw arc (simplified as a circle for now)
317
335
  cv2.circle(frame, (jx, jy), radius, arc_color, 2)
318
336
 
337
+ def _draw_foot_landmarks(
338
+ self,
339
+ frame: np.ndarray,
340
+ landmarks: dict[str, tuple[float, float, float]],
341
+ phase_color: tuple[int, int, int],
342
+ ) -> None:
343
+ """Draw foot landmarks and average position."""
344
+ foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
345
+ foot_positions = []
346
+
347
+ for key in foot_keys:
348
+ if key in landmarks:
349
+ x, y, vis = landmarks[key]
350
+ if vis > 0.5:
351
+ lx = int(x * self.width)
352
+ ly = int(y * self.height)
353
+ foot_positions.append((lx, ly))
354
+ cv2.circle(frame, (lx, ly), 5, (255, 255, 0), -1)
355
+
356
+ # Draw average foot position with phase color
357
+ if foot_positions:
358
+ avg_x = int(np.mean([p[0] for p in foot_positions]))
359
+ avg_y = int(np.mean([p[1] for p in foot_positions]))
360
+ cv2.circle(frame, (avg_x, avg_y), 12, phase_color, -1)
361
+ cv2.circle(frame, (avg_x, avg_y), 14, (255, 255, 255), 2)
362
+
363
+ def _draw_phase_banner(
364
+ self, frame: np.ndarray, phase: str | None, phase_color: tuple[int, int, int]
365
+ ) -> None:
366
+ """Draw phase indicator banner."""
367
+ if not phase:
368
+ return
369
+
370
+ phase_text = f"Phase: {phase.upper()}"
371
+ text_size = cv2.getTextSize(phase_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
372
+ cv2.rectangle(frame, (5, 5), (text_size[0] + 15, 45), phase_color, -1)
373
+ cv2.putText(
374
+ frame, phase_text, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2
375
+ )
376
+
377
+ def _draw_key_frame_markers(
378
+ self, frame: np.ndarray, frame_idx: int, metrics: CMJMetrics
379
+ ) -> None:
380
+ """Draw markers for key frames (standing start, lowest, takeoff, landing)."""
381
+ y_offset = 120
382
+ markers = []
383
+
384
+ if metrics.standing_start_frame and frame_idx == int(
385
+ metrics.standing_start_frame
386
+ ):
387
+ markers.append("COUNTERMOVEMENT START")
388
+
389
+ if frame_idx == int(metrics.lowest_point_frame):
390
+ markers.append("LOWEST POINT")
391
+
392
+ if frame_idx == int(metrics.takeoff_frame):
393
+ markers.append("TAKEOFF")
394
+
395
+ if frame_idx == int(metrics.landing_frame):
396
+ markers.append("LANDING")
397
+
398
+ for marker in markers:
399
+ cv2.putText(
400
+ frame,
401
+ marker,
402
+ (10, y_offset),
403
+ cv2.FONT_HERSHEY_SIMPLEX,
404
+ 0.7,
405
+ (255, 255, 0),
406
+ 2,
407
+ )
408
+ y_offset += 35
409
+
410
+ def _draw_metrics_summary(
411
+ self, frame: np.ndarray, frame_idx: int, metrics: CMJMetrics
412
+ ) -> None:
413
+ """Draw metrics summary in bottom right (last 30 frames)."""
414
+ total_frames = int(metrics.landing_frame) + 30
415
+ if frame_idx < total_frames - 30:
416
+ return
417
+
418
+ metrics_text = [
419
+ f"Jump Height: {metrics.jump_height:.3f}m",
420
+ f"Flight Time: {metrics.flight_time*1000:.0f}ms",
421
+ f"CM Depth: {metrics.countermovement_depth:.3f}m",
422
+ f"Ecc Duration: {metrics.eccentric_duration*1000:.0f}ms",
423
+ f"Con Duration: {metrics.concentric_duration*1000:.0f}ms",
424
+ ]
425
+
426
+ # Draw background
427
+ box_height = len(metrics_text) * 30 + 20
428
+ cv2.rectangle(
429
+ frame,
430
+ (self.width - 320, self.height - box_height - 10),
431
+ (self.width - 10, self.height - 10),
432
+ (0, 0, 0),
433
+ -1,
434
+ )
435
+ cv2.rectangle(
436
+ frame,
437
+ (self.width - 320, self.height - box_height - 10),
438
+ (self.width - 10, self.height - 10),
439
+ (0, 255, 0),
440
+ 2,
441
+ )
442
+
443
+ # Draw metrics text
444
+ text_y = self.height - box_height + 10
445
+ for text in metrics_text:
446
+ cv2.putText(
447
+ frame,
448
+ text,
449
+ (self.width - 310, text_y),
450
+ cv2.FONT_HERSHEY_SIMPLEX,
451
+ 0.6,
452
+ (255, 255, 255),
453
+ 1,
454
+ )
455
+ text_y += 30
456
+
319
457
  def render_frame(
320
458
  self,
321
459
  frame: np.ndarray,
@@ -346,47 +484,12 @@ class CMJDebugOverlayRenderer:
346
484
 
347
485
  # Draw skeleton and triple extension if landmarks available
348
486
  if landmarks:
349
- # Draw skeleton segments for triple extension
350
487
  self._draw_skeleton(annotated, landmarks)
351
-
352
- # Draw joint angles
353
488
  self._draw_joint_angles(annotated, landmarks, phase_color)
354
-
355
- # Draw foot landmarks
356
- foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
357
- foot_positions = []
358
-
359
- for key in foot_keys:
360
- if key in landmarks:
361
- x, y, vis = landmarks[key]
362
- if vis > 0.5:
363
- lx = int(x * self.width)
364
- ly = int(y * self.height)
365
- foot_positions.append((lx, ly))
366
- cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
367
-
368
- # Draw average foot position with phase color
369
- if foot_positions:
370
- avg_x = int(np.mean([p[0] for p in foot_positions]))
371
- avg_y = int(np.mean([p[1] for p in foot_positions]))
372
- cv2.circle(annotated, (avg_x, avg_y), 12, phase_color, -1)
373
- cv2.circle(annotated, (avg_x, avg_y), 14, (255, 255, 255), 2)
489
+ self._draw_foot_landmarks(annotated, landmarks, phase_color)
374
490
 
375
491
  # Draw phase indicator banner
376
- if phase:
377
- # Phase name with background
378
- phase_text = f"Phase: {phase.upper()}"
379
- text_size = cv2.getTextSize(phase_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
380
- cv2.rectangle(annotated, (5, 5), (text_size[0] + 15, 45), phase_color, -1)
381
- cv2.putText(
382
- annotated,
383
- phase_text,
384
- (10, 35),
385
- cv2.FONT_HERSHEY_SIMPLEX,
386
- 1,
387
- (0, 0, 0),
388
- 2,
389
- )
492
+ self._draw_phase_banner(annotated, phase, phase_color)
390
493
 
391
494
  # Draw frame number
392
495
  cv2.putText(
@@ -399,78 +502,10 @@ class CMJDebugOverlayRenderer:
399
502
  2,
400
503
  )
401
504
 
402
- # Draw key frame markers
505
+ # Draw key frame markers and metrics summary
403
506
  if metrics:
404
- y_offset = 120
405
- markers = []
406
-
407
- if metrics.standing_start_frame and frame_idx == int(
408
- metrics.standing_start_frame
409
- ):
410
- markers.append("COUNTERMOVEMENT START")
411
-
412
- if frame_idx == int(metrics.lowest_point_frame):
413
- markers.append("LOWEST POINT")
414
-
415
- if frame_idx == int(metrics.takeoff_frame):
416
- markers.append("TAKEOFF")
417
-
418
- if frame_idx == int(metrics.landing_frame):
419
- markers.append("LANDING")
420
-
421
- for marker in markers:
422
- cv2.putText(
423
- annotated,
424
- marker,
425
- (10, y_offset),
426
- cv2.FONT_HERSHEY_SIMPLEX,
427
- 0.7,
428
- (255, 255, 0),
429
- 2,
430
- )
431
- y_offset += 35
432
-
433
- # Draw metrics summary in bottom right (last 30 frames)
434
- total_frames = int(metrics.landing_frame) + 30
435
- if frame_idx >= total_frames - 30:
436
- metrics_text = [
437
- f"Jump Height: {metrics.jump_height:.3f}m",
438
- f"Flight Time: {metrics.flight_time*1000:.0f}ms",
439
- f"CM Depth: {metrics.countermovement_depth:.3f}m",
440
- f"Ecc Duration: {metrics.eccentric_duration*1000:.0f}ms",
441
- f"Con Duration: {metrics.concentric_duration*1000:.0f}ms",
442
- ]
443
-
444
- # Draw background
445
- box_height = len(metrics_text) * 30 + 20
446
- cv2.rectangle(
447
- annotated,
448
- (self.width - 320, self.height - box_height - 10),
449
- (self.width - 10, self.height - 10),
450
- (0, 0, 0),
451
- -1,
452
- )
453
- cv2.rectangle(
454
- annotated,
455
- (self.width - 320, self.height - box_height - 10),
456
- (self.width - 10, self.height - 10),
457
- (0, 255, 0),
458
- 2,
459
- )
460
-
461
- # Draw metrics text
462
- text_y = self.height - box_height + 10
463
- for text in metrics_text:
464
- cv2.putText(
465
- annotated,
466
- text,
467
- (self.width - 310, text_y),
468
- cv2.FONT_HERSHEY_SIMPLEX,
469
- 0.6,
470
- (255, 255, 255),
471
- 1,
472
- )
473
- text_y += 30
507
+ self._draw_key_frame_markers(annotated, frame_idx, metrics)
508
+ self._draw_metrics_summary(annotated, frame_idx, metrics)
474
509
 
475
510
  return annotated
476
511
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.11.0
3
+ Version: 0.11.2
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
5
  Project-URL: Homepage, https://github.com/feniix/kinemotion
6
6
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,10 +1,10 @@
1
1
  kinemotion/__init__.py,sha256=REBC9wrwYC_grvCS00qEOyign65Zc1sc-5buLpyqQxA,654
2
- kinemotion/api.py,sha256=HYd8OnCZ9eJq7wn38dFVta5llTjqB9P0wKEwXPAXu6M,31666
2
+ kinemotion/api.py,sha256=Cm3DXuNcInecJ2BoRMYBLRUk1ImDSn3ZP82H31TU4Gw,31721
3
3
  kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
4
4
  kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
5
- kinemotion/cmj/analysis.py,sha256=uzUouxeCE3H5dad_AQ8PhtpgVC8DfPE32AMDqihD82o,19887
6
- kinemotion/cmj/cli.py,sha256=kyPJpxvLZGfzdqv4FGzQVNmR2uUSOg3a6_I0d1VszVw,20874
7
- kinemotion/cmj/debug_overlay.py,sha256=TVDrZ16TJClftM_zhkrCzBLMs87SfYDa8H-eqfzQJ4c,17976
5
+ kinemotion/cmj/analysis.py,sha256=4HYGn4VDIB6oExAees-VcPfpNgWOltpgwjyNTU7YAb4,18263
6
+ kinemotion/cmj/cli.py,sha256=XcLlBPVyBWffI_UTQZN7wxlnm971LL9-ynkBoL3T37I,21567
7
+ kinemotion/cmj/debug_overlay.py,sha256=ELrSYQ9LmLV81bJS5w9i2c4VwRS0EYAUnMehMHU7VGc,18724
8
8
  kinemotion/cmj/joint_angles.py,sha256=8ucpDGPvbt4iX3tx9eVxJEUv0laTm2Y58_--VzJCogE,9113
9
9
  kinemotion/cmj/kinematics.py,sha256=Xl_PlC2OqMoA-zOc3SRB_GqI0AgLlJol5FTPe5J_qLc,7573
10
10
  kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
@@ -19,8 +19,8 @@ kinemotion/dropjump/cli.py,sha256=zo23qoYSpC_2BcScy-JOilcGcWGM0j3Xv0lpO0_n0wk,27
19
19
  kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
20
20
  kinemotion/dropjump/kinematics.py,sha256=RM_O8Kdc6aEiPIu_99N4cu-4EhYSQxtBGASJF_dmQaU,19081
21
21
  kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
- kinemotion-0.11.0.dist-info/METADATA,sha256=JaV-num_wEFgMB_tl3sxOJmUGl0FddtS5bkfQmGH3Gc,18990
23
- kinemotion-0.11.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
24
- kinemotion-0.11.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
25
- kinemotion-0.11.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
26
- kinemotion-0.11.0.dist-info/RECORD,,
22
+ kinemotion-0.11.2.dist-info/METADATA,sha256=DrLLms6trTMJYlfaibiR0y49jDd7D-zuYQTblJTq44Q,18990
23
+ kinemotion-0.11.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
24
+ kinemotion-0.11.2.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
25
+ kinemotion-0.11.2.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
26
+ kinemotion-0.11.2.dist-info/RECORD,,