kinemotion-0.11.1-py3-none-any.whl → kinemotion-0.11.2-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

kinemotion/api.py CHANGED
@@ -638,6 +638,50 @@ class CMJVideoResult:
     processing_time: float = 0.0
 
 
+def _generate_cmj_outputs(
+    output_video: str | None,
+    json_output: str | None,
+    metrics: CMJMetrics,
+    frames: list,
+    smoothed_landmarks: list,
+    video_width: int,
+    video_height: int,
+    video_display_width: int,
+    video_display_height: int,
+    video_fps: float,
+    verbose: bool,
+) -> None:
+    """Generate JSON and debug video outputs for CMJ analysis."""
+    if json_output:
+        import json
+
+        output_path = Path(json_output)
+        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
+        if verbose:
+            print(f"Metrics written to: {json_output}")
+
+    if output_video:
+        if verbose:
+            print(f"Generating debug video: {output_video}")
+
+        with CMJDebugOverlayRenderer(
+            output_video,
+            video_width,
+            video_height,
+            video_display_width,
+            video_display_height,
+            video_fps,
+        ) as renderer:
+            for i, frame in enumerate(frames):
+                annotated = renderer.render_frame(
+                    frame, smoothed_landmarks[i], i, metrics
+                )
+                renderer.write_frame(annotated)
+
+        if verbose:
+            print(f"Debug video saved: {output_video}")
+
+
 def process_cmj_video(
     video_path: str,
     quality: str = "balanced",
@@ -741,12 +785,6 @@ def process_cmj_video(
     vertical_positions, _ = _extract_vertical_positions(smoothed_landmarks)
     tracking_method = "foot"
 
-    # Calculate countermovement threshold (FPS-adjusted)
-    # POSITIVE threshold for downward motion (squatting) in normalized coordinates
-    cm_threshold = countermovement_threshold
-    if cm_threshold is None:
-        cm_threshold = 0.015 * (30.0 / video.fps)
-
     # Detect CMJ phases
     if verbose:
        print("Detecting CMJ phases...")
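For reference, the deleted default scaled a fixed 0.015 (normalized units per frame) by frame rate, so faster capture used a proportionally smaller per-frame threshold. A quick check of what the old expression evaluated to; this hunk does not show where, or whether, that default now lives, so treat this purely as an illustration of the removed arithmetic:

    # The removed FPS-adjusted default, evaluated at two common frame rates
    # (illustrative only; the diff does not show a replacement default).
    for fps in (30.0, 60.0):
        cm_threshold = 0.015 * (30.0 / fps)
        print(fps, cm_threshold)  # 30.0 -> 0.015, 60.0 -> 0.0075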
@@ -788,34 +826,19 @@ def process_cmj_video(
     )
 
     # Generate outputs if requested
-    if json_output:
-        import json
-
-        output_path = Path(json_output)
-        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
-        if verbose:
-            print(f"Metrics written to: {json_output}")
-
-    if output_video:
-        if verbose:
-            print(f"Generating debug video: {output_video}")
-
-        with CMJDebugOverlayRenderer(
-            output_video,
-            video.width,
-            video.height,
-            video.display_width,
-            video.display_height,
-            video.fps,
-        ) as renderer:
-            for i, frame in enumerate(frames):
-                annotated = renderer.render_frame(
-                    frame, smoothed_landmarks[i], i, metrics
-                )
-                renderer.write_frame(annotated)
-
-        if verbose:
-            print(f"Debug video saved: {output_video}")
+    _generate_cmj_outputs(
+        output_video,
+        json_output,
+        metrics,
+        frames,
+        smoothed_landmarks,
+        video.width,
+        video.height,
+        video.display_width,
+        video.display_height,
+        video.fps,
+        verbose,
+    )
 
     if verbose:
        print(f"\nJump height: {metrics.jump_height:.3f}m")
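Functionally this is a pure extraction: process_cmj_video now delegates JSON and debug-video generation to the new module-level _generate_cmj_outputs helper. A minimal usage sketch of the entry point follows; only video_path and quality appear in the visible signature, so the json_output, output_video, and verbose keywords are assumptions inferred from the function body in this diff:

    # Hypothetical call; keyword names other than quality are inferred, not confirmed.
    from kinemotion.api import process_cmj_video

    result = process_cmj_video(
        "cmj_trial.mp4",
        quality="balanced",
        json_output="cmj_trial.json",        # assumed keyword; written by _generate_cmj_outputs
        output_video="cmj_trial_debug.mp4",  # assumed keyword; rendered via CMJDebugOverlayRenderer
        verbose=True,
    )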
kinemotion/cmj/analysis.py CHANGED
@@ -279,8 +279,6 @@ def find_cmj_takeoff_from_velocity_peak(
     velocities: np.ndarray,
     lowest_point_frame: int,
     fps: float,
-    window_length: int = 5,
-    polyorder: int = 2,
 ) -> float:
     """
     Find CMJ takeoff frame as peak upward velocity during concentric phase.
@@ -293,8 +291,6 @@ def find_cmj_takeoff_from_velocity_peak(
         velocities: Array of SIGNED vertical velocities (negative = upward)
         lowest_point_frame: Frame at lowest point
         fps: Video frame rate
-        window_length: Window size for derivative calculations
-        polyorder: Polynomial order for Savitzky-Golay filter
 
     Returns:
         Takeoff frame with fractional precision.
@@ -407,7 +403,7 @@ def find_interpolated_takeoff_landing(
 
     # Find takeoff using peak velocity method (CMJ-specific)
     takeoff_frame = find_cmj_takeoff_from_velocity_peak(
-        positions, velocities, lowest_point_frame, fps, window_length, polyorder
+        positions, velocities, lowest_point_frame, fps
     )
 
     # Find landing using position peak and impact detection
kinemotion/cmj/cli.py CHANGED
@@ -39,6 +39,64 @@ class AnalysisParameters:
     tracking_confidence: float | None = None
 
 
+def _collect_video_files(video_path: tuple[str, ...]) -> list[str]:
+    """Expand glob patterns and collect all video files."""
+    video_files: list[str] = []
+    for pattern in video_path:
+        expanded = glob.glob(pattern)
+        if expanded:
+            video_files.extend(expanded)
+        elif Path(pattern).exists():
+            video_files.append(pattern)
+        else:
+            click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
+    return video_files
+
+
+def _generate_output_paths(
+    video: str, output_dir: str | None, json_output_dir: str | None
+) -> tuple[str | None, str | None]:
+    """Generate output paths for debug video and JSON."""
+    out_path = None
+    json_path = None
+    if output_dir:
+        out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
+    if json_output_dir:
+        json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
+    return out_path, json_path
+
+
+def _process_batch_videos(
+    video_files: list[str],
+    output_dir: str | None,
+    json_output_dir: str | None,
+    quality_preset: QualityPreset,
+    verbose: bool,
+    expert_params: AnalysisParameters,
+    workers: int,
+) -> None:
+    """Process multiple videos in batch mode."""
+    click.echo(
+        f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
+        err=True,
+    )
+    click.echo("Note: Batch processing not yet fully implemented", err=True)
+    click.echo("Processing videos sequentially...", err=True)
+
+    for video in video_files:
+        try:
+            click.echo(f"\nProcessing: {video}", err=True)
+            out_path, json_path = _generate_output_paths(
+                video, output_dir, json_output_dir
+            )
+            _process_single(
+                video, out_path, json_path, quality_preset, verbose, expert_params
+            )
+        except Exception as e:
+            click.echo(f"Error processing {video}: {e}", err=True)
+            continue
+
+
 @click.command(name="cmj-analyze")
 @click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
 @click.option(
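The three new module-level helpers split cmj_analyze's batch path into input collection, output-path derivation, and sequential processing. A small sketch of the path derivation the second helper performs, mirroring the code above; the directory names and video filename are invented for illustration (POSIX-style paths shown):

    from pathlib import Path

    video = "sessions/jump_01.mov"
    debug_path = str(Path("debug") / f"{Path(video).stem}_debug.mp4")
    json_path = str(Path("results") / f"{Path(video).stem}.json")
    print(debug_path)  # debug/jump_01_debug.mp4
    print(json_path)   # results/jump_01.json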
@@ -189,15 +247,7 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
         --json-output-dir results/ --csv-summary summary.csv
     """
     # Expand glob patterns and collect all video files
-    video_files: list[str] = []
-    for pattern in video_path:
-        expanded = glob.glob(pattern)
-        if expanded:
-            video_files.extend(expanded)
-        elif Path(pattern).exists():
-            video_files.append(pattern)
-        else:
-            click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
+    video_files = _collect_video_files(video_path)
 
     if not video_files:
        click.echo("Error: No video files found", err=True)
@@ -220,27 +270,15 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
     )
 
     if use_batch:
-        click.echo(
-            f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
-            err=True,
+        _process_batch_videos(
+            video_files,
+            output_dir,
+            json_output_dir,
+            quality_preset,
+            verbose,
+            expert_params,
+            workers,
         )
-        click.echo("Note: Batch processing not yet fully implemented", err=True)
-        click.echo("Processing videos sequentially...", err=True)
-        for video in video_files:
-            try:
-                click.echo(f"\nProcessing: {video}", err=True)
-                out_path = None
-                json_path = None
-                if output_dir:
-                    out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
-                if json_output_dir:
-                    json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
-                _process_single(
-                    video, out_path, json_path, quality_preset, verbose, expert_params
-                )
-            except Exception as e:
-                click.echo(f"Error processing {video}: {e}", err=True)
-                continue
     else:
         # Single video mode
         try:
@@ -376,6 +414,22 @@ def _smooth_landmark_sequence(
     )
 
 
+def _get_foot_position(frame_landmarks: dict | None, last_position: float) -> float:
+    """Extract average foot position from frame landmarks."""
+    if not frame_landmarks:
+        return last_position
+
+    # Average foot position (ankles and heels)
+    foot_y_values = []
+    for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
+        if key in frame_landmarks:
+            foot_y_values.append(frame_landmarks[key][1])
+
+    if foot_y_values:
+        return float(np.mean(foot_y_values))
+    return last_position
+
+
 def _extract_positions_from_landmarks(
     smoothed_landmarks: list,
 ) -> tuple[np.ndarray, str]:
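The new helper preserves the fallback used in the loop refactored in the next hunk: frames with no usable landmarks reuse the previous position, and a leading gap falls back to 0.5. A standalone restatement of that behavior (the helper is copied in shape rather than imported, and the landmark values are invented for illustration):

    import numpy as np

    def get_foot_position(frame_landmarks, last_position):
        # Mirrors _get_foot_position from the diff above.
        if not frame_landmarks:
            return last_position
        keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
        foot_y = [frame_landmarks[k][1] for k in keys if k in frame_landmarks]
        return float(np.mean(foot_y)) if foot_y else last_position

    frames = [
        None,                                                          # no detection
        {"left_ankle": (0.40, 0.92, 0.9), "right_heel": (0.60, 0.94, 0.8)},
        {},                                                            # empty landmarks
    ]
    positions = []
    for fl in frames:
        positions.append(get_foot_position(fl, positions[-1] if positions else 0.5))
    print(positions)  # ~[0.5, 0.93, 0.93] -- gaps reuse the previous position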
@@ -391,20 +445,9 @@ def _extract_positions_from_landmarks(
     position_list: list[float] = []
 
     for frame_landmarks in smoothed_landmarks:
-        if frame_landmarks:
-            # Average foot position (ankles and heels)
-            foot_y_values = []
-            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-                if key in frame_landmarks:
-                    foot_y_values.append(frame_landmarks[key][1])
-
-            if foot_y_values:
-                avg_y = float(np.mean(foot_y_values))
-                position_list.append(avg_y)
-            else:
-                position_list.append(position_list[-1] if position_list else 0.5)
-        else:
-            position_list.append(position_list[-1] if position_list else 0.5)
+        last_pos = position_list[-1] if position_list else 0.5
+        position = _get_foot_position(frame_landmarks, last_pos)
+        position_list.append(position)
 
     return np.array(position_list), "foot"
 
kinemotion/cmj/debug_overlay.py CHANGED
@@ -98,6 +98,76 @@ class CMJDebugOverlayRenderer:
         }
         return colors.get(phase, (128, 128, 128))
 
+    def _get_skeleton_segments(
+        self, side_prefix: str
+    ) -> list[tuple[str, str, tuple[int, int, int], int]]:
+        """Get skeleton segments for one side of the body."""
+        return [
+            (f"{side_prefix}heel", f"{side_prefix}ankle", (0, 255, 255), 3),  # Foot
+            (
+                f"{side_prefix}heel",
+                f"{side_prefix}foot_index",
+                (0, 255, 255),
+                2,
+            ),  # Alt foot
+            (f"{side_prefix}ankle", f"{side_prefix}knee", (255, 100, 100), 4),  # Shin
+            (f"{side_prefix}knee", f"{side_prefix}hip", (100, 255, 100), 4),  # Femur
+            (
+                f"{side_prefix}hip",
+                f"{side_prefix}shoulder",
+                (100, 100, 255),
+                4,
+            ),  # Trunk
+            (f"{side_prefix}shoulder", "nose", (150, 150, 255), 2),  # Neck
+        ]
+
+    def _draw_segment(
+        self,
+        frame: np.ndarray,
+        landmarks: dict[str, tuple[float, float, float]],
+        start_key: str,
+        end_key: str,
+        color: tuple[int, int, int],
+        thickness: int,
+    ) -> None:
+        """Draw a single skeleton segment if both endpoints are visible."""
+        if start_key not in landmarks or end_key not in landmarks:
+            return
+
+        start_vis = landmarks[start_key][2]
+        end_vis = landmarks[end_key][2]
+
+        # Very low threshold to show as much as possible
+        if start_vis > 0.2 and end_vis > 0.2:
+            start_x = int(landmarks[start_key][0] * self.width)
+            start_y = int(landmarks[start_key][1] * self.height)
+            end_x = int(landmarks[end_key][0] * self.width)
+            end_y = int(landmarks[end_key][1] * self.height)
+
+            cv2.line(frame, (start_x, start_y), (end_x, end_y), color, thickness)
+
+    def _draw_joints(
+        self,
+        frame: np.ndarray,
+        landmarks: dict[str, tuple[float, float, float]],
+        side_prefix: str,
+    ) -> None:
+        """Draw joint circles for one side of the body."""
+        joint_keys = [
+            f"{side_prefix}heel",
+            f"{side_prefix}foot_index",
+            f"{side_prefix}ankle",
+            f"{side_prefix}knee",
+            f"{side_prefix}hip",
+            f"{side_prefix}shoulder",
+        ]
+        for key in joint_keys:
+            if key in landmarks and landmarks[key][2] > 0.2:
+                jx = int(landmarks[key][0] * self.width)
+                jy = int(landmarks[key][1] * self.height)
+                cv2.circle(frame, (jx, jy), 6, (255, 255, 255), -1)
+                cv2.circle(frame, (jx, jy), 8, (0, 0, 0), 2)
+
     def _draw_skeleton(
         self, frame: np.ndarray, landmarks: dict[str, tuple[float, float, float]]
     ) -> None:
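The extracted drawing helpers keep the renderer's existing conventions: colors are OpenCV BGR tuples, each landmark entry is (x, y, visibility) in normalized coordinates, and segments or joints below a 0.2 visibility score are skipped. A tiny illustration of that visibility gate, using invented values:

    # Invented landmark values; shows when _draw_segment's gate suppresses a line.
    landmarks = {
        "right_ankle": (0.52, 0.88, 0.95),  # (x, y, visibility)
        "right_knee": (0.50, 0.70, 0.15),   # below the 0.2 visibility threshold
    }
    start_vis = landmarks["right_ankle"][2]
    end_vis = landmarks["right_knee"][2]
    print(start_vis > 0.2 and end_vis > 0.2)  # False -> ankle-knee segment not drawn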
@@ -112,68 +182,16 @@ class CMJDebugOverlayRenderer:
         """
         # Try both sides and draw all visible segments
         for side_prefix in ["right_", "left_"]:
-            segments = [
-                (f"{side_prefix}heel", f"{side_prefix}ankle", (0, 255, 255), 3),  # Foot
-                (
-                    f"{side_prefix}heel",
-                    f"{side_prefix}foot_index",
-                    (0, 255, 255),
-                    2,
-                ),  # Alt foot
-                (
-                    f"{side_prefix}ankle",
-                    f"{side_prefix}knee",
-                    (255, 100, 100),
-                    4,
-                ),  # Shin
-                (
-                    f"{side_prefix}knee",
-                    f"{side_prefix}hip",
-                    (100, 255, 100),
-                    4,
-                ),  # Femur
-                (
-                    f"{side_prefix}hip",
-                    f"{side_prefix}shoulder",
-                    (100, 100, 255),
-                    4,
-                ),  # Trunk
-                # Additional segments for better visualization
-                (f"{side_prefix}shoulder", "nose", (150, 150, 255), 2),  # Neck
-            ]
+            segments = self._get_skeleton_segments(side_prefix)
 
             # Draw ALL visible segments (not just one side)
             for start_key, end_key, color, thickness in segments:
-                if start_key in landmarks and end_key in landmarks:
-                    start_vis = landmarks[start_key][2]
-                    end_vis = landmarks[end_key][2]
-
-                    # Very low threshold to show as much as possible
-                    if start_vis > 0.2 and end_vis > 0.2:
-                        start_x = int(landmarks[start_key][0] * self.width)
-                        start_y = int(landmarks[start_key][1] * self.height)
-                        end_x = int(landmarks[end_key][0] * self.width)
-                        end_y = int(landmarks[end_key][1] * self.height)
-
-                        cv2.line(
-                            frame, (start_x, start_y), (end_x, end_y), color, thickness
-                        )
+                self._draw_segment(
+                    frame, landmarks, start_key, end_key, color, thickness
+                )
 
             # Draw joints as circles for this side
-            joint_keys = [
-                f"{side_prefix}heel",
-                f"{side_prefix}foot_index",
-                f"{side_prefix}ankle",
-                f"{side_prefix}knee",
-                f"{side_prefix}hip",
-                f"{side_prefix}shoulder",
-            ]
-            for key in joint_keys:
-                if key in landmarks and landmarks[key][2] > 0.2:
-                    jx = int(landmarks[key][0] * self.width)
-                    jy = int(landmarks[key][1] * self.height)
-                    cv2.circle(frame, (jx, jy), 6, (255, 255, 255), -1)
-                    cv2.circle(frame, (jx, jy), 8, (0, 0, 0), 2)
+            self._draw_joints(frame, landmarks, side_prefix)
 
         # Always draw nose (head position) if visible
         if "nose" in landmarks and landmarks["nose"][2] > 0.2:
@@ -316,6 +334,126 @@ class CMJDebugOverlayRenderer:
         # Draw arc (simplified as a circle for now)
         cv2.circle(frame, (jx, jy), radius, arc_color, 2)
 
+    def _draw_foot_landmarks(
+        self,
+        frame: np.ndarray,
+        landmarks: dict[str, tuple[float, float, float]],
+        phase_color: tuple[int, int, int],
+    ) -> None:
+        """Draw foot landmarks and average position."""
+        foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
+        foot_positions = []
+
+        for key in foot_keys:
+            if key in landmarks:
+                x, y, vis = landmarks[key]
+                if vis > 0.5:
+                    lx = int(x * self.width)
+                    ly = int(y * self.height)
+                    foot_positions.append((lx, ly))
+                    cv2.circle(frame, (lx, ly), 5, (255, 255, 0), -1)
+
+        # Draw average foot position with phase color
+        if foot_positions:
+            avg_x = int(np.mean([p[0] for p in foot_positions]))
+            avg_y = int(np.mean([p[1] for p in foot_positions]))
+            cv2.circle(frame, (avg_x, avg_y), 12, phase_color, -1)
+            cv2.circle(frame, (avg_x, avg_y), 14, (255, 255, 255), 2)
+
+    def _draw_phase_banner(
+        self, frame: np.ndarray, phase: str | None, phase_color: tuple[int, int, int]
+    ) -> None:
+        """Draw phase indicator banner."""
+        if not phase:
+            return
+
+        phase_text = f"Phase: {phase.upper()}"
+        text_size = cv2.getTextSize(phase_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
+        cv2.rectangle(frame, (5, 5), (text_size[0] + 15, 45), phase_color, -1)
+        cv2.putText(
+            frame, phase_text, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2
+        )
+
+    def _draw_key_frame_markers(
+        self, frame: np.ndarray, frame_idx: int, metrics: CMJMetrics
+    ) -> None:
+        """Draw markers for key frames (standing start, lowest, takeoff, landing)."""
+        y_offset = 120
+        markers = []
+
+        if metrics.standing_start_frame and frame_idx == int(
+            metrics.standing_start_frame
+        ):
+            markers.append("COUNTERMOVEMENT START")
+
+        if frame_idx == int(metrics.lowest_point_frame):
+            markers.append("LOWEST POINT")
+
+        if frame_idx == int(metrics.takeoff_frame):
+            markers.append("TAKEOFF")
+
+        if frame_idx == int(metrics.landing_frame):
+            markers.append("LANDING")
+
+        for marker in markers:
+            cv2.putText(
+                frame,
+                marker,
+                (10, y_offset),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (255, 255, 0),
+                2,
+            )
+            y_offset += 35
+
+    def _draw_metrics_summary(
+        self, frame: np.ndarray, frame_idx: int, metrics: CMJMetrics
+    ) -> None:
+        """Draw metrics summary in bottom right (last 30 frames)."""
+        total_frames = int(metrics.landing_frame) + 30
+        if frame_idx < total_frames - 30:
+            return
+
+        metrics_text = [
+            f"Jump Height: {metrics.jump_height:.3f}m",
+            f"Flight Time: {metrics.flight_time*1000:.0f}ms",
+            f"CM Depth: {metrics.countermovement_depth:.3f}m",
+            f"Ecc Duration: {metrics.eccentric_duration*1000:.0f}ms",
+            f"Con Duration: {metrics.concentric_duration*1000:.0f}ms",
+        ]
+
+        # Draw background
+        box_height = len(metrics_text) * 30 + 20
+        cv2.rectangle(
+            frame,
+            (self.width - 320, self.height - box_height - 10),
+            (self.width - 10, self.height - 10),
+            (0, 0, 0),
+            -1,
+        )
+        cv2.rectangle(
+            frame,
+            (self.width - 320, self.height - box_height - 10),
+            (self.width - 10, self.height - 10),
+            (0, 255, 0),
+            2,
+        )
+
+        # Draw metrics text
+        text_y = self.height - box_height + 10
+        for text in metrics_text:
+            cv2.putText(
+                frame,
+                text,
+                (self.width - 310, text_y),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.6,
+                (255, 255, 255),
+                1,
+            )
+            text_y += 30
+
     def render_frame(
         self,
         frame: np.ndarray,
@@ -346,47 +484,12 @@
 
         # Draw skeleton and triple extension if landmarks available
         if landmarks:
-            # Draw skeleton segments for triple extension
             self._draw_skeleton(annotated, landmarks)
-
-            # Draw joint angles
             self._draw_joint_angles(annotated, landmarks, phase_color)
-
-            # Draw foot landmarks
-            foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-            foot_positions = []
-
-            for key in foot_keys:
-                if key in landmarks:
-                    x, y, vis = landmarks[key]
-                    if vis > 0.5:
-                        lx = int(x * self.width)
-                        ly = int(y * self.height)
-                        foot_positions.append((lx, ly))
-                        cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
-
-            # Draw average foot position with phase color
-            if foot_positions:
-                avg_x = int(np.mean([p[0] for p in foot_positions]))
-                avg_y = int(np.mean([p[1] for p in foot_positions]))
-                cv2.circle(annotated, (avg_x, avg_y), 12, phase_color, -1)
-                cv2.circle(annotated, (avg_x, avg_y), 14, (255, 255, 255), 2)
+            self._draw_foot_landmarks(annotated, landmarks, phase_color)
 
         # Draw phase indicator banner
-        if phase:
-            # Phase name with background
-            phase_text = f"Phase: {phase.upper()}"
-            text_size = cv2.getTextSize(phase_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
-            cv2.rectangle(annotated, (5, 5), (text_size[0] + 15, 45), phase_color, -1)
-            cv2.putText(
-                annotated,
-                phase_text,
-                (10, 35),
-                cv2.FONT_HERSHEY_SIMPLEX,
-                1,
-                (0, 0, 0),
-                2,
-            )
+        self._draw_phase_banner(annotated, phase, phase_color)
 
         # Draw frame number
         cv2.putText(
@@ -399,78 +502,10 @@
             2,
         )
 
-        # Draw key frame markers
+        # Draw key frame markers and metrics summary
         if metrics:
-            y_offset = 120
-            markers = []
-
-            if metrics.standing_start_frame and frame_idx == int(
-                metrics.standing_start_frame
-            ):
-                markers.append("COUNTERMOVEMENT START")
-
-            if frame_idx == int(metrics.lowest_point_frame):
-                markers.append("LOWEST POINT")
-
-            if frame_idx == int(metrics.takeoff_frame):
-                markers.append("TAKEOFF")
-
-            if frame_idx == int(metrics.landing_frame):
-                markers.append("LANDING")
-
-            for marker in markers:
-                cv2.putText(
-                    annotated,
-                    marker,
-                    (10, y_offset),
-                    cv2.FONT_HERSHEY_SIMPLEX,
-                    0.7,
-                    (255, 255, 0),
-                    2,
-                )
-                y_offset += 35
-
-            # Draw metrics summary in bottom right (last 30 frames)
-            total_frames = int(metrics.landing_frame) + 30
-            if frame_idx >= total_frames - 30:
-                metrics_text = [
-                    f"Jump Height: {metrics.jump_height:.3f}m",
-                    f"Flight Time: {metrics.flight_time*1000:.0f}ms",
-                    f"CM Depth: {metrics.countermovement_depth:.3f}m",
-                    f"Ecc Duration: {metrics.eccentric_duration*1000:.0f}ms",
-                    f"Con Duration: {metrics.concentric_duration*1000:.0f}ms",
-                ]
-
-                # Draw background
-                box_height = len(metrics_text) * 30 + 20
-                cv2.rectangle(
-                    annotated,
-                    (self.width - 320, self.height - box_height - 10),
-                    (self.width - 10, self.height - 10),
-                    (0, 0, 0),
-                    -1,
-                )
-                cv2.rectangle(
-                    annotated,
-                    (self.width - 320, self.height - box_height - 10),
-                    (self.width - 10, self.height - 10),
-                    (0, 255, 0),
-                    2,
-                )
-
-                # Draw metrics text
-                text_y = self.height - box_height + 10
-                for text in metrics_text:
-                    cv2.putText(
-                        annotated,
-                        text,
-                        (self.width - 310, text_y),
-                        cv2.FONT_HERSHEY_SIMPLEX,
-                        0.6,
-                        (255, 255, 255),
-                        1,
-                    )
-                    text_y += 30
+            self._draw_key_frame_markers(annotated, frame_idx, metrics)
+            self._draw_metrics_summary(annotated, frame_idx, metrics)
 
         return annotated
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.11.1
+Version: 0.11.2
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,10 +1,10 @@
 kinemotion/__init__.py,sha256=REBC9wrwYC_grvCS00qEOyign65Zc1sc-5buLpyqQxA,654
-kinemotion/api.py,sha256=mbI57PFvRK7iNU3p4PNdweuHCCU0HP9nLeUk9fx-b2g,31390
+kinemotion/api.py,sha256=Cm3DXuNcInecJ2BoRMYBLRUk1ImDSn3ZP82H31TU4Gw,31721
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
-kinemotion/cmj/analysis.py,sha256=ciH5f5SC1IJjG0j8nkyZRu7QYR-Al6bWCBIfOW4Q1xQ,18466
-kinemotion/cmj/cli.py,sha256=HUXat4xecCZ7JOiyo4zfcOk5xJLFpUn5ZaTDVAJL6qo,20565
-kinemotion/cmj/debug_overlay.py,sha256=TVDrZ16TJClftM_zhkrCzBLMs87SfYDa8H-eqfzQJ4c,17976
+kinemotion/cmj/analysis.py,sha256=4HYGn4VDIB6oExAees-VcPfpNgWOltpgwjyNTU7YAb4,18263
+kinemotion/cmj/cli.py,sha256=XcLlBPVyBWffI_UTQZN7wxlnm971LL9-ynkBoL3T37I,21567
+kinemotion/cmj/debug_overlay.py,sha256=ELrSYQ9LmLV81bJS5w9i2c4VwRS0EYAUnMehMHU7VGc,18724
 kinemotion/cmj/joint_angles.py,sha256=8ucpDGPvbt4iX3tx9eVxJEUv0laTm2Y58_--VzJCogE,9113
 kinemotion/cmj/kinematics.py,sha256=Xl_PlC2OqMoA-zOc3SRB_GqI0AgLlJol5FTPe5J_qLc,7573
 kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
@@ -19,8 +19,8 @@ kinemotion/dropjump/cli.py,sha256=zo23qoYSpC_2BcScy-JOilcGcWGM0j3Xv0lpO0_n0wk,27
 kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
 kinemotion/dropjump/kinematics.py,sha256=RM_O8Kdc6aEiPIu_99N4cu-4EhYSQxtBGASJF_dmQaU,19081
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.11.1.dist-info/METADATA,sha256=XRgrT5bV--WLFpW-pDbZAQpo0N5RekTUvtFMntKcQoA,18990
-kinemotion-0.11.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kinemotion-0.11.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.11.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.11.1.dist-info/RECORD,,
+kinemotion-0.11.2.dist-info/METADATA,sha256=DrLLms6trTMJYlfaibiR0y49jDd7D-zuYQTblJTq44Q,18990
+kinemotion-0.11.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kinemotion-0.11.2.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.11.2.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.11.2.dist-info/RECORD,,