kinemotion 0.75.0__py3-none-any.whl → 0.76.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,309 @@
1
+ """Command-line interface for squat jump (SJ) analysis."""
2
+
3
+ import json
4
+ import sys
5
+ from dataclasses import dataclass
6
+
7
+ import click
8
+
9
+ from ..core.auto_tuning import QualityPreset
10
+ from ..core.cli_utils import (
11
+ batch_processing_options,
12
+ collect_video_files,
13
+ common_output_options,
14
+ generate_batch_output_paths,
15
+ quality_option,
16
+ verbose_option,
17
+ )
18
+ from .api import AnalysisOverrides, process_sj_video
19
+ from .kinematics import SJMetrics
20
+
21
+
22
@dataclass
class AnalysisParameters:
    """Expert parameters for SJ analysis customization.

    Every field defaults to ``None``, meaning "use the auto-tuned value";
    a non-None value overrides the corresponding auto-tuned parameter.
    Instances are built from the [EXPERT] CLI options and passed down to
    ``_process_single``.
    """

    smoothing_window: int | None = None  # smoothing window size (frames)
    velocity_threshold: float | None = None  # velocity threshold for flight detection
    squat_hold_threshold: float | None = None  # threshold for the static squat hold phase
    min_contact_frames: int | None = None  # minimum ground-contact frames
    visibility_threshold: float | None = None  # minimum landmark visibility
    detection_confidence: float | None = None  # pose detection confidence
    tracking_confidence: float | None = None  # pose tracking confidence
    mass_kg: float | None = None  # For power calculations
34
+
35
+
36
def _process_batch_videos(
    video_files: list[str],
    output_dir: str | None,
    json_output_dir: str | None,
    quality_preset: QualityPreset,
    verbose: bool,
    expert_params: AnalysisParameters,
    workers: int,
) -> None:
    """Run SJ analysis over several videos, one after another.

    Output paths for each video are derived from the configured output
    directories. A failure on one video is reported to stderr and the
    remaining videos are still processed.
    """
    click.echo(
        f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
        err=True,
    )
    click.echo("Note: Batch processing not yet fully implemented", err=True)
    click.echo("Processing videos sequentially...", err=True)

    for video in video_files:
        try:
            click.echo(f"\nProcessing: {video}", err=True)
            debug_path, metrics_path = generate_batch_output_paths(
                video, output_dir, json_output_dir
            )
            _process_single(
                video, debug_path, metrics_path, quality_preset, verbose, expert_params
            )
        except Exception as exc:
            # Best-effort batch: report and move on to the next video.
            click.echo(f"Error processing {video}: {exc}", err=True)
61
+
62
+
63
@click.command(name="sj-analyze")
# exists=False: arguments may be unexpanded glob patterns in batch mode;
# presumably collect_video_files() validates actual paths — confirm.
@click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
@common_output_options
@quality_option
@verbose_option
@batch_processing_options
@click.option(
    "--mass",
    type=float,
    required=True,
    help="Athlete mass in kilograms (required for power calculations)",
)
# Expert parameters (hidden in help, but always available for advanced users)
@click.option(
    "--smoothing-window",
    type=int,
    default=None,
    help="[EXPERT] Override auto-tuned smoothing window size",
)
@click.option(
    "--velocity-threshold",
    type=float,
    default=None,
    help="[EXPERT] Override auto-tuned velocity threshold for flight detection",
)
@click.option(
    "--squat-hold-threshold",
    type=float,
    default=None,
    help="[EXPERT] Override auto-tuned squat hold threshold",
)
@click.option(
    "--min-contact-frames",
    type=int,
    default=None,
    help="[EXPERT] Override auto-tuned minimum contact frames",
)
@click.option(
    "--visibility-threshold",
    type=float,
    default=None,
    help="[EXPERT] Override visibility threshold",
)
@click.option(
    "--detection-confidence",
    type=float,
    default=None,
    help="[EXPERT] Override pose detection confidence",
)
@click.option(
    "--tracking-confidence",
    type=float,
    default=None,
    help="[EXPERT] Override pose tracking confidence",
)
def sj_analyze(  # NOSONAR(S107) - Click CLI requires individual parameters
    # for each option
    video_path: tuple[str, ...],
    output: str | None,
    json_output: str | None,
    quality: str,
    verbose: bool,
    batch: bool,
    workers: int,
    output_dir: str | None,
    json_output_dir: str | None,
    csv_summary: str | None,
    mass: float,
    smoothing_window: int | None,
    velocity_threshold: float | None,
    squat_hold_threshold: float | None,
    min_contact_frames: int | None,
    visibility_threshold: float | None,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> None:
    """
    Analyze squat jump (SJ) video(s) to estimate jump performance metrics.

    ⚠️ EXPERIMENTAL: Squat Jump analysis is new and awaiting validation studies.
    Power/force calculations use validated Sayers regression but SJ-specific
    phase detection may need refinement based on real-world data.

    Squat Jump starts from a static squat position and focuses on explosive
    upward movement without countermovement. Power calculations require
    athlete mass for force and power metrics.

    Uses intelligent auto-tuning to select optimal parameters based on video
    characteristics. Parameters are automatically adjusted for frame rate,
    tracking quality, and analysis preset.

    VIDEO_PATH: Path(s) to video file(s). Supports glob patterns in batch mode.

    Examples:

    \b
    # Basic analysis
    kinemotion sj-analyze video.mp4 --mass 75.0

    \b
    # With debug video output
    kinemotion sj-analyze video.mp4 --mass 75.0 --output debug.mp4

    \b
    # Batch mode with glob pattern
    kinemotion sj-analyze videos/*.mp4 --batch --workers 4 --mass 75.0

    \b
    # Batch with output directories
    kinemotion sj-analyze videos/*.mp4 --batch --mass 75.0 \\
        --json-output-dir results/ --csv-summary summary.csv
    """
    # NOTE(review): csv_summary is accepted (via batch_processing_options)
    # but never referenced in this function body — confirm whether CSV
    # summary generation is handled elsewhere or still unimplemented.
    # Warn user that SJ is experimental
    click.echo(
        "⚠️ WARNING: Squat Jump analysis is experimental (since v0.74.0)",
        err=True,
    )
    click.echo(
        " Power/force calculations use validated Sayers regression, but",
        err=True,
    )
    click.echo(
        " SJ-specific phase detection may need refinement based on real-world data.",
        err=True,
    )
    click.echo(err=True)
    # Expand glob patterns and collect all video files
    video_files = collect_video_files(video_path)

    if not video_files:
        click.echo("Error: No video files found", err=True)
        sys.exit(1)

    # Determine if batch mode should be used
    # (multiple inputs imply batch even without the --batch flag)
    use_batch = batch or len(video_files) > 1

    quality_preset = QualityPreset(quality.lower())

    # Group expert parameters
    expert_params = AnalysisParameters(
        smoothing_window=smoothing_window,
        velocity_threshold=velocity_threshold,
        squat_hold_threshold=squat_hold_threshold,
        min_contact_frames=min_contact_frames,
        visibility_threshold=visibility_threshold,
        detection_confidence=detection_confidence,
        tracking_confidence=tracking_confidence,
        mass_kg=mass,
    )

    if use_batch:
        _process_batch_videos(
            video_files,
            output_dir,
            json_output_dir,
            quality_preset,
            verbose,
            expert_params,
            workers,
        )
    else:
        # Single video mode
        try:
            _process_single(
                video_files[0],
                output,
                json_output,
                quality_preset,
                verbose,
                expert_params,
            )
        except Exception as e:
            click.echo(f"Error: {e}", err=True)
            sys.exit(1)
237
+
238
+
239
def _process_single(
    video_path: str,
    output: str | None,
    json_output: str | None,
    quality_preset: QualityPreset,
    verbose: bool,
    expert_params: AnalysisParameters,
) -> None:
    """Process a single SJ video by calling the API.

    Args:
        video_path: Path to the input video file.
        output: Optional path for the annotated debug video.
        json_output: Optional path for the JSON metrics file.
        quality_preset: Auto-tuning quality preset.
        verbose: Whether to print a traceback on failure.
        expert_params: Expert overrides for auto-tuned parameters.

    Raises:
        Exception: Any processing error is logged to stderr and re-raised
            so each caller decides how to react — single-video mode exits
            with status 1, batch mode logs and continues.
    """
    try:
        # Map the expert CLI parameters onto API-level overrides.
        # NOTE(review): expert_params.squat_hold_threshold is never
        # forwarded — confirm whether AnalysisOverrides/process_sj_video
        # supports it or the CLI option is currently a no-op.
        overrides = AnalysisOverrides(
            smoothing_window=expert_params.smoothing_window,
            velocity_threshold=expert_params.velocity_threshold,
            min_contact_frames=expert_params.min_contact_frames,
            visibility_threshold=expert_params.visibility_threshold,
        )

        # Call the API function (handles all processing logic)
        metrics = process_sj_video(
            video_path=video_path,
            quality=quality_preset.value,
            output_video=output,
            json_output=json_output,
            overrides=overrides,
            detection_confidence=expert_params.detection_confidence,
            tracking_confidence=expert_params.tracking_confidence,
            mass_kg=expert_params.mass_kg,
            verbose=verbose,
        )

        # Print formatted summary to stdout
        _output_results(metrics, json_output=None)  # Don't write JSON (API already did)

    except Exception as e:
        click.echo(f"Error processing video: {e}", err=True)
        if verbose:
            import traceback

            traceback.print_exc()
        # Bug fix: this previously called sys.exit(1). SystemExit is not a
        # subclass of Exception, so the batch loop's `except Exception`
        # never caught it and the first failing video aborted the entire
        # batch. Re-raising keeps single-mode exit behavior (the caller
        # catches and exits 1) while letting batch mode continue.
        raise
280
+
281
+
282
def _output_results(metrics: SJMetrics, json_output: str | None) -> None:
    """Output analysis results.

    Emits the metrics as JSON — to *json_output* if given, otherwise to
    stdout — then prints a human-readable summary to stderr so it does
    not pollute the machine-readable stream.

    Args:
        metrics: Computed SJ metrics.
        json_output: Path for the JSON file, or None to print JSON to stdout.
    """
    results = metrics.to_dict()
    payload = json.dumps(results, indent=2)

    # Output JSON
    if json_output:
        # Explicit UTF-8: the platform default encoding (e.g. cp1252 on
        # Windows) can fail on non-ASCII JSON content.
        with open(json_output, "w", encoding="utf-8") as f:
            f.write(payload)
        click.echo(f"Metrics saved to: {json_output}", err=True)
    else:
        # Output to stdout via click.echo, consistent with the rest of the CLI.
        click.echo(payload)

    # Print summary
    click.echo("\n" + "=" * 60, err=True)
    click.echo("SJ ANALYSIS RESULTS", err=True)
    click.echo("=" * 60, err=True)
    click.echo(f"Jump height: {metrics.jump_height:.3f} m", err=True)
    click.echo(f"Flight time: {metrics.flight_time * 1000:.1f} ms", err=True)
    click.echo(f"Squat hold duration: {metrics.squat_hold_duration * 1000:.1f} ms", err=True)
    click.echo(f"Concentric duration: {metrics.concentric_duration * 1000:.1f} ms", err=True)
    click.echo(f"Peak concentric velocity: {metrics.peak_concentric_velocity:.3f} m/s", err=True)
    # Power/force lines are skipped when the values are None (presumably
    # when they could not be computed — confirm against SJMetrics).
    if metrics.peak_power is not None:
        click.echo(f"Peak power: {metrics.peak_power:.0f} W", err=True)
        click.echo(f"Mean power: {metrics.mean_power:.0f} W", err=True)
    if metrics.peak_force is not None:
        click.echo(f"Peak force: {metrics.peak_force:.0f} N", err=True)
    click.echo("=" * 60, err=True)
@@ -0,0 +1,215 @@
1
+ """Debug overlay visualization for Squat Jump analysis."""
2
+
3
+ from typing import Any
4
+
5
+ import cv2
6
+ import numpy as np
7
+
8
+
9
class SquatJumpDebugOverlayRenderer:
    """Debug overlay renderer for Squat Jump analysis results.

    Wraps an OpenCV ``VideoWriter`` as a context manager and renders
    per-frame debug information (frame counter, pose landmarks, metrics)
    on top of input frames.
    """

    # (attribute name, text formatter) pairs for the metrics overlay,
    # drawn top-to-bottom in this order when present on the metrics object.
    _METRIC_LINES = (
        ("jump_height", lambda v: f"Jump Height: {v:.3f} m"),
        ("flight_time", lambda v: f"Flight Time: {v * 1000:.1f} ms"),
        ("squat_hold_duration", lambda v: f"Squat Hold: {v * 1000:.1f} ms"),
        ("concentric_duration", lambda v: f"Concentric: {v * 1000:.1f} ms"),
    )

    def __init__(
        self,
        output_path: str,
        input_width: int,
        input_height: int,
        output_width: int,
        output_height: int,
        fps: float,
        timer: Any = None,
    ):
        """Initialize debug overlay renderer.

        Args:
            output_path: Path to output video file
            input_width: Width of input frames
            input_height: Height of input frames
            output_width: Width of output video
            output_height: Height of output video
            fps: Frames per second for output video
            timer: Optional timer for performance profiling
        """
        self.output_path = output_path
        self.input_width = input_width
        self.input_height = input_height
        self.output_width = output_width
        self.output_height = output_height
        self.fps = fps
        self.timer = timer

        # Writer is created lazily in __enter__ and cleared in __exit__.
        self.writer: Any = None
        self.frame_count = 0

    def __enter__(self) -> "SquatJumpDebugOverlayRenderer":
        """Enter context manager and initialize video writer."""
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # type: ignore[attr-defined]
        # NOTE(review): VideoWriter does not raise when it fails to open;
        # consider checking writer.isOpened() and failing fast.
        self.writer = cv2.VideoWriter(
            self.output_path,
            fourcc,
            self.fps,
            (self.output_width, self.output_height),
        )
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Exit context manager and release video writer."""
        if self.writer:
            self.writer.release()
            # Clear the reference so a late write_frame() call becomes a
            # silent no-op instead of writing to a released writer.
            self.writer = None

    def write_frame(self, frame: np.ndarray) -> None:
        """Write a frame to the output video.

        Args:
            frame: Annotated frame to write
        """
        if self.writer:
            self.writer.write(frame)
            self.frame_count += 1

    def render_frame(
        self,
        frame: np.ndarray,
        landmarks: list | None,
        frame_index: int,
        metrics: Any = None,
    ) -> np.ndarray:
        """Render debug overlay on a single frame.

        Args:
            frame: Input frame (BGR format)
            landmarks: Pose landmarks for the frame
            frame_index: Frame index for timeline display
            metrics: Analysis metrics for data display

        Returns:
            Annotated frame with debug overlay
        """
        # Create a copy to avoid modifying the original
        annotated_frame = frame.copy()

        # Resize if needed
        if self.input_width != self.output_width or self.input_height != self.output_height:
            annotated_frame = cv2.resize(
                annotated_frame,
                (self.output_width, self.output_height),
                interpolation=cv2.INTER_LINEAR,
            )

        # TODO: Implement by Computer Vision Engineer
        # This is a placeholder function that needs to be implemented

        # Placeholder: Just draw basic info
        self._draw_frame_info(annotated_frame, frame_index)

        # Placeholder: Draw landmarks if available
        if landmarks:
            self._draw_landmarks(annotated_frame, landmarks)

        # Placeholder: Draw metrics if available
        if metrics:
            self._draw_metrics(annotated_frame, metrics)

        return annotated_frame

    def _draw_frame_info(self, frame: np.ndarray, frame_index: int) -> None:
        """Draw frame information overlay.

        Args:
            frame: Frame to draw on
            frame_index: Current frame index
        """
        # Draw frame counter (green, top-left corner)
        cv2.putText(
            frame,
            f"Frame: {frame_index}",
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 255, 0),
            2,
            cv2.LINE_AA,
        )

    def _draw_landmarks(self, frame: np.ndarray, landmarks: list) -> None:
        """Draw pose landmarks on frame.

        Args:
            frame: Frame to draw on
            landmarks: Pose landmarks data
        """
        # TODO: Implement by Computer Vision Engineer
        # This should draw key joints and connections based on landmarks
        pass

    def _draw_metrics(self, frame: np.ndarray, metrics: Any) -> None:
        """Draw metrics information on frame.

        Iterates _METRIC_LINES and draws one white text line per metric
        attribute present on the metrics object, replacing four previously
        duplicated cv2.putText stanzas with a single loop.

        Args:
            frame: Frame to draw on
            metrics: Metrics object with analysis results
        """
        # TODO: Implement by Computer Vision Engineer
        # This should display current phase, key metrics, and highlights
        y_offset = 60
        for attr, fmt in self._METRIC_LINES:
            if not hasattr(metrics, attr):
                continue
            cv2.putText(
                frame,
                fmt(getattr(metrics, attr)),
                (10, y_offset),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (255, 255, 255),
                2,
                cv2.LINE_AA,
            )
            y_offset += 30