kinemotion 0.73.0__py3-none-any.whl → 0.74.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic. See the registry's advisory page for this version of kinemotion for more details.

@@ -34,9 +34,20 @@ from .timing import (
34
34
  PerformanceTimer,
35
35
  Timer,
36
36
  )
37
+ from .video_analysis_base import (
38
+ AnalysisOverrides,
39
+ JumpAnalysisPipeline,
40
+ VideoAnalysisConfig,
41
+ VideoAnalysisResult,
42
+ )
37
43
  from .video_io import VideoProcessor
38
44
 
39
45
  __all__ = [
46
+ # Video Analysis Base
47
+ "AnalysisOverrides",
48
+ "JumpAnalysisPipeline",
49
+ "VideoAnalysisConfig",
50
+ "VideoAnalysisResult",
40
51
  # Pose tracking
41
52
  "MediaPipePoseTracker",
42
53
  "PoseTrackerFactory",
@@ -137,6 +137,73 @@ def analyze_tracking_quality(avg_visibility: float) -> str:
137
137
  return "high"
138
138
 
139
139
 
140
+ def _compute_fps_baseline_parameters(fps: float) -> tuple[float, int, int]:
141
+ """Compute FPS-based baseline parameters.
142
+
143
+ Args:
144
+ fps: Video frame rate
145
+
146
+ Returns:
147
+ Tuple of (base_velocity_threshold, base_min_contact_frames, base_smoothing_window)
148
+ """
149
+ # Base velocity threshold: 0.012 at 30fps, scaled inversely by fps
150
+ # Must exceed typical MediaPipe landmark jitter (0.5-2% per frame)
151
+ # Previous value of 0.004 was below noise floor, causing false IN_AIR detections
152
+ base_velocity_threshold = 0.012 * (30.0 / fps)
153
+ base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
154
+
155
+ # Smoothing window: Decrease with higher fps for better temporal resolution
156
+ base_smoothing_window = 3 if fps > 30 else 5
157
+
158
+ return base_velocity_threshold, base_min_contact_frames, base_smoothing_window
159
+
160
+
161
def _compute_smoothing_window(
    fps: float,
    preset: _PresetConfig,
    quality_adj: _QualityAdjustment,
) -> int:
    """Compute smoothing window from FPS, preset, and quality adjustments.

    Args:
        fps: Video frame rate
        preset: Quality preset configuration
        quality_adj: Quality-based adjustments

    Returns:
        Odd smoothing window size (required for Savitzky-Golay filter)
    """
    _, _, fps_baseline = _compute_fps_baseline_parameters(fps)

    # Combine the fps baseline with the preset offset and the quality bump,
    # then clamp to the supported [3, 11] range.
    window = fps_baseline + preset.smoothing_offset + quality_adj.smoothing_add
    window = min(11, max(3, window))

    # Savitzky-Golay needs an odd window; even values are bumped up by one.
    return window if window % 2 else window + 1
187
+
188
+
189
+ def _resolve_bilateral_filter(
190
+ preset: _PresetConfig,
191
+ quality_adj: _QualityAdjustment,
192
+ ) -> bool:
193
+ """Resolve whether to enable bilateral filtering.
194
+
195
+ Args:
196
+ preset: Quality preset configuration
197
+ quality_adj: Quality-based adjustments
198
+
199
+ Returns:
200
+ True if bilateral filtering should be enabled
201
+ """
202
+ if preset.force_bilateral is not None:
203
+ return preset.force_bilateral
204
+ return quality_adj.enable_bilateral
205
+
206
+
140
207
  def auto_tune_parameters(
141
208
  characteristics: VideoCharacteristics,
142
209
  quality_preset: QualityPreset = QualityPreset.BALANCED,
@@ -163,42 +230,22 @@ def auto_tune_parameters(
163
230
  fps = characteristics.fps
164
231
  quality = characteristics.tracking_quality
165
232
 
166
- # Get preset configuration
233
+ # Get preset configuration and quality-based adjustments
167
234
  preset = _PRESET_CONFIGS[quality_preset]
168
-
169
- # Get quality-based adjustments
170
235
  quality_adj = _QUALITY_ADJUSTMENTS[quality]
171
236
 
172
237
  # Compute FPS-based baseline parameters
173
- # Base velocity threshold: 0.012 at 30fps, scaled inversely by fps
174
- # Must exceed typical MediaPipe landmark jitter (0.5-2% per frame)
175
- # Previous value of 0.004 was below noise floor, causing false IN_AIR detections
176
- base_velocity_threshold = 0.012 * (30.0 / fps)
177
- base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
178
-
179
- # Smoothing window: Decrease with higher fps for better temporal resolution
180
- if fps <= 30:
181
- base_smoothing_window = 5
182
- else:
183
- base_smoothing_window = 3 # 60fps+ use 3-frame window
238
+ base_velocity_threshold, base_min_contact_frames, _ = _compute_fps_baseline_parameters(fps)
184
239
 
185
- # Apply preset modifiers and quality adjustments
240
+ # Apply preset modifiers
186
241
  velocity_threshold = base_velocity_threshold * preset.velocity_multiplier
187
242
  min_contact_frames = max(2, int(base_min_contact_frames * preset.contact_frames_multiplier))
188
243
 
189
- # Smoothing window = base + preset offset + quality adjustment
190
- smoothing_window = base_smoothing_window + preset.smoothing_offset + quality_adj.smoothing_add
191
- smoothing_window = max(3, min(11, smoothing_window))
244
+ # Compute smoothing window with preset and quality adjustments
245
+ smoothing_window = _compute_smoothing_window(fps, preset, quality_adj)
192
246
 
193
- # Ensure smoothing window is odd (required for Savitzky-Golay)
194
- if smoothing_window % 2 == 0:
195
- smoothing_window += 1
196
-
197
- # Bilateral filtering: preset can override, otherwise use quality-based
198
- if preset.force_bilateral is not None:
199
- bilateral_filter = preset.force_bilateral
200
- else:
201
- bilateral_filter = quality_adj.enable_bilateral
247
+ # Resolve bilateral filtering setting
248
+ bilateral_filter = _resolve_bilateral_filter(preset, quality_adj)
202
249
 
203
250
  # Fixed optimal values
204
251
  polyorder = 2 # Quadratic - optimal for parabolic motion
@@ -24,6 +24,80 @@ def common_output_options(func: Callable) -> Callable: # type: ignore[type-arg]
24
24
  return func
25
25
 
26
26
 
27
def quality_option(func: Callable) -> Callable:  # type: ignore[type-arg]
    """Attach the --quality preset option to a CLI command."""
    decorator = click.option(
        "--quality",
        type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
        default="balanced",
        help=(
            "Analysis quality preset: "
            "fast (quick, less precise), "
            "balanced (default, good for most cases), "
            "accurate (research-grade, slower)"
        ),
        show_default=True,
    )
    return decorator(func)
41
+
42
+
43
def verbose_option(func: Callable) -> Callable:  # type: ignore[type-arg]
    """Attach the -v/--verbose flag to a CLI command."""
    verbose_flag = click.option(
        "--verbose",
        "-v",
        is_flag=True,
        help="Show auto-selected parameters and analysis details",
    )
    return verbose_flag(func)
51
+
52
+
53
def batch_processing_options(func: Callable) -> Callable:  # type: ignore[type-arg]
    """Add batch processing options to a CLI command."""
    # Decorators are applied in the same order the options were originally
    # stacked, so the rendered --help output is unchanged.
    option_decorators = (
        click.option(
            "--batch",
            is_flag=True,
            help="Enable batch processing mode for multiple videos",
        ),
        click.option(
            "--workers",
            type=int,
            default=4,
            help="Number of parallel workers for batch processing (default: 4)",
            show_default=True,
        ),
        click.option(
            "--output-dir",
            type=click.Path(),
            help="Directory for debug video outputs (batch mode only)",
        ),
        click.option(
            "--json-output-dir",
            type=click.Path(),
            help="Directory for JSON metrics outputs (batch mode only)",
        ),
        click.option(
            "--csv-summary",
            type=click.Path(),
            help="Path for CSV summary export (batch mode only)",
        ),
    )
    for decorate in option_decorators:
        func = decorate(func)
    return func
83
+
84
+
85
def common_analysis_options(func: Callable) -> Callable:  # type: ignore[type-arg]
    """Add all common analysis options (output, quality, verbose, batch).

    Combines:
    - common_output_options (--output, --json-output)
    - quality_option (--quality)
    - verbose_option (--verbose)
    - batch_processing_options (--batch, --workers, --output-dir, etc.)
    """
    # Apply the grouped decorators in the original order.
    for add_options in (
        common_output_options,
        quality_option,
        verbose_option,
        batch_processing_options,
    ):
        func = add_options(func)
    return func
99
+
100
+
27
101
  def collect_video_files(video_path: tuple[str, ...]) -> list[str]:
28
102
  """Expand glob patterns and collect all video files."""
29
103
  video_files: list[str] = []
@@ -86,13 +86,11 @@ def calculate_position_stability(
86
86
  if len(positions) < window_size:
87
87
  return float(np.var(positions))
88
88
 
89
- # Calculate rolling variance
90
- rolling_vars = []
91
- for i in range(len(positions) - window_size + 1):
92
- window = positions[i : i + window_size]
93
- rolling_vars.append(np.var(window))
89
+ # Vectorized rolling variance using sliding window view
90
+ from numpy.lib.stride_tricks import sliding_window_view
94
91
 
95
- return float(np.mean(rolling_vars))
92
+ windows = sliding_window_view(positions, window_size)
93
+ return float(np.mean(np.var(windows, axis=1)))
96
94
 
97
95
 
98
96
  def assess_tracking_quality(
@@ -198,3 +198,73 @@ class MetricsValidator(ABC):
198
198
  ValidationResult with all issues and status
199
199
  """
200
200
  pass
201
+
202
+ def _validate_metric_with_bounds(
203
+ self,
204
+ name: str,
205
+ value: float,
206
+ bounds: MetricBounds,
207
+ profile: AthleteProfile | None,
208
+ result: ValidationResult,
209
+ error_suffix: str = "physically impossible",
210
+ format_str: str = "{value}",
211
+ ) -> None:
212
+ """Generic validation for metrics with physical and profile bounds.
213
+
214
+ Args:
215
+ name: Metric name for messages
216
+ value: Metric value
217
+ bounds: Bounds definition
218
+ profile: Athlete profile for expected ranges (can be None)
219
+ result: Validation result to add issues to
220
+ error_suffix: Description for out-of-bounds errors
221
+ format_str: Format string for value display
222
+ """
223
+ formatted_value = format_str.format(value=value)
224
+ display_name = name.replace("_", " ").title()
225
+
226
+ if not bounds.is_physically_possible(value):
227
+ result.add_error(
228
+ name,
229
+ f"{display_name} {formatted_value} {error_suffix}",
230
+ value=value,
231
+ bounds=(bounds.absolute_min, bounds.absolute_max),
232
+ )
233
+ elif profile is not None and bounds.contains(value, profile):
234
+ result.add_info(
235
+ name,
236
+ f"{display_name} {formatted_value} within expected range for {profile.value}",
237
+ value=value,
238
+ )
239
+ elif profile is not None:
240
+ expected_min, expected_max = self._get_profile_range(profile, bounds)
241
+ result.add_warning(
242
+ name,
243
+ f"{display_name} {formatted_value} outside typical range "
244
+ f"[{expected_min:.3f}-{expected_max:.3f}] for {profile.value}",
245
+ value=value,
246
+ bounds=(expected_min, expected_max),
247
+ )
248
+
249
+ @staticmethod
250
+ def _get_profile_range(profile: AthleteProfile, bounds: MetricBounds) -> tuple[float, float]:
251
+ """Get min/max bounds for specific profile.
252
+
253
+ Args:
254
+ profile: Athlete profile
255
+ bounds: Metric bounds definition
256
+
257
+ Returns:
258
+ Tuple of (min, max) bounds for the profile
259
+ """
260
+ profile_ranges = {
261
+ AthleteProfile.ELDERLY: (bounds.practical_min, bounds.recreational_max),
262
+ AthleteProfile.UNTRAINED: (bounds.practical_min, bounds.recreational_max),
263
+ AthleteProfile.RECREATIONAL: (bounds.recreational_min, bounds.recreational_max),
264
+ AthleteProfile.TRAINED: (
265
+ (bounds.recreational_min + bounds.elite_min) / 2,
266
+ (bounds.recreational_max + bounds.elite_max) / 2,
267
+ ),
268
+ AthleteProfile.ELITE: (bounds.elite_min, bounds.elite_max),
269
+ }
270
+ return profile_ranges.get(profile, (bounds.absolute_min, bounds.absolute_max))
@@ -0,0 +1,132 @@
1
+ """Base types and patterns for video analysis APIs.
2
+
3
+ This module defines shared infrastructure for jump-type-specific analysis modules.
4
+ Each jump type (CMJ, Drop Jump, etc.) has its own analysis algorithms, but they
5
+ share common patterns for:
6
+
7
+ 1. Configuration (VideoConfig dataclass)
8
+ 2. Results (VideoResult dataclass)
9
+ 3. Parameter overrides (AnalysisOverrides dataclass)
10
+ 4. Bulk processing utilities
11
+
12
+ To add a new jump type:
13
+ 1. Create a new module: src/kinemotion/{jump_type}/
14
+ 2. Implement analysis algorithms in {jump_type}/analysis.py
15
+ 3. Use the patterns in this module for API structure
16
+ 4. Import process_videos_bulk_generic from pipeline_utils for bulk processing
17
+ """
18
+
19
+ from abc import ABC, abstractmethod
20
+ from dataclasses import dataclass
21
+ from pathlib import Path
22
+ from typing import TYPE_CHECKING
23
+
24
+ if TYPE_CHECKING:
25
+ from ..auto_tuning import QualityPreset
26
+ from ..timing import Timer
27
+
28
+ __all__ = [
29
+ "AnalysisOverrides",
30
+ "VideoAnalysisConfig",
31
+ "VideoAnalysisResult",
32
+ "JumpAnalysisPipeline",
33
+ ]
34
+
35
+
36
+ @dataclass
37
+ class AnalysisOverrides:
38
+ """Optional overrides for analysis parameters.
39
+
40
+ Allows fine-tuning of specific analysis parameters beyond quality presets.
41
+ If None, values will be determined by the quality preset.
42
+
43
+ Common overrides across all jump types:
44
+ - smoothing_window: Number of frames for Savitzky-Golay smoothing
45
+ - velocity_threshold: Threshold for phase detection
46
+ - min_contact_frames: Minimum frames for ground contact
47
+ - visibility_threshold: Minimum landmark visibility (0-1)
48
+ """
49
+
50
+ smoothing_window: int | None = None
51
+ velocity_threshold: float | None = None
52
+ min_contact_frames: int | None = None
53
+ visibility_threshold: float | None = None
54
+
55
+
56
@dataclass
class VideoAnalysisConfig:
    """Base configuration for video analysis.

    Subclasses should add jump-type-specific fields (e.g. drop_start_frame
    for Drop Jump, or additional overrides for CMJ).
    """

    # Path to the input video; the only required field.
    video_path: str
    # Quality preset name.
    quality: str = "balanced"
    # Optional output artifact paths.
    output_video: str | None = None
    json_output: str | None = None
    # Fine-grained parameter overrides beyond the quality preset.
    overrides: AnalysisOverrides | None = None
    # Pose tracker confidences; None defers to downstream defaults.
    detection_confidence: float | None = None
    tracking_confidence: float | None = None
    verbose: bool = False
    timer: "Timer | None" = None
73
+
74
+
75
+ @dataclass
76
+ class VideoAnalysisResult:
77
+ """Base result for video analysis.
78
+
79
+ Subclasses should add jump-type-specific fields.
80
+ """
81
+
82
+ video_path: str
83
+ success: bool
84
+ metrics: object | None = None # Will be CMJMetrics, DropJumpMetrics, etc.
85
+ error: str | None = None
86
+ processing_time: float = 0.0
87
+
88
+
89
class JumpAnalysisPipeline(ABC):
    """Abstract base class for jump analysis pipelines.

    Defines the common structure for processing jump videos. Each jump type
    implements the specific analysis logic while following this pattern.

    Example:
        class CMJPipeline(JumpAnalysisPipeline):
            def analyze(self) -> CMJMetrics:
                # CMJ-specific analysis (backward search algorithm)
                ...

        class DropJumpPipeline(JumpAnalysisPipeline):
            def analyze(self) -> DropJumpMetrics:
                # Drop jump-specific analysis (forward search algorithm)
                ...
    """

    def __init__(
        self,
        video_path: str,
        quality_preset: "QualityPreset",
        overrides: AnalysisOverrides | None,
        timer: "Timer",
    ) -> None:
        """Store the shared pipeline inputs on the instance."""
        self.video_path = video_path
        self.quality_preset = quality_preset
        self.overrides = overrides
        self.timer = timer

    @abstractmethod
    def analyze(self) -> object:
        """Run the jump-specific analysis algorithm.

        Returns:
            Metrics object with jump-type-specific results.
        """
        ...

    def validate_video_exists(self) -> None:
        """Raise FileNotFoundError if the input video path does not exist."""
        video = Path(self.video_path)
        if not video.exists():
            raise FileNotFoundError(f"Video file not found: {self.video_path}")
@@ -50,9 +50,27 @@ class VideoProcessor:
50
50
  self._current_timestamp_ms: int = 0 # Timestamp for the current frame
51
51
 
52
52
  # Read first frame to get actual dimensions
53
- # This is critical for preserving aspect ratio, especially with mobile videos
54
- # that have rotation metadata. OpenCV properties (CAP_PROP_FRAME_WIDTH/HEIGHT)
55
- # may return incorrect dimensions, so we read the actual frame data.
53
+ self._extract_dimensions_from_frame()
54
+
55
+ # Initialize metadata placeholders
56
+ self.rotation = 0 # Will be set by _extract_video_metadata()
57
+ self.codec: str | None = None # Will be set by _extract_video_metadata()
58
+
59
+ # Initialize display dimensions (may be adjusted by SAR metadata)
60
+ self.display_width = self.width
61
+ self.display_height = self.height
62
+ self._extract_video_metadata()
63
+
64
+ # Apply rotation to dimensions if needed
65
+ self._apply_rotation_to_dimensions()
66
+
67
+ def _extract_dimensions_from_frame(self) -> None:
68
+ """Extract video dimensions by reading the first frame.
69
+
70
+ This is critical for preserving aspect ratio, especially with mobile videos
71
+ that have rotation metadata. OpenCV properties (CAP_PROP_FRAME_WIDTH/HEIGHT)
72
+ may return incorrect dimensions, so we read the actual frame data.
73
+ """
56
74
  ret, first_frame = self.cap.read()
57
75
  if ret:
58
76
  # frame.shape is (height, width, channels) - extract actual dimensions
@@ -63,22 +81,13 @@ class VideoProcessor:
63
81
  self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
64
82
  self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
65
83
 
66
- # Extract rotation metadata from video (iPhones store rotation in
67
- # side_data_list). OpenCV ignores rotation metadata, so we need to
68
- # extract and apply it manually
69
- self.rotation = 0 # Will be set by _extract_video_metadata()
70
-
71
- # Extract codec information from video metadata
72
- self.codec: str | None = None # Will be set by _extract_video_metadata()
73
-
74
- # Calculate display dimensions considering SAR (Sample Aspect Ratio)
75
- # Mobile videos often have non-square pixels encoded in SAR metadata
76
- # OpenCV doesn't directly expose SAR, but we need to handle display correctly
77
- self.display_width = self.width
78
- self.display_height = self.height
79
- self._extract_video_metadata()
84
+ def _apply_rotation_to_dimensions(self) -> None:
85
+ """Swap width/height for 90/-90 degree rotations.
80
86
 
81
- # Apply rotation to dimensions if needed
87
+ Extract rotation metadata from video (iPhones store rotation in
88
+ side_data_list). OpenCV ignores rotation metadata, so we need to
89
+ extract and apply it manually.
90
+ """
82
91
  if self.rotation in [90, -90, 270]:
83
92
  # Swap dimensions for 90/-90 degree rotations
84
93
  self.width, self.height = self.height, self.width
@@ -55,103 +55,6 @@ class CMJPhase(Enum):
55
55
  UNKNOWN = "unknown"
56
56
 
57
57
 
58
- @unused(
59
- reason="Alternative implementation not called by pipeline",
60
- since="0.34.0",
61
- )
62
- def find_standing_phase(
63
- positions: FloatArray,
64
- velocities: FloatArray,
65
- fps: float,
66
- min_standing_duration: float = 0.5,
67
- velocity_threshold: float = 0.01,
68
- ) -> int | None:
69
- """
70
- Find the end of standing phase (start of countermovement).
71
-
72
- Looks for a period of low velocity (standing) followed by consistent
73
- downward motion.
74
-
75
- Args:
76
- positions: Array of vertical positions (normalized 0-1)
77
- velocities: Array of vertical velocities
78
- fps: Video frame rate
79
- min_standing_duration: Minimum standing duration in seconds (default: 0.5s)
80
- velocity_threshold: Velocity threshold for standing detection
81
-
82
- Returns:
83
- Frame index where countermovement begins, or None if not detected.
84
- """
85
- min_standing_frames = int(fps * min_standing_duration)
86
-
87
- if len(positions) < min_standing_frames:
88
- return None
89
-
90
- # Find periods of low velocity (standing)
91
- is_standing = np.abs(velocities) < velocity_threshold
92
-
93
- # Look for first sustained standing period
94
- standing_count = 0
95
- standing_end = None
96
-
97
- for i in range(len(is_standing)):
98
- if is_standing[i]:
99
- standing_count += 1
100
- if standing_count >= min_standing_frames:
101
- standing_end = i
102
- else:
103
- if standing_end is not None:
104
- # Found end of standing phase
105
- return standing_end
106
- standing_count = 0
107
-
108
- return None
109
-
110
-
111
- @unused(
112
- reason="Alternative implementation not called by pipeline",
113
- since="0.34.0",
114
- )
115
- def find_countermovement_start(
116
- velocities: FloatArray,
117
- countermovement_threshold: float = 0.015,
118
- min_eccentric_frames: int = 3,
119
- standing_start: int | None = None,
120
- ) -> int | None:
121
- """
122
- Find the start of countermovement (eccentric phase).
123
-
124
- Detects when velocity becomes consistently positive (downward motion in
125
- normalized coords).
126
-
127
- Args:
128
- velocities: Array of SIGNED vertical velocities
129
- countermovement_threshold: Velocity threshold for detecting downward
130
- motion (POSITIVE)
131
- min_eccentric_frames: Minimum consecutive frames of downward motion
132
- standing_start: Optional frame where standing phase ended
133
-
134
- Returns:
135
- Frame index where countermovement begins, or None if not detected.
136
- """
137
- start_frame = standing_start if standing_start is not None else 0
138
-
139
- # Look for sustained downward velocity (POSITIVE in normalized coords)
140
- is_downward = velocities[start_frame:] > countermovement_threshold
141
- consecutive_count = 0
142
-
143
- for i in range(len(is_downward)):
144
- if is_downward[i]:
145
- consecutive_count += 1
146
- if consecutive_count >= min_eccentric_frames:
147
- # Found start of eccentric phase
148
- return start_frame + i - consecutive_count + 1
149
- else:
150
- consecutive_count = 0
151
-
152
- return None
153
-
154
-
155
58
  def find_lowest_point(
156
59
  positions: FloatArray,
157
60
  velocities: FloatArray,
@@ -393,6 +393,24 @@ class CMJVideoConfig:
393
393
  overrides: AnalysisOverrides | None = None
394
394
  detection_confidence: float | None = None
395
395
  tracking_confidence: float | None = None
396
+ verbose: bool = False
397
+ timer: Timer | None = None
398
+ pose_tracker: "MediaPipePoseTracker | None" = None
399
+
400
def to_kwargs(self) -> dict:
    """Convert config to kwargs dict for process_cmj_video."""
    # Keep this tuple in sync with the parameters of process_cmj_video.
    field_names = (
        "video_path",
        "quality",
        "output_video",
        "json_output",
        "overrides",
        "detection_confidence",
        "tracking_confidence",
        "verbose",
        "timer",
        "pose_tracker",
    )
    return {field: getattr(self, field) for field in field_names}
396
414
 
397
415
 
398
416
  @dataclass
@@ -511,6 +529,23 @@ def process_cmj_video(
511
529
  return metrics
512
530
 
513
531
 
532
def process_cmj_video_from_config(
    config: CMJVideoConfig,
) -> CMJMetrics:
    """Process a CMJ video using a configuration object.

    Convenience wrapper around process_cmj_video that accepts a
    CMJVideoConfig instead of individual parameters.

    Args:
        config: Configuration object containing all analysis parameters

    Returns:
        CMJMetrics object containing analysis results
    """
    kwargs = config.to_kwargs()
    return process_cmj_video(**kwargs)
547
+
548
+
514
549
  def process_cmj_videos_bulk(
515
550
  configs: list[CMJVideoConfig],
516
551
  max_workers: int = 4,
@@ -537,17 +572,8 @@ def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
537
572
  start_time = time.perf_counter()
538
573
 
539
574
  try:
540
- metrics = process_cmj_video(
541
- video_path=config.video_path,
542
- quality=config.quality,
543
- output_video=config.output_video,
544
- json_output=config.json_output,
545
- overrides=config.overrides,
546
- detection_confidence=config.detection_confidence,
547
- tracking_confidence=config.tracking_confidence,
548
- verbose=False,
549
- )
550
-
575
+ # Use convenience wrapper to avoid parameter unpacking
576
+ metrics = process_cmj_video_from_config(config)
551
577
  processing_time = time.perf_counter() - start_time
552
578
 
553
579
  return CMJVideoResult(