kinemotion 0.44.0__py3-none-any.whl → 0.45.1__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.

kinemotion/api.py CHANGED
@@ -1,22 +1,16 @@
 "Public API for programmatic use of kinemotion analysis."
 
+import json
 import time
 from collections.abc import Callable
-from concurrent.futures import ProcessPoolExecutor, as_completed
 from dataclasses import dataclass
 from pathlib import Path
 
-import cv2
-import numpy as np
-
-from .cmj.analysis import compute_average_hip_position, detect_cmj_phases
+from .cmj.analysis import detect_cmj_phases
 from .cmj.debug_overlay import CMJDebugOverlayRenderer
 from .cmj.kinematics import CMJMetrics, calculate_cmj_metrics
 from .cmj.metrics_validator import CMJMetricsValidator
 from .core.auto_tuning import (
-    AnalysisParameters,
-    QualityPreset,
-    VideoCharacteristics,
     analyze_video_sample,
     auto_tune_parameters,
 )
@@ -32,13 +26,22 @@ from .core.metadata import (
     create_timestamp,
     get_kinemotion_version,
 )
+from .core.pipeline_utils import (
+    apply_expert_overrides,
+    apply_smoothing,
+    convert_timer_to_stage_names,
+    determine_confidence_levels,
+    extract_vertical_positions,
+    parse_quality_preset,
+    print_verbose_parameters,
+    process_all_frames,
+    process_videos_bulk_generic,
+)
 from .core.pose import PoseTracker
 from .core.quality import assess_jump_quality
-from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.timing import PerformanceTimer
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
-    compute_average_foot_position,
     detect_ground_contact,
     find_contact_phases,
 )
@@ -47,391 +50,6 @@ from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 from .dropjump.metrics_validator import DropJumpMetricsValidator
 
 
-def _parse_quality_preset(quality: str) -> QualityPreset:
-    """Parse and validate quality preset string.
-
-    Args:
-        quality: Quality preset string ('fast', 'balanced', or 'accurate')
-
-    Returns:
-        QualityPreset enum value
-
-    Raises:
-        ValueError: If quality preset is invalid
-    """
-    try:
-        return QualityPreset(quality.lower())
-    except ValueError as e:
-        raise ValueError(
-            f"Invalid quality preset: {quality}. "
-            "Must be 'fast', 'balanced', or 'accurate'"
-        ) from e
-
-
-def _determine_confidence_levels(
-    quality_preset: QualityPreset,
-    detection_confidence: float | None,
-    tracking_confidence: float | None,
-) -> tuple[float, float]:
-    """Determine detection and tracking confidence levels.
-
-    Confidence levels are set based on quality preset and can be overridden
-    by expert parameters.
-
-    Args:
-        quality_preset: Quality preset enum
-        detection_confidence: Optional expert override for detection confidence
-        tracking_confidence: Optional expert override for tracking confidence
-
-    Returns:
-        Tuple of (detection_confidence, tracking_confidence)
-    """
-    # Set initial confidence from quality preset
-    initial_detection_conf = 0.5
-    initial_tracking_conf = 0.5
-
-    if quality_preset == QualityPreset.FAST:
-        initial_detection_conf = 0.3
-        initial_tracking_conf = 0.3
-    elif quality_preset == QualityPreset.ACCURATE:
-        initial_detection_conf = 0.6
-        initial_tracking_conf = 0.6
-
-    # Override with expert values if provided
-    if detection_confidence is not None:
-        initial_detection_conf = detection_confidence
-    if tracking_confidence is not None:
-        initial_tracking_conf = tracking_confidence
-
-    return initial_detection_conf, initial_tracking_conf
-
-
-def _apply_expert_overrides(
-    params: AnalysisParameters,
-    smoothing_window: int | None,
-    velocity_threshold: float | None,
-    min_contact_frames: int | None,
-    visibility_threshold: float | None,
-) -> AnalysisParameters:
-    """Apply expert parameter overrides to auto-tuned parameters.
-
-    Args:
-        params: Auto-tuned parameters object
-        smoothing_window: Optional override for smoothing window
-        velocity_threshold: Optional override for velocity threshold
-        min_contact_frames: Optional override for minimum contact frames
-        visibility_threshold: Optional override for visibility threshold
-
-    Returns:
-        Modified params object (mutated in place)
-    """
-    if smoothing_window is not None:
-        params.smoothing_window = smoothing_window
-    if velocity_threshold is not None:
-        params.velocity_threshold = velocity_threshold
-    if min_contact_frames is not None:
-        params.min_contact_frames = min_contact_frames
-    if visibility_threshold is not None:
-        params.visibility_threshold = visibility_threshold
-    return params
-
-
-def _print_verbose_parameters(
-    video: VideoProcessor,
-    characteristics: VideoCharacteristics,
-    quality_preset: QualityPreset,
-    params: AnalysisParameters,
-) -> None:
-    """Print auto-tuned parameters in verbose mode.
-
-    Args:
-        video: Video processor with fps and dimensions
-        characteristics: Video analysis characteristics
-        quality_preset: Selected quality preset
-        params: Auto-tuned parameters
-    """
-    print("\n" + "=" * 60)
-    print("AUTO-TUNED PARAMETERS")
-    print("=" * 60)
-    print(f"Video FPS: {video.fps:.2f}")
-    print(
-        f"Tracking quality: {characteristics.tracking_quality} "
-        f"(avg visibility: {characteristics.avg_visibility:.2f})"
-    )
-    print(f"Quality preset: {quality_preset.value}")
-    print("\nSelected parameters:")
-    print(f"  smoothing_window: {params.smoothing_window}")
-    print(f"  polyorder: {params.polyorder}")
-    print(f"  velocity_threshold: {params.velocity_threshold:.4f}")
-    print(f"  min_contact_frames: {params.min_contact_frames}")
-    print(f"  visibility_threshold: {params.visibility_threshold}")
-    print(f"  detection_confidence: {params.detection_confidence}")
-    print(f"  tracking_confidence: {params.tracking_confidence}")
-    print(f"  outlier_rejection: {params.outlier_rejection}")
-    print(f"  bilateral_filter: {params.bilateral_filter}")
-    print(f"  use_curvature: {params.use_curvature}")
-    print("=" * 60 + "\n")
-
-
-def _process_all_frames(
-    video: VideoProcessor,
-    tracker: PoseTracker,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-    close_tracker: bool = True,
-    target_debug_fps: float = 30.0,
-    max_debug_dim: int = 720,
-) -> tuple[list, list, list]:
-    """Process all frames from video and extract pose landmarks.
-
-    Optimizes memory and speed by:
-    1. Decimating frames stored for debug video (to target_debug_fps)
-    2. Pre-resizing stored frames (to max_debug_dim)
-
-    Args:
-        video: Video processor to read frames from
-        tracker: Pose tracker for landmark detection
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-        close_tracker: Whether to close the tracker after processing (default: True)
-        target_debug_fps: Target FPS for debug video (default: 30.0)
-        max_debug_dim: Max dimension for debug video frames (default: 720)
-
-    Returns:
-        Tuple of (debug_frames, landmarks_sequence, frame_indices)
-
-    Raises:
-        ValueError: If no frames could be processed
-    """
-    if verbose:
-        print("Tracking pose landmarks...")
-
-    landmarks_sequence = []
-    debug_frames = []
-    frame_indices = []
-
-    # Calculate decimation and resize parameters
-    step = max(1, int(video.fps / target_debug_fps))
-
-    # Calculate resize dimensions maintaining aspect ratio
-    # Logic mirrors BaseDebugOverlayRenderer to ensure consistency
-    w, h = video.display_width, video.display_height
-    scale = 1.0
-    if max(w, h) > max_debug_dim:
-        scale = max_debug_dim / max(w, h)
-
-    debug_w = int(w * scale) // 2 * 2
-    debug_h = int(h * scale) // 2 * 2
-    should_resize = (debug_w != video.width) or (debug_h != video.height)
-
-    frame_idx = 0
-
-    if timer:
-        with timer.measure("pose_tracking"):
-            while True:
-                frame = video.read_frame()
-                if frame is None:
-                    break
-
-                # 1. Track on FULL resolution frame (preserves accuracy)
-                landmarks = tracker.process_frame(frame)
-                landmarks_sequence.append(landmarks)
-
-                # 2. Store frame for debug video ONLY if matches step
-                if frame_idx % step == 0:
-                    # Pre-resize to save memory and later encoding time
-                    if should_resize:
-                        # Use simple linear interpolation for speed (debug only)
-                        processed_frame = cv2.resize(
-                            frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
-                        )
-                    else:
-                        processed_frame = frame
-
-                    debug_frames.append(processed_frame)
-                    frame_indices.append(frame_idx)
-
-                frame_idx += 1
-    else:
-        while True:
-            frame = video.read_frame()
-            if frame is None:
-                break
-
-            landmarks = tracker.process_frame(frame)
-            landmarks_sequence.append(landmarks)
-
-            if frame_idx % step == 0:
-                if should_resize:
-                    processed_frame = cv2.resize(
-                        frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
-                    )
-                else:
-                    processed_frame = frame
-
-                debug_frames.append(processed_frame)
-                frame_indices.append(frame_idx)
-
-            frame_idx += 1
-
-    if close_tracker:
-        tracker.close()
-
-    if not landmarks_sequence:
-        raise ValueError("No frames could be processed from video")
-
-    return debug_frames, landmarks_sequence, frame_indices
-
-
-def _apply_smoothing(
-    landmarks_sequence: list,
-    params: AnalysisParameters,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-) -> list:
-    """Apply smoothing to landmark sequence with auto-tuned parameters.
-
-    Args:
-        landmarks_sequence: Sequence of landmarks from all frames
-        params: Auto-tuned parameters containing smoothing settings
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-
-    Returns:
-        Smoothed landmarks sequence
-    """
-    if params.outlier_rejection or params.bilateral_filter:
-        if verbose:
-            if params.outlier_rejection:
-                print("Smoothing landmarks with outlier rejection...")
-            if params.bilateral_filter:
-                print("Using bilateral temporal filter...")
-        if timer:
-            with timer.measure("smoothing"):
-                return smooth_landmarks_advanced(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                    use_outlier_rejection=params.outlier_rejection,
-                    use_bilateral=params.bilateral_filter,
-                )
-        else:
-            return smooth_landmarks_advanced(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-                use_outlier_rejection=params.outlier_rejection,
-                use_bilateral=params.bilateral_filter,
-            )
-    else:
-        if verbose:
-            print("Smoothing landmarks...")
-        if timer:
-            with timer.measure("smoothing"):
-                return smooth_landmarks(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                )
-        else:
-            return smooth_landmarks(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
-
-
-def _calculate_foot_visibility(frame_landmarks: dict) -> float:
-    """Calculate average visibility of foot landmarks.
-
-    Args:
-        frame_landmarks: Dictionary of landmarks for a frame
-
-    Returns:
-        Average visibility value (0-1)
-    """
-    foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-    foot_vis = [frame_landmarks[key][2] for key in foot_keys if key in frame_landmarks]
-    return float(np.mean(foot_vis)) if foot_vis else 0.0
-
-
-def _extract_vertical_positions(
-    smoothed_landmarks: list,
-    target: str = "foot",
-) -> tuple[np.ndarray, np.ndarray]:
-    """Extract vertical positions and visibilities from smoothed landmarks.
-
-    Args:
-        smoothed_landmarks: Smoothed landmark sequence
-        target: Tracking target "foot" or "hip" (default: "foot")
-
-    Returns:
-        Tuple of (vertical_positions, visibilities) as numpy arrays
-    """
-    position_list: list[float] = []
-    visibilities_list: list[float] = []
-
-    for frame_landmarks in smoothed_landmarks:
-        if frame_landmarks:
-            if target == "hip":
-                _, y = compute_average_hip_position(frame_landmarks)
-                # For hips, we can use average visibility of hips if needed,
-                # but currently _calculate_foot_visibility is specific to feet.
-                # We'll stick to foot visibility for now as it indicates
-                # overall leg tracking quality, or we could implement
-                # _calculate_hip_visibility. For simplicity, we'll use foot
-                # visibility as a proxy for "body visibility" or just use 1.0
-                # since hips are usually visible if feet are. Actually, let's
-                # just use foot visibility for consistency in quality checks.
-                vis = _calculate_foot_visibility(frame_landmarks)
-            else:
-                _, y = compute_average_foot_position(frame_landmarks)
-                vis = _calculate_foot_visibility(frame_landmarks)
-
-            position_list.append(y)
-            visibilities_list.append(vis)
-        else:
-            position_list.append(position_list[-1] if position_list else 0.5)
-            visibilities_list.append(0.0)
-
-    return np.array(position_list), np.array(visibilities_list)
-
-
-def _convert_timer_to_stage_names(
-    timer_metrics: dict[str, float],
-) -> dict[str, float]:
-    """Convert timer metric names to human-readable stage names.
-
-    Args:
-        timer_metrics: Dictionary from PerformanceTimer.get_metrics()
-
-    Returns:
-        Dictionary with human-readable stage names as keys
-    """
-    mapping = {
-        "video_initialization": "Video initialization",
-        "pose_tracking": "Pose tracking",
-        "parameter_auto_tuning": "Parameter auto-tuning",
-        "smoothing": "Smoothing",
-        "vertical_position_extraction": "Vertical position extraction",
-        "ground_contact_detection": "Ground contact detection",
-        "metrics_calculation": "Metrics calculation",
-        "quality_assessment": "Quality assessment",
-        "metadata_building": "Metadata building",
-        "metrics_validation": "Metrics validation",
-        "phase_detection": "Phase detection",
-        "json_serialization": "JSON serialization",
-        "debug_video_generation": "Debug video generation",
-        "debug_video_reencode": "Debug video re-encoding",
-        "frame_rotation": "Frame rotation",
-        "debug_video_resize": "Debug video resizing",
-        "debug_video_copy": "Debug video frame copy",
-        "debug_video_draw": "Debug video drawing",
-        "debug_video_write": "Debug video encoding",
-    }
-    return {mapping.get(k, k): v for k, v in timer_metrics.items()}
-
-
 @dataclass
 class DropJumpVideoResult:
     """Result of processing a single drop jump video."""
@@ -460,6 +78,69 @@ class DropJumpVideoConfig:
     tracking_confidence: float | None = None
 
 
+def _generate_debug_video(
+    output_video: str,
+    frames: list,
+    frame_indices: list[int],
+    video_fps: float,
+    smoothed_landmarks: list,
+    contact_states: list,
+    metrics: DropJumpMetrics,
+    timer: PerformanceTimer | None,
+    verbose: bool,
+) -> None:
+    """Generate debug video with overlay."""
+    if verbose:
+        print(f"Generating debug video: {output_video}")
+
+    if not frames:
+        return
+
+    debug_h, debug_w = frames[0].shape[:2]
+
+    if video_fps > 30:
+        debug_fps = video_fps / (video_fps / 30.0)
+    else:
+        debug_fps = video_fps
+
+    if len(frames) < len(smoothed_landmarks):
+        step = max(1, int(video_fps / 30.0))
+        debug_fps = video_fps / step
+
+    def _render_frames(renderer: DebugOverlayRenderer) -> None:
+        for frame, idx in zip(frames, frame_indices, strict=True):
+            annotated = renderer.render_frame(
+                frame,
+                smoothed_landmarks[idx],
+                contact_states[idx],
+                idx,
+                metrics,
+                use_com=False,
+            )
+            renderer.write_frame(annotated)
+
+    renderer_context = DebugOverlayRenderer(
+        output_video,
+        debug_w,
+        debug_h,
+        debug_w,
+        debug_h,
+        debug_fps,
+        timer=timer,
+    )
+
+    if timer:
+        with timer.measure("debug_video_generation"):
+            with renderer_context as renderer:
+                _render_frames(renderer)
+    else:
+        with renderer_context as renderer:
+            _render_frames(renderer)
+
+    if verbose:
+        print(f"Debug video saved: {output_video}")
+
+
 def process_dropjump_video(
     video_path: str,
     quality: str = "balanced",
@@ -507,34 +188,25 @@ def process_dropjump_video(
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
-    # Set deterministic mode for drop jump reproducibility
-    # Note: MediaPipe has inherent non-determinism (Google issue #3945)
-    # This improves consistency but cannot eliminate all variation
     from .core.determinism import set_deterministic_mode
 
     set_deterministic_mode(seed=42)
 
-    # Start overall timing
     start_time = time.time()
     if timer is None:
         timer = PerformanceTimer()
 
-    # Convert quality string to enum
-    quality_preset = _parse_quality_preset(quality)
+    quality_preset = parse_quality_preset(quality)
 
-    # Load video
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
-            # Determine detection/tracking confidence levels
-            detection_conf, tracking_conf = _determine_confidence_levels(
+            detection_conf, tracking_conf = determine_confidence_levels(
                 quality_preset, detection_confidence, tracking_confidence
             )
 
-            # Process all frames with pose tracking
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
 
-            # Use provided tracker or create new one
             tracker = pose_tracker
             should_close_tracker = False
 
@@ -546,19 +218,17 @@ def process_dropjump_video(
             )
             should_close_tracker = True
 
-            frames, landmarks_sequence, frame_indices = _process_all_frames(
+            frames, landmarks_sequence, frame_indices = process_all_frames(
                 video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
-            # Auto-tune parameters
             with timer.measure("parameter_auto_tuning"):
                 characteristics = analyze_video_sample(
                     landmarks_sequence, video.fps, video.frame_count
                 )
                 params = auto_tune_parameters(characteristics, quality_preset)
 
-                # Apply expert overrides if provided
-                params = _apply_expert_overrides(
+                params = apply_expert_overrides(
                     params,
                     smoothing_window,
                     velocity_threshold,
@@ -566,26 +236,22 @@ def process_dropjump_video(
                     visibility_threshold,
                 )
 
-            # Show selected parameters if verbose
             if verbose:
-                _print_verbose_parameters(
+                print_verbose_parameters(
                     video, characteristics, quality_preset, params
                 )
 
-            # Apply smoothing with auto-tuned parameters
-            smoothed_landmarks = _apply_smoothing(
+            smoothed_landmarks = apply_smoothing(
                 landmarks_sequence, params, verbose, timer
             )
 
-            # Extract vertical positions from feet
             if verbose:
                 print("Extracting foot positions...")
             with timer.measure("vertical_position_extraction"):
-                vertical_positions, visibilities = _extract_vertical_positions(
+                vertical_positions, visibilities = extract_vertical_positions(
                     smoothed_landmarks
                 )
 
-            # Detect ground contact
             if verbose:
                 print("Detecting ground contact...")
             with timer.measure("ground_contact_detection"):
@@ -599,7 +265,6 @@ def process_dropjump_video(
                     polyorder=params.polyorder,
                 )
 
-            # Calculate metrics
             if verbose:
                 print("Calculating metrics...")
             with timer.measure("metrics_calculation"):
@@ -614,24 +279,20 @@ def process_dropjump_video(
                     use_curvature=params.use_curvature,
                 )
 
-            # Assess quality and add confidence scores
             if verbose:
                 print("Assessing tracking quality...")
             with timer.measure("quality_assessment"):
-                # Detect outliers for quality scoring (doesn't affect results)
                 _, outlier_mask = reject_outliers(
                     vertical_positions,
                     use_ransac=True,
                     use_median=True,
-                    interpolate=False,  # Don't modify, just detect
+                    interpolate=False,
                 )
 
-                # Count phases for quality assessment
                 phases = find_contact_phases(contact_states)
                 phases_detected = len(phases) > 0
                 phase_count = len(phases)
 
-                # Perform quality assessment
                 quality_result = assess_jump_quality(
                     visibilities=visibilities,
                     positions=vertical_positions,
@@ -641,13 +302,10 @@ def process_dropjump_video(
                     phase_count=phase_count,
                 )
 
-            # Build algorithm configuration early (but attach metadata later)
             drop_frame = None
             if drop_start_frame is None and metrics.drop_start_frame is not None:
-                # Auto-detected drop start from box
                 drop_frame = metrics.drop_start_frame
             elif drop_start_frame is not None:
-                # Manual drop start provided
                 drop_frame = drop_start_frame
 
             algorithm_config = AlgorithmConfig(
@@ -689,72 +347,19 @@ def process_dropjump_video(
                     print(f" - {warning}")
                 print()
 
-            # Generate debug video (but not JSON yet - we need to attach metadata first)
             if output_video:
-                if verbose:
-                    print(f"Generating debug video: {output_video}")
-
-                # Determine debug video properties from the pre-processed frames
-                debug_h, debug_w = frames[0].shape[:2]
-                if video.fps > 30:
-                    debug_fps = video.fps / (video.fps / 30.0)
-                else:
-                    debug_fps = video.fps
-                # Use approximate 30fps if decimated, or actual if not
-                if len(frames) < len(landmarks_sequence):
-                    # Re-calculate step to get precise FPS
-                    step = max(1, int(video.fps / 30.0))
-                    debug_fps = video.fps / step
-
-                if timer:
-                    with timer.measure("debug_video_generation"):
-                        with DebugOverlayRenderer(
-                            output_video,
-                            debug_w,  # Encoded width = pre-resized width
-                            debug_h,  # Encoded height
-                            debug_w,  # Display width (already corrected)
-                            debug_h,  # Display height
-                            debug_fps,
-                            timer=timer,
-                        ) as renderer:
-                            for frame, idx in zip(frames, frame_indices, strict=True):
-                                annotated = renderer.render_frame(
-                                    frame,
-                                    smoothed_landmarks[idx],
-                                    contact_states[idx],
-                                    idx,
-                                    metrics,
-                                    use_com=False,
-                                )
-                                renderer.write_frame(annotated)
-                    # Capture re-encoding duration separately
-                    with timer.measure("debug_video_reencode"):
-                        pass  # Re-encoding happens in context manager __exit__
-                else:
-                    with DebugOverlayRenderer(
-                        output_video,
-                        debug_w,
-                        debug_h,
-                        debug_w,
-                        debug_h,
-                        debug_fps,
-                        timer=timer,
-                    ) as renderer:
-                        for frame, idx in zip(frames, frame_indices, strict=True):
-                            annotated = renderer.render_frame(
-                                frame,
-                                smoothed_landmarks[idx],
-                                contact_states[idx],
-                                idx,
-                                metrics,
-                                use_com=False,
-                            )
-                            renderer.write_frame(annotated)
-
-                if verbose:
-                    print(f"Debug video saved: {output_video}")
+                _generate_debug_video(
+                    output_video,
+                    frames,
+                    frame_indices,
+                    video.fps,
+                    smoothed_landmarks,
+                    contact_states,
+                    metrics,
+                    timer,
+                    verbose,
+                )
 
-            # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
                 validator = DropJumpMetricsValidator()
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
@@ -765,10 +370,8 @@ def process_dropjump_video(
                 for issue in validation_result.issues:
                     print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
 
-            # NOW create ProcessingInfo with complete timing breakdown
-            # (includes debug video generation timing)
             processing_time = time.time() - start_time
-            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
             processing_info = ProcessingInfo(
                 version=get_kinemotion_version(),
@@ -785,34 +388,27 @@ def process_dropjump_video(
                 algorithm=algorithm_config,
             )
 
-            # Attach complete metadata to metrics
             metrics.result_metadata = result_metadata
 
-            # NOW write JSON after metadata is attached
             if json_output:
                 if timer:
                     with timer.measure("json_serialization"):
                         output_path = Path(json_output)
                         metrics_dict = metrics.to_dict()
-                        import json
-
                         json_str = json.dumps(metrics_dict, indent=2)
                         output_path.write_text(json_str)
                 else:
                     output_path = Path(json_output)
                     metrics_dict = metrics.to_dict()
-                    import json
-
                     json_str = json.dumps(metrics_dict, indent=2)
                     output_path.write_text(json_str)
 
                 if verbose:
                     print(f"Metrics written to: {json_output}")
 
-            # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times_verbose = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times_verbose = convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
                 for stage, duration in stage_times_verbose.items():
@@ -833,76 +429,25 @@ def process_dropjump_videos_bulk(
     progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
 ) -> list[DropJumpVideoResult]:
     """
-    Process multiple drop jump videos in parallel using ProcessPoolExecutor.
-
-    Args:
-        configs: List of DropJumpVideoConfig objects specifying video paths
-            and parameters
-        max_workers: Maximum number of parallel workers (default: 4)
-        progress_callback: Optional callback function called after each video
-            completes.
-            Receives DropJumpVideoResult object.
-
-    Returns:
-        List of DropJumpVideoResult objects, one per input video, in completion order
-
-    Example:
-        >>> configs = [
-        ...     DropJumpVideoConfig("video1.mp4"),
-        ...     DropJumpVideoConfig("video2.mp4", quality="accurate"),
-        ...     DropJumpVideoConfig("video3.mp4", output_video="debug3.mp4"),
-        ... ]
-        >>> results = process_dropjump_videos_bulk(configs, max_workers=4)
-        >>> for result in results:
-        ...     if result.success:
-        ...         print(f"{result.video_path}: {result.metrics.jump_height_m:.3f}m")
-        ...     else:
-        ...         print(f"{result.video_path}: FAILED - {result.error}")
+    Process multiple drop jump videos in parallel.
     """
-    results: list[DropJumpVideoResult] = []
-
-    # Use ProcessPoolExecutor for CPU-bound video processing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        # Submit all jobs
-        future_to_config = {
-            executor.submit(_process_dropjump_video_wrapper, config): config
-            for config in configs
-        }
-
-        # Process results as they complete
-        for future in as_completed(future_to_config):
-            config = future_to_config[future]
-            result: DropJumpVideoResult
-
-            try:
-                result = future.result()
-            except Exception as exc:
-                # Handle unexpected errors
-                result = DropJumpVideoResult(
-                    video_path=config.video_path,
-                    success=False,
-                    error=f"Unexpected error: {str(exc)}",
-                )
-
-            results.append(result)
 
-            # Call progress callback if provided
-            if progress_callback:
-                progress_callback(result)
+    def error_factory(video_path: str, error_msg: str) -> DropJumpVideoResult:
+        return DropJumpVideoResult(
+            video_path=video_path, success=False, error=error_msg
+        )
 
-    return results
+    return process_videos_bulk_generic(
+        configs,
+        _process_dropjump_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
 
 
 def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
-    """
-    Wrapper function for parallel processing. Must be picklable (top-level function).
-
-    Args:
-        config: DropJumpVideoConfig object with processing parameters
-
-    Returns:
-        DropJumpVideoResult object with metrics or error information
-    """
+    """Wrapper function for parallel processing."""
     start_time = time.time()
 
     try:
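Both bulk entry points now delegate to `process_videos_bulk_generic` from `core.pipeline_utils`, whose source is not in this diff. A plausible reconstruction, assembled from the `ProcessPoolExecutor` code removed above and the call sites kept in this file, might look like this:

```python
# Plausible reconstruction only: the real kinemotion/core/pipeline_utils.py is
# not shown in this diff. Assembled from the removed per-flavor implementations.
from collections.abc import Callable
from concurrent.futures import ProcessPoolExecutor, as_completed


def process_videos_bulk_generic(
    configs: list,
    worker: Callable,
    error_factory: Callable[[str, str], object],
    max_workers: int = 4,
    progress_callback: Callable | None = None,
) -> list:
    """Run `worker` over all configs in parallel; results in completion order."""
    results: list = []
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        future_to_config = {
            executor.submit(worker, config): config for config in configs
        }
        for future in as_completed(future_to_config):
            config = future_to_config[future]
            try:
                result = future.result()
            except Exception as exc:
                # Build a flavor-specific failure object via the injected factory
                result = error_factory(config.video_path, f"Unexpected error: {exc}")
            results.append(result)
            if progress_callback:
                progress_callback(result)
    return results
```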
@@ -918,7 +463,7 @@ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVide
             visibility_threshold=config.visibility_threshold,
             detection_confidence=config.detection_confidence,
             tracking_confidence=config.tracking_confidence,
-            verbose=False,  # Disable verbose in parallel mode
+            verbose=False,
         )
 
         processing_time = time.time() - start_time
@@ -1014,28 +559,16 @@ def process_cmj_video(
     Raises:
         ValueError: If video cannot be processed or parameters are invalid
         FileNotFoundError: If video file does not exist
-
-    Example:
-        >>> metrics = process_cmj_video(
-        ...     "athlete_cmj.mp4",
-        ...     quality="balanced",
-        ...     verbose=True
-        ... )
-        >>> print(f"Jump height: {metrics.jump_height:.3f}m")
-        >>> print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
     """
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
-    # Start overall timing
     start_time = time.time()
     if timer is None:
         timer = PerformanceTimer()
 
-    # Convert quality string to enum
-    quality_preset = _parse_quality_preset(quality)
+    quality_preset = parse_quality_preset(quality)
 
-    # Initialize video processor
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
             if verbose:
@@ -1044,16 +577,13 @@ def process_cmj_video(
                     f"{video.frame_count} frames"
                 )
 
-            # Determine confidence levels
-            det_conf, track_conf = _determine_confidence_levels(
+            det_conf, track_conf = determine_confidence_levels(
                 quality_preset, detection_confidence, tracking_confidence
             )
 
-            # Track all frames
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
 
-            # Use provided tracker or create new one
             tracker = pose_tracker
             should_close_tracker = False
 
@@ -1065,19 +595,17 @@ def process_cmj_video(
             )
             should_close_tracker = True
 
-            frames, landmarks_sequence, frame_indices = _process_all_frames(
+            frames, landmarks_sequence, frame_indices = process_all_frames(
                 video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
-            # Auto-tune parameters
             with timer.measure("parameter_auto_tuning"):
                 characteristics = analyze_video_sample(
                     landmarks_sequence, video.fps, video.frame_count
                 )
                 params = auto_tune_parameters(characteristics, quality_preset)
 
-                # Apply expert overrides
-                params = _apply_expert_overrides(
+                params = apply_expert_overrides(
                     params,
                     smoothing_window,
                     velocity_threshold,
@@ -1086,32 +614,27 @@ def process_cmj_video(
                 )
 
             if verbose:
-                _print_verbose_parameters(
+                print_verbose_parameters(
                     video, characteristics, quality_preset, params
                 )
 
-            # Apply smoothing
-            smoothed_landmarks = _apply_smoothing(
+            smoothed_landmarks = apply_smoothing(
                 landmarks_sequence, params, verbose, timer
             )
 
-            # Extract vertical positions
             if verbose:
                 print("Extracting vertical positions (Hip and Foot)...")
             with timer.measure("vertical_position_extraction"):
-                # Primary: Hips (for depth, velocity, general phases)
-                vertical_positions, visibilities = _extract_vertical_positions(
+                vertical_positions, visibilities = extract_vertical_positions(
                     smoothed_landmarks, target="hip"
                 )
 
-                # Secondary: Feet (for precise landing detection)
-                foot_positions, _ = _extract_vertical_positions(
+                foot_positions, _ = extract_vertical_positions(
                     smoothed_landmarks, target="foot"
                 )
 
             tracking_method = "hip_hybrid"
 
-            # Detect CMJ phases
             if verbose:
                 print("Detecting CMJ phases...")
             with timer.measure("phase_detection"):
@@ -1120,7 +643,7 @@ def process_cmj_video(
                     video.fps,
                     window_length=params.smoothing_window,
                     polyorder=params.polyorder,
-                    landing_positions=foot_positions,  # Use feet for landing
+                    landing_positions=foot_positions,
                 )
 
             if phases is None:
@@ -1128,11 +651,9 @@ def process_cmj_video(
 
             standing_end, lowest_point, takeoff_frame, landing_frame = phases
 
-            # Calculate metrics
             if verbose:
                 print("Calculating metrics...")
             with timer.measure("metrics_calculation"):
-                # Use signed velocity for CMJ (need direction information)
                 from .cmj.analysis import compute_signed_velocity
 
                 velocities = compute_signed_velocity(
@@ -1152,23 +673,19 @@ def process_cmj_video(
                     tracking_method=tracking_method,
                 )
 
-            # Assess quality and add confidence scores
             if verbose:
                 print("Assessing tracking quality...")
             with timer.measure("quality_assessment"):
-                # Detect outliers for quality scoring (doesn't affect results)
                 _, outlier_mask = reject_outliers(
                     vertical_positions,
                     use_ransac=True,
                     use_median=True,
-                    interpolate=False,  # Don't modify, just detect
+                    interpolate=False,
                 )
 
-                # Phases detected successfully if we got here
                 phases_detected = True
-                phase_count = 4  # standing, eccentric, concentric, flight
+                phase_count = 4
 
-                # Perform quality assessment
                 quality_result = assess_jump_quality(
                     visibilities=visibilities,
                     positions=vertical_positions,
@@ -1178,7 +695,6 @@ def process_cmj_video(
                     phase_count=phase_count,
                 )
 
-            # Build algorithm config early (but attach metadata later)
             algorithm_config = AlgorithmConfig(
                 detection_method="backward_search",
                 tracking_method="mediapipe_pose",
@@ -1195,7 +711,7 @@ def process_cmj_video(
                     visibility_threshold=params.visibility_threshold,
                     use_curvature_refinement=params.use_curvature,
                 ),
-                drop_detection=None,  # CMJ doesn't have drop detection
+                drop_detection=None,
             )
 
             video_info = VideoInfo(
@@ -1214,12 +730,10 @@ def process_cmj_video(
                     print(f" - {warning}")
                 print()
 
-            # Generate debug video (but not JSON yet - we need to attach metadata first)
             if output_video:
                 if verbose:
                     print(f"Generating debug video: {output_video}")
 
-                # Determine debug video properties from the pre-processed frames
                 debug_h, debug_w = frames[0].shape[:2]
                 step = max(1, int(video.fps / 30.0))
                 debug_fps = video.fps / step
@@ -1233,16 +747,13 @@ def process_cmj_video(
                             debug_w,
                             debug_h,
                             debug_fps,
-                            timer=timer,  # Passing timer here too
+                            timer=timer,
                         ) as renderer:
                             for frame, idx in zip(frames, frame_indices, strict=True):
                                 annotated = renderer.render_frame(
                                     frame, smoothed_landmarks[idx], idx, metrics
                                 )
                                 renderer.write_frame(annotated)
-                    # Capture re-encoding duration separately
-                    with timer.measure("debug_video_reencode"):
-                        pass  # Re-encoding happens in context manager __exit__
                 else:
                     with CMJDebugOverlayRenderer(
                         output_video,
@@ -1251,7 +762,7 @@ def process_cmj_video(
                         debug_w,
                         debug_h,
                         debug_fps,
-                        timer=timer,  # Passing timer here too
+                        timer=timer,
                     ) as renderer:
                         for frame, idx in zip(frames, frame_indices, strict=True):
                             annotated = renderer.render_frame(
@@ -1262,16 +773,13 @@ def process_cmj_video(
                 if verbose:
                     print(f"Debug video saved: {output_video}")
 
-            # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
                 validator = CMJMetricsValidator()
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
                 metrics.validation_result = validation_result
 
-            # NOW create ProcessingInfo with complete timing breakdown
-            # (includes debug video generation timing)
             processing_time = time.time() - start_time
-            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
             processing_info = ProcessingInfo(
                 version=get_kinemotion_version(),
@@ -1288,24 +796,18 @@ def process_cmj_video(
                 algorithm=algorithm_config,
             )
 
-            # Attach complete metadata to metrics
             metrics.result_metadata = result_metadata
 
-            # NOW write JSON after metadata is attached
             if json_output:
                 if timer:
                     with timer.measure("json_serialization"):
                         output_path = Path(json_output)
                         metrics_dict = metrics.to_dict()
-                        import json
-
                         json_str = json.dumps(metrics_dict, indent=2)
                         output_path.write_text(json_str)
                 else:
                     output_path = Path(json_output)
                     metrics_dict = metrics.to_dict()
-                    import json
-
                     json_str = json.dumps(metrics_dict, indent=2)
                     output_path.write_text(json_str)
 
@@ -1317,16 +819,15 @@ def process_cmj_video(
                 for issue in validation_result.issues:
                     print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
 
-            # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
                 for stage, duration in stage_times.items():
                     percentage = (duration / total_time) * 100
                     dur_ms = duration * 1000
-                    print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+                    print(f"{stage:. <40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
                 total_ms = total_time * 1000
                 print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
                 print()
@@ -1344,72 +845,23 @@ def process_cmj_videos_bulk(
     progress_callback: Callable[[CMJVideoResult], None] | None = None,
 ) -> list[CMJVideoResult]:
     """
-    Process multiple CMJ videos in parallel using ProcessPoolExecutor.
-
-    Args:
-        configs: List of CMJVideoConfig objects specifying video paths and parameters
-        max_workers: Maximum number of parallel workers (default: 4)
-        progress_callback: Optional callback function called after each video completes.
-            Receives CMJVideoResult object.
-
-    Returns:
-        List of CMJVideoResult objects, one per input video, in completion order
-
-    Example:
-        >>> configs = [
-        ...     CMJVideoConfig("video1.mp4"),
-        ...     CMJVideoConfig("video2.mp4", quality="accurate"),
-        ...     CMJVideoConfig("video3.mp4", output_video="debug3.mp4"),
-        ... ]
-        >>> results = process_cmj_videos_bulk(configs, max_workers=4)
-        >>> for result in results:
-        ...     if result.success:
-        ...         print(f"{result.video_path}: {result.metrics.jump_height:.3f}m")
-        ...     else:
-        ...         print(f"{result.video_path}: FAILED - {result.error}")
+    Process multiple CMJ videos in parallel.
     """
-    results: list[CMJVideoResult] = []
-
-    # Use ProcessPoolExecutor for CPU-bound video processing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        # Submit all jobs
-        future_to_config = {
-            executor.submit(_process_cmj_video_wrapper, config): config
-            for config in configs
-        }
-
-        # Process results as they complete
-        for future in as_completed(future_to_config):
-            config = future_to_config[future]
-            result: CMJVideoResult
-
-            try:
-                result = future.result()
-                results.append(result)
-            except Exception as e:
-                result = CMJVideoResult(
-                    video_path=config.video_path, success=False, error=str(e)
-                )
-                results.append(result)
 
-            # Call progress callback if provided
-            if progress_callback:
-                progress_callback(result)
+    def error_factory(video_path: str, error_msg: str) -> CMJVideoResult:
+        return CMJVideoResult(video_path=video_path, success=False, error=error_msg)
 
-    return results
+    return process_videos_bulk_generic(
+        configs,
+        _process_cmj_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
 
 
 def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
-    """
-    Wrapper function for parallel CMJ processing. Must be picklable
-    (top-level function).
-
-    Args:
-        config: CMJVideoConfig object with processing parameters
-
-    Returns:
-        CMJVideoResult object with metrics or error information
-    """
+    """Wrapper function for parallel CMJ processing."""
     start_time = time.time()
 
     try:
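The calling pattern from the deleted docstrings still applies, since the public signatures are unchanged. For reference, the CMJ usage example this release drops (taken verbatim from the removed docstring, reformatted as a script):

```python
from kinemotion.api import CMJVideoConfig, process_cmj_videos_bulk

configs = [
    CMJVideoConfig("video1.mp4"),
    CMJVideoConfig("video2.mp4", quality="accurate"),
    CMJVideoConfig("video3.mp4", output_video="debug3.mp4"),
]
results = process_cmj_videos_bulk(configs, max_workers=4)
for result in results:
    if result.success:
        print(f"{result.video_path}: {result.metrics.jump_height:.3f}m")
    else:
        print(f"{result.video_path}: FAILED - {result.error}")
```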
@@ -1424,7 +876,7 @@ def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
             visibility_threshold=config.visibility_threshold,
             detection_confidence=config.detection_confidence,
             tracking_confidence=config.tracking_confidence,
-            verbose=False,  # Disable verbose in parallel mode
+            verbose=False,
         )
 
         processing_time = time.time() - start_time