kinemotion-0.43.0-py3-none-any.whl → kinemotion-0.45.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

kinemotion/api.py CHANGED
@@ -1,21 +1,16 @@
 "Public API for programmatic use of kinemotion analysis."
 
+import json
 import time
 from collections.abc import Callable
-from concurrent.futures import ProcessPoolExecutor, as_completed
 from dataclasses import dataclass
 from pathlib import Path
 
-import numpy as np
-
-from .cmj.analysis import compute_average_hip_position, detect_cmj_phases
+from .cmj.analysis import detect_cmj_phases
 from .cmj.debug_overlay import CMJDebugOverlayRenderer
 from .cmj.kinematics import CMJMetrics, calculate_cmj_metrics
 from .cmj.metrics_validator import CMJMetricsValidator
 from .core.auto_tuning import (
-    AnalysisParameters,
-    QualityPreset,
-    VideoCharacteristics,
     analyze_video_sample,
     auto_tune_parameters,
 )
@@ -31,13 +26,22 @@ from .core.metadata import (
     create_timestamp,
     get_kinemotion_version,
 )
+from .core.pipeline_utils import (
+    apply_expert_overrides,
+    apply_smoothing,
+    convert_timer_to_stage_names,
+    determine_confidence_levels,
+    extract_vertical_positions,
+    parse_quality_preset,
+    print_verbose_parameters,
+    process_all_frames,
+    process_videos_bulk_generic,
+)
 from .core.pose import PoseTracker
 from .core.quality import assess_jump_quality
-from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.timing import PerformanceTimer
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
-    compute_average_foot_position,
     detect_ground_contact,
     find_contact_phases,
 )
@@ -46,338 +50,6 @@ from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 from .dropjump.metrics_validator import DropJumpMetricsValidator
 
 
-def _parse_quality_preset(quality: str) -> QualityPreset:
-    """Parse and validate quality preset string.
-
-    Args:
-        quality: Quality preset string ('fast', 'balanced', or 'accurate')
-
-    Returns:
-        QualityPreset enum value
-
-    Raises:
-        ValueError: If quality preset is invalid
-    """
-    try:
-        return QualityPreset(quality.lower())
-    except ValueError as e:
-        raise ValueError(
-            f"Invalid quality preset: {quality}. "
-            "Must be 'fast', 'balanced', or 'accurate'"
-        ) from e
-
-
-def _determine_confidence_levels(
-    quality_preset: QualityPreset,
-    detection_confidence: float | None,
-    tracking_confidence: float | None,
-) -> tuple[float, float]:
-    """Determine detection and tracking confidence levels.
-
-    Confidence levels are set based on quality preset and can be overridden
-    by expert parameters.
-
-    Args:
-        quality_preset: Quality preset enum
-        detection_confidence: Optional expert override for detection confidence
-        tracking_confidence: Optional expert override for tracking confidence
-
-    Returns:
-        Tuple of (detection_confidence, tracking_confidence)
-    """
-    # Set initial confidence from quality preset
-    initial_detection_conf = 0.5
-    initial_tracking_conf = 0.5
-
-    if quality_preset == QualityPreset.FAST:
-        initial_detection_conf = 0.3
-        initial_tracking_conf = 0.3
-    elif quality_preset == QualityPreset.ACCURATE:
-        initial_detection_conf = 0.6
-        initial_tracking_conf = 0.6
-
-    # Override with expert values if provided
-    if detection_confidence is not None:
-        initial_detection_conf = detection_confidence
-    if tracking_confidence is not None:
-        initial_tracking_conf = tracking_confidence
-
-    return initial_detection_conf, initial_tracking_conf
-
-
-def _apply_expert_overrides(
-    params: AnalysisParameters,
-    smoothing_window: int | None,
-    velocity_threshold: float | None,
-    min_contact_frames: int | None,
-    visibility_threshold: float | None,
-) -> AnalysisParameters:
-    """Apply expert parameter overrides to auto-tuned parameters.
-
-    Args:
-        params: Auto-tuned parameters object
-        smoothing_window: Optional override for smoothing window
-        velocity_threshold: Optional override for velocity threshold
-        min_contact_frames: Optional override for minimum contact frames
-        visibility_threshold: Optional override for visibility threshold
-
-    Returns:
-        Modified params object (mutated in place)
-    """
-    if smoothing_window is not None:
-        params.smoothing_window = smoothing_window
-    if velocity_threshold is not None:
-        params.velocity_threshold = velocity_threshold
-    if min_contact_frames is not None:
-        params.min_contact_frames = min_contact_frames
-    if visibility_threshold is not None:
-        params.visibility_threshold = visibility_threshold
-    return params
-
-
-def _print_verbose_parameters(
-    video: VideoProcessor,
-    characteristics: VideoCharacteristics,
-    quality_preset: QualityPreset,
-    params: AnalysisParameters,
-) -> None:
-    """Print auto-tuned parameters in verbose mode.
-
-    Args:
-        video: Video processor with fps and dimensions
-        characteristics: Video analysis characteristics
-        quality_preset: Selected quality preset
-        params: Auto-tuned parameters
-    """
-    print("\n" + "=" * 60)
-    print("AUTO-TUNED PARAMETERS")
-    print("=" * 60)
-    print(f"Video FPS: {video.fps:.2f}")
-    print(
-        f"Tracking quality: {characteristics.tracking_quality} "
-        f"(avg visibility: {characteristics.avg_visibility:.2f})"
-    )
-    print(f"Quality preset: {quality_preset.value}")
-    print("\nSelected parameters:")
-    print(f"  smoothing_window: {params.smoothing_window}")
-    print(f"  polyorder: {params.polyorder}")
-    print(f"  velocity_threshold: {params.velocity_threshold:.4f}")
-    print(f"  min_contact_frames: {params.min_contact_frames}")
-    print(f"  visibility_threshold: {params.visibility_threshold}")
-    print(f"  detection_confidence: {params.detection_confidence}")
-    print(f"  tracking_confidence: {params.tracking_confidence}")
-    print(f"  outlier_rejection: {params.outlier_rejection}")
-    print(f"  bilateral_filter: {params.bilateral_filter}")
-    print(f"  use_curvature: {params.use_curvature}")
-    print("=" * 60 + "\n")
-
-
-def _process_all_frames(
-    video: VideoProcessor,
-    tracker: PoseTracker,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-    close_tracker: bool = True,
-) -> tuple[list, list]:
-    """Process all frames from video and extract pose landmarks.
-
-    Args:
-        video: Video processor to read frames from
-        tracker: Pose tracker for landmark detection
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-        close_tracker: Whether to close the tracker after processing (default: True)
-
-    Returns:
-        Tuple of (frames, landmarks_sequence)
-
-    Raises:
-        ValueError: If no frames could be processed
-    """
-    if verbose:
-        print("Tracking pose landmarks...")
-
-    landmarks_sequence = []
-    frames = []
-
-    if timer:
-        with timer.measure("pose_tracking"):
-            while True:
-                frame = video.read_frame()
-                if frame is None:
-                    break
-
-                frames.append(frame)
-                landmarks = tracker.process_frame(frame)
-                landmarks_sequence.append(landmarks)
-    else:
-        while True:
-            frame = video.read_frame()
-            if frame is None:
-                break
-
-            frames.append(frame)
-            landmarks = tracker.process_frame(frame)
-            landmarks_sequence.append(landmarks)
-
-    if close_tracker:
-        tracker.close()
-
-    if not landmarks_sequence:
-        raise ValueError("No frames could be processed from video")
-
-    return frames, landmarks_sequence
-
-
-def _apply_smoothing(
-    landmarks_sequence: list,
-    params: AnalysisParameters,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-) -> list:
-    """Apply smoothing to landmark sequence with auto-tuned parameters.
-
-    Args:
-        landmarks_sequence: Sequence of landmarks from all frames
-        params: Auto-tuned parameters containing smoothing settings
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-
-    Returns:
-        Smoothed landmarks sequence
-    """
-    if params.outlier_rejection or params.bilateral_filter:
-        if verbose:
-            if params.outlier_rejection:
-                print("Smoothing landmarks with outlier rejection...")
-            if params.bilateral_filter:
-                print("Using bilateral temporal filter...")
-        if timer:
-            with timer.measure("smoothing"):
-                return smooth_landmarks_advanced(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                    use_outlier_rejection=params.outlier_rejection,
-                    use_bilateral=params.bilateral_filter,
-                )
-        else:
-            return smooth_landmarks_advanced(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-                use_outlier_rejection=params.outlier_rejection,
-                use_bilateral=params.bilateral_filter,
-            )
-    else:
-        if verbose:
-            print("Smoothing landmarks...")
-        if timer:
-            with timer.measure("smoothing"):
-                return smooth_landmarks(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                )
-        else:
-            return smooth_landmarks(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
-
-
-def _calculate_foot_visibility(frame_landmarks: dict) -> float:
-    """Calculate average visibility of foot landmarks.
-
-    Args:
-        frame_landmarks: Dictionary of landmarks for a frame
-
-    Returns:
-        Average visibility value (0-1)
-    """
-    foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-    foot_vis = [frame_landmarks[key][2] for key in foot_keys if key in frame_landmarks]
-    return float(np.mean(foot_vis)) if foot_vis else 0.0
-
-
-def _extract_vertical_positions(
-    smoothed_landmarks: list,
-    target: str = "foot",
-) -> tuple[np.ndarray, np.ndarray]:
-    """Extract vertical positions and visibilities from smoothed landmarks.
-
-    Args:
-        smoothed_landmarks: Smoothed landmark sequence
-        target: Tracking target "foot" or "hip" (default: "foot")
-
-    Returns:
-        Tuple of (vertical_positions, visibilities) as numpy arrays
-    """
-    position_list: list[float] = []
-    visibilities_list: list[float] = []
-
-    for frame_landmarks in smoothed_landmarks:
-        if frame_landmarks:
-            if target == "hip":
-                _, y = compute_average_hip_position(frame_landmarks)
-                # For hips, we can use average visibility of hips if needed,
-                # but currently _calculate_foot_visibility is specific to feet.
-                # We'll stick to foot visibility for now as it indicates
-                # overall leg tracking quality, or we could implement
-                # _calculate_hip_visibility. For simplicity, we'll use foot
-                # visibility as a proxy for "body visibility" or just use 1.0
-                # since hips are usually visible if feet are. Actually, let's
-                # just use foot visibility for consistency in quality checks.
-                vis = _calculate_foot_visibility(frame_landmarks)
-            else:
-                _, y = compute_average_foot_position(frame_landmarks)
-                vis = _calculate_foot_visibility(frame_landmarks)
-
-            position_list.append(y)
-            visibilities_list.append(vis)
-        else:
-            position_list.append(position_list[-1] if position_list else 0.5)
-            visibilities_list.append(0.0)
-
-    return np.array(position_list), np.array(visibilities_list)
-
-
-def _convert_timer_to_stage_names(
-    timer_metrics: dict[str, float],
-) -> dict[str, float]:
-    """Convert timer metric names to human-readable stage names.
-
-    Args:
-        timer_metrics: Dictionary from PerformanceTimer.get_metrics()
-
-    Returns:
-        Dictionary with human-readable stage names as keys
-    """
-    mapping = {
-        "video_initialization": "Video initialization",
-        "pose_tracking": "Pose tracking",
-        "parameter_auto_tuning": "Parameter auto-tuning",
-        "smoothing": "Smoothing",
-        "vertical_position_extraction": "Vertical position extraction",
-        "ground_contact_detection": "Ground contact detection",
-        "metrics_calculation": "Metrics calculation",
-        "quality_assessment": "Quality assessment",
-        "metadata_building": "Metadata building",
-        "metrics_validation": "Metrics validation",
-        "phase_detection": "Phase detection",
-        "json_serialization": "JSON serialization",
-        "debug_video_generation": "Debug video generation",
-        "debug_video_reencode": "Debug video re-encoding",
-        "frame_rotation": "Frame rotation",
-        "debug_video_resize": "Debug video resizing",
-        "debug_video_copy": "Debug video frame copy",
-        "debug_video_draw": "Debug video drawing",
-        "debug_video_write": "Debug video encoding",
-    }
-    return {mapping.get(k, k): v for k, v in timer_metrics.items()}
-
-
 @dataclass
 class DropJumpVideoResult:
     """Result of processing a single drop jump video."""
@@ -453,34 +125,25 @@ def process_dropjump_video(
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
-    # Set deterministic mode for drop jump reproducibility
-    # Note: MediaPipe has inherent non-determinism (Google issue #3945)
-    # This improves consistency but cannot eliminate all variation
     from .core.determinism import set_deterministic_mode
 
     set_deterministic_mode(seed=42)
 
-    # Start overall timing
     start_time = time.time()
     if timer is None:
         timer = PerformanceTimer()
 
-    # Convert quality string to enum
-    quality_preset = _parse_quality_preset(quality)
+    quality_preset = parse_quality_preset(quality)
 
-    # Load video
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
-            # Determine detection/tracking confidence levels
-            detection_conf, tracking_conf = _determine_confidence_levels(
+            detection_conf, tracking_conf = determine_confidence_levels(
                 quality_preset, detection_confidence, tracking_confidence
             )
 
-            # Process all frames with pose tracking
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
 
-            # Use provided tracker or create new one
             tracker = pose_tracker
             should_close_tracker = False
 
@@ -492,19 +155,17 @@ def process_dropjump_video(
                 )
                 should_close_tracker = True
 
-            frames, landmarks_sequence = _process_all_frames(
+            frames, landmarks_sequence, frame_indices = process_all_frames(
                 video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
-            # Analyze video characteristics and auto-tune parameters
             with timer.measure("parameter_auto_tuning"):
                 characteristics = analyze_video_sample(
                     landmarks_sequence, video.fps, video.frame_count
                 )
                 params = auto_tune_parameters(characteristics, quality_preset)
 
-                # Apply expert overrides if provided
-                params = _apply_expert_overrides(
+                params = apply_expert_overrides(
                     params,
                     smoothing_window,
                     velocity_threshold,
@@ -512,26 +173,22 @@ def process_dropjump_video(
                     visibility_threshold,
                 )
 
-                # Show selected parameters if verbose
                 if verbose:
-                    _print_verbose_parameters(
+                    print_verbose_parameters(
                         video, characteristics, quality_preset, params
                     )
 
-            # Apply smoothing with auto-tuned parameters
-            smoothed_landmarks = _apply_smoothing(
+            smoothed_landmarks = apply_smoothing(
                 landmarks_sequence, params, verbose, timer
             )
 
-            # Extract vertical positions from feet
             if verbose:
                 print("Extracting foot positions...")
             with timer.measure("vertical_position_extraction"):
-                vertical_positions, visibilities = _extract_vertical_positions(
+                vertical_positions, visibilities = extract_vertical_positions(
                     smoothed_landmarks
                 )
 
-            # Detect ground contact
             if verbose:
                 print("Detecting ground contact...")
             with timer.measure("ground_contact_detection"):
@@ -545,7 +202,6 @@ def process_dropjump_video(
                     polyorder=params.polyorder,
                 )
 
-            # Calculate metrics
             if verbose:
                 print("Calculating metrics...")
             with timer.measure("metrics_calculation"):
@@ -560,24 +216,20 @@ def process_dropjump_video(
                     use_curvature=params.use_curvature,
                 )
 
-            # Assess quality and add confidence scores
             if verbose:
                 print("Assessing tracking quality...")
             with timer.measure("quality_assessment"):
-                # Detect outliers for quality scoring (doesn't affect results)
                 _, outlier_mask = reject_outliers(
                     vertical_positions,
                     use_ransac=True,
                     use_median=True,
-                    interpolate=False,  # Don't modify, just detect
+                    interpolate=False,
                 )
 
-                # Count phases for quality assessment
                 phases = find_contact_phases(contact_states)
                 phases_detected = len(phases) > 0
                 phase_count = len(phases)
 
-                # Perform quality assessment
                 quality_result = assess_jump_quality(
                     visibilities=visibilities,
                     positions=vertical_positions,
587
239
  phase_count=phase_count,
588
240
  )
589
241
 
590
- # Build algorithm configuration early (but attach metadata later)
591
242
  drop_frame = None
592
243
  if drop_start_frame is None and metrics.drop_start_frame is not None:
593
- # Auto-detected drop start from box
594
244
  drop_frame = metrics.drop_start_frame
595
245
  elif drop_start_frame is not None:
596
- # Manual drop start provided
597
246
  drop_frame = drop_start_frame
598
247
 
599
248
  algorithm_config = AlgorithmConfig(
@@ -635,51 +284,58 @@ def process_dropjump_video(
                     print(f"  - {warning}")
                 print()
 
-            # Generate debug video (but not JSON yet - we need to attach metadata first)
             if output_video:
                 if verbose:
                     print(f"Generating debug video: {output_video}")
 
+                debug_h, debug_w = frames[0].shape[:2]
+                if video.fps > 30:
+                    debug_fps = video.fps / (video.fps / 30.0)
+                else:
+                    debug_fps = video.fps
+                if len(frames) < len(landmarks_sequence):
+                    step = max(1, int(video.fps / 30.0))
+                    debug_fps = video.fps / step
+
                 if timer:
                     with timer.measure("debug_video_generation"):
                         with DebugOverlayRenderer(
                             output_video,
-                            video.width,
-                            video.height,
-                            video.display_width,
-                            video.display_height,
-                            video.fps,
+                            debug_w,
+                            debug_h,
+                            debug_w,
+                            debug_h,
+                            debug_fps,
                             timer=timer,
                         ) as renderer:
-                            for i, frame in enumerate(frames):
+                            for frame, idx in zip(frames, frame_indices, strict=True):
                                 annotated = renderer.render_frame(
                                     frame,
-                                    smoothed_landmarks[i],
-                                    contact_states[i],
-                                    i,
+                                    smoothed_landmarks[idx],
+                                    contact_states[idx],
+                                    idx,
                                     metrics,
                                     use_com=False,
                                 )
                                 renderer.write_frame(annotated)
-                        # Capture re-encoding duration separately
                         with timer.measure("debug_video_reencode"):
-                            pass  # Re-encoding happens in context manager __exit__
+                            pass
                 else:
                     with DebugOverlayRenderer(
                         output_video,
-                        video.width,
-                        video.height,
-                        video.display_width,
-                        video.display_height,
-                        video.fps,
+                        debug_w,
+                        debug_h,
+                        debug_w,
+                        debug_h,
+                        debug_fps,
                         timer=timer,
                     ) as renderer:
-                        for i, frame in enumerate(frames):
+                        for frame, idx in zip(frames, frame_indices, strict=True):
                             annotated = renderer.render_frame(
                                 frame,
-                                smoothed_landmarks[i],
-                                contact_states[i],
-                                i,
+                                smoothed_landmarks[idx],
+                                contact_states[idx],
+                                idx,
                                 metrics,
                                 use_com=False,
                             )
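The rewritten debug-video branch sizes the renderer from the first captured frame and pairs each kept frame with its original index via frame_indices, so overlays stay aligned even when process_all_frames subsamples a high-fps recording; the writer fps is reduced by the same step so playback speed is preserved. A standalone sketch of that fps arithmetic (the 120 fps input is an example value, and the subsampling step of process_all_frames is inferred from this diff, not shown in it):

fps = 120.0
step = max(1, int(fps / 30.0))  # keep every 4th frame of a 120 fps clip
debug_fps = fps / step          # 30.0: the subsampled debug video plays in real time

# At 30 fps or below the step collapses to 1 and the rate is unchanged.
assert max(1, int(30.0 / 30.0)) == 1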
@@ -688,7 +344,6 @@ def process_dropjump_video(
             if verbose:
                 print(f"Debug video saved: {output_video}")
 
-            # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
                 validator = DropJumpMetricsValidator()
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
@@ -699,10 +354,8 @@ def process_dropjump_video(
                 for issue in validation_result.issues:
                     print(f"  [{issue.severity.value}] {issue.metric}: {issue.message}")
 
-            # NOW create ProcessingInfo with complete timing breakdown
-            # (includes debug video generation timing)
             processing_time = time.time() - start_time
-            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
             processing_info = ProcessingInfo(
                 version=get_kinemotion_version(),
@@ -719,34 +372,27 @@ def process_dropjump_video(
                 algorithm=algorithm_config,
             )
 
-            # Attach complete metadata to metrics
             metrics.result_metadata = result_metadata
 
-            # NOW write JSON after metadata is attached
             if json_output:
                 if timer:
                     with timer.measure("json_serialization"):
                         output_path = Path(json_output)
                         metrics_dict = metrics.to_dict()
-                        import json
-
                         json_str = json.dumps(metrics_dict, indent=2)
                         output_path.write_text(json_str)
                 else:
                     output_path = Path(json_output)
                     metrics_dict = metrics.to_dict()
-                    import json
-
                     json_str = json.dumps(metrics_dict, indent=2)
                     output_path.write_text(json_str)
 
                 if verbose:
                     print(f"Metrics written to: {json_output}")
 
-            # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times_verbose = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times_verbose = convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
                 for stage, duration in stage_times_verbose.items():
@@ -767,76 +413,25 @@ def process_dropjump_videos_bulk(
     progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
 ) -> list[DropJumpVideoResult]:
     """
-    Process multiple drop jump videos in parallel using ProcessPoolExecutor.
-
-    Args:
-        configs: List of DropJumpVideoConfig objects specifying video paths
-            and parameters
-        max_workers: Maximum number of parallel workers (default: 4)
-        progress_callback: Optional callback function called after each video
-            completes.
-            Receives DropJumpVideoResult object.
-
-    Returns:
-        List of DropJumpVideoResult objects, one per input video, in completion order
-
-    Example:
-        >>> configs = [
-        ...     DropJumpVideoConfig("video1.mp4"),
-        ...     DropJumpVideoConfig("video2.mp4", quality="accurate"),
-        ...     DropJumpVideoConfig("video3.mp4", output_video="debug3.mp4"),
-        ... ]
-        >>> results = process_dropjump_videos_bulk(configs, max_workers=4)
-        >>> for result in results:
-        ...     if result.success:
-        ...         print(f"{result.video_path}: {result.metrics.jump_height_m:.3f}m")
-        ...     else:
-        ...         print(f"{result.video_path}: FAILED - {result.error}")
+    Process multiple drop jump videos in parallel.
     """
-    results: list[DropJumpVideoResult] = []
-
-    # Use ProcessPoolExecutor for CPU-bound video processing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        # Submit all jobs
-        future_to_config = {
-            executor.submit(_process_dropjump_video_wrapper, config): config
-            for config in configs
-        }
-
-        # Process results as they complete
-        for future in as_completed(future_to_config):
-            config = future_to_config[future]
-            result: DropJumpVideoResult
-
-            try:
-                result = future.result()
-            except Exception as exc:
-                # Handle unexpected errors
-                result = DropJumpVideoResult(
-                    video_path=config.video_path,
-                    success=False,
-                    error=f"Unexpected error: {str(exc)}",
-                )
 
-            results.append(result)
-
-            # Call progress callback if provided
-            if progress_callback:
-                progress_callback(result)
+    def error_factory(video_path: str, error_msg: str) -> DropJumpVideoResult:
+        return DropJumpVideoResult(
+            video_path=video_path, success=False, error=error_msg
+        )
 
-    return results
+    return process_videos_bulk_generic(
+        configs,
+        _process_dropjump_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
 
 
 def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
-    """
-    Wrapper function for parallel processing. Must be picklable (top-level function).
-
-    Args:
-        config: DropJumpVideoConfig object with processing parameters
-
-    Returns:
-        DropJumpVideoResult object with metrics or error information
-    """
+    """Wrapper function for parallel processing."""
     start_time = time.time()
 
     try:
@@ -852,7 +447,7 @@ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVide
             visibility_threshold=config.visibility_threshold,
             detection_confidence=config.detection_confidence,
             tracking_confidence=config.tracking_confidence,
-            verbose=False,  # Disable verbose in parallel mode
+            verbose=False,
         )
 
         processing_time = time.time() - start_time
@@ -948,28 +543,16 @@ def process_cmj_video(
     Raises:
         ValueError: If video cannot be processed or parameters are invalid
         FileNotFoundError: If video file does not exist
-
-    Example:
-        >>> metrics = process_cmj_video(
-        ...     "athlete_cmj.mp4",
-        ...     quality="balanced",
-        ...     verbose=True
-        ... )
-        >>> print(f"Jump height: {metrics.jump_height:.3f}m")
-        >>> print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
     """
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
-    # Start overall timing
     start_time = time.time()
     if timer is None:
         timer = PerformanceTimer()
 
-    # Convert quality string to enum
-    quality_preset = _parse_quality_preset(quality)
+    quality_preset = parse_quality_preset(quality)
 
-    # Initialize video processor
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
             if verbose:
978
561
  f"{video.frame_count} frames"
979
562
  )
980
563
 
981
- # Determine confidence levels
982
- det_conf, track_conf = _determine_confidence_levels(
564
+ det_conf, track_conf = determine_confidence_levels(
983
565
  quality_preset, detection_confidence, tracking_confidence
984
566
  )
985
567
 
986
- # Track all frames
987
568
  if verbose:
988
569
  print("Processing all frames with MediaPipe pose tracking...")
989
570
 
990
- # Use provided tracker or create new one
991
571
  tracker = pose_tracker
992
572
  should_close_tracker = False
993
573
 
@@ -999,19 +579,17 @@ def process_cmj_video(
                 )
                 should_close_tracker = True
 
-            frames, landmarks_sequence = _process_all_frames(
+            frames, landmarks_sequence, frame_indices = process_all_frames(
                 video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
-            # Auto-tune parameters
             with timer.measure("parameter_auto_tuning"):
                 characteristics = analyze_video_sample(
                     landmarks_sequence, video.fps, video.frame_count
                 )
                 params = auto_tune_parameters(characteristics, quality_preset)
 
-                # Apply expert overrides
-                params = _apply_expert_overrides(
+                params = apply_expert_overrides(
                     params,
                     smoothing_window,
                     velocity_threshold,
@@ -1020,32 +598,27 @@ def process_cmj_video(
                 )
 
                 if verbose:
-                    _print_verbose_parameters(
+                    print_verbose_parameters(
                         video, characteristics, quality_preset, params
                     )
 
-            # Apply smoothing
-            smoothed_landmarks = _apply_smoothing(
+            smoothed_landmarks = apply_smoothing(
                 landmarks_sequence, params, verbose, timer
             )
 
-            # Extract vertical positions
             if verbose:
                 print("Extracting vertical positions (Hip and Foot)...")
             with timer.measure("vertical_position_extraction"):
-                # Primary: Hips (for depth, velocity, general phases)
-                vertical_positions, visibilities = _extract_vertical_positions(
+                vertical_positions, visibilities = extract_vertical_positions(
                     smoothed_landmarks, target="hip"
                 )
 
-                # Secondary: Feet (for precise landing detection)
-                foot_positions, _ = _extract_vertical_positions(
+                foot_positions, _ = extract_vertical_positions(
                     smoothed_landmarks, target="foot"
                 )
 
             tracking_method = "hip_hybrid"
 
-            # Detect CMJ phases
             if verbose:
                 print("Detecting CMJ phases...")
             with timer.measure("phase_detection"):
@@ -1054,7 +627,7 @@ def process_cmj_video(
                     video.fps,
                     window_length=params.smoothing_window,
                     polyorder=params.polyorder,
-                    landing_positions=foot_positions,  # Use feet for landing
+                    landing_positions=foot_positions,
                 )
 
             if phases is None:
@@ -1062,11 +635,9 @@ def process_cmj_video(
 
             standing_end, lowest_point, takeoff_frame, landing_frame = phases
 
-            # Calculate metrics
             if verbose:
                 print("Calculating metrics...")
             with timer.measure("metrics_calculation"):
-                # Use signed velocity for CMJ (need direction information)
                 from .cmj.analysis import compute_signed_velocity
 
                 velocities = compute_signed_velocity(
@@ -1086,23 +657,19 @@ def process_cmj_video(
                     tracking_method=tracking_method,
                 )
 
-            # Assess quality and add confidence scores
             if verbose:
                 print("Assessing tracking quality...")
             with timer.measure("quality_assessment"):
-                # Detect outliers for quality scoring (doesn't affect results)
                 _, outlier_mask = reject_outliers(
                     vertical_positions,
                     use_ransac=True,
                     use_median=True,
-                    interpolate=False,  # Don't modify, just detect
+                    interpolate=False,
                 )
 
-                # Phases detected successfully if we got here
                 phases_detected = True
-                phase_count = 4  # standing, eccentric, concentric, flight
+                phase_count = 4
 
-                # Perform quality assessment
                 quality_result = assess_jump_quality(
                     visibilities=visibilities,
                     positions=vertical_positions,
@@ -1112,7 +679,6 @@ def process_cmj_video(
                     phase_count=phase_count,
                 )
 
-            # Build algorithm config early (but attach metadata later)
             algorithm_config = AlgorithmConfig(
                 detection_method="backward_search",
                 tracking_method="mediapipe_pose",
@@ -1129,7 +695,7 @@ def process_cmj_video(
                     visibility_threshold=params.visibility_threshold,
                     use_curvature_refinement=params.use_curvature,
                 ),
-                drop_detection=None,  # CMJ doesn't have drop detection
+                drop_detection=None,
             )
 
             video_info = VideoInfo(
@@ -1148,59 +714,58 @@ def process_cmj_video(
                     print(f"  - {warning}")
                 print()
 
-            # Generate debug video (but not JSON yet - we need to attach metadata first)
             if output_video:
                 if verbose:
                     print(f"Generating debug video: {output_video}")
 
+                debug_h, debug_w = frames[0].shape[:2]
+                step = max(1, int(video.fps / 30.0))
+                debug_fps = video.fps / step
+
                 if timer:
                     with timer.measure("debug_video_generation"):
                         with CMJDebugOverlayRenderer(
                             output_video,
-                            video.width,
-                            video.height,
-                            video.display_width,
-                            video.display_height,
-                            video.fps,
-                            timer=timer,  # Passing timer here too
+                            debug_w,
+                            debug_h,
+                            debug_w,
+                            debug_h,
+                            debug_fps,
+                            timer=timer,
                         ) as renderer:
-                            for i, frame in enumerate(frames):
+                            for frame, idx in zip(frames, frame_indices, strict=True):
                                 annotated = renderer.render_frame(
-                                    frame, smoothed_landmarks[i], i, metrics
+                                    frame, smoothed_landmarks[idx], idx, metrics
                                 )
                                 renderer.write_frame(annotated)
-                        # Capture re-encoding duration separately
                         with timer.measure("debug_video_reencode"):
-                            pass  # Re-encoding happens in context manager __exit__
+                            pass
                 else:
                     with CMJDebugOverlayRenderer(
                         output_video,
-                        video.width,
-                        video.height,
-                        video.display_width,
-                        video.display_height,
-                        video.fps,
-                        timer=timer,  # Passing timer here too
+                        debug_w,
+                        debug_h,
+                        debug_w,
+                        debug_h,
+                        debug_fps,
+                        timer=timer,
                     ) as renderer:
-                        for i, frame in enumerate(frames):
+                        for frame, idx in zip(frames, frame_indices, strict=True):
                             annotated = renderer.render_frame(
-                                frame, smoothed_landmarks[i], i, metrics
+                                frame, smoothed_landmarks[idx], idx, metrics
                             )
                             renderer.write_frame(annotated)
 
             if verbose:
                 print(f"Debug video saved: {output_video}")
 
-            # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
                 validator = CMJMetricsValidator()
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
                 metrics.validation_result = validation_result
 
-            # NOW create ProcessingInfo with complete timing breakdown
-            # (includes debug video generation timing)
             processing_time = time.time() - start_time
-            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
             processing_info = ProcessingInfo(
                 version=get_kinemotion_version(),
@@ -1217,24 +782,18 @@ def process_cmj_video(
                 algorithm=algorithm_config,
             )
 
-            # Attach complete metadata to metrics
             metrics.result_metadata = result_metadata
 
-            # NOW write JSON after metadata is attached
             if json_output:
                 if timer:
                     with timer.measure("json_serialization"):
                         output_path = Path(json_output)
                         metrics_dict = metrics.to_dict()
-                        import json
-
                         json_str = json.dumps(metrics_dict, indent=2)
                         output_path.write_text(json_str)
                 else:
                     output_path = Path(json_output)
                     metrics_dict = metrics.to_dict()
-                    import json
-
                     json_str = json.dumps(metrics_dict, indent=2)
                     output_path.write_text(json_str)
 
@@ -1246,16 +805,15 @@ def process_cmj_video(
                 for issue in validation_result.issues:
                     print(f"  [{issue.severity.value}] {issue.metric}: {issue.message}")
 
-            # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
                 for stage, duration in stage_times.items():
                     percentage = (duration / total_time) * 100
                     dur_ms = duration * 1000
-                    print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+                    print(f"{stage:. <40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
                 total_ms = total_time * 1000
                 print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
                 print()
@@ -1273,72 +831,23 @@ def process_cmj_videos_bulk(
     progress_callback: Callable[[CMJVideoResult], None] | None = None,
 ) -> list[CMJVideoResult]:
     """
-    Process multiple CMJ videos in parallel using ProcessPoolExecutor.
-
-    Args:
-        configs: List of CMJVideoConfig objects specifying video paths and parameters
-        max_workers: Maximum number of parallel workers (default: 4)
-        progress_callback: Optional callback function called after each video completes.
-            Receives CMJVideoResult object.
-
-    Returns:
-        List of CMJVideoResult objects, one per input video, in completion order
-
-    Example:
-        >>> configs = [
-        ...     CMJVideoConfig("video1.mp4"),
-        ...     CMJVideoConfig("video2.mp4", quality="accurate"),
-        ...     CMJVideoConfig("video3.mp4", output_video="debug3.mp4"),
-        ... ]
-        >>> results = process_cmj_videos_bulk(configs, max_workers=4)
-        >>> for result in results:
-        ...     if result.success:
-        ...         print(f"{result.video_path}: {result.metrics.jump_height:.3f}m")
-        ...     else:
-        ...         print(f"{result.video_path}: FAILED - {result.error}")
+    Process multiple CMJ videos in parallel.
     """
-    results: list[CMJVideoResult] = []
-
-    # Use ProcessPoolExecutor for CPU-bound video processing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        # Submit all jobs
-        future_to_config = {
-            executor.submit(_process_cmj_video_wrapper, config): config
-            for config in configs
-        }
-
-        # Process results as they complete
-        for future in as_completed(future_to_config):
-            config = future_to_config[future]
-            result: CMJVideoResult
-
-            try:
-                result = future.result()
-                results.append(result)
-            except Exception as e:
-                result = CMJVideoResult(
-                    video_path=config.video_path, success=False, error=str(e)
-                )
-                results.append(result)
 
-            # Call progress callback if provided
-            if progress_callback:
-                progress_callback(result)
+    def error_factory(video_path: str, error_msg: str) -> CMJVideoResult:
+        return CMJVideoResult(video_path=video_path, success=False, error=error_msg)
 
-    return results
+    return process_videos_bulk_generic(
+        configs,
+        _process_cmj_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
 
 
 def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
-    """
-    Wrapper function for parallel CMJ processing. Must be picklable
-    (top-level function).
-
-    Args:
-        config: CMJVideoConfig object with processing parameters
-
-    Returns:
-        CMJVideoResult object with metrics or error information
-    """
+    """Wrapper function for parallel CMJ processing."""
     start_time = time.time()
 
     try:
@@ -1353,7 +862,7 @@ def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
             visibility_threshold=config.visibility_threshold,
             detection_confidence=config.detection_confidence,
             tracking_confidence=config.tracking_confidence,
-            verbose=False,  # Disable verbose in parallel mode
+            verbose=False,
         )
 
         processing_time = time.time() - start_time
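
Both bulk entry points now delegate to process_videos_bulk_generic, passing their picklable wrapper plus a per-exercise error_factory, instead of each carrying its own ProcessPoolExecutor loop. The helper's implementation lives in kinemotion/core/pipeline_utils.py, which this diff does not include; the sketch below reconstructs its likely shape from the deleted loops and the five-argument call sites, and is an assumption rather than the package's actual code:

from collections.abc import Callable
from concurrent.futures import ProcessPoolExecutor, as_completed
from typing import Any


def process_videos_bulk_generic(
    configs: list[Any],
    worker: Callable[[Any], Any],
    error_factory: Callable[[str, str], Any],
    max_workers: int = 4,
    progress_callback: Callable[[Any], None] | None = None,
) -> list[Any]:
    results: list[Any] = []
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Submit every config; workers must be top-level functions to pickle.
        future_to_config = {executor.submit(worker, cfg): cfg for cfg in configs}
        for future in as_completed(future_to_config):
            cfg = future_to_config[future]
            try:
                result = future.result()
            except Exception as exc:  # surface a crashed worker as a failed result
                result = error_factory(cfg.video_path, f"Unexpected error: {exc}")
            results.append(result)
            if progress_callback:
                progress_callback(result)
    return results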