kinemotion 0.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.

kinemotion/api.py ADDED
@@ -0,0 +1,946 @@
"""Public API for programmatic use of kinemotion analysis."""

import time
from collections.abc import Callable
from concurrent.futures import ProcessPoolExecutor, as_completed
from dataclasses import dataclass
from pathlib import Path

import numpy as np

from .cmj.analysis import detect_cmj_phases
from .cmj.debug_overlay import CMJDebugOverlayRenderer
from .cmj.kinematics import CMJMetrics, calculate_cmj_metrics
from .core.auto_tuning import (
    AnalysisParameters,
    QualityPreset,
    VideoCharacteristics,
    analyze_video_sample,
    auto_tune_parameters,
)
from .core.pose import PoseTracker
from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
from .core.video_io import VideoProcessor
from .dropjump.analysis import (
    ContactState,
    compute_average_foot_position,
    detect_ground_contact,
)
from .dropjump.debug_overlay import DebugOverlayRenderer
from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics


def _parse_quality_preset(quality: str) -> QualityPreset:
    """Parse and validate quality preset string.

    Args:
        quality: Quality preset string ('fast', 'balanced', or 'accurate')

    Returns:
        QualityPreset enum value

    Raises:
        ValueError: If quality preset is invalid
    """
    try:
        return QualityPreset(quality.lower())
    except ValueError as e:
        raise ValueError(
            f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
        ) from e
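
# Illustrative sketch (assumes the standard Enum repr for QualityPreset):
# valid strings are parsed case-insensitively, anything else raises.
#
#   >>> _parse_quality_preset("ACCURATE")
#   <QualityPreset.ACCURATE: 'accurate'>
#   >>> _parse_quality_preset("ultra")
#   Traceback (most recent call last):
#       ...
#   ValueError: Invalid quality preset: ultra. Must be 'fast', 'balanced', or 'accurate'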


def _determine_confidence_levels(
    quality_preset: QualityPreset,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> tuple[float, float]:
    """Determine detection and tracking confidence levels.

    Confidence levels are set based on quality preset and can be overridden
    by expert parameters.

    Args:
        quality_preset: Quality preset enum
        detection_confidence: Optional expert override for detection confidence
        tracking_confidence: Optional expert override for tracking confidence

    Returns:
        Tuple of (detection_confidence, tracking_confidence)
    """
    # Set initial confidence from quality preset
    initial_detection_conf = 0.5
    initial_tracking_conf = 0.5

    if quality_preset == QualityPreset.FAST:
        initial_detection_conf = 0.3
        initial_tracking_conf = 0.3
    elif quality_preset == QualityPreset.ACCURATE:
        initial_detection_conf = 0.6
        initial_tracking_conf = 0.6

    # Override with expert values if provided
    if detection_confidence is not None:
        initial_detection_conf = detection_confidence
    if tracking_confidence is not None:
        initial_tracking_conf = tracking_confidence

    return initial_detection_conf, initial_tracking_conf
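
# Illustrative sketch of the preset-to-confidence mapping above: "balanced"
# keeps the 0.5 defaults, and explicit expert overrides always win.
#
#   >>> _determine_confidence_levels(QualityPreset.FAST, None, None)
#   (0.3, 0.3)
#   >>> _determine_confidence_levels(QualityPreset.ACCURATE, 0.8, None)
#   (0.8, 0.6)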


def _apply_expert_overrides(
    params: AnalysisParameters,
    smoothing_window: int | None,
    velocity_threshold: float | None,
    min_contact_frames: int | None,
    visibility_threshold: float | None,
) -> AnalysisParameters:
    """Apply expert parameter overrides to auto-tuned parameters.

    Args:
        params: Auto-tuned parameters object
        smoothing_window: Optional override for smoothing window
        velocity_threshold: Optional override for velocity threshold
        min_contact_frames: Optional override for minimum contact frames
        visibility_threshold: Optional override for visibility threshold

    Returns:
        Modified params object (mutated in place)
    """
    if smoothing_window is not None:
        params.smoothing_window = smoothing_window
    if velocity_threshold is not None:
        params.velocity_threshold = velocity_threshold
    if min_contact_frames is not None:
        params.min_contact_frames = min_contact_frames
    if visibility_threshold is not None:
        params.visibility_threshold = visibility_threshold
    return params


def _print_verbose_parameters(
    video: VideoProcessor,
    characteristics: VideoCharacteristics,
    quality_preset: QualityPreset,
    params: AnalysisParameters,
) -> None:
    """Print auto-tuned parameters in verbose mode.

    Args:
        video: Video processor with fps and dimensions
        characteristics: Video analysis characteristics
        quality_preset: Selected quality preset
        params: Auto-tuned parameters
    """
    print("\n" + "=" * 60)
    print("AUTO-TUNED PARAMETERS")
    print("=" * 60)
    print(f"Video FPS: {video.fps:.2f}")
    print(
        f"Tracking quality: {characteristics.tracking_quality} "
        f"(avg visibility: {characteristics.avg_visibility:.2f})"
    )
    print(f"Quality preset: {quality_preset.value}")
    print("\nSelected parameters:")
    print(f"  smoothing_window: {params.smoothing_window}")
    print(f"  polyorder: {params.polyorder}")
    print(f"  velocity_threshold: {params.velocity_threshold:.4f}")
    print(f"  min_contact_frames: {params.min_contact_frames}")
    print(f"  visibility_threshold: {params.visibility_threshold}")
    print(f"  detection_confidence: {params.detection_confidence}")
    print(f"  tracking_confidence: {params.tracking_confidence}")
    print(f"  outlier_rejection: {params.outlier_rejection}")
    print(f"  bilateral_filter: {params.bilateral_filter}")
    print(f"  use_curvature: {params.use_curvature}")
    print("=" * 60 + "\n")


def _process_all_frames(
    video: VideoProcessor, tracker: PoseTracker, verbose: bool
) -> tuple[list, list]:
    """Process all frames from video and extract pose landmarks.

    Args:
        video: Video processor to read frames from
        tracker: Pose tracker for landmark detection
        verbose: Print progress messages

    Returns:
        Tuple of (frames, landmarks_sequence)

    Raises:
        ValueError: If no frames could be processed
    """
    if verbose:
        print("Tracking pose landmarks...")

    landmarks_sequence = []
    frames = []

    while True:
        frame = video.read_frame()
        if frame is None:
            break

        frames.append(frame)
        landmarks = tracker.process_frame(frame)
        landmarks_sequence.append(landmarks)

    tracker.close()

    if not landmarks_sequence:
        raise ValueError("No frames could be processed from video")

    return frames, landmarks_sequence


def _apply_smoothing(
    landmarks_sequence: list, params: AnalysisParameters, verbose: bool
) -> list:
    """Apply smoothing to landmark sequence with auto-tuned parameters.

    Args:
        landmarks_sequence: Sequence of landmarks from all frames
        params: Auto-tuned parameters containing smoothing settings
        verbose: Print progress messages

    Returns:
        Smoothed landmarks sequence
    """
    if params.outlier_rejection or params.bilateral_filter:
        if verbose:
            if params.outlier_rejection:
                print("Smoothing landmarks with outlier rejection...")
            if params.bilateral_filter:
                print("Using bilateral temporal filter...")
        return smooth_landmarks_advanced(
            landmarks_sequence,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
            use_outlier_rejection=params.outlier_rejection,
            use_bilateral=params.bilateral_filter,
        )
    else:
        if verbose:
            print("Smoothing landmarks...")
        return smooth_landmarks(
            landmarks_sequence,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )


def _extract_vertical_positions(
    smoothed_landmarks: list,
) -> tuple[np.ndarray, np.ndarray]:
    """Extract vertical foot positions and visibilities from smoothed landmarks.

    Args:
        smoothed_landmarks: Smoothed landmark sequence

    Returns:
        Tuple of (vertical_positions, visibilities) as numpy arrays
    """
    position_list: list[float] = []
    visibilities_list: list[float] = []

    for frame_landmarks in smoothed_landmarks:
        if frame_landmarks:
            _, foot_y = compute_average_foot_position(frame_landmarks)
            position_list.append(foot_y)

            # Average visibility of foot landmarks
            foot_vis = []
            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
                if key in frame_landmarks:
                    foot_vis.append(frame_landmarks[key][2])
            visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
        else:
            position_list.append(position_list[-1] if position_list else 0.5)
            visibilities_list.append(0.0)

    return np.array(position_list), np.array(visibilities_list)
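
# Illustrative sketch of the fallback path above: frames with no landmarks
# repeat the last known position (or the 0.5 default when nothing has been
# seen yet) and are assigned zero visibility.
#
#   >>> ys, vis = _extract_vertical_positions([None, None])
#   >>> ys.tolist(), vis.tolist()
#   ([0.5, 0.5], [0.0, 0.0])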


def _generate_outputs(
    metrics: DropJumpMetrics,
    json_output: str | None,
    output_video: str | None,
    frames: list,
    smoothed_landmarks: list,
    contact_states: list[ContactState],
    video: VideoProcessor,
    verbose: bool,
) -> None:
    """Generate JSON and debug video outputs if requested.

    Args:
        metrics: Calculated drop jump metrics
        json_output: Optional path for JSON output
        output_video: Optional path for debug video
        frames: List of video frames
        smoothed_landmarks: Smoothed landmark sequence
        contact_states: Ground contact state for each frame
        video: Video processor with dimensions and fps
        verbose: Print progress messages
    """
    # Save JSON if requested
    if json_output:
        import json

        output_path = Path(json_output)
        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
        if verbose:
            print(f"Metrics written to: {json_output}")

    # Generate debug video if requested
    if output_video:
        if verbose:
            print(f"Generating debug video: {output_video}")

        with DebugOverlayRenderer(
            output_video,
            video.width,
            video.height,
            video.display_width,
            video.display_height,
            video.fps,
        ) as renderer:
            for i, frame in enumerate(frames):
                annotated = renderer.render_frame(
                    frame,
                    smoothed_landmarks[i],
                    contact_states[i],
                    i,
                    metrics,
                    use_com=False,
                )
                renderer.write_frame(annotated)

        if verbose:
            print(f"Debug video saved: {output_video}")


@dataclass
class DropJumpVideoResult:
    """Result of processing a single drop jump video."""

    video_path: str
    success: bool
    metrics: DropJumpMetrics | None = None
    error: str | None = None
    processing_time: float = 0.0


@dataclass
class DropJumpVideoConfig:
    """Configuration for processing a single drop jump video."""

    video_path: str
    quality: str = "balanced"
    output_video: str | None = None
    json_output: str | None = None
    drop_start_frame: int | None = None
    smoothing_window: int | None = None
    velocity_threshold: float | None = None
    min_contact_frames: int | None = None
    visibility_threshold: float | None = None
    detection_confidence: float | None = None
    tracking_confidence: float | None = None
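
# Illustrative sketch (hypothetical paths and override values): configs carry
# per-video settings, so one batch can mix presets and expert overrides.
#
#   >>> fast = DropJumpVideoConfig("a.mp4", quality="fast")
#   >>> tuned = DropJumpVideoConfig("b.mp4", velocity_threshold=0.02, min_contact_frames=3)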


def process_dropjump_video(
    video_path: str,
    quality: str = "balanced",
    output_video: str | None = None,
    json_output: str | None = None,
    drop_start_frame: int | None = None,
    smoothing_window: int | None = None,
    velocity_threshold: float | None = None,
    min_contact_frames: int | None = None,
    visibility_threshold: float | None = None,
    detection_confidence: float | None = None,
    tracking_confidence: float | None = None,
    verbose: bool = False,
) -> DropJumpMetrics:
    """
    Process a single drop jump video and return metrics.

    Jump height is calculated from flight time using the kinematic formula
    h = g * t² / 8.

    Args:
        video_path: Path to the input video file
        quality: Analysis quality preset ("fast", "balanced", or "accurate")
        output_video: Optional path for debug video output
        json_output: Optional path for JSON metrics output
        drop_start_frame: Optional manual drop start frame
        smoothing_window: Optional override for smoothing window
        velocity_threshold: Optional override for velocity threshold
        min_contact_frames: Optional override for minimum contact frames
        visibility_threshold: Optional override for visibility threshold
        detection_confidence: Optional override for pose detection confidence
        tracking_confidence: Optional override for pose tracking confidence
        verbose: Print processing details

    Returns:
        DropJumpMetrics object containing analysis results

    Raises:
        ValueError: If video cannot be processed or parameters are invalid
        FileNotFoundError: If video file does not exist
    """
    if not Path(video_path).exists():
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Convert quality string to enum
    quality_preset = _parse_quality_preset(quality)

    # Initialize video processor
    with VideoProcessor(video_path) as video:
        if verbose:
            print(
                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
                f"{video.frame_count} frames"
            )

        # Determine detection/tracking confidence levels
        detection_conf, tracking_conf = _determine_confidence_levels(
            quality_preset, detection_confidence, tracking_confidence
        )

        # Process all frames with pose tracking
        tracker = PoseTracker(
            min_detection_confidence=detection_conf,
            min_tracking_confidence=tracking_conf,
        )
        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)

        # Analyze video characteristics and auto-tune parameters
        characteristics = analyze_video_sample(
            landmarks_sequence, video.fps, video.frame_count
        )
        params = auto_tune_parameters(characteristics, quality_preset)

        # Apply expert overrides if provided
        params = _apply_expert_overrides(
            params,
            smoothing_window,
            velocity_threshold,
            min_contact_frames,
            visibility_threshold,
        )

        # Show selected parameters if verbose
        if verbose:
            _print_verbose_parameters(video, characteristics, quality_preset, params)

        # Apply smoothing with auto-tuned parameters
        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)

        # Extract vertical positions from feet
        if verbose:
            print("Extracting foot positions...")
        vertical_positions, visibilities = _extract_vertical_positions(
            smoothed_landmarks
        )

        # Detect ground contact
        contact_states = detect_ground_contact(
            vertical_positions,
            velocity_threshold=params.velocity_threshold,
            min_contact_frames=params.min_contact_frames,
            visibility_threshold=params.visibility_threshold,
            visibilities=visibilities,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )

        # Calculate metrics
        if verbose:
            print("Calculating metrics...")

        metrics = calculate_drop_jump_metrics(
            contact_states,
            vertical_positions,
            video.fps,
            drop_start_frame=drop_start_frame,
            velocity_threshold=params.velocity_threshold,
            smoothing_window=params.smoothing_window,
            polyorder=params.polyorder,
            use_curvature=params.use_curvature,
        )

        # Generate outputs (JSON and debug video)
        _generate_outputs(
            metrics,
            json_output,
            output_video,
            frames,
            smoothed_landmarks,
            contact_states,
            video,
            verbose,
        )

        if verbose:
            print("Analysis complete!")

        return metrics
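
# Illustrative usage sketch (assumes a local recording "athlete_dj.mp4"):
#
#   >>> metrics = process_dropjump_video("athlete_dj.mp4", quality="balanced")
#   >>> print(metrics.to_dict())
#
# Worked example of the flight-time formula: a measured flight time of
# t = 0.50 s gives h = 9.81 * 0.50**2 / 8 ≈ 0.307 m.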


def process_dropjump_videos_bulk(
    configs: list[DropJumpVideoConfig],
    max_workers: int = 4,
    progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
) -> list[DropJumpVideoResult]:
    """
    Process multiple drop jump videos in parallel using ProcessPoolExecutor.

    Args:
        configs: List of DropJumpVideoConfig objects specifying video paths and parameters
        max_workers: Maximum number of parallel workers (default: 4)
        progress_callback: Optional callback function called after each video completes.
            Receives the DropJumpVideoResult object.

    Returns:
        List of DropJumpVideoResult objects, one per input video, in completion order

    Example:
        >>> configs = [
        ...     DropJumpVideoConfig("video1.mp4"),
        ...     DropJumpVideoConfig("video2.mp4", quality="accurate"),
        ...     DropJumpVideoConfig("video3.mp4", output_video="debug3.mp4"),
        ... ]
        >>> results = process_dropjump_videos_bulk(configs, max_workers=4)
        >>> for result in results:
        ...     if result.success:
        ...         print(f"{result.video_path}: {result.metrics.jump_height_m:.3f}m")
        ...     else:
        ...         print(f"{result.video_path}: FAILED - {result.error}")
    """
    results: list[DropJumpVideoResult] = []

    # Use ProcessPoolExecutor for CPU-bound video processing
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Submit all jobs
        future_to_config = {
            executor.submit(_process_dropjump_video_wrapper, config): config
            for config in configs
        }

        # Process results as they complete
        for future in as_completed(future_to_config):
            config = future_to_config[future]
            result: DropJumpVideoResult

            try:
                result = future.result()
            except Exception as exc:
                # Handle unexpected errors
                result = DropJumpVideoResult(
                    video_path=config.video_path,
                    success=False,
                    error=f"Unexpected error: {exc}",
                )

            results.append(result)

            # Call progress callback if provided
            if progress_callback:
                progress_callback(result)

    return results
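
# Illustrative progress-reporting sketch (hypothetical paths; the callback
# runs in the parent process after each video finishes):
#
#   >>> def on_done(result: DropJumpVideoResult) -> None:
#   ...     status = "ok" if result.success else "FAILED"
#   ...     print(f"{result.video_path}: {status} in {result.processing_time:.1f}s")
#   >>> configs = [DropJumpVideoConfig(p) for p in ("a.mp4", "b.mp4")]
#   >>> results = process_dropjump_videos_bulk(configs, max_workers=2, progress_callback=on_done)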


def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
    """
    Wrapper function for parallel processing. Must be picklable (top-level function).

    Args:
        config: DropJumpVideoConfig object with processing parameters

    Returns:
        DropJumpVideoResult object with metrics or error information
    """
    start_time = time.time()

    try:
        metrics = process_dropjump_video(
            video_path=config.video_path,
            quality=config.quality,
            output_video=config.output_video,
            json_output=config.json_output,
            drop_start_frame=config.drop_start_frame,
            smoothing_window=config.smoothing_window,
            velocity_threshold=config.velocity_threshold,
            min_contact_frames=config.min_contact_frames,
            visibility_threshold=config.visibility_threshold,
            detection_confidence=config.detection_confidence,
            tracking_confidence=config.tracking_confidence,
            verbose=False,  # Disable verbose in parallel mode
        )

        processing_time = time.time() - start_time

        return DropJumpVideoResult(
            video_path=config.video_path,
            success=True,
            metrics=metrics,
            processing_time=processing_time,
        )

    except Exception as e:
        processing_time = time.time() - start_time

        return DropJumpVideoResult(
            video_path=config.video_path,
            success=False,
            error=str(e),
            processing_time=processing_time,
        )


# ========== CMJ Analysis API ==========


@dataclass
class CMJVideoConfig:
    """Configuration for processing a single CMJ video."""

    video_path: str
    quality: str = "balanced"
    output_video: str | None = None
    json_output: str | None = None
    smoothing_window: int | None = None
    velocity_threshold: float | None = None
    min_contact_frames: int | None = None
    visibility_threshold: float | None = None
    detection_confidence: float | None = None
    tracking_confidence: float | None = None


@dataclass
class CMJVideoResult:
    """Result of processing a single CMJ video."""

    video_path: str
    success: bool
    metrics: CMJMetrics | None = None
    error: str | None = None
    processing_time: float = 0.0


def _generate_cmj_outputs(
    output_video: str | None,
    json_output: str | None,
    metrics: CMJMetrics,
    frames: list,
    smoothed_landmarks: list,
    video_width: int,
    video_height: int,
    video_display_width: int,
    video_display_height: int,
    video_fps: float,
    verbose: bool,
) -> None:
    """Generate JSON and debug video outputs for CMJ analysis."""
    if json_output:
        import json

        output_path = Path(json_output)
        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
        if verbose:
            print(f"Metrics written to: {json_output}")

    if output_video:
        if verbose:
            print(f"Generating debug video: {output_video}")

        with CMJDebugOverlayRenderer(
            output_video,
            video_width,
            video_height,
            video_display_width,
            video_display_height,
            video_fps,
        ) as renderer:
            for i, frame in enumerate(frames):
                annotated = renderer.render_frame(
                    frame, smoothed_landmarks[i], i, metrics
                )
                renderer.write_frame(annotated)

        if verbose:
            print(f"Debug video saved: {output_video}")


def process_cmj_video(
    video_path: str,
    quality: str = "balanced",
    output_video: str | None = None,
    json_output: str | None = None,
    smoothing_window: int | None = None,
    velocity_threshold: float | None = None,
    min_contact_frames: int | None = None,
    visibility_threshold: float | None = None,
    detection_confidence: float | None = None,
    tracking_confidence: float | None = None,
    verbose: bool = False,
) -> CMJMetrics:
    """
    Process a single CMJ video and return metrics.

    A CMJ (countermovement jump) is performed at floor level without a drop
    box. Athletes start standing, perform a countermovement (eccentric phase),
    then jump upward (concentric phase).

    Args:
        video_path: Path to the input video file
        quality: Analysis quality preset ("fast", "balanced", or "accurate")
        output_video: Optional path for debug video output
        json_output: Optional path for JSON metrics output
        smoothing_window: Optional override for smoothing window
        velocity_threshold: Optional override for velocity threshold
        min_contact_frames: Optional override for minimum contact frames
        visibility_threshold: Optional override for visibility threshold
        detection_confidence: Optional override for pose detection confidence
        tracking_confidence: Optional override for pose tracking confidence
        verbose: Print processing details

    Returns:
        CMJMetrics object containing analysis results

    Raises:
        ValueError: If video cannot be processed or parameters are invalid
        FileNotFoundError: If video file does not exist

    Example:
        >>> metrics = process_cmj_video(
        ...     "athlete_cmj.mp4",
        ...     quality="balanced",
        ...     verbose=True
        ... )
        >>> print(f"Jump height: {metrics.jump_height:.3f}m")
        >>> print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
    """
    if not Path(video_path).exists():
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Convert quality string to enum
    quality_preset = _parse_quality_preset(quality)

    # Initialize video processor
    with VideoProcessor(video_path) as video:
        if verbose:
            print(
                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
                f"{video.frame_count} frames"
            )

        # Determine confidence levels
        det_conf, track_conf = _determine_confidence_levels(
            quality_preset, detection_confidence, tracking_confidence
        )

        # Track all frames
        tracker = PoseTracker(
            min_detection_confidence=det_conf, min_tracking_confidence=track_conf
        )
        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)

        # Auto-tune parameters
        characteristics = analyze_video_sample(
            landmarks_sequence, video.fps, video.frame_count
        )
        params = auto_tune_parameters(characteristics, quality_preset)

        # Apply expert overrides
        params = _apply_expert_overrides(
            params,
            smoothing_window,
            velocity_threshold,
            min_contact_frames,
            visibility_threshold,
        )

        if verbose:
            _print_verbose_parameters(video, characteristics, quality_preset, params)

        # Apply smoothing
        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)

        # Extract foot positions
        if verbose:
            print("Extracting foot positions...")
        vertical_positions, _ = _extract_vertical_positions(smoothed_landmarks)
        tracking_method = "foot"

        # Detect CMJ phases
        if verbose:
            print("Detecting CMJ phases...")

        phases = detect_cmj_phases(
            vertical_positions,
            video.fps,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )

        if phases is None:
            raise ValueError("Could not detect CMJ phases in video")

        standing_end, lowest_point, takeoff_frame, landing_frame = phases

        # Calculate metrics
        if verbose:
            print("Calculating metrics...")

        # Use signed velocity for CMJ (direction information is needed)
        from .cmj.analysis import compute_signed_velocity

        velocities = compute_signed_velocity(
            vertical_positions,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )

        metrics = calculate_cmj_metrics(
            vertical_positions,
            velocities,
            standing_end,
            lowest_point,
            takeoff_frame,
            landing_frame,
            video.fps,
            tracking_method=tracking_method,
        )

        # Generate outputs if requested
        _generate_cmj_outputs(
            output_video,
            json_output,
            metrics,
            frames,
            smoothed_landmarks,
            video.width,
            video.height,
            video.display_width,
            video.display_height,
            video.fps,
            verbose,
        )

        if verbose:
            print(f"\nJump height: {metrics.jump_height:.3f}m")
            print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
            print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")

        return metrics
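
# Illustrative sketch (hypothetical file names): the JSON written via
# json_output is metrics.to_dict() serialized, so it can be reloaded later.
#
#   >>> import json
#   >>> metrics = process_cmj_video("athlete_cmj.mp4", json_output="cmj.json")
#   >>> saved = json.loads(Path("cmj.json").read_text())
#   >>> print(saved)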


def process_cmj_videos_bulk(
    configs: list[CMJVideoConfig],
    max_workers: int = 4,
    progress_callback: Callable[[CMJVideoResult], None] | None = None,
) -> list[CMJVideoResult]:
    """
    Process multiple CMJ videos in parallel using ProcessPoolExecutor.

    Args:
        configs: List of CMJVideoConfig objects specifying video paths and parameters
        max_workers: Maximum number of parallel workers (default: 4)
        progress_callback: Optional callback function called after each video completes.
            Receives the CMJVideoResult object.

    Returns:
        List of CMJVideoResult objects, one per input video, in completion order

    Example:
        >>> configs = [
        ...     CMJVideoConfig("video1.mp4"),
        ...     CMJVideoConfig("video2.mp4", quality="accurate"),
        ...     CMJVideoConfig("video3.mp4", output_video="debug3.mp4"),
        ... ]
        >>> results = process_cmj_videos_bulk(configs, max_workers=4)
        >>> for result in results:
        ...     if result.success:
        ...         print(f"{result.video_path}: {result.metrics.jump_height:.3f}m")
        ...     else:
        ...         print(f"{result.video_path}: FAILED - {result.error}")
    """
    results: list[CMJVideoResult] = []

    # Use ProcessPoolExecutor for CPU-bound video processing
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Submit all jobs
        future_to_config = {
            executor.submit(_process_cmj_video_wrapper, config): config
            for config in configs
        }

        # Process results as they complete
        for future in as_completed(future_to_config):
            config = future_to_config[future]
            result: CMJVideoResult

            try:
                result = future.result()
            except Exception as e:
                result = CMJVideoResult(
                    video_path=config.video_path, success=False, error=str(e)
                )

            results.append(result)

            # Call progress callback if provided
            if progress_callback:
                progress_callback(result)

    return results


def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
    """
    Wrapper function for parallel CMJ processing. Must be picklable (top-level function).

    Args:
        config: CMJVideoConfig object with processing parameters

    Returns:
        CMJVideoResult object with metrics or error information
    """
    start_time = time.time()

    try:
        metrics = process_cmj_video(
            video_path=config.video_path,
            quality=config.quality,
            output_video=config.output_video,
            json_output=config.json_output,
            smoothing_window=config.smoothing_window,
            velocity_threshold=config.velocity_threshold,
            min_contact_frames=config.min_contact_frames,
            visibility_threshold=config.visibility_threshold,
            detection_confidence=config.detection_confidence,
            tracking_confidence=config.tracking_confidence,
            verbose=False,  # Disable verbose in parallel mode
        )

        processing_time = time.time() - start_time

        return CMJVideoResult(
            video_path=config.video_path,
            success=True,
            metrics=metrics,
            processing_time=processing_time,
        )

    except Exception as e:
        processing_time = time.time() - start_time

        return CMJVideoResult(
            video_path=config.video_path,
            success=False,
            error=str(e),
            processing_time=processing_time,
        )