kinemotion 0.39.1__py3-none-any.whl → 0.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

kinemotion/api.py CHANGED
@@ -34,6 +34,7 @@ from .core.metadata import (
 from .core.pose import PoseTracker
 from .core.quality import assess_jump_quality
 from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
+from .core.timing import PerformanceTimer
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
     ContactState,
@@ -173,7 +174,10 @@ def _print_verbose_parameters(


 def _process_all_frames(
-    video: VideoProcessor, tracker: PoseTracker, verbose: bool
+    video: VideoProcessor,
+    tracker: PoseTracker,
+    verbose: bool,
+    timer: PerformanceTimer | None = None,
 ) -> tuple[list, list]:
     """Process all frames from video and extract pose landmarks.

@@ -181,6 +185,7 @@ def _process_all_frames(
         video: Video processor to read frames from
         tracker: Pose tracker for landmark detection
         verbose: Print progress messages
+        timer: Optional PerformanceTimer for measuring operations

     Returns:
         Tuple of (frames, landmarks_sequence)
@@ -194,14 +199,25 @@ def _process_all_frames(
     landmarks_sequence = []
     frames = []

-    while True:
-        frame = video.read_frame()
-        if frame is None:
-            break
+    if timer:
+        with timer.measure("pose_tracking"):
+            while True:
+                frame = video.read_frame()
+                if frame is None:
+                    break
+
+                frames.append(frame)
+                landmarks = tracker.process_frame(frame)
+                landmarks_sequence.append(landmarks)
+    else:
+        while True:
+            frame = video.read_frame()
+            if frame is None:
+                break

-        frames.append(frame)
-        landmarks = tracker.process_frame(frame)
-        landmarks_sequence.append(landmarks)
+            frames.append(frame)
+            landmarks = tracker.process_frame(frame)
+            landmarks_sequence.append(landmarks)

     tracker.close()

@@ -212,7 +228,10 @@ def _process_all_frames(


 def _apply_smoothing(
-    landmarks_sequence: list, params: AnalysisParameters, verbose: bool
+    landmarks_sequence: list,
+    params: AnalysisParameters,
+    verbose: bool,
+    timer: PerformanceTimer | None = None,
 ) -> list:
     """Apply smoothing to landmark sequence with auto-tuned parameters.

@@ -220,6 +239,7 @@ def _apply_smoothing(
         landmarks_sequence: Sequence of landmarks from all frames
         params: Auto-tuned parameters containing smoothing settings
         verbose: Print progress messages
+        timer: Optional PerformanceTimer for measuring operations

     Returns:
         Smoothed landmarks sequence
@@ -230,21 +250,39 @@
             print("Smoothing landmarks with outlier rejection...")
             if params.bilateral_filter:
                 print("Using bilateral temporal filter...")
-        return smooth_landmarks_advanced(
-            landmarks_sequence,
-            window_length=params.smoothing_window,
-            polyorder=params.polyorder,
-            use_outlier_rejection=params.outlier_rejection,
-            use_bilateral=params.bilateral_filter,
-        )
+        if timer:
+            with timer.measure("smoothing"):
+                return smooth_landmarks_advanced(
+                    landmarks_sequence,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                    use_outlier_rejection=params.outlier_rejection,
+                    use_bilateral=params.bilateral_filter,
+                )
+        else:
+            return smooth_landmarks_advanced(
+                landmarks_sequence,
+                window_length=params.smoothing_window,
+                polyorder=params.polyorder,
+                use_outlier_rejection=params.outlier_rejection,
+                use_bilateral=params.bilateral_filter,
+            )
     else:
         if verbose:
             print("Smoothing landmarks...")
-        return smooth_landmarks(
-            landmarks_sequence,
-            window_length=params.smoothing_window,
-            polyorder=params.polyorder,
-        )
+        if timer:
+            with timer.measure("smoothing"):
+                return smooth_landmarks(
+                    landmarks_sequence,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                )
+        else:
+            return smooth_landmarks(
+                landmarks_sequence,
+                window_length=params.smoothing_window,
+                polyorder=params.polyorder,
+            )


 def _calculate_foot_visibility(frame_landmarks: dict) -> float:
@@ -303,7 +341,37 @@ def _extract_vertical_positions(
     return np.array(position_list), np.array(visibilities_list)


-def _generate_outputs(
+def _convert_timer_to_stage_names(
+    timer_metrics: dict[str, float],
+) -> dict[str, float]:
+    """Convert timer metric names to human-readable stage names.
+
+    Args:
+        timer_metrics: Dictionary from PerformanceTimer.get_metrics()
+
+    Returns:
+        Dictionary with human-readable stage names as keys
+    """
+    mapping = {
+        "video_initialization": "Video initialization",
+        "pose_tracking": "Pose tracking",
+        "parameter_auto_tuning": "Parameter auto-tuning",
+        "smoothing": "Smoothing",
+        "vertical_position_extraction": "Vertical position extraction",
+        "ground_contact_detection": "Ground contact detection",
+        "metrics_calculation": "Metrics calculation",
+        "quality_assessment": "Quality assessment",
+        "metadata_building": "Metadata building",
+        "metrics_validation": "Metrics validation",
+        "phase_detection": "Phase detection",
+        "json_serialization": "JSON serialization",
+        "debug_video_generation": "Debug video generation",
+        "debug_video_reencode": "Debug video re-encoding",
+    }
+    return {mapping.get(k, k): v for k, v in timer_metrics.items()}
+
+
+def _generate_dropjump_outputs(
     metrics: DropJumpMetrics,
     json_output: str | None,
     output_video: str | None,
@@ -312,6 +380,7 @@ def _generate_outputs(
     contact_states: list[ContactState],
     video: VideoProcessor,
     verbose: bool,
+    timer: PerformanceTimer | None = None,
 ) -> None:
     """Generate JSON and debug video outputs if requested.

@@ -324,13 +393,24 @@ def _generate_outputs(
         contact_states: Ground contact state for each frame
         video: Video processor with dimensions and fps
         verbose: Print progress messages
+        timer: Optional PerformanceTimer for measuring operations
     """
     # Save JSON if requested
     if json_output:
         import json

-        output_path = Path(json_output)
-        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
+        if timer:
+            with timer.measure("json_serialization"):
+                output_path = Path(json_output)
+                metrics_dict = metrics.to_dict()
+                json_str = json.dumps(metrics_dict, indent=2)
+                output_path.write_text(json_str)
+        else:
+            output_path = Path(json_output)
+            metrics_dict = metrics.to_dict()
+            json_str = json.dumps(metrics_dict, indent=2)
+            output_path.write_text(json_str)
+
         if verbose:
             print(f"Metrics written to: {json_output}")

@@ -339,24 +419,48 @@ def _generate_outputs(
         if verbose:
             print(f"Generating debug video: {output_video}")

-        with DebugOverlayRenderer(
-            output_video,
-            video.width,
-            video.height,
-            video.display_width,
-            video.display_height,
-            video.fps,
-        ) as renderer:
-            for i, frame in enumerate(frames):
-                annotated = renderer.render_frame(
-                    frame,
-                    smoothed_landmarks[i],
-                    contact_states[i],
-                    i,
-                    metrics,
-                    use_com=False,
-                )
-                renderer.write_frame(annotated)
+        if timer:
+            with timer.measure("debug_video_generation"):
+                with DebugOverlayRenderer(
+                    output_video,
+                    video.width,
+                    video.height,
+                    video.display_width,
+                    video.display_height,
+                    video.fps,
+                ) as renderer:
+                    for i, frame in enumerate(frames):
+                        annotated = renderer.render_frame(
+                            frame,
+                            smoothed_landmarks[i],
+                            contact_states[i],
+                            i,
+                            metrics,
+                            use_com=False,
+                        )
+                        renderer.write_frame(annotated)
+            # Capture re-encoding duration separately
+            with timer.measure("debug_video_reencode"):
+                pass  # Re-encoding happens in context manager __exit__
+        else:
+            with DebugOverlayRenderer(
+                output_video,
+                video.width,
+                video.height,
+                video.display_width,
+                video.display_height,
+                video.fps,
+            ) as renderer:
+                for i, frame in enumerate(frames):
+                    annotated = renderer.render_frame(
+                        frame,
+                        smoothed_landmarks[i],
+                        contact_states[i],
+                        i,
+                        metrics,
+                        use_com=False,
+                    )
+                    renderer.write_frame(annotated)

         if verbose:
             print(f"Debug video saved: {output_video}")
@@ -403,6 +507,7 @@ def process_dropjump_video(
     detection_confidence: float | None = None,
     tracking_confidence: float | None = None,
     verbose: bool = False,
+    timer: PerformanceTimer | None = None,
 ) -> DropJumpMetrics:
     """
     Process a single drop jump video and return metrics.
@@ -422,6 +527,7 @@
         detection_confidence: Optional override for pose detection confidence
         tracking_confidence: Optional override for pose tracking confidence
         verbose: Print processing details
+        timer: Optional PerformanceTimer for measuring operations

     Returns:
         DropJumpMetrics object containing analysis results
@@ -440,209 +546,244 @@ def process_dropjump_video(

     set_deterministic_mode(seed=42)

-    # Start timing
+    # Start overall timing
     start_time = time.time()
+    if timer is None:
+        timer = PerformanceTimer()

     # Convert quality string to enum
     quality_preset = _parse_quality_preset(quality)

     # Initialize video processor
-    with VideoProcessor(video_path) as video:
-        if verbose:
-            print(
-                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
-                f"{video.frame_count} frames"
+    with timer.measure("video_initialization"):
+        with VideoProcessor(video_path, timer=timer) as video:
+            if verbose:
+                print(
+                    f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
+                    f"{video.frame_count} frames"
+                )
+
+            # Determine detection/tracking confidence levels
+            detection_conf, tracking_conf = _determine_confidence_levels(
+                quality_preset, detection_confidence, tracking_confidence
             )

-        # Determine detection/tracking confidence levels
-        detection_conf, tracking_conf = _determine_confidence_levels(
-            quality_preset, detection_confidence, tracking_confidence
-        )
+            # Process all frames with pose tracking
+            if verbose:
+                print("Processing all frames with MediaPipe pose tracking...")
+            tracker = PoseTracker(
+                min_detection_confidence=detection_conf,
+                min_tracking_confidence=tracking_conf,
+                timer=timer,
+            )
+            frames, landmarks_sequence = _process_all_frames(
+                video, tracker, verbose, timer
+            )

-        # Process all frames with pose tracking
-        tracker = PoseTracker(
-            min_detection_confidence=detection_conf,
-            min_tracking_confidence=tracking_conf,
-        )
-        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)
+            # Analyze video characteristics and auto-tune parameters
+            with timer.measure("parameter_auto_tuning"):
+                characteristics = analyze_video_sample(
+                    landmarks_sequence, video.fps, video.frame_count
+                )
+                params = auto_tune_parameters(characteristics, quality_preset)
+
+                # Apply expert overrides if provided
+                params = _apply_expert_overrides(
+                    params,
+                    smoothing_window,
+                    velocity_threshold,
+                    min_contact_frames,
+                    visibility_threshold,
+                )

-        # Analyze video characteristics and auto-tune parameters
-        characteristics = analyze_video_sample(
-            landmarks_sequence, video.fps, video.frame_count
-        )
-        params = auto_tune_parameters(characteristics, quality_preset)
-
-        # Apply expert overrides if provided
-        params = _apply_expert_overrides(
-            params,
-            smoothing_window,
-            velocity_threshold,
-            min_contact_frames,
-            visibility_threshold,
-        )
+            # Show selected parameters if verbose
+            if verbose:
+                _print_verbose_parameters(
+                    video, characteristics, quality_preset, params
+                )

-        # Show selected parameters if verbose
-        if verbose:
-            _print_verbose_parameters(video, characteristics, quality_preset, params)
+            # Apply smoothing with auto-tuned parameters
+            smoothed_landmarks = _apply_smoothing(
+                landmarks_sequence, params, verbose, timer
+            )

-        # Apply smoothing with auto-tuned parameters
-        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)
+            # Extract vertical positions from feet
+            if verbose:
+                print("Extracting foot positions...")
+            with timer.measure("vertical_position_extraction"):
+                vertical_positions, visibilities = _extract_vertical_positions(
+                    smoothed_landmarks
+                )

-        # Extract vertical positions from feet
-        if verbose:
-            print("Extracting foot positions...")
-        vertical_positions, visibilities = _extract_vertical_positions(
-            smoothed_landmarks
-        )
+            # Detect ground contact
+            if verbose:
+                print("Detecting ground contact...")
+            with timer.measure("ground_contact_detection"):
+                contact_states = detect_ground_contact(
+                    vertical_positions,
+                    velocity_threshold=params.velocity_threshold,
+                    min_contact_frames=params.min_contact_frames,
+                    visibility_threshold=params.visibility_threshold,
+                    visibilities=visibilities,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                )

-        # Detect ground contact
-        contact_states = detect_ground_contact(
-            vertical_positions,
-            velocity_threshold=params.velocity_threshold,
-            min_contact_frames=params.min_contact_frames,
-            visibility_threshold=params.visibility_threshold,
-            visibilities=visibilities,
-            window_length=params.smoothing_window,
-            polyorder=params.polyorder,
-        )
+            # Calculate metrics
+            if verbose:
+                print("Calculating metrics...")
+            with timer.measure("metrics_calculation"):
+                metrics = calculate_drop_jump_metrics(
+                    contact_states,
+                    vertical_positions,
+                    video.fps,
+                    drop_start_frame=drop_start_frame,
+                    velocity_threshold=params.velocity_threshold,
+                    smoothing_window=params.smoothing_window,
+                    polyorder=params.polyorder,
+                    use_curvature=params.use_curvature,
+                )

-        # Calculate metrics
-        if verbose:
-            print("Calculating metrics...")
-
-        metrics = calculate_drop_jump_metrics(
-            contact_states,
-            vertical_positions,
-            video.fps,
-            drop_start_frame=drop_start_frame,
-            velocity_threshold=params.velocity_threshold,
-            smoothing_window=params.smoothing_window,
-            polyorder=params.polyorder,
-            use_curvature=params.use_curvature,
-        )
+            # Assess quality and add confidence scores
+            if verbose:
+                print("Assessing tracking quality...")
+            with timer.measure("quality_assessment"):
+                # Detect outliers for quality scoring (doesn't affect results)
+                _, outlier_mask = reject_outliers(
+                    vertical_positions,
+                    use_ransac=True,
+                    use_median=True,
+                    interpolate=False,  # Don't modify, just detect
+                )

-        # Assess quality and add confidence scores
-        if verbose:
-            print("Assessing tracking quality...")
-
-        # Detect outliers for quality scoring (doesn't affect results, just
-        # for assessment)
-        _, outlier_mask = reject_outliers(
-            vertical_positions,
-            use_ransac=True,
-            use_median=True,
-            interpolate=False,  # Don't modify, just detect
-        )
+                # Count phases for quality assessment
+                phases = find_contact_phases(contact_states)
+                phases_detected = len(phases) > 0
+                phase_count = len(phases)
+
+                # Perform quality assessment
+                quality_result = assess_jump_quality(
+                    visibilities=visibilities,
+                    positions=vertical_positions,
+                    outlier_mask=outlier_mask,
+                    fps=video.fps,
+                    phases_detected=phases_detected,
+                    phase_count=phase_count,
+                )

-        # Count phases for quality assessment
-        phases = find_contact_phases(contact_states)
-        phases_detected = len(phases) > 0
-        phase_count = len(phases)
-
-        # Perform quality assessment
-        quality_result = assess_jump_quality(
-            visibilities=visibilities,
-            positions=vertical_positions,
-            outlier_mask=outlier_mask,
-            fps=video.fps,
-            phases_detected=phases_detected,
-            phase_count=phase_count,
-        )
+            # Build complete metadata
+            with timer.measure("metadata_building"):
+                processing_time = time.time() - start_time
+
+                video_info = VideoInfo(
+                    source_path=video_path,
+                    fps=video.fps,
+                    width=video.width,
+                    height=video.height,
+                    duration_s=video.frame_count / video.fps,
+                    frame_count=video.frame_count,
+                    codec=video.codec,
+                )

-        # Build complete metadata
-        processing_time = time.time() - start_time
+            # Check if drop start was auto-detected
+            drop_frame = None
+            if drop_start_frame is None and metrics.drop_start_frame is not None:
+                # Auto-detected drop start from box
+                drop_frame = metrics.drop_start_frame
+            elif drop_start_frame is not None:
+                # Manual drop start provided
+                drop_frame = drop_start_frame
+
+            algorithm_config = AlgorithmConfig(
+                detection_method="forward_search",
+                tracking_method="mediapipe_pose",
+                model_complexity=1,
+                smoothing=SmoothingConfig(
+                    window_size=params.smoothing_window,
+                    polynomial_order=params.polyorder,
+                    use_bilateral_filter=params.bilateral_filter,
+                    use_outlier_rejection=params.outlier_rejection,
+                ),
+                detection=DetectionConfig(
+                    velocity_threshold=params.velocity_threshold,
+                    min_contact_frames=params.min_contact_frames,
+                    visibility_threshold=params.visibility_threshold,
+                    use_curvature_refinement=params.use_curvature,
+                ),
+                drop_detection=DropDetectionConfig(
+                    auto_detect_drop_start=(drop_start_frame is None),
+                    detected_drop_frame=drop_frame,
+                    min_stationary_duration_s=0.5,
+                ),
+            )

-        video_info = VideoInfo(
-            source_path=video_path,
-            fps=video.fps,
-            width=video.width,
-            height=video.height,
-            duration_s=video.frame_count / video.fps,
-            frame_count=video.frame_count,
-            codec=video.codec,
-        )
+            # Convert timer metrics to human-readable stage names
+            stage_times = _convert_timer_to_stage_names(timer.get_metrics())

-        processing_info = ProcessingInfo(
-            version=get_kinemotion_version(),
-            timestamp=create_timestamp(),
-            quality_preset=quality_preset.value,
-            processing_time_s=processing_time,
-        )
+            processing_info = ProcessingInfo(
+                version=get_kinemotion_version(),
+                timestamp=create_timestamp(),
+                quality_preset=quality_preset.value,
+                processing_time_s=processing_time,
+                timing_breakdown=stage_times,
+            )

-        # Check if drop start was auto-detected
-        drop_frame = None
-        if drop_start_frame is None and metrics.drop_start_frame is not None:
-            # Auto-detected drop start from box
-            drop_frame = metrics.drop_start_frame
-        elif drop_start_frame is not None:
-            # Manual drop start provided
-            drop_frame = drop_start_frame
-
-        algorithm_config = AlgorithmConfig(
-            detection_method="forward_search",
-            tracking_method="mediapipe_pose",
-            model_complexity=1,
-            smoothing=SmoothingConfig(
-                window_size=params.smoothing_window,
-                polynomial_order=params.polyorder,
-                use_bilateral_filter=params.bilateral_filter,
-                use_outlier_rejection=params.outlier_rejection,
-            ),
-            detection=DetectionConfig(
-                velocity_threshold=params.velocity_threshold,
-                min_contact_frames=params.min_contact_frames,
-                visibility_threshold=params.visibility_threshold,
-                use_curvature_refinement=params.use_curvature,
-            ),
-            drop_detection=DropDetectionConfig(
-                auto_detect_drop_start=(drop_start_frame is None),
-                detected_drop_frame=drop_frame,
-                min_stationary_duration_s=0.5,
-            ),
-        )
+            result_metadata = ResultMetadata(
+                quality=quality_result,
+                video=video_info,
+                processing=processing_info,
+                algorithm=algorithm_config,
+            )

-        result_metadata = ResultMetadata(
-            quality=quality_result,
-            video=video_info,
-            processing=processing_info,
-            algorithm=algorithm_config,
-        )
+            # Attach complete metadata to metrics
+            metrics.result_metadata = result_metadata
+
+            if verbose and quality_result.warnings:
+                print("\n⚠️ Quality Warnings:")
+                for warning in quality_result.warnings:
+                    print(f" - {warning}")
+                print()
+
+            # Generate outputs (JSON and debug video)
+            _generate_dropjump_outputs(
+                metrics,
+                json_output,
+                output_video,
+                frames,
+                smoothed_landmarks,
+                contact_states,
+                video,
+                verbose,
+                timer,
+            )

-        # Attach complete metadata to metrics
-        metrics.result_metadata = result_metadata
-
-        if verbose and quality_result.warnings:
-            print("\n⚠️ Quality Warnings:")
-            for warning in quality_result.warnings:
-                print(f" - {warning}")
-            print()
-
-        # Generate outputs (JSON and debug video)
-        _generate_outputs(
-            metrics,
-            json_output,
-            output_video,
-            frames,
-            smoothed_landmarks,
-            contact_states,
-            video,
-            verbose,
-        )
+            # Validate metrics against physiological bounds
+            with timer.measure("metrics_validation"):
+                validator = DropJumpMetricsValidator()
+                validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
+                metrics.validation_result = validation_result

-        if verbose:
-            print("Analysis complete!")
+            if verbose and validation_result.issues:
+                print("\n⚠️ Validation Results:")
+                for issue in validation_result.issues:
+                    print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")

-        # Validate metrics against physiological bounds
-        validator = DropJumpMetricsValidator()
-        validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
-        metrics.validation_result = validation_result
+            # Print timing summary if verbose
+            if verbose:
+                total_time = time.time() - start_time
+                stage_times = _convert_timer_to_stage_names(timer.get_metrics())

-        if verbose and validation_result.issues:
-            print("\n⚠️ Validation Results:")
-            for issue in validation_result.issues:
-                print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+                print("\n=== Timing Summary ===")
+                for stage, duration in stage_times.items():
+                    percentage = (duration / total_time) * 100
+                    dur_ms = duration * 1000
+                    print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+                total_ms = total_time * 1000
+                print(f"{'Total':.>40} {total_ms:>6.0f}ms (100.0%)")
+                print()
+                print("Analysis complete!")

-        return metrics
+            return metrics


 def process_dropjump_videos_bulk(
@@ -801,13 +942,39 @@ def _generate_cmj_outputs(
     video_display_height: int,
     video_fps: float,
     verbose: bool,
+    timer: PerformanceTimer | None = None,
 ) -> None:
-    """Generate JSON and debug video outputs for CMJ analysis."""
+    """Generate JSON and debug video outputs for CMJ analysis.
+
+    Args:
+        output_video: Optional path for debug video output
+        json_output: Optional path for JSON output
+        metrics: Calculated CMJ metrics
+        frames: List of video frames
+        smoothed_landmarks: Smoothed landmark sequence
+        video_width: Video width in pixels
+        video_height: Video height in pixels
+        video_display_width: Display width considering aspect ratio
+        video_display_height: Display height considering aspect ratio
+        video_fps: Video frames per second
+        verbose: Print progress messages
+        timer: Optional PerformanceTimer for measuring operations
+    """
     if json_output:
         import json

-        output_path = Path(json_output)
-        output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
+        if timer:
+            with timer.measure("json_serialization"):
+                output_path = Path(json_output)
+                metrics_dict = metrics.to_dict()
+                json_str = json.dumps(metrics_dict, indent=2)
+                output_path.write_text(json_str)
+        else:
+            output_path = Path(json_output)
+            metrics_dict = metrics.to_dict()
+            json_str = json.dumps(metrics_dict, indent=2)
+            output_path.write_text(json_str)
+

         if verbose:
             print(f"Metrics written to: {json_output}")
@@ -815,19 +982,38 @@ def _generate_cmj_outputs(
         if verbose:
             print(f"Generating debug video: {output_video}")

-        with CMJDebugOverlayRenderer(
-            output_video,
-            video_width,
-            video_height,
-            video_display_width,
-            video_display_height,
-            video_fps,
-        ) as renderer:
-            for i, frame in enumerate(frames):
-                annotated = renderer.render_frame(
-                    frame, smoothed_landmarks[i], i, metrics
-                )
-                renderer.write_frame(annotated)
+        if timer:
+            with timer.measure("debug_video_generation"):
+                with CMJDebugOverlayRenderer(
+                    output_video,
+                    video_width,
+                    video_height,
+                    video_display_width,
+                    video_display_height,
+                    video_fps,
+                ) as renderer:
+                    for i, frame in enumerate(frames):
+                        annotated = renderer.render_frame(
+                            frame, smoothed_landmarks[i], i, metrics
+                        )
+                        renderer.write_frame(annotated)
+            # Capture re-encoding duration separately
+            with timer.measure("debug_video_reencode"):
+                pass  # Re-encoding happens in context manager __exit__
+        else:
+            with CMJDebugOverlayRenderer(
+                output_video,
+                video_width,
+                video_height,
+                video_display_width,
+                video_display_height,
+                video_fps,
+            ) as renderer:
+                for i, frame in enumerate(frames):
+                    annotated = renderer.render_frame(
+                        frame, smoothed_landmarks[i], i, metrics
+                    )
+                    renderer.write_frame(annotated)

         if verbose:
             print(f"Debug video saved: {output_video}")
@@ -845,6 +1031,7 @@ def process_cmj_video(
     detection_confidence: float | None = None,
     tracking_confidence: float | None = None,
     verbose: bool = False,
+    timer: PerformanceTimer | None = None,
 ) -> CMJMetrics:
     """
     Process a single CMJ video and return metrics.
@@ -865,6 +1052,7 @@
         detection_confidence: Optional override for pose detection confidence
         tracking_confidence: Optional override for pose tracking confidence
         verbose: Print processing details
+        timer: Optional PerformanceTimer for measuring operations

     Returns:
         CMJMetrics object containing analysis results
@@ -885,222 +1073,255 @@ def process_cmj_video(
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")

-    # Start timing
+    # Start overall timing
     start_time = time.time()
+    if timer is None:
+        timer = PerformanceTimer()

     # Convert quality string to enum
     quality_preset = _parse_quality_preset(quality)

     # Initialize video processor
-    with VideoProcessor(video_path) as video:
-        if verbose:
-            print(
-                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
-                f"{video.frame_count} frames"
-            )
-
-        # Determine confidence levels
-        det_conf, track_conf = _determine_confidence_levels(
-            quality_preset, detection_confidence, tracking_confidence
-        )
-
-        # Track all frames
-        tracker = PoseTracker(
-            min_detection_confidence=det_conf, min_tracking_confidence=track_conf
-        )
-        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)
-
-        # Auto-tune parameters
-        characteristics = analyze_video_sample(
-            landmarks_sequence, video.fps, video.frame_count
-        )
-        params = auto_tune_parameters(characteristics, quality_preset)
-
-        # Apply expert overrides
-        params = _apply_expert_overrides(
-            params,
-            smoothing_window,
-            velocity_threshold,
-            min_contact_frames,
-            visibility_threshold,
-        )
-
-        if verbose:
-            _print_verbose_parameters(video, characteristics, quality_preset, params)
-
-        # Apply smoothing
-        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)
-
-        # Extract vertical positions
-        if verbose:
-            print("Extracting vertical positions (Hip and Foot)...")
+    with timer.measure("video_initialization"):
+        with VideoProcessor(video_path, timer=timer) as video:
+            if verbose:
+                print(
+                    f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
+                    f"{video.frame_count} frames"
+                )

-        # Primary: Hips (for depth, velocity, general phases)
-        vertical_positions, visibilities = _extract_vertical_positions(
-            smoothed_landmarks, target="hip"
-        )
+            # Determine confidence levels
+            det_conf, track_conf = _determine_confidence_levels(
+                quality_preset, detection_confidence, tracking_confidence
+            )

-        # Secondary: Feet (for precise landing detection)
-        foot_positions, _ = _extract_vertical_positions(
-            smoothed_landmarks, target="foot"
-        )
+            # Track all frames
+            if verbose:
+                print("Processing all frames with MediaPipe pose tracking...")
+            tracker = PoseTracker(
+                min_detection_confidence=det_conf,
+                min_tracking_confidence=track_conf,
+                timer=timer,
+            )
+            frames, landmarks_sequence = _process_all_frames(
+                video, tracker, verbose, timer
+            )

-        tracking_method = "hip_hybrid"
+            # Auto-tune parameters
+            with timer.measure("parameter_auto_tuning"):
+                characteristics = analyze_video_sample(
+                    landmarks_sequence, video.fps, video.frame_count
+                )
+                params = auto_tune_parameters(characteristics, quality_preset)
+
+                # Apply expert overrides
+                params = _apply_expert_overrides(
+                    params,
+                    smoothing_window,
+                    velocity_threshold,
+                    min_contact_frames,
+                    visibility_threshold,
+                )

-        # Detect CMJ phases
-        if verbose:
-            print("Detecting CMJ phases...")
-
-        phases = detect_cmj_phases(
-            vertical_positions,
-            video.fps,
-            window_length=params.smoothing_window,
-            polyorder=params.polyorder,
-            landing_positions=foot_positions,  # Use feet for landing
-        )
+            if verbose:
+                _print_verbose_parameters(
+                    video, characteristics, quality_preset, params
+                )

-        if phases is None:
-            raise ValueError("Could not detect CMJ phases in video")
+            # Apply smoothing
+            smoothed_landmarks = _apply_smoothing(
+                landmarks_sequence, params, verbose, timer
+            )

-        standing_end, lowest_point, takeoff_frame, landing_frame = phases
+            # Extract vertical positions
+            if verbose:
+                print("Extracting vertical positions (Hip and Foot)...")
+            with timer.measure("vertical_position_extraction"):
+                # Primary: Hips (for depth, velocity, general phases)
+                vertical_positions, visibilities = _extract_vertical_positions(
+                    smoothed_landmarks, target="hip"
+                )

-        # Calculate metrics
-        if verbose:
-            print("Calculating metrics...")
+                # Secondary: Feet (for precise landing detection)
+                foot_positions, _ = _extract_vertical_positions(
+                    smoothed_landmarks, target="foot"
+                )

-        # Use signed velocity for CMJ (need direction information)
-        from .cmj.analysis import compute_signed_velocity
+            tracking_method = "hip_hybrid"
+
+            # Detect CMJ phases
+            if verbose:
+                print("Detecting CMJ phases...")
+            with timer.measure("phase_detection"):
+                phases = detect_cmj_phases(
+                    vertical_positions,
+                    video.fps,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                    landing_positions=foot_positions,  # Use feet for landing
+                )

-        velocities = compute_signed_velocity(
-            vertical_positions,
-            window_length=params.smoothing_window,
-            polyorder=params.polyorder,
-        )
+            if phases is None:
+                raise ValueError("Could not detect CMJ phases in video")

-        metrics = calculate_cmj_metrics(
-            vertical_positions,
-            velocities,
-            standing_end,
-            lowest_point,
-            takeoff_frame,
-            landing_frame,
-            video.fps,
-            tracking_method=tracking_method,
-        )
+            standing_end, lowest_point, takeoff_frame, landing_frame = phases

-        # Assess quality and add confidence scores
-        if verbose:
-            print("Assessing tracking quality...")
-
-        # Detect outliers for quality scoring (doesn't affect results, just
-        # for assessment)
-        _, outlier_mask = reject_outliers(
-            vertical_positions,
-            use_ransac=True,
-            use_median=True,
-            interpolate=False,  # Don't modify, just detect
-        )
+            # Calculate metrics
+            if verbose:
+                print("Calculating metrics...")
+            with timer.measure("metrics_calculation"):
+                # Use signed velocity for CMJ (need direction information)
+                from .cmj.analysis import compute_signed_velocity

-        # Phases detected successfully if we got here
-        phases_detected = True
-        phase_count = 4  # standing, eccentric, concentric, flight
-
-        # Perform quality assessment
-        quality_result = assess_jump_quality(
-            visibilities=visibilities,
-            positions=vertical_positions,
-            outlier_mask=outlier_mask,
-            fps=video.fps,
-            phases_detected=phases_detected,
-            phase_count=phase_count,
-        )
+                velocities = compute_signed_velocity(
+                    vertical_positions,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                )

-        # Build complete metadata
-        processing_time = time.time() - start_time
+                metrics = calculate_cmj_metrics(
+                    vertical_positions,
+                    velocities,
+                    standing_end,
+                    lowest_point,
+                    takeoff_frame,
+                    landing_frame,
+                    video.fps,
+                    tracking_method=tracking_method,
+                )

-        video_info = VideoInfo(
-            source_path=video_path,
-            fps=video.fps,
-            width=video.width,
-            height=video.height,
-            duration_s=video.frame_count / video.fps,
-            frame_count=video.frame_count,
-            codec=video.codec,
-        )
+            # Assess quality and add confidence scores
+            if verbose:
+                print("Assessing tracking quality...")
+            with timer.measure("quality_assessment"):
+                # Detect outliers for quality scoring (doesn't affect results)
+                _, outlier_mask = reject_outliers(
+                    vertical_positions,
+                    use_ransac=True,
+                    use_median=True,
+                    interpolate=False,  # Don't modify, just detect
+                )

-        processing_info = ProcessingInfo(
-            version=get_kinemotion_version(),
-            timestamp=create_timestamp(),
-            quality_preset=quality_preset.value,
-            processing_time_s=processing_time,
-        )
+                # Phases detected successfully if we got here
+                phases_detected = True
+                phase_count = 4  # standing, eccentric, concentric, flight
+
+                # Perform quality assessment
+                quality_result = assess_jump_quality(
+                    visibilities=visibilities,
+                    positions=vertical_positions,
+                    outlier_mask=outlier_mask,
+                    fps=video.fps,
+                    phases_detected=phases_detected,
+                    phase_count=phase_count,
+                )

-        algorithm_config = AlgorithmConfig(
-            detection_method="backward_search",
-            tracking_method="mediapipe_pose",
-            model_complexity=1,
-            smoothing=SmoothingConfig(
-                window_size=params.smoothing_window,
-                polynomial_order=params.polyorder,
-                use_bilateral_filter=params.bilateral_filter,
-                use_outlier_rejection=params.outlier_rejection,
-            ),
-            detection=DetectionConfig(
-                velocity_threshold=params.velocity_threshold,
-                min_contact_frames=params.min_contact_frames,
-                visibility_threshold=params.visibility_threshold,
-                use_curvature_refinement=params.use_curvature,
-            ),
-            drop_detection=None,  # CMJ doesn't have drop detection
-        )
+            # Build complete metadata
+            with timer.measure("metadata_building"):
+                processing_time = time.time() - start_time
+
+                video_info = VideoInfo(
+                    source_path=video_path,
+                    fps=video.fps,
+                    width=video.width,
+                    height=video.height,
+                    duration_s=video.frame_count / video.fps,
+                    frame_count=video.frame_count,
+                    codec=video.codec,
+                )

-        result_metadata = ResultMetadata(
-            quality=quality_result,
-            video=video_info,
-            processing=processing_info,
-            algorithm=algorithm_config,
-        )
+            # Convert timer metrics to human-readable stage names
+            stage_times = _convert_timer_to_stage_names(timer.get_metrics())

-        # Attach complete metadata to metrics
-        metrics.result_metadata = result_metadata
-
-        if verbose and quality_result.warnings:
-            print("\n⚠️ Quality Warnings:")
-            for warning in quality_result.warnings:
-                print(f" - {warning}")
-            print()
-
-        # Generate outputs if requested
-        _generate_cmj_outputs(
-            output_video,
-            json_output,
-            metrics,
-            frames,
-            smoothed_landmarks,
-            video.width,
-            video.height,
-            video.display_width,
-            video.display_height,
-            video.fps,
-            verbose,
-        )
+            processing_info = ProcessingInfo(
+                version=get_kinemotion_version(),
+                timestamp=create_timestamp(),
+                quality_preset=quality_preset.value,
+                processing_time_s=processing_time,
+                timing_breakdown=stage_times,
+            )

-        if verbose:
-            print(f"\nJump height: {metrics.jump_height:.3f}m")
-            print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
-            print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
+            algorithm_config = AlgorithmConfig(
+                detection_method="backward_search",
+                tracking_method="mediapipe_pose",
+                model_complexity=1,
+                smoothing=SmoothingConfig(
+                    window_size=params.smoothing_window,
+                    polynomial_order=params.polyorder,
+                    use_bilateral_filter=params.bilateral_filter,
+                    use_outlier_rejection=params.outlier_rejection,
+                ),
+                detection=DetectionConfig(
+                    velocity_threshold=params.velocity_threshold,
+                    min_contact_frames=params.min_contact_frames,
+                    visibility_threshold=params.visibility_threshold,
+                    use_curvature_refinement=params.use_curvature,
+                ),
+                drop_detection=None,  # CMJ doesn't have drop detection
+            )

-        # Validate metrics against physiological bounds
-        validator = CMJMetricsValidator()
-        validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
-        metrics.validation_result = validation_result
+            result_metadata = ResultMetadata(
+                quality=quality_result,
+                video=video_info,
+                processing=processing_info,
+                algorithm=algorithm_config,
+            )

-        if verbose and validation_result.issues:
-            print("\n⚠️ Validation Results:")
-            for issue in validation_result.issues:
-                print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+            # Attach complete metadata to metrics
+            metrics.result_metadata = result_metadata
+
+            if verbose and quality_result.warnings:
+                print("\n⚠️ Quality Warnings:")
+                for warning in quality_result.warnings:
+                    print(f" - {warning}")
+                print()
+
+            # Generate outputs if requested
+            _generate_cmj_outputs(
+                output_video,
+                json_output,
+                metrics,
+                frames,
+                smoothed_landmarks,
+                video.width,
+                video.height,
+                video.display_width,
+                video.display_height,
+                video.fps,
+                verbose,
+                timer,
+            )

-        return metrics
+            # Validate metrics against physiological bounds
+            with timer.measure("metrics_validation"):
+                validator = CMJMetricsValidator()
+                validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
+                metrics.validation_result = validation_result
+
+            if verbose and validation_result.issues:
+                print("\n⚠️ Validation Results:")
+                for issue in validation_result.issues:
+                    print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+
+            # Print timing summary if verbose
+            if verbose:
+                total_time = time.time() - start_time
+                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+
+                print("\n=== Timing Summary ===")
+                for stage, duration in stage_times.items():
+                    percentage = (duration / total_time) * 100
+                    dur_ms = duration * 1000
+                    print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+                total_ms = total_time * 1000
+                print(f"{'Total':.>40} {total_ms:>6.0f}ms (100.0%)")
+                print()
+
+                print(f"\nJump height: {metrics.jump_height:.3f}m")
+                print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
+                print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
+
+            return metrics


 def process_cmj_videos_bulk(
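
The new kinemotion/core/timing module itself is not part of this diff, so the PerformanceTimer interface can only be inferred from its call sites above: timer.measure(name) is used as a context manager, and timer.get_metrics() feeds _convert_timer_to_stage_names() with a dict[str, float] keyed by stage name. A minimal sketch consistent with that usage might look like the following; the accumulation of repeated stages into a running total is an assumption, not confirmed by the diff:

# Hedged sketch of the PerformanceTimer interface implied by the call sites
# in this diff. The real implementation in kinemotion/core/timing.py is not
# shown here; measure() and get_metrics() are inferred, and summing repeated
# measurements per stage is an assumption.
import time
from collections.abc import Iterator
from contextlib import contextmanager


class PerformanceTimer:
    """Accumulates wall-clock durations for named pipeline stages."""

    def __init__(self) -> None:
        self._metrics: dict[str, float] = {}

    @contextmanager
    def measure(self, name: str) -> Iterator[None]:
        # Time the enclosed block and add the elapsed time to the named stage.
        start = time.perf_counter()
        try:
            yield
        finally:
            elapsed = time.perf_counter() - start
            self._metrics[name] = self._metrics.get(name, 0.0) + elapsed

    def get_metrics(self) -> dict[str, float]:
        # Stage name -> total seconds, as consumed by
        # _convert_timer_to_stage_names().
        return dict(self._metrics)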
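
Under the same assumptions, the timer parameter threaded through process_dropjump_video() (and process_cmj_video()) in 0.41.0 can be supplied externally to collect a per-stage breakdown; the video and JSON paths below are placeholders:

# Hypothetical usage of the new timer parameter; "jump.mp4" and
# "metrics.json" are placeholder paths, not files shipped with the package.
from kinemotion.api import process_dropjump_video
from kinemotion.core.timing import PerformanceTimer

timer = PerformanceTimer()  # optional: the function now creates one if omitted
metrics = process_dropjump_video(
    "jump.mp4",
    json_output="metrics.json",
    verbose=True,  # also prints the new "=== Timing Summary ===" table
    timer=timer,
)

# Raw per-stage durations in seconds, keyed by the snake_case names that
# _convert_timer_to_stage_names() maps to human-readable labels.
for stage, seconds in timer.get_metrics().items():
    print(f"{stage}: {seconds * 1000:.0f}ms")

# The same breakdown is embedded in the result metadata via
# ProcessingInfo(timing_breakdown=...), assuming the access path below.
print(metrics.result_metadata.processing.timing_breakdown)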