kinemotion 0.44.0-py3-none-any.whl → 0.45.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.

kinemotion/api.py CHANGED
@@ -1,22 +1,16 @@
 "Public API for programmatic use of kinemotion analysis."
 
+import json
 import time
 from collections.abc import Callable
-from concurrent.futures import ProcessPoolExecutor, as_completed
 from dataclasses import dataclass
 from pathlib import Path
 
-import cv2
-import numpy as np
-
-from .cmj.analysis import compute_average_hip_position, detect_cmj_phases
+from .cmj.analysis import detect_cmj_phases
 from .cmj.debug_overlay import CMJDebugOverlayRenderer
 from .cmj.kinematics import CMJMetrics, calculate_cmj_metrics
 from .cmj.metrics_validator import CMJMetricsValidator
 from .core.auto_tuning import (
-    AnalysisParameters,
-    QualityPreset,
-    VideoCharacteristics,
     analyze_video_sample,
     auto_tune_parameters,
 )
@@ -32,13 +26,22 @@ from .core.metadata import (
     create_timestamp,
     get_kinemotion_version,
 )
+from .core.pipeline_utils import (
+    apply_expert_overrides,
+    apply_smoothing,
+    convert_timer_to_stage_names,
+    determine_confidence_levels,
+    extract_vertical_positions,
+    parse_quality_preset,
+    print_verbose_parameters,
+    process_all_frames,
+    process_videos_bulk_generic,
+)
 from .core.pose import PoseTracker
 from .core.quality import assess_jump_quality
-from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.timing import PerformanceTimer
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
-    compute_average_foot_position,
     detect_ground_contact,
     find_contact_phases,
 )
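
Note on the block above: the helpers that were private to api.py in 0.44.0 (for example `_parse_quality_preset`) are replaced by shared, public names in `kinemotion.core.pipeline_utils`. A minimal sketch of the rename, assuming the moved functions keep the behavior of the removed helpers shown later in this diff (the call below is illustrative, not part of the diff):

    # Sketch: module path taken from the import block above; behavior assumed
    # to match the removed _parse_quality_preset helper.
    from kinemotion.core.pipeline_utils import parse_quality_preset

    preset = parse_quality_preset("balanced")
    # parse_quality_preset("bogus") would raise ValueError, per the old helper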
@@ -47,391 +50,6 @@ from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 from .dropjump.metrics_validator import DropJumpMetricsValidator
 
 
-def _parse_quality_preset(quality: str) -> QualityPreset:
-    """Parse and validate quality preset string.
-
-    Args:
-        quality: Quality preset string ('fast', 'balanced', or 'accurate')
-
-    Returns:
-        QualityPreset enum value
-
-    Raises:
-        ValueError: If quality preset is invalid
-    """
-    try:
-        return QualityPreset(quality.lower())
-    except ValueError as e:
-        raise ValueError(
-            f"Invalid quality preset: {quality}. "
-            "Must be 'fast', 'balanced', or 'accurate'"
-        ) from e
-
-
-def _determine_confidence_levels(
-    quality_preset: QualityPreset,
-    detection_confidence: float | None,
-    tracking_confidence: float | None,
-) -> tuple[float, float]:
-    """Determine detection and tracking confidence levels.
-
-    Confidence levels are set based on quality preset and can be overridden
-    by expert parameters.
-
-    Args:
-        quality_preset: Quality preset enum
-        detection_confidence: Optional expert override for detection confidence
-        tracking_confidence: Optional expert override for tracking confidence
-
-    Returns:
-        Tuple of (detection_confidence, tracking_confidence)
-    """
-    # Set initial confidence from quality preset
-    initial_detection_conf = 0.5
-    initial_tracking_conf = 0.5
-
-    if quality_preset == QualityPreset.FAST:
-        initial_detection_conf = 0.3
-        initial_tracking_conf = 0.3
-    elif quality_preset == QualityPreset.ACCURATE:
-        initial_detection_conf = 0.6
-        initial_tracking_conf = 0.6
-
-    # Override with expert values if provided
-    if detection_confidence is not None:
-        initial_detection_conf = detection_confidence
-    if tracking_confidence is not None:
-        initial_tracking_conf = tracking_confidence
-
-    return initial_detection_conf, initial_tracking_conf
-
-
-def _apply_expert_overrides(
-    params: AnalysisParameters,
-    smoothing_window: int | None,
-    velocity_threshold: float | None,
-    min_contact_frames: int | None,
-    visibility_threshold: float | None,
-) -> AnalysisParameters:
-    """Apply expert parameter overrides to auto-tuned parameters.
-
-    Args:
-        params: Auto-tuned parameters object
-        smoothing_window: Optional override for smoothing window
-        velocity_threshold: Optional override for velocity threshold
-        min_contact_frames: Optional override for minimum contact frames
-        visibility_threshold: Optional override for visibility threshold
-
-    Returns:
-        Modified params object (mutated in place)
-    """
-    if smoothing_window is not None:
-        params.smoothing_window = smoothing_window
-    if velocity_threshold is not None:
-        params.velocity_threshold = velocity_threshold
-    if min_contact_frames is not None:
-        params.min_contact_frames = min_contact_frames
-    if visibility_threshold is not None:
-        params.visibility_threshold = visibility_threshold
-    return params
-
-
-def _print_verbose_parameters(
-    video: VideoProcessor,
-    characteristics: VideoCharacteristics,
-    quality_preset: QualityPreset,
-    params: AnalysisParameters,
-) -> None:
-    """Print auto-tuned parameters in verbose mode.
-
-    Args:
-        video: Video processor with fps and dimensions
-        characteristics: Video analysis characteristics
-        quality_preset: Selected quality preset
-        params: Auto-tuned parameters
-    """
-    print("\n" + "=" * 60)
-    print("AUTO-TUNED PARAMETERS")
-    print("=" * 60)
-    print(f"Video FPS: {video.fps:.2f}")
-    print(
-        f"Tracking quality: {characteristics.tracking_quality} "
-        f"(avg visibility: {characteristics.avg_visibility:.2f})"
-    )
-    print(f"Quality preset: {quality_preset.value}")
-    print("\nSelected parameters:")
-    print(f"  smoothing_window: {params.smoothing_window}")
-    print(f"  polyorder: {params.polyorder}")
-    print(f"  velocity_threshold: {params.velocity_threshold:.4f}")
-    print(f"  min_contact_frames: {params.min_contact_frames}")
-    print(f"  visibility_threshold: {params.visibility_threshold}")
-    print(f"  detection_confidence: {params.detection_confidence}")
-    print(f"  tracking_confidence: {params.tracking_confidence}")
-    print(f"  outlier_rejection: {params.outlier_rejection}")
-    print(f"  bilateral_filter: {params.bilateral_filter}")
-    print(f"  use_curvature: {params.use_curvature}")
-    print("=" * 60 + "\n")
-
-
-def _process_all_frames(
-    video: VideoProcessor,
-    tracker: PoseTracker,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-    close_tracker: bool = True,
-    target_debug_fps: float = 30.0,
-    max_debug_dim: int = 720,
-) -> tuple[list, list, list]:
-    """Process all frames from video and extract pose landmarks.
-
-    Optimizes memory and speed by:
-    1. Decimating frames stored for debug video (to target_debug_fps)
-    2. Pre-resizing stored frames (to max_debug_dim)
-
-    Args:
-        video: Video processor to read frames from
-        tracker: Pose tracker for landmark detection
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-        close_tracker: Whether to close the tracker after processing (default: True)
-        target_debug_fps: Target FPS for debug video (default: 30.0)
-        max_debug_dim: Max dimension for debug video frames (default: 720)
-
-    Returns:
-        Tuple of (debug_frames, landmarks_sequence, frame_indices)
-
-    Raises:
-        ValueError: If no frames could be processed
-    """
-    if verbose:
-        print("Tracking pose landmarks...")
-
-    landmarks_sequence = []
-    debug_frames = []
-    frame_indices = []
-
-    # Calculate decimation and resize parameters
-    step = max(1, int(video.fps / target_debug_fps))
-
-    # Calculate resize dimensions maintaining aspect ratio
-    # Logic mirrors BaseDebugOverlayRenderer to ensure consistency
-    w, h = video.display_width, video.display_height
-    scale = 1.0
-    if max(w, h) > max_debug_dim:
-        scale = max_debug_dim / max(w, h)
-
-    debug_w = int(w * scale) // 2 * 2
-    debug_h = int(h * scale) // 2 * 2
-    should_resize = (debug_w != video.width) or (debug_h != video.height)
-
-    frame_idx = 0
-
-    if timer:
-        with timer.measure("pose_tracking"):
-            while True:
-                frame = video.read_frame()
-                if frame is None:
-                    break
-
-                # 1. Track on FULL resolution frame (preserves accuracy)
-                landmarks = tracker.process_frame(frame)
-                landmarks_sequence.append(landmarks)
-
-                # 2. Store frame for debug video ONLY if matches step
-                if frame_idx % step == 0:
-                    # Pre-resize to save memory and later encoding time
-                    if should_resize:
-                        # Use simple linear interpolation for speed (debug only)
-                        processed_frame = cv2.resize(
-                            frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
-                        )
-                    else:
-                        processed_frame = frame
-
-                    debug_frames.append(processed_frame)
-                    frame_indices.append(frame_idx)
-
-                frame_idx += 1
-    else:
-        while True:
-            frame = video.read_frame()
-            if frame is None:
-                break
-
-            landmarks = tracker.process_frame(frame)
-            landmarks_sequence.append(landmarks)
-
-            if frame_idx % step == 0:
-                if should_resize:
-                    processed_frame = cv2.resize(
-                        frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
-                    )
-                else:
-                    processed_frame = frame
-
-                debug_frames.append(processed_frame)
-                frame_indices.append(frame_idx)
-
-            frame_idx += 1
-
-    if close_tracker:
-        tracker.close()
-
-    if not landmarks_sequence:
-        raise ValueError("No frames could be processed from video")
-
-    return debug_frames, landmarks_sequence, frame_indices
-
-
-def _apply_smoothing(
-    landmarks_sequence: list,
-    params: AnalysisParameters,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-) -> list:
-    """Apply smoothing to landmark sequence with auto-tuned parameters.
-
-    Args:
-        landmarks_sequence: Sequence of landmarks from all frames
-        params: Auto-tuned parameters containing smoothing settings
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-
-    Returns:
-        Smoothed landmarks sequence
-    """
-    if params.outlier_rejection or params.bilateral_filter:
-        if verbose:
-            if params.outlier_rejection:
-                print("Smoothing landmarks with outlier rejection...")
-            if params.bilateral_filter:
-                print("Using bilateral temporal filter...")
-        if timer:
-            with timer.measure("smoothing"):
-                return smooth_landmarks_advanced(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                    use_outlier_rejection=params.outlier_rejection,
-                    use_bilateral=params.bilateral_filter,
-                )
-        else:
-            return smooth_landmarks_advanced(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-                use_outlier_rejection=params.outlier_rejection,
-                use_bilateral=params.bilateral_filter,
-            )
-    else:
-        if verbose:
-            print("Smoothing landmarks...")
-        if timer:
-            with timer.measure("smoothing"):
-                return smooth_landmarks(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                )
-        else:
-            return smooth_landmarks(
-                landmarks_sequence,
-                window_length=params.smoothing_window,
-                polyorder=params.polyorder,
-            )
-
-
-def _calculate_foot_visibility(frame_landmarks: dict) -> float:
-    """Calculate average visibility of foot landmarks.
-
-    Args:
-        frame_landmarks: Dictionary of landmarks for a frame
-
-    Returns:
-        Average visibility value (0-1)
-    """
-    foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-    foot_vis = [frame_landmarks[key][2] for key in foot_keys if key in frame_landmarks]
-    return float(np.mean(foot_vis)) if foot_vis else 0.0
-
-
-def _extract_vertical_positions(
-    smoothed_landmarks: list,
-    target: str = "foot",
-) -> tuple[np.ndarray, np.ndarray]:
-    """Extract vertical positions and visibilities from smoothed landmarks.
-
-    Args:
-        smoothed_landmarks: Smoothed landmark sequence
-        target: Tracking target "foot" or "hip" (default: "foot")
-
-    Returns:
-        Tuple of (vertical_positions, visibilities) as numpy arrays
-    """
-    position_list: list[float] = []
-    visibilities_list: list[float] = []
-
-    for frame_landmarks in smoothed_landmarks:
-        if frame_landmarks:
-            if target == "hip":
-                _, y = compute_average_hip_position(frame_landmarks)
-                # For hips, we can use average visibility of hips if needed,
-                # but currently _calculate_foot_visibility is specific to feet.
-                # We'll stick to foot visibility for now as it indicates
-                # overall leg tracking quality, or we could implement
-                # _calculate_hip_visibility. For simplicity, we'll use foot
-                # visibility as a proxy for "body visibility" or just use 1.0
-                # since hips are usually visible if feet are. Actually, let's
-                # just use foot visibility for consistency in quality checks.
-                vis = _calculate_foot_visibility(frame_landmarks)
-            else:
-                _, y = compute_average_foot_position(frame_landmarks)
-                vis = _calculate_foot_visibility(frame_landmarks)
-
-            position_list.append(y)
-            visibilities_list.append(vis)
-        else:
-            position_list.append(position_list[-1] if position_list else 0.5)
-            visibilities_list.append(0.0)
-
-    return np.array(position_list), np.array(visibilities_list)
-
-
-def _convert_timer_to_stage_names(
-    timer_metrics: dict[str, float],
-) -> dict[str, float]:
-    """Convert timer metric names to human-readable stage names.
-
-    Args:
-        timer_metrics: Dictionary from PerformanceTimer.get_metrics()
-
-    Returns:
-        Dictionary with human-readable stage names as keys
-    """
-    mapping = {
-        "video_initialization": "Video initialization",
-        "pose_tracking": "Pose tracking",
-        "parameter_auto_tuning": "Parameter auto-tuning",
-        "smoothing": "Smoothing",
-        "vertical_position_extraction": "Vertical position extraction",
-        "ground_contact_detection": "Ground contact detection",
-        "metrics_calculation": "Metrics calculation",
-        "quality_assessment": "Quality assessment",
-        "metadata_building": "Metadata building",
-        "metrics_validation": "Metrics validation",
-        "phase_detection": "Phase detection",
-        "json_serialization": "JSON serialization",
-        "debug_video_generation": "Debug video generation",
-        "debug_video_reencode": "Debug video re-encoding",
-        "frame_rotation": "Frame rotation",
-        "debug_video_resize": "Debug video resizing",
-        "debug_video_copy": "Debug video frame copy",
-        "debug_video_draw": "Debug video drawing",
-        "debug_video_write": "Debug video encoding",
-    }
-    return {mapping.get(k, k): v for k, v in timer_metrics.items()}
-
-
 @dataclass
 class DropJumpVideoResult:
     """Result of processing a single drop jump video."""
@@ -507,34 +125,25 @@ def process_dropjump_video(
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
-    # Set deterministic mode for drop jump reproducibility
-    # Note: MediaPipe has inherent non-determinism (Google issue #3945)
-    # This improves consistency but cannot eliminate all variation
     from .core.determinism import set_deterministic_mode
 
     set_deterministic_mode(seed=42)
 
-    # Start overall timing
     start_time = time.time()
     if timer is None:
         timer = PerformanceTimer()
 
-    # Convert quality string to enum
-    quality_preset = _parse_quality_preset(quality)
+    quality_preset = parse_quality_preset(quality)
 
-    # Load video
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
-            # Determine detection/tracking confidence levels
-            detection_conf, tracking_conf = _determine_confidence_levels(
+            detection_conf, tracking_conf = determine_confidence_levels(
                 quality_preset, detection_confidence, tracking_confidence
             )
 
-            # Process all frames with pose tracking
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
 
-            # Use provided tracker or create new one
             tracker = pose_tracker
             should_close_tracker = False
 
@@ -546,19 +155,17 @@ def process_dropjump_video(
                 )
                 should_close_tracker = True
 
-            frames, landmarks_sequence, frame_indices = _process_all_frames(
+            frames, landmarks_sequence, frame_indices = process_all_frames(
                 video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
-            # Auto-tune parameters
             with timer.measure("parameter_auto_tuning"):
                 characteristics = analyze_video_sample(
                     landmarks_sequence, video.fps, video.frame_count
                 )
                 params = auto_tune_parameters(characteristics, quality_preset)
 
-            # Apply expert overrides if provided
-            params = _apply_expert_overrides(
+            params = apply_expert_overrides(
                 params,
                 smoothing_window,
                 velocity_threshold,
@@ -566,26 +173,22 @@ def process_dropjump_video(
                 visibility_threshold,
             )
 
-            # Show selected parameters if verbose
             if verbose:
-                _print_verbose_parameters(
+                print_verbose_parameters(
                     video, characteristics, quality_preset, params
                 )
 
-            # Apply smoothing with auto-tuned parameters
-            smoothed_landmarks = _apply_smoothing(
+            smoothed_landmarks = apply_smoothing(
                 landmarks_sequence, params, verbose, timer
             )
 
-            # Extract vertical positions from feet
             if verbose:
                 print("Extracting foot positions...")
             with timer.measure("vertical_position_extraction"):
-                vertical_positions, visibilities = _extract_vertical_positions(
+                vertical_positions, visibilities = extract_vertical_positions(
                     smoothed_landmarks
                 )
 
-            # Detect ground contact
             if verbose:
                 print("Detecting ground contact...")
             with timer.measure("ground_contact_detection"):
@@ -599,7 +202,6 @@ def process_dropjump_video(
                     polyorder=params.polyorder,
                 )
 
-            # Calculate metrics
             if verbose:
                 print("Calculating metrics...")
             with timer.measure("metrics_calculation"):
@@ -614,24 +216,20 @@ def process_dropjump_video(
                     use_curvature=params.use_curvature,
                 )
 
-            # Assess quality and add confidence scores
             if verbose:
                 print("Assessing tracking quality...")
             with timer.measure("quality_assessment"):
-                # Detect outliers for quality scoring (doesn't affect results)
                 _, outlier_mask = reject_outliers(
                     vertical_positions,
                     use_ransac=True,
                     use_median=True,
-                    interpolate=False,  # Don't modify, just detect
+                    interpolate=False,
                 )
 
-                # Count phases for quality assessment
                 phases = find_contact_phases(contact_states)
                 phases_detected = len(phases) > 0
                 phase_count = len(phases)
 
-                # Perform quality assessment
                 quality_result = assess_jump_quality(
                     visibilities=visibilities,
                     positions=vertical_positions,
@@ -641,13 +239,10 @@ def process_dropjump_video(
                     phase_count=phase_count,
                 )
 
-            # Build algorithm configuration early (but attach metadata later)
             drop_frame = None
             if drop_start_frame is None and metrics.drop_start_frame is not None:
-                # Auto-detected drop start from box
                 drop_frame = metrics.drop_start_frame
             elif drop_start_frame is not None:
-                # Manual drop start provided
                 drop_frame = drop_start_frame
 
             algorithm_config = AlgorithmConfig(
@@ -689,20 +284,16 @@ def process_dropjump_video(
                     print(f"  - {warning}")
                 print()
 
-            # Generate debug video (but not JSON yet - we need to attach metadata first)
             if output_video:
                 if verbose:
                     print(f"Generating debug video: {output_video}")
 
-                # Determine debug video properties from the pre-processed frames
                 debug_h, debug_w = frames[0].shape[:2]
                 if video.fps > 30:
                     debug_fps = video.fps / (video.fps / 30.0)
                 else:
                     debug_fps = video.fps
-                # Use approximate 30fps if decimated, or actual if not
                 if len(frames) < len(landmarks_sequence):
-                    # Re-calculate step to get precise FPS
                     step = max(1, int(video.fps / 30.0))
                     debug_fps = video.fps / step
 
@@ -710,10 +301,10 @@ def process_dropjump_video(
                     with timer.measure("debug_video_generation"):
                         with DebugOverlayRenderer(
                             output_video,
-                            debug_w,  # Encoded width = pre-resized width
-                            debug_h,  # Encoded height
-                            debug_w,  # Display width (already corrected)
-                            debug_h,  # Display height
+                            debug_w,
+                            debug_h,
+                            debug_w,
+                            debug_h,
                             debug_fps,
                             timer=timer,
                         ) as renderer:
@@ -727,9 +318,8 @@ def process_dropjump_video(
                                     use_com=False,
                                 )
                                 renderer.write_frame(annotated)
-                    # Capture re-encoding duration separately
                     with timer.measure("debug_video_reencode"):
-                        pass  # Re-encoding happens in context manager __exit__
+                        pass
                 else:
                     with DebugOverlayRenderer(
                         output_video,
@@ -754,7 +344,6 @@ def process_dropjump_video(
             if verbose:
                 print(f"Debug video saved: {output_video}")
 
-            # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
                 validator = DropJumpMetricsValidator()
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
@@ -765,10 +354,8 @@ def process_dropjump_video(
                 for issue in validation_result.issues:
                     print(f"  [{issue.severity.value}] {issue.metric}: {issue.message}")
 
-            # NOW create ProcessingInfo with complete timing breakdown
-            # (includes debug video generation timing)
             processing_time = time.time() - start_time
-            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
             processing_info = ProcessingInfo(
                 version=get_kinemotion_version(),
@@ -785,34 +372,27 @@ def process_dropjump_video(
                 algorithm=algorithm_config,
             )
 
-            # Attach complete metadata to metrics
             metrics.result_metadata = result_metadata
 
-            # NOW write JSON after metadata is attached
             if json_output:
                 if timer:
                     with timer.measure("json_serialization"):
                         output_path = Path(json_output)
                         metrics_dict = metrics.to_dict()
-                        import json
-
                         json_str = json.dumps(metrics_dict, indent=2)
                         output_path.write_text(json_str)
                 else:
                     output_path = Path(json_output)
                     metrics_dict = metrics.to_dict()
-                    import json
-
                     json_str = json.dumps(metrics_dict, indent=2)
                     output_path.write_text(json_str)
 
                 if verbose:
                     print(f"Metrics written to: {json_output}")
 
-            # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times_verbose = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times_verbose = convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
                 for stage, duration in stage_times_verbose.items():
@@ -833,76 +413,25 @@ def process_dropjump_videos_bulk(
     progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
 ) -> list[DropJumpVideoResult]:
     """
-    Process multiple drop jump videos in parallel using ProcessPoolExecutor.
-
-    Args:
-        configs: List of DropJumpVideoConfig objects specifying video paths
-            and parameters
-        max_workers: Maximum number of parallel workers (default: 4)
-        progress_callback: Optional callback function called after each video
-            completes.
-            Receives DropJumpVideoResult object.
-
-    Returns:
-        List of DropJumpVideoResult objects, one per input video, in completion order
-
-    Example:
-        >>> configs = [
-        ...     DropJumpVideoConfig("video1.mp4"),
-        ...     DropJumpVideoConfig("video2.mp4", quality="accurate"),
-        ...     DropJumpVideoConfig("video3.mp4", output_video="debug3.mp4"),
-        ... ]
-        >>> results = process_dropjump_videos_bulk(configs, max_workers=4)
-        >>> for result in results:
-        ...     if result.success:
-        ...         print(f"{result.video_path}: {result.metrics.jump_height_m:.3f}m")
-        ...     else:
-        ...         print(f"{result.video_path}: FAILED - {result.error}")
+    Process multiple drop jump videos in parallel.
     """
-    results: list[DropJumpVideoResult] = []
-
-    # Use ProcessPoolExecutor for CPU-bound video processing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        # Submit all jobs
-        future_to_config = {
-            executor.submit(_process_dropjump_video_wrapper, config): config
-            for config in configs
-        }
-
-        # Process results as they complete
-        for future in as_completed(future_to_config):
-            config = future_to_config[future]
-            result: DropJumpVideoResult
-
-            try:
-                result = future.result()
-            except Exception as exc:
-                # Handle unexpected errors
-                result = DropJumpVideoResult(
-                    video_path=config.video_path,
-                    success=False,
-                    error=f"Unexpected error: {str(exc)}",
-                )
-
-            results.append(result)
 
-            # Call progress callback if provided
-            if progress_callback:
-                progress_callback(result)
+    def error_factory(video_path: str, error_msg: str) -> DropJumpVideoResult:
+        return DropJumpVideoResult(
+            video_path=video_path, success=False, error=error_msg
+        )
 
-    return results
+    return process_videos_bulk_generic(
+        configs,
+        _process_dropjump_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
 
 
 def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
-    """
-    Wrapper function for parallel processing. Must be picklable (top-level function).
-
-    Args:
-        config: DropJumpVideoConfig object with processing parameters
-
-    Returns:
-        DropJumpVideoResult object with metrics or error information
-    """
+    """Wrapper function for parallel processing."""
     start_time = time.time()
 
     try:
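
The rewritten function above delegates to `process_videos_bulk_generic`, whose implementation lives in kinemotion/core/pipeline_utils.py and is not shown in this diff. A sketch of what it presumably does, inferred from the removed 0.44.0 loop and the new call site; the parameter names and details are assumptions, not the shipped code:

    # Sketch only - reconstructed from the removed inline implementation above.
    from concurrent.futures import ProcessPoolExecutor, as_completed

    def process_videos_bulk_generic(configs, worker, error_factory, max_workers, progress_callback):
        results = []
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            # Submit one job per config, remembering the config for error reporting
            future_to_config = {executor.submit(worker, cfg): cfg for cfg in configs}
            for future in as_completed(future_to_config):
                cfg = future_to_config[future]
                try:
                    result = future.result()
                except Exception as exc:
                    # Mirrors the old "Unexpected error" handling, now via error_factory
                    result = error_factory(cfg.video_path, f"Unexpected error: {exc}")
                results.append(result)
                if progress_callback:
                    progress_callback(result)
        return results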
@@ -918,7 +447,7 @@ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVide
             visibility_threshold=config.visibility_threshold,
             detection_confidence=config.detection_confidence,
             tracking_confidence=config.tracking_confidence,
-            verbose=False,  # Disable verbose in parallel mode
+            verbose=False,
         )
 
         processing_time = time.time() - start_time
@@ -1014,28 +543,16 @@ def process_cmj_video(
     Raises:
         ValueError: If video cannot be processed or parameters are invalid
         FileNotFoundError: If video file does not exist
-
-    Example:
-        >>> metrics = process_cmj_video(
-        ...     "athlete_cmj.mp4",
-        ...     quality="balanced",
-        ...     verbose=True
-        ... )
-        >>> print(f"Jump height: {metrics.jump_height:.3f}m")
-        >>> print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
     """
     if not Path(video_path).exists():
         raise FileNotFoundError(f"Video file not found: {video_path}")
 
-    # Start overall timing
     start_time = time.time()
     if timer is None:
         timer = PerformanceTimer()
 
-    # Convert quality string to enum
-    quality_preset = _parse_quality_preset(quality)
+    quality_preset = parse_quality_preset(quality)
 
-    # Initialize video processor
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
             if verbose:
@@ -1044,16 +561,13 @@ def process_cmj_video(
                     f"{video.frame_count} frames"
                 )
 
-            # Determine confidence levels
-            det_conf, track_conf = _determine_confidence_levels(
+            det_conf, track_conf = determine_confidence_levels(
                 quality_preset, detection_confidence, tracking_confidence
             )
 
-            # Track all frames
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
 
-            # Use provided tracker or create new one
             tracker = pose_tracker
             should_close_tracker = False
 
@@ -1065,19 +579,17 @@ def process_cmj_video(
                 )
                 should_close_tracker = True
 
-            frames, landmarks_sequence, frame_indices = _process_all_frames(
+            frames, landmarks_sequence, frame_indices = process_all_frames(
                 video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
-            # Auto-tune parameters
             with timer.measure("parameter_auto_tuning"):
                 characteristics = analyze_video_sample(
                     landmarks_sequence, video.fps, video.frame_count
                 )
                 params = auto_tune_parameters(characteristics, quality_preset)
 
-            # Apply expert overrides
-            params = _apply_expert_overrides(
+            params = apply_expert_overrides(
                 params,
                 smoothing_window,
                 velocity_threshold,
@@ -1086,32 +598,27 @@ def process_cmj_video(
             )
 
             if verbose:
-                _print_verbose_parameters(
+                print_verbose_parameters(
                     video, characteristics, quality_preset, params
                 )
 
-            # Apply smoothing
-            smoothed_landmarks = _apply_smoothing(
+            smoothed_landmarks = apply_smoothing(
                 landmarks_sequence, params, verbose, timer
             )
 
-            # Extract vertical positions
             if verbose:
                 print("Extracting vertical positions (Hip and Foot)...")
             with timer.measure("vertical_position_extraction"):
-                # Primary: Hips (for depth, velocity, general phases)
-                vertical_positions, visibilities = _extract_vertical_positions(
+                vertical_positions, visibilities = extract_vertical_positions(
                     smoothed_landmarks, target="hip"
                 )
 
-                # Secondary: Feet (for precise landing detection)
-                foot_positions, _ = _extract_vertical_positions(
+                foot_positions, _ = extract_vertical_positions(
                     smoothed_landmarks, target="foot"
                 )
 
             tracking_method = "hip_hybrid"
 
-            # Detect CMJ phases
             if verbose:
                 print("Detecting CMJ phases...")
             with timer.measure("phase_detection"):
@@ -1120,7 +627,7 @@ def process_cmj_video(
                     video.fps,
                     window_length=params.smoothing_window,
                     polyorder=params.polyorder,
-                    landing_positions=foot_positions,  # Use feet for landing
+                    landing_positions=foot_positions,
                 )
 
             if phases is None:
@@ -1128,11 +635,9 @@ def process_cmj_video(
 
             standing_end, lowest_point, takeoff_frame, landing_frame = phases
 
-            # Calculate metrics
             if verbose:
                 print("Calculating metrics...")
             with timer.measure("metrics_calculation"):
-                # Use signed velocity for CMJ (need direction information)
                 from .cmj.analysis import compute_signed_velocity
 
                 velocities = compute_signed_velocity(
@@ -1152,23 +657,19 @@ def process_cmj_video(
                     tracking_method=tracking_method,
                 )
 
-            # Assess quality and add confidence scores
             if verbose:
                 print("Assessing tracking quality...")
             with timer.measure("quality_assessment"):
-                # Detect outliers for quality scoring (doesn't affect results)
                 _, outlier_mask = reject_outliers(
                     vertical_positions,
                     use_ransac=True,
                     use_median=True,
-                    interpolate=False,  # Don't modify, just detect
+                    interpolate=False,
                 )
 
-                # Phases detected successfully if we got here
                 phases_detected = True
-                phase_count = 4  # standing, eccentric, concentric, flight
+                phase_count = 4
 
-                # Perform quality assessment
                 quality_result = assess_jump_quality(
                     visibilities=visibilities,
                     positions=vertical_positions,
@@ -1178,7 +679,6 @@ def process_cmj_video(
                     phase_count=phase_count,
                 )
 
-            # Build algorithm config early (but attach metadata later)
             algorithm_config = AlgorithmConfig(
                 detection_method="backward_search",
                 tracking_method="mediapipe_pose",
@@ -1195,7 +695,7 @@ def process_cmj_video(
                     visibility_threshold=params.visibility_threshold,
                     use_curvature_refinement=params.use_curvature,
                 ),
-                drop_detection=None,  # CMJ doesn't have drop detection
+                drop_detection=None,
             )
 
             video_info = VideoInfo(
@@ -1214,12 +714,10 @@ def process_cmj_video(
                     print(f"  - {warning}")
                 print()
 
-            # Generate debug video (but not JSON yet - we need to attach metadata first)
             if output_video:
                 if verbose:
                     print(f"Generating debug video: {output_video}")
 
-                # Determine debug video properties from the pre-processed frames
                 debug_h, debug_w = frames[0].shape[:2]
                 step = max(1, int(video.fps / 30.0))
                 debug_fps = video.fps / step
@@ -1233,16 +731,15 @@ def process_cmj_video(
                             debug_w,
                             debug_h,
                             debug_fps,
-                            timer=timer,  # Passing timer here too
+                            timer=timer,
                         ) as renderer:
                             for frame, idx in zip(frames, frame_indices, strict=True):
                                 annotated = renderer.render_frame(
                                     frame, smoothed_landmarks[idx], idx, metrics
                                 )
                                 renderer.write_frame(annotated)
-                    # Capture re-encoding duration separately
                     with timer.measure("debug_video_reencode"):
-                        pass  # Re-encoding happens in context manager __exit__
+                        pass
                 else:
                     with CMJDebugOverlayRenderer(
                         output_video,
@@ -1251,7 +748,7 @@ def process_cmj_video(
                         debug_w,
                         debug_h,
                         debug_fps,
-                        timer=timer,  # Passing timer here too
+                        timer=timer,
                     ) as renderer:
                         for frame, idx in zip(frames, frame_indices, strict=True):
                             annotated = renderer.render_frame(
@@ -1262,16 +759,13 @@ def process_cmj_video(
             if verbose:
                 print(f"Debug video saved: {output_video}")
 
-            # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
                 validator = CMJMetricsValidator()
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
                 metrics.validation_result = validation_result
 
-            # NOW create ProcessingInfo with complete timing breakdown
-            # (includes debug video generation timing)
             processing_time = time.time() - start_time
-            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
             processing_info = ProcessingInfo(
                 version=get_kinemotion_version(),
@@ -1288,24 +782,18 @@ def process_cmj_video(
                 algorithm=algorithm_config,
             )
 
-            # Attach complete metadata to metrics
             metrics.result_metadata = result_metadata
 
-            # NOW write JSON after metadata is attached
             if json_output:
                 if timer:
                     with timer.measure("json_serialization"):
                         output_path = Path(json_output)
                         metrics_dict = metrics.to_dict()
-                        import json
-
                         json_str = json.dumps(metrics_dict, indent=2)
                         output_path.write_text(json_str)
                 else:
                     output_path = Path(json_output)
                     metrics_dict = metrics.to_dict()
-                    import json
-
                     json_str = json.dumps(metrics_dict, indent=2)
                     output_path.write_text(json_str)
 
@@ -1317,16 +805,15 @@ def process_cmj_video(
                 for issue in validation_result.issues:
                     print(f"  [{issue.severity.value}] {issue.metric}: {issue.message}")
 
-            # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times = convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
                 for stage, duration in stage_times.items():
                     percentage = (duration / total_time) * 100
                     dur_ms = duration * 1000
-                    print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+                    print(f"{stage:. <40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
                 total_ms = total_time * 1000
                 print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
                 print()
@@ -1344,72 +831,23 @@ def process_cmj_videos_bulk(
     progress_callback: Callable[[CMJVideoResult], None] | None = None,
 ) -> list[CMJVideoResult]:
     """
-    Process multiple CMJ videos in parallel using ProcessPoolExecutor.
-
-    Args:
-        configs: List of CMJVideoConfig objects specifying video paths and parameters
-        max_workers: Maximum number of parallel workers (default: 4)
-        progress_callback: Optional callback function called after each video completes.
-            Receives CMJVideoResult object.
-
-    Returns:
-        List of CMJVideoResult objects, one per input video, in completion order
-
-    Example:
-        >>> configs = [
-        ...     CMJVideoConfig("video1.mp4"),
-        ...     CMJVideoConfig("video2.mp4", quality="accurate"),
-        ...     CMJVideoConfig("video3.mp4", output_video="debug3.mp4"),
-        ... ]
-        >>> results = process_cmj_videos_bulk(configs, max_workers=4)
-        >>> for result in results:
-        ...     if result.success:
-        ...         print(f"{result.video_path}: {result.metrics.jump_height:.3f}m")
-        ...     else:
-        ...         print(f"{result.video_path}: FAILED - {result.error}")
+    Process multiple CMJ videos in parallel.
     """
-    results: list[CMJVideoResult] = []
-
-    # Use ProcessPoolExecutor for CPU-bound video processing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        # Submit all jobs
-        future_to_config = {
-            executor.submit(_process_cmj_video_wrapper, config): config
-            for config in configs
-        }
-
-        # Process results as they complete
-        for future in as_completed(future_to_config):
-            config = future_to_config[future]
-            result: CMJVideoResult
-
-            try:
-                result = future.result()
-                results.append(result)
-            except Exception as e:
-                result = CMJVideoResult(
-                    video_path=config.video_path, success=False, error=str(e)
-                )
-                results.append(result)
 
-            # Call progress callback if provided
-            if progress_callback:
-                progress_callback(result)
+    def error_factory(video_path: str, error_msg: str) -> CMJVideoResult:
+        return CMJVideoResult(video_path=video_path, success=False, error=error_msg)
 
-    return results
+    return process_videos_bulk_generic(
+        configs,
+        _process_cmj_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
 
 
 def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
-    """
-    Wrapper function for parallel CMJ processing. Must be picklable
-    (top-level function).
-
-    Args:
-        config: CMJVideoConfig object with processing parameters
-
-    Returns:
-        CMJVideoResult object with metrics or error information
-    """
+    """Wrapper function for parallel CMJ processing."""
    start_time = time.time()
 
     try:
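
The shortened docstrings drop the usage examples, but the calling convention is unchanged; the example removed from the 0.44.0 docstring still illustrates the bulk API:

    configs = [
        CMJVideoConfig("video1.mp4"),
        CMJVideoConfig("video2.mp4", quality="accurate"),
        CMJVideoConfig("video3.mp4", output_video="debug3.mp4"),
    ]
    results = process_cmj_videos_bulk(configs, max_workers=4)
    for result in results:
        if result.success:
            print(f"{result.video_path}: {result.metrics.jump_height:.3f}m")
        else:
            print(f"{result.video_path}: FAILED - {result.error}")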
@@ -1424,7 +862,7 @@ def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
             visibility_threshold=config.visibility_threshold,
             detection_confidence=config.detection_confidence,
             tracking_confidence=config.tracking_confidence,
-            verbose=False,  # Disable verbose in parallel mode
+            verbose=False,
         )
 
         processing_time = time.time() - start_time