kinemotion 0.10.3__tar.gz → 0.10.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. Click here for more details.

Files changed (56) hide show
  1. {kinemotion-0.10.3 → kinemotion-0.10.5}/CHANGELOG.md +16 -0
  2. {kinemotion-0.10.3 → kinemotion-0.10.5}/PKG-INFO +1 -1
  3. {kinemotion-0.10.3 → kinemotion-0.10.5}/pyproject.toml +1 -1
  4. kinemotion-0.10.5/src/kinemotion/api.py +604 -0
  5. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/dropjump/kinematics.py +318 -206
  6. {kinemotion-0.10.3 → kinemotion-0.10.5}/uv.lock +1 -1
  7. kinemotion-0.10.3/src/kinemotion/api.py +0 -428
  8. {kinemotion-0.10.3 → kinemotion-0.10.5}/.dockerignore +0 -0
  9. {kinemotion-0.10.3 → kinemotion-0.10.5}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  10. {kinemotion-0.10.3 → kinemotion-0.10.5}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  11. {kinemotion-0.10.3 → kinemotion-0.10.5}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  12. {kinemotion-0.10.3 → kinemotion-0.10.5}/.github/pull_request_template.md +0 -0
  13. {kinemotion-0.10.3 → kinemotion-0.10.5}/.github/workflows/release.yml +0 -0
  14. {kinemotion-0.10.3 → kinemotion-0.10.5}/.gitignore +0 -0
  15. {kinemotion-0.10.3 → kinemotion-0.10.5}/.pre-commit-config.yaml +0 -0
  16. {kinemotion-0.10.3 → kinemotion-0.10.5}/.tool-versions +0 -0
  17. {kinemotion-0.10.3 → kinemotion-0.10.5}/CLAUDE.md +0 -0
  18. {kinemotion-0.10.3 → kinemotion-0.10.5}/CODE_OF_CONDUCT.md +0 -0
  19. {kinemotion-0.10.3 → kinemotion-0.10.5}/CONTRIBUTING.md +0 -0
  20. {kinemotion-0.10.3 → kinemotion-0.10.5}/Dockerfile +0 -0
  21. {kinemotion-0.10.3 → kinemotion-0.10.5}/GEMINI.md +0 -0
  22. {kinemotion-0.10.3 → kinemotion-0.10.5}/LICENSE +0 -0
  23. {kinemotion-0.10.3 → kinemotion-0.10.5}/README.md +0 -0
  24. {kinemotion-0.10.3 → kinemotion-0.10.5}/SECURITY.md +0 -0
  25. {kinemotion-0.10.3 → kinemotion-0.10.5}/docs/BULK_PROCESSING.md +0 -0
  26. {kinemotion-0.10.3 → kinemotion-0.10.5}/docs/ERRORS_FINDINGS.md +0 -0
  27. {kinemotion-0.10.3 → kinemotion-0.10.5}/docs/FRAMERATE.md +0 -0
  28. {kinemotion-0.10.3 → kinemotion-0.10.5}/docs/IMU_METADATA_PRESERVATION.md +0 -0
  29. {kinemotion-0.10.3 → kinemotion-0.10.5}/docs/PARAMETERS.md +0 -0
  30. {kinemotion-0.10.3 → kinemotion-0.10.5}/docs/VALIDATION_PLAN.md +0 -0
  31. {kinemotion-0.10.3 → kinemotion-0.10.5}/examples/bulk/README.md +0 -0
  32. {kinemotion-0.10.3 → kinemotion-0.10.5}/examples/bulk/bulk_processing.py +0 -0
  33. {kinemotion-0.10.3 → kinemotion-0.10.5}/examples/bulk/simple_example.py +0 -0
  34. {kinemotion-0.10.3 → kinemotion-0.10.5}/examples/programmatic_usage.py +0 -0
  35. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/__init__.py +0 -0
  36. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/cli.py +0 -0
  37. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/core/__init__.py +0 -0
  38. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/core/auto_tuning.py +0 -0
  39. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/core/filtering.py +0 -0
  40. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/core/pose.py +0 -0
  41. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/core/smoothing.py +0 -0
  42. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/core/video_io.py +0 -0
  43. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/dropjump/__init__.py +0 -0
  44. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/dropjump/analysis.py +0 -0
  45. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/dropjump/cli.py +0 -0
  46. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/dropjump/debug_overlay.py +0 -0
  47. {kinemotion-0.10.3 → kinemotion-0.10.5}/src/kinemotion/py.typed +0 -0
  48. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/__init__.py +0 -0
  49. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_adaptive_threshold.py +0 -0
  50. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_api.py +0 -0
  51. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_aspect_ratio.py +0 -0
  52. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_com_estimation.py +0 -0
  53. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_contact_detection.py +0 -0
  54. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_filtering.py +0 -0
  55. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_kinematics.py +0 -0
  56. {kinemotion-0.10.3 → kinemotion-0.10.5}/tests/test_polyorder.py +0 -0
@@ -7,6 +7,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7
7
 
8
8
  <!-- version list -->
9
9
 
10
+ ## v0.10.5 (2025-11-03)
11
+
12
+ ### Bug Fixes
13
+
14
+ - **kinematics**: Reduce cognitive complexity in calculate_drop_jump_metrics
15
+ ([`d6a06f3`](https://github.com/feniix/kinemotion/commit/d6a06f3671eb370a971c73c98270668d5aefe9b1))
16
+
17
+
18
+ ## v0.10.4 (2025-11-03)
19
+
20
+ ### Bug Fixes
21
+
22
+ - **api**: Reduce cognitive complexity in process_video function
23
+ ([`d2e05cb`](https://github.com/feniix/kinemotion/commit/d2e05cb415067a1a1b081216a9474ccda1ae2567))
24
+
25
+
10
26
  ## v0.10.3 (2025-11-03)
11
27
 
12
28
  ### Bug Fixes
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.10.3
3
+ Version: 0.10.5
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
5
  Project-URL: Homepage, https://github.com/feniix/kinemotion
6
6
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "kinemotion"
3
- version = "0.10.3"
3
+ version = "0.10.5"
4
4
  description = "Video-based kinematic analysis for athletic performance"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.10,<3.13"
@@ -0,0 +1,604 @@
1
+ """Public API for programmatic use of kinemotion analysis."""
2
+
3
+ import time
4
+ from collections.abc import Callable
5
+ from concurrent.futures import ProcessPoolExecutor, as_completed
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+
9
+ import numpy as np
10
+
11
+ from .core.auto_tuning import (
12
+ AnalysisParameters,
13
+ QualityPreset,
14
+ VideoCharacteristics,
15
+ analyze_video_sample,
16
+ auto_tune_parameters,
17
+ )
18
+ from .core.pose import PoseTracker
19
+ from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
20
+ from .core.video_io import VideoProcessor
21
+ from .dropjump.analysis import (
22
+ ContactState,
23
+ compute_average_foot_position,
24
+ detect_ground_contact,
25
+ )
26
+ from .dropjump.debug_overlay import DebugOverlayRenderer
27
+ from .dropjump.kinematics import DropJumpMetrics, calculate_drop_jump_metrics
28
+
29
+
30
def _parse_quality_preset(quality: str) -> QualityPreset:
    """Convert a user-supplied quality string into a ``QualityPreset``.

    Args:
        quality: One of 'fast', 'balanced', or 'accurate' (case-insensitive).

    Returns:
        The matching QualityPreset member.

    Raises:
        ValueError: If the string does not name a known preset.
    """
    normalized = quality.lower()
    try:
        return QualityPreset(normalized)
    except ValueError as e:
        # Re-raise with a friendlier message, keeping the original as cause.
        raise ValueError(
            f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
        ) from e
48
+
49
+
50
def _determine_confidence_levels(
    quality_preset: QualityPreset,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> tuple[float, float]:
    """Resolve detection and tracking confidence levels.

    The quality preset supplies a baseline value, which either expert
    override replaces when explicitly provided.

    Args:
        quality_preset: Quality preset enum.
        detection_confidence: Optional expert override for detection confidence.
        tracking_confidence: Optional expert override for tracking confidence.

    Returns:
        Tuple of (detection_confidence, tracking_confidence).
    """
    # Baseline implied by the preset; 'balanced' (and anything else) keeps 0.5.
    preset_baselines = {
        QualityPreset.FAST: 0.3,
        QualityPreset.ACCURATE: 0.6,
    }
    baseline = preset_baselines.get(quality_preset, 0.5)

    # Expert overrides take precedence over the preset baseline.
    detection = baseline if detection_confidence is None else detection_confidence
    tracking = baseline if tracking_confidence is None else tracking_confidence
    return detection, tracking
86
+
87
+
88
+ def _apply_expert_overrides(
89
+ params: AnalysisParameters,
90
+ smoothing_window: int | None,
91
+ velocity_threshold: float | None,
92
+ min_contact_frames: int | None,
93
+ visibility_threshold: float | None,
94
+ ) -> AnalysisParameters:
95
+ """Apply expert parameter overrides to auto-tuned parameters.
96
+
97
+ Args:
98
+ params: Auto-tuned parameters object
99
+ smoothing_window: Optional override for smoothing window
100
+ velocity_threshold: Optional override for velocity threshold
101
+ min_contact_frames: Optional override for minimum contact frames
102
+ visibility_threshold: Optional override for visibility threshold
103
+
104
+ Returns:
105
+ Modified params object (mutated in place)
106
+ """
107
+ if smoothing_window is not None:
108
+ params.smoothing_window = smoothing_window
109
+ if velocity_threshold is not None:
110
+ params.velocity_threshold = velocity_threshold
111
+ if min_contact_frames is not None:
112
+ params.min_contact_frames = min_contact_frames
113
+ if visibility_threshold is not None:
114
+ params.visibility_threshold = visibility_threshold
115
+ return params
116
+
117
+
118
+ def _print_verbose_parameters(
119
+ video: VideoProcessor,
120
+ characteristics: VideoCharacteristics,
121
+ quality_preset: QualityPreset,
122
+ params: AnalysisParameters,
123
+ ) -> None:
124
+ """Print auto-tuned parameters in verbose mode.
125
+
126
+ Args:
127
+ video: Video processor with fps and dimensions
128
+ characteristics: Video analysis characteristics
129
+ quality_preset: Selected quality preset
130
+ params: Auto-tuned parameters
131
+ """
132
+ print("\n" + "=" * 60)
133
+ print("AUTO-TUNED PARAMETERS")
134
+ print("=" * 60)
135
+ print(f"Video FPS: {video.fps:.2f}")
136
+ print(
137
+ f"Tracking quality: {characteristics.tracking_quality} "
138
+ f"(avg visibility: {characteristics.avg_visibility:.2f})"
139
+ )
140
+ print(f"Quality preset: {quality_preset.value}")
141
+ print("\nSelected parameters:")
142
+ print(f" smoothing_window: {params.smoothing_window}")
143
+ print(f" polyorder: {params.polyorder}")
144
+ print(f" velocity_threshold: {params.velocity_threshold:.4f}")
145
+ print(f" min_contact_frames: {params.min_contact_frames}")
146
+ print(f" visibility_threshold: {params.visibility_threshold}")
147
+ print(f" detection_confidence: {params.detection_confidence}")
148
+ print(f" tracking_confidence: {params.tracking_confidence}")
149
+ print(f" outlier_rejection: {params.outlier_rejection}")
150
+ print(f" bilateral_filter: {params.bilateral_filter}")
151
+ print(f" use_curvature: {params.use_curvature}")
152
+ print("=" * 60 + "\n")
153
+
154
+
155
+ def _process_all_frames(
156
+ video: VideoProcessor, tracker: PoseTracker, verbose: bool
157
+ ) -> tuple[list, list]:
158
+ """Process all frames from video and extract pose landmarks.
159
+
160
+ Args:
161
+ video: Video processor to read frames from
162
+ tracker: Pose tracker for landmark detection
163
+ verbose: Print progress messages
164
+
165
+ Returns:
166
+ Tuple of (frames, landmarks_sequence)
167
+
168
+ Raises:
169
+ ValueError: If no frames could be processed
170
+ """
171
+ if verbose:
172
+ print("Tracking pose landmarks...")
173
+
174
+ landmarks_sequence = []
175
+ frames = []
176
+
177
+ while True:
178
+ frame = video.read_frame()
179
+ if frame is None:
180
+ break
181
+
182
+ frames.append(frame)
183
+ landmarks = tracker.process_frame(frame)
184
+ landmarks_sequence.append(landmarks)
185
+
186
+ tracker.close()
187
+
188
+ if not landmarks_sequence:
189
+ raise ValueError("No frames could be processed from video")
190
+
191
+ return frames, landmarks_sequence
192
+
193
+
194
def _apply_smoothing(
    landmarks_sequence: list, params: AnalysisParameters, verbose: bool
) -> list:
    """Smooth the landmark sequence using the auto-tuned settings.

    Routes to the advanced smoother when outlier rejection or bilateral
    filtering is enabled; otherwise uses the basic Savitzky-Golay path.

    Args:
        landmarks_sequence: Sequence of landmarks from all frames.
        params: Auto-tuned parameters containing smoothing settings.
        verbose: Print progress messages.

    Returns:
        Smoothed landmarks sequence.
    """
    needs_advanced = params.outlier_rejection or params.bilateral_filter

    if not needs_advanced:
        if verbose:
            print("Smoothing landmarks...")
        return smooth_landmarks(
            landmarks_sequence,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )

    if verbose:
        if params.outlier_rejection:
            print("Smoothing landmarks with outlier rejection...")
        if params.bilateral_filter:
            print("Using bilateral temporal filter...")
    return smooth_landmarks_advanced(
        landmarks_sequence,
        window_length=params.smoothing_window,
        polyorder=params.polyorder,
        use_outlier_rejection=params.outlier_rejection,
        use_bilateral=params.bilateral_filter,
    )
228
+
229
+
230
def _extract_vertical_positions(
    smoothed_landmarks: list,
) -> tuple[np.ndarray, np.ndarray]:
    """Extract vertical foot positions and visibilities per frame.

    Args:
        smoothed_landmarks: Smoothed landmark sequence.

    Returns:
        Tuple of (vertical_positions, visibilities) as numpy arrays.
    """
    foot_keys = ("left_ankle", "right_ankle", "left_heel", "right_heel")
    positions: list[float] = []
    visibilities: list[float] = []

    for frame_landmarks in smoothed_landmarks:
        if not frame_landmarks:
            # Missing detection: carry the last known position forward
            # (screen centre if nothing seen yet) and mark feet invisible.
            positions.append(positions[-1] if positions else 0.5)
            visibilities.append(0.0)
            continue

        _, foot_y = compute_average_foot_position(frame_landmarks)
        positions.append(foot_y)

        # Visibility is the mean over whichever foot landmarks are present;
        # index 2 of each landmark tuple holds its visibility score.
        vis = [frame_landmarks[k][2] for k in foot_keys if k in frame_landmarks]
        visibilities.append(float(np.mean(vis)) if vis else 0.0)

    return np.array(positions), np.array(visibilities)
260
+
261
+
262
+ def _generate_outputs(
263
+ metrics: DropJumpMetrics,
264
+ json_output: str | None,
265
+ output_video: str | None,
266
+ frames: list,
267
+ smoothed_landmarks: list,
268
+ contact_states: list[ContactState],
269
+ video: VideoProcessor,
270
+ verbose: bool,
271
+ ) -> None:
272
+ """Generate JSON and debug video outputs if requested.
273
+
274
+ Args:
275
+ metrics: Calculated drop jump metrics
276
+ json_output: Optional path for JSON output
277
+ output_video: Optional path for debug video
278
+ frames: List of video frames
279
+ smoothed_landmarks: Smoothed landmark sequence
280
+ contact_states: Ground contact state for each frame
281
+ video: Video processor with dimensions and fps
282
+ verbose: Print progress messages
283
+ """
284
+ # Save JSON if requested
285
+ if json_output:
286
+ import json
287
+
288
+ output_path = Path(json_output)
289
+ output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
290
+ if verbose:
291
+ print(f"Metrics written to: {json_output}")
292
+
293
+ # Generate debug video if requested
294
+ if output_video:
295
+ if verbose:
296
+ print(f"Generating debug video: {output_video}")
297
+
298
+ with DebugOverlayRenderer(
299
+ output_video,
300
+ video.width,
301
+ video.height,
302
+ video.display_width,
303
+ video.display_height,
304
+ video.fps,
305
+ ) as renderer:
306
+ for i, frame in enumerate(frames):
307
+ annotated = renderer.render_frame(
308
+ frame,
309
+ smoothed_landmarks[i],
310
+ contact_states[i],
311
+ i,
312
+ metrics,
313
+ use_com=False,
314
+ )
315
+ renderer.write_frame(annotated)
316
+
317
+ if verbose:
318
+ print(f"Debug video saved: {output_video}")
319
+
320
+
321
@dataclass
class VideoResult:
    """Result of processing a single video."""

    # Path to the source video this result describes.
    video_path: str
    # True when analysis completed; False when an error occurred.
    success: bool
    # Computed metrics; populated only on success.
    metrics: DropJumpMetrics | None = None
    # Human-readable error description; populated only on failure.
    error: str | None = None
    # Wall-clock processing duration in seconds.
    processing_time: float = 0.0
330
+
331
+
332
@dataclass
class VideoConfig:
    """Configuration for processing a single video.

    Mirrors the parameters of process_video(); None for any optional
    field means "use the auto-tuned value".
    """

    # Path to the input video file.
    video_path: str
    # Height of the drop box/platform in meters (e.g. 0.40 for 40 cm).
    drop_height: float
    # Analysis quality preset: 'fast', 'balanced', or 'accurate'.
    quality: str = "balanced"
    # Optional path for the annotated debug video.
    output_video: str | None = None
    # Optional path for the JSON metrics file.
    json_output: str | None = None
    # Optional manual drop start frame.
    drop_start_frame: int | None = None
    # Expert overrides for auto-tuned parameters (None = auto).
    smoothing_window: int | None = None
    velocity_threshold: float | None = None
    min_contact_frames: int | None = None
    visibility_threshold: float | None = None
    detection_confidence: float | None = None
    tracking_confidence: float | None = None
348
+
349
+
350
def process_video(
    video_path: str,
    drop_height: float,
    quality: str = "balanced",
    output_video: str | None = None,
    json_output: str | None = None,
    drop_start_frame: int | None = None,
    smoothing_window: int | None = None,
    velocity_threshold: float | None = None,
    min_contact_frames: int | None = None,
    visibility_threshold: float | None = None,
    detection_confidence: float | None = None,
    tracking_confidence: float | None = None,
    verbose: bool = False,
) -> DropJumpMetrics:
    """
    Process a single drop jump video and return metrics.

    Pipeline: pose tracking -> characteristic analysis / auto-tuning ->
    smoothing -> foot-position extraction -> ground-contact detection ->
    metric calculation -> optional JSON/debug-video outputs.

    Args:
        video_path: Path to the input video file
        drop_height: Height of drop box/platform in meters (e.g., 0.40 for 40cm)
        quality: Analysis quality preset ("fast", "balanced", or "accurate")
        output_video: Optional path for debug video output
        json_output: Optional path for JSON metrics output
        drop_start_frame: Optional manual drop start frame
        smoothing_window: Optional override for smoothing window
        velocity_threshold: Optional override for velocity threshold
        min_contact_frames: Optional override for minimum contact frames
        visibility_threshold: Optional override for visibility threshold
        detection_confidence: Optional override for pose detection confidence
        tracking_confidence: Optional override for pose tracking confidence
        verbose: Print processing details

    Returns:
        DropJumpMetrics object containing analysis results

    Raises:
        ValueError: If video cannot be processed or parameters are invalid
        FileNotFoundError: If video file does not exist
    """
    if not Path(video_path).exists():
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Convert quality string to enum (raises ValueError on unknown preset)
    quality_preset = _parse_quality_preset(quality)

    # Initialize video processor; context manager releases the capture handle
    with VideoProcessor(video_path) as video:
        if verbose:
            print(
                f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
                f"{video.frame_count} frames"
            )

        # Determine detection/tracking confidence levels
        # (preset baseline, overridden by expert values when given)
        detection_conf, tracking_conf = _determine_confidence_levels(
            quality_preset, detection_confidence, tracking_confidence
        )

        # Process all frames with pose tracking; the tracker is closed
        # inside the helper once all frames are consumed
        tracker = PoseTracker(
            min_detection_confidence=detection_conf,
            min_tracking_confidence=tracking_conf,
        )
        frames, landmarks_sequence = _process_all_frames(video, tracker, verbose)

        # Analyze video characteristics and auto-tune parameters
        # NOTE: must happen on the RAW (unsmoothed) landmark sequence
        characteristics = analyze_video_sample(
            landmarks_sequence, video.fps, video.frame_count
        )
        params = auto_tune_parameters(characteristics, quality_preset)

        # Apply expert overrides if provided (mutates params in place)
        params = _apply_expert_overrides(
            params,
            smoothing_window,
            velocity_threshold,
            min_contact_frames,
            visibility_threshold,
        )

        # Show selected parameters if verbose
        if verbose:
            _print_verbose_parameters(video, characteristics, quality_preset, params)

        # Apply smoothing with auto-tuned parameters
        smoothed_landmarks = _apply_smoothing(landmarks_sequence, params, verbose)

        # Extract vertical positions from feet (plus per-frame visibility)
        if verbose:
            print("Extracting foot positions...")
        vertical_positions, visibilities = _extract_vertical_positions(
            smoothed_landmarks
        )

        # Detect ground contact from vertical velocity of the feet
        contact_states = detect_ground_contact(
            vertical_positions,
            velocity_threshold=params.velocity_threshold,
            min_contact_frames=params.min_contact_frames,
            visibility_threshold=params.visibility_threshold,
            visibilities=visibilities,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )

        # Calculate metrics, calibrated by the known drop height
        if verbose:
            print("Calculating metrics...")
            print(
                f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)"
            )

        metrics = calculate_drop_jump_metrics(
            contact_states,
            vertical_positions,
            video.fps,
            drop_height_m=drop_height,
            drop_start_frame=drop_start_frame,
            velocity_threshold=params.velocity_threshold,
            smoothing_window=params.smoothing_window,
            polyorder=params.polyorder,
            use_curvature=params.use_curvature,
            kinematic_correction_factor=1.0,
        )

        # Generate outputs (JSON and debug video); no-op when both are None
        _generate_outputs(
            metrics,
            json_output,
            output_video,
            frames,
            smoothed_landmarks,
            contact_states,
            video,
            verbose,
        )

        if verbose:
            print("Analysis complete!")

        return metrics
492
+
493
+
494
def process_videos_bulk(
    configs: list[VideoConfig],
    max_workers: int = 4,
    progress_callback: Callable[[VideoResult], None] | None = None,
) -> list[VideoResult]:
    """
    Process multiple videos in parallel using ProcessPoolExecutor.

    Args:
        configs: List of VideoConfig objects specifying video paths and parameters
        max_workers: Maximum number of parallel workers (default: 4)
        progress_callback: Optional callback function called after each video completes.
            Receives VideoResult object.

    Returns:
        List of VideoResult objects, one per input video, in completion order

    Example:
        >>> configs = [
        ...     VideoConfig("video1.mp4", drop_height=0.40),
        ...     VideoConfig("video2.mp4", drop_height=0.30, quality="accurate"),
        ...     VideoConfig("video3.mp4", drop_height=0.50, output_video="debug3.mp4"),
        ... ]
        >>> results = process_videos_bulk(configs, max_workers=4)
        >>> for result in results:
        ...     if result.success:
        ...         print(f"{result.video_path}: {result.metrics.jump_height_m:.3f}m")
        ...     else:
        ...         print(f"{result.video_path}: FAILED - {result.error}")
    """
    results: list[VideoResult] = []

    # Video analysis is CPU-bound, so a process pool gives true parallelism.
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # Fan out one job per config; keep the mapping so failures can be
        # attributed back to their source video.
        pending = {
            executor.submit(_process_video_wrapper, cfg): cfg for cfg in configs
        }

        # Collect in completion order (not submission order).
        for future in as_completed(pending):
            cfg = pending[future]
            try:
                outcome: VideoResult = future.result()
            except Exception as exc:
                # Worker crashed outside the wrapper's own error handling.
                outcome = VideoResult(
                    video_path=cfg.video_path,
                    success=False,
                    error=f"Unexpected error: {str(exc)}",
                )

            results.append(outcome)

            if progress_callback:
                progress_callback(outcome)

    return results
556
+
557
+
558
def _process_video_wrapper(config: VideoConfig) -> VideoResult:
    """
    Run process_video() for one config, capturing success or failure.

    Must remain a top-level function so it stays picklable for the
    process pool.

    Args:
        config: VideoConfig object with processing parameters

    Returns:
        VideoResult object with metrics or error information
    """
    started = time.time()

    try:
        metrics = process_video(
            video_path=config.video_path,
            drop_height=config.drop_height,
            quality=config.quality,
            output_video=config.output_video,
            json_output=config.json_output,
            drop_start_frame=config.drop_start_frame,
            smoothing_window=config.smoothing_window,
            velocity_threshold=config.velocity_threshold,
            min_contact_frames=config.min_contact_frames,
            visibility_threshold=config.visibility_threshold,
            detection_confidence=config.detection_confidence,
            tracking_confidence=config.tracking_confidence,
            verbose=False,  # verbose output would interleave across workers
        )
    except Exception as e:
        # Convert any analysis failure into a structured failure result.
        return VideoResult(
            video_path=config.video_path,
            success=False,
            error=str(e),
            processing_time=time.time() - started,
        )

    return VideoResult(
        video_path=config.video_path,
        success=True,
        metrics=metrics,
        processing_time=time.time() - started,
    )