kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. Click here for more details.

Files changed (48):
  1. kinemotion/__init__.py +31 -6
  2. kinemotion/api.py +39 -598
  3. kinemotion/cli.py +2 -0
  4. kinemotion/cmj/__init__.py +5 -0
  5. kinemotion/cmj/analysis.py +621 -0
  6. kinemotion/cmj/api.py +563 -0
  7. kinemotion/cmj/cli.py +324 -0
  8. kinemotion/cmj/debug_overlay.py +457 -0
  9. kinemotion/cmj/joint_angles.py +307 -0
  10. kinemotion/cmj/kinematics.py +360 -0
  11. kinemotion/cmj/metrics_validator.py +767 -0
  12. kinemotion/cmj/validation_bounds.py +341 -0
  13. kinemotion/core/__init__.py +28 -0
  14. kinemotion/core/auto_tuning.py +71 -37
  15. kinemotion/core/cli_utils.py +60 -0
  16. kinemotion/core/debug_overlay_utils.py +385 -0
  17. kinemotion/core/determinism.py +83 -0
  18. kinemotion/core/experimental.py +103 -0
  19. kinemotion/core/filtering.py +9 -6
  20. kinemotion/core/formatting.py +75 -0
  21. kinemotion/core/metadata.py +231 -0
  22. kinemotion/core/model_downloader.py +172 -0
  23. kinemotion/core/pipeline_utils.py +433 -0
  24. kinemotion/core/pose.py +298 -141
  25. kinemotion/core/pose_landmarks.py +67 -0
  26. kinemotion/core/quality.py +393 -0
  27. kinemotion/core/smoothing.py +250 -154
  28. kinemotion/core/timing.py +247 -0
  29. kinemotion/core/types.py +42 -0
  30. kinemotion/core/validation.py +201 -0
  31. kinemotion/core/video_io.py +135 -50
  32. kinemotion/dropjump/__init__.py +1 -1
  33. kinemotion/dropjump/analysis.py +367 -182
  34. kinemotion/dropjump/api.py +665 -0
  35. kinemotion/dropjump/cli.py +156 -466
  36. kinemotion/dropjump/debug_overlay.py +136 -206
  37. kinemotion/dropjump/kinematics.py +232 -255
  38. kinemotion/dropjump/metrics_validator.py +240 -0
  39. kinemotion/dropjump/validation_bounds.py +157 -0
  40. kinemotion/models/__init__.py +0 -0
  41. kinemotion/models/pose_landmarker_lite.task +0 -0
  42. kinemotion-0.67.0.dist-info/METADATA +726 -0
  43. kinemotion-0.67.0.dist-info/RECORD +47 -0
  44. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
  45. kinemotion-0.10.6.dist-info/METADATA +0 -561
  46. kinemotion-0.10.6.dist-info/RECORD +0 -20
  47. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
  48. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,665 @@
1
+ """Public API for drop jump video analysis."""
2
+
3
+ import json
4
+ import time
5
+ from collections.abc import Callable
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING
9
+
10
+ if TYPE_CHECKING:
11
+ from numpy.typing import NDArray
12
+
13
+ from ..core.auto_tuning import (
14
+ AnalysisParameters,
15
+ QualityPreset,
16
+ VideoCharacteristics,
17
+ analyze_video_sample,
18
+ auto_tune_parameters,
19
+ )
20
+ from ..core.filtering import reject_outliers
21
+ from ..core.metadata import (
22
+ AlgorithmConfig,
23
+ DetectionConfig,
24
+ DropDetectionConfig,
25
+ ProcessingInfo,
26
+ ResultMetadata,
27
+ SmoothingConfig,
28
+ VideoInfo,
29
+ create_timestamp,
30
+ get_kinemotion_version,
31
+ )
32
+ from ..core.pipeline_utils import (
33
+ apply_expert_overrides,
34
+ apply_smoothing,
35
+ convert_timer_to_stage_names,
36
+ determine_confidence_levels,
37
+ extract_vertical_positions,
38
+ parse_quality_preset,
39
+ print_verbose_parameters,
40
+ process_all_frames,
41
+ process_videos_bulk_generic,
42
+ )
43
+ from ..core.pose import PoseTracker
44
+ from ..core.quality import QualityAssessment, assess_jump_quality
45
+ from ..core.timing import NULL_TIMER, PerformanceTimer, Timer
46
+ from ..core.video_io import VideoProcessor
47
+ from .analysis import (
48
+ detect_ground_contact,
49
+ find_contact_phases,
50
+ )
51
+ from .debug_overlay import DebugOverlayRenderer
52
+ from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
53
+ from .metrics_validator import DropJumpMetricsValidator
54
+
55
+
56
@dataclass
class AnalysisOverrides:
    """Optional overrides for analysis parameters.

    Allows fine-tuning of specific analysis parameters beyond quality presets.
    If None, values will be determined by the quality preset.
    """

    # Window length (frames) used when smoothing landmark trajectories;
    # None -> value chosen by the quality-preset auto-tuner.
    smoothing_window: int | None = None
    # Velocity threshold used by ground-contact detection; None -> preset value.
    velocity_threshold: float | None = None
    # Minimum number of consecutive frames required to register ground contact.
    min_contact_frames: int | None = None
    # Minimum landmark visibility score for a frame to be trusted.
    visibility_threshold: float | None = None
68
+
69
+
70
@dataclass
class DropJumpVideoResult:
    """Result of processing a single drop jump video."""

    # Path of the processed video (echoed from the input config).
    video_path: str
    # True when analysis completed; False when an exception was caught.
    success: bool
    # Computed metrics on success; None on failure.
    metrics: DropJumpMetrics | None = None
    # Stringified exception message on failure; None on success.
    error: str | None = None
    # Wall-clock seconds spent on this video (0.0 if never measured).
    processing_time: float = 0.0
79
+
80
+
81
@dataclass
class DropJumpVideoConfig:
    """Configuration for processing a single drop jump video."""

    # Path to the input video file.
    video_path: str
    # Analysis quality preset name ("fast", "balanced", or "accurate").
    quality: str = "balanced"
    # Optional path for a debug-overlay video; None disables generation.
    output_video: str | None = None
    # Optional path for JSON metrics output; None disables writing.
    json_output: str | None = None
    # Optional manual drop-start frame; None -> auto-detected.
    drop_start_frame: int | None = None
    # Optional fine-grained parameter overrides beyond the preset.
    overrides: AnalysisOverrides | None = None
    # Optional pose-detection confidence override; None -> preset value.
    detection_confidence: float | None = None
    # Optional pose-tracking confidence override; None -> preset value.
    tracking_confidence: float | None = None
93
+
94
+
95
def _assess_dropjump_quality(
    vertical_positions: "NDArray",
    visibilities: "NDArray",
    contact_states: list,
    fps: float,
) -> tuple:
    """Assess tracking quality and detect contact phases.

    Returns:
        Tuple of (quality_result, outlier_mask, phases_detected, phase_count)
    """
    # Contact phases feed both the detected/count flags and the assessment.
    contact_phases = find_contact_phases(contact_states)
    n_phases = len(contact_phases)

    # Outlier mask over the vertical trajectory; no interpolation here since
    # the mask is only consumed by the quality assessment below.
    _, mask = reject_outliers(
        vertical_positions,
        use_ransac=True,
        use_median=True,
        interpolate=False,
    )

    assessment = assess_jump_quality(
        visibilities=visibilities,
        positions=vertical_positions,
        outlier_mask=mask,
        fps=fps,
        phases_detected=n_phases > 0,
        phase_count=n_phases,
    )

    return assessment, mask, n_phases > 0, n_phases
127
+
128
+
129
def _build_dropjump_metadata(
    video_path: str,
    video: "VideoProcessor",
    params: "AnalysisParameters",
    quality_result: QualityAssessment,
    drop_start_frame: int | None,
    metrics: DropJumpMetrics,
    processing_time: float,
    quality_preset: "QualityPreset",
    timer: Timer,
) -> ResultMetadata:
    """Assemble the complete result metadata for one analysis run."""
    # Prefer the caller-supplied drop frame; otherwise fall back to the frame
    # the metrics calculation auto-detected (which may itself be None).
    drop_frame = (
        drop_start_frame
        if drop_start_frame is not None
        else metrics.drop_start_frame
    )

    smoothing_cfg = SmoothingConfig(
        window_size=params.smoothing_window,
        polynomial_order=params.polyorder,
        use_bilateral_filter=params.bilateral_filter,
        use_outlier_rejection=params.outlier_rejection,
    )
    detection_cfg = DetectionConfig(
        velocity_threshold=params.velocity_threshold,
        min_contact_frames=params.min_contact_frames,
        visibility_threshold=params.visibility_threshold,
        use_curvature_refinement=params.use_curvature,
    )
    drop_cfg = DropDetectionConfig(
        auto_detect_drop_start=(drop_start_frame is None),
        detected_drop_frame=drop_frame,
        min_stationary_duration_s=0.5,
    )
    algorithm_config = AlgorithmConfig(
        detection_method="forward_search",
        tracking_method="mediapipe_pose",
        model_complexity=1,
        smoothing=smoothing_cfg,
        detection=detection_cfg,
        drop_detection=drop_cfg,
    )

    video_info = VideoInfo(
        source_path=video_path,
        fps=video.fps,
        width=video.width,
        height=video.height,
        duration_s=video.frame_count / video.fps,
        frame_count=video.frame_count,
        codec=video.codec,
    )

    processing_info = ProcessingInfo(
        version=get_kinemotion_version(),
        timestamp=create_timestamp(),
        quality_preset=quality_preset.value,
        processing_time_s=processing_time,
        timing_breakdown=convert_timer_to_stage_names(timer.get_metrics()),
    )

    return ResultMetadata(
        quality=quality_result,
        video=video_info,
        processing=processing_info,
        algorithm=algorithm_config,
    )
196
+
197
+
198
def _save_dropjump_json(
    json_output: str,
    metrics: "DropJumpMetrics",
    timer: "Timer",
    verbose: bool,
) -> None:
    """Serialize *metrics* to ``json_output`` as indented JSON."""
    with timer.measure("json_serialization"):
        Path(json_output).write_text(json.dumps(metrics.to_dict(), indent=2))

    if verbose:
        print(f"Metrics written to: {json_output}")
213
+
214
+
215
def _print_dropjump_summary(
    start_time: float,
    timer: Timer,
) -> None:
    """Print a per-stage timing breakdown followed by the total."""
    elapsed = time.time() - start_time
    breakdown = convert_timer_to_stage_names(timer.get_metrics())

    print("\n=== Timing Summary ===")
    for stage, duration in breakdown.items():
        share = (duration / elapsed) * 100
        print(f"{stage:.<40} {duration * 1000:>6.0f}ms ({share:>5.1f}%)")
    print(f"{('Total'):.>40} {elapsed * 1000:>6.0f}ms (100.0%)")
    print()
    print("Analysis complete!")
232
+
233
+
234
def _setup_pose_tracker(
    quality_preset: QualityPreset,
    detection_confidence: float | None,
    tracking_confidence: float | None,
    pose_tracker: "PoseTracker | None",
    timer: Timer,
) -> tuple["PoseTracker", bool]:
    """Return a pose tracker plus a flag saying whether we own (must close) it."""
    det_conf, trk_conf = determine_confidence_levels(
        quality_preset, detection_confidence, tracking_confidence
    )

    # A caller-provided tracker is reused and never closed by us.
    if pose_tracker is not None:
        return pose_tracker, False

    fresh_tracker = PoseTracker(
        min_detection_confidence=det_conf,
        min_tracking_confidence=trk_conf,
        timer=timer,
    )
    return fresh_tracker, True
258
+
259
+
260
def _process_frames_and_landmarks(
    video: "VideoProcessor",
    tracker: "PoseTracker",
    should_close_tracker: bool,
    verbose: bool,
    timer: Timer,
) -> tuple[list, list, list[int]]:
    """Run pose tracking over every frame; returns (frames, landmarks, indices)."""
    if verbose:
        print("Processing all frames with MediaPipe pose tracking...")

    # Delegate entirely; the helper already returns the expected triple.
    return process_all_frames(
        video, tracker, verbose, timer, close_tracker=should_close_tracker
    )
276
+
277
+
278
def _tune_and_smooth(
    landmarks_sequence: list,
    video_fps: float,
    frame_count: int,
    quality_preset: QualityPreset,
    overrides: AnalysisOverrides | None,
    timer: Timer,
    verbose: bool,
) -> tuple[list, AnalysisParameters, VideoCharacteristics]:
    """Tune parameters and apply smoothing to landmarks.

    Args:
        landmarks_sequence: Sequence of pose landmarks
        video_fps: Video frame rate
        frame_count: Total number of frames
        quality_preset: Quality preset for analysis
        overrides: Optional parameter overrides
        timer: Performance timer
        verbose: Verbose output flag

    Returns:
        Tuple of (smoothed_landmarks, params, characteristics)
    """
    with timer.measure("parameter_auto_tuning"):
        characteristics = analyze_video_sample(landmarks_sequence, video_fps, frame_count)
        params = auto_tune_parameters(characteristics, quality_preset)

        # apply_expert_overrides is always invoked (it also normalizes the
        # tuned parameters); a missing overrides object simply means no
        # individual value is overridden. This replaces the previous
        # duplicated if/else, whose else-branch passed all Nones explicitly.
        params = apply_expert_overrides(
            params,
            overrides.smoothing_window if overrides else None,
            overrides.velocity_threshold if overrides else None,
            overrides.min_contact_frames if overrides else None,
            overrides.visibility_threshold if overrides else None,
        )

    smoothed_landmarks = apply_smoothing(landmarks_sequence, params, verbose, timer)

    return smoothed_landmarks, params, characteristics
326
+
327
+
328
def _extract_positions_and_detect_contact(
    smoothed_landmarks: list,
    params: AnalysisParameters,
    timer: Timer,
    verbose: bool,
) -> tuple["NDArray", "NDArray", list]:
    """Extract vertical foot positions, then classify ground-contact frames."""
    if verbose:
        print("Extracting foot positions...")
    with timer.measure("vertical_position_extraction"):
        positions, vis_scores = extract_vertical_positions(smoothed_landmarks)

    if verbose:
        print("Detecting ground contact...")
    with timer.measure("ground_contact_detection"):
        contact = detect_ground_contact(
            positions,
            velocity_threshold=params.velocity_threshold,
            min_contact_frames=params.min_contact_frames,
            visibility_threshold=params.visibility_threshold,
            visibilities=vis_scores,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
            timer=timer,
        )

    return positions, vis_scores, contact
355
+
356
+
357
def _calculate_metrics_and_assess_quality(
    contact_states: list,
    vertical_positions: "NDArray",
    visibilities: "NDArray",
    video_fps: float,
    drop_start_frame: int | None,
    params: AnalysisParameters,
    timer: Timer,
    verbose: bool,
) -> tuple[DropJumpMetrics, QualityAssessment]:
    """Compute drop-jump metrics, then score the tracking quality."""
    if verbose:
        print("Calculating metrics...")
    with timer.measure("metrics_calculation"):
        jump_metrics = calculate_drop_jump_metrics(
            contact_states,
            vertical_positions,
            video_fps,
            drop_start_frame=drop_start_frame,
            velocity_threshold=params.velocity_threshold,
            smoothing_window=params.smoothing_window,
            polyorder=params.polyorder,
            use_curvature=params.use_curvature,
            timer=timer,
        )

    if verbose:
        print("Assessing tracking quality...")
    with timer.measure("quality_assessment"):
        # Only the assessment itself is needed here; the mask/phase extras
        # from the helper are discarded.
        assessment, _, _, _ = _assess_dropjump_quality(
            vertical_positions, visibilities, contact_states, video_fps
        )

    return jump_metrics, assessment
391
+
392
+
393
def _print_quality_warnings(quality_result: "QualityAssessment", verbose: bool) -> None:
    """Print any quality warnings when running verbosely; otherwise no-op."""
    if not (verbose and quality_result.warnings):
        return
    print("\n⚠️ Quality Warnings:")
    for warning in quality_result.warnings:
        print(f" - {warning}")
    print()
400
+
401
+
402
def _validate_metrics_and_print_results(
    metrics: DropJumpMetrics,
    timer: Timer,
    verbose: bool,
) -> None:
    """Run plausibility validation and attach the result to the metrics."""
    with timer.measure("metrics_validation"):
        outcome = DropJumpMetricsValidator().validate(metrics.to_dict())  # type: ignore[arg-type]
        metrics.validation_result = outcome

    if not (verbose and outcome.issues):
        return
    print("\n⚠️ Validation Results:")
    for issue in outcome.issues:
        print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
417
+
418
+
419
def _generate_debug_video(
    output_video: str,
    frames: list,
    frame_indices: list[int],
    video_fps: float,
    smoothed_landmarks: list,
    contact_states: list,
    metrics: DropJumpMetrics,
    timer: Timer | None,
    verbose: bool,
) -> None:
    """Render an annotated debug video of the analysis to ``output_video``.

    Each retained frame is drawn with its smoothed landmarks, contact state
    and the computed metrics via DebugOverlayRenderer. No-op when ``frames``
    is empty.
    """
    if verbose:
        print(f"Generating debug video: {output_video}")

    # Nothing to render — bail out before creating any writer.
    if not frames:
        return

    timer = timer or NULL_TIMER
    # Frame dimensions taken from the first frame (height, width order).
    debug_h, debug_w = frames[0].shape[:2]

    # NOTE(review): video_fps / (video_fps / 30.0) is mathematically just
    # 30.0 — presumably the intent was to cap output at ~30 fps; confirm.
    if video_fps > 30:
        debug_fps = video_fps / (video_fps / 30.0)
    else:
        debug_fps = video_fps

    # When fewer frames than landmarks were kept, the frames were subsampled
    # upstream; recompute the effective fps from the integer subsampling step.
    # This overrides the value chosen above.
    if len(frames) < len(smoothed_landmarks):
        step = max(1, int(video_fps / 30.0))
        debug_fps = video_fps / step

    def _render_frames(renderer: DebugOverlayRenderer) -> None:
        # frame_indices maps each retained frame back to its original index
        # into the (full-length) landmark/contact sequences.
        for frame, idx in zip(frames, frame_indices, strict=True):
            annotated = renderer.render_frame(
                frame,
                smoothed_landmarks[idx],
                contact_states[idx],
                idx,
                metrics,
                use_com=False,
            )
            renderer.write_frame(annotated)

    # Source and output dimensions are identical (no rescaling).
    renderer_context = DebugOverlayRenderer(
        output_video,
        debug_w,
        debug_h,
        debug_w,
        debug_h,
        debug_fps,
        timer=timer,
    )

    with timer.measure("debug_video_generation"):
        with renderer_context as renderer:
            _render_frames(renderer)

    if verbose:
        print(f"Debug video saved: {output_video}")
477
+
478
+
479
def process_dropjump_video(
    video_path: str,
    quality: str = "balanced",
    output_video: str | None = None,
    json_output: str | None = None,
    drop_start_frame: int | None = None,
    overrides: AnalysisOverrides | None = None,
    detection_confidence: float | None = None,
    tracking_confidence: float | None = None,
    verbose: bool = False,
    timer: Timer | None = None,
    pose_tracker: "PoseTracker | None" = None,
) -> DropJumpMetrics:
    """
    Process a single drop jump video and return metrics.

    Jump height is calculated from flight time using kinematic formula (h = g*t²/8).

    Pipeline: pose tracking → parameter auto-tuning + smoothing → position
    extraction + contact detection → metrics + quality assessment → optional
    debug video → validation → metadata → optional JSON output.

    Args:
        video_path: Path to the input video file
        quality: Analysis quality preset ("fast", "balanced", or "accurate")
        output_video: Optional path for debug video output
        json_output: Optional path for JSON metrics output
        drop_start_frame: Optional manual drop start frame
        overrides: Optional AnalysisOverrides for fine-tuning parameters
        detection_confidence: Optional override for pose detection confidence
        tracking_confidence: Optional override for pose tracking confidence
        verbose: Print processing details
        timer: Optional Timer for measuring operations
        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)

    Returns:
        DropJumpMetrics object containing analysis results

    Raises:
        ValueError: If video cannot be processed or parameters are invalid
        FileNotFoundError: If video file does not exist
    """
    if not Path(video_path).exists():
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Function-local import — presumably to avoid an import cycle or defer
    # a heavy module load; confirm before hoisting to module level.
    from ..core.determinism import set_deterministic_mode

    # Fixed seed so repeated runs on the same video are reproducible.
    set_deterministic_mode(seed=42)

    start_time = time.time()
    # Callers may pass a shared Timer; otherwise use a private one.
    timer = timer or PerformanceTimer()
    quality_preset = parse_quality_preset(quality)

    # NOTE(review): this "video_initialization" timer context appears to wrap
    # the entire pipeline below, not just opening the video — confirm the
    # intended measurement scope.
    with timer.measure("video_initialization"):
        with VideoProcessor(video_path, timer=timer) as video:
            # Stage 1: pose tracker setup (reuse caller's tracker if given).
            tracker, should_close_tracker = _setup_pose_tracker(
                quality_preset,
                detection_confidence,
                tracking_confidence,
                pose_tracker,
                timer,
            )

            # Stage 2: run MediaPipe over every frame.
            frames, landmarks_sequence, frame_indices = _process_frames_and_landmarks(
                video, tracker, should_close_tracker, verbose, timer
            )

            # Stage 3: auto-tune parameters and smooth the landmark series.
            smoothed_landmarks, params, characteristics = _tune_and_smooth(
                landmarks_sequence,
                video.fps,
                video.frame_count,
                quality_preset,
                overrides,
                timer,
                verbose,
            )

            if verbose:
                print_verbose_parameters(video, characteristics, quality_preset, params)

            # Stage 4: vertical positions + ground-contact classification.
            vertical_positions, visibilities, contact_states = (
                _extract_positions_and_detect_contact(smoothed_landmarks, params, timer, verbose)
            )

            # Stage 5: jump metrics and tracking-quality assessment.
            metrics, quality_result = _calculate_metrics_and_assess_quality(
                contact_states,
                vertical_positions,
                visibilities,
                video.fps,
                drop_start_frame,
                params,
                timer,
                verbose,
            )

            _print_quality_warnings(quality_result, verbose)

            # Stage 6 (optional): annotated debug video.
            if output_video:
                _generate_debug_video(
                    output_video,
                    frames,
                    frame_indices,
                    video.fps,
                    smoothed_landmarks,
                    contact_states,
                    metrics,
                    timer,
                    verbose,
                )

            # Stage 7: plausibility validation, attached to the metrics.
            _validate_metrics_and_print_results(metrics, timer, verbose)

            # Stage 8: assemble provenance/metadata for the result.
            processing_time = time.time() - start_time
            result_metadata = _build_dropjump_metadata(
                video_path,
                video,
                params,
                quality_result,
                drop_start_frame,
                metrics,
                processing_time,
                quality_preset,
                timer,
            )
            metrics.result_metadata = result_metadata

            # Stage 9 (optional): persist metrics as JSON.
            if json_output:
                _save_dropjump_json(json_output, metrics, timer, verbose)

            if verbose:
                _print_dropjump_summary(start_time, timer)

            return metrics
608
+
609
+
610
def process_dropjump_videos_bulk(
    configs: list[DropJumpVideoConfig],
    max_workers: int = 4,
    progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
) -> list[DropJumpVideoResult]:
    """
    Process multiple drop jump videos in parallel.
    """

    def _failure(video_path: str, error_msg: str) -> DropJumpVideoResult:
        # Factory used by the generic bulk runner to report per-video errors.
        return DropJumpVideoResult(video_path=video_path, success=False, error=error_msg)

    return process_videos_bulk_generic(
        configs,
        _process_dropjump_video_wrapper,
        _failure,
        max_workers,
        progress_callback,
    )
629
+
630
+
631
def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
    """Run one config through the pipeline, capturing failures in the result."""
    begun = time.time()

    try:
        computed = process_dropjump_video(
            video_path=config.video_path,
            quality=config.quality,
            output_video=config.output_video,
            json_output=config.json_output,
            drop_start_frame=config.drop_start_frame,
            overrides=config.overrides,
            detection_confidence=config.detection_confidence,
            tracking_confidence=config.tracking_confidence,
            verbose=False,
        )
    except Exception as exc:
        # Worker functions must never raise — surface the error in the result
        # so the bulk runner can keep processing the remaining videos.
        return DropJumpVideoResult(
            video_path=config.video_path,
            success=False,
            error=str(exc),
            processing_time=time.time() - begun,
        )

    return DropJumpVideoResult(
        video_path=config.video_path,
        success=True,
        metrics=computed,
        processing_time=time.time() - begun,
    )