kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.

Files changed (48)
  1. kinemotion/__init__.py +31 -6
  2. kinemotion/api.py +39 -598
  3. kinemotion/cli.py +2 -0
  4. kinemotion/cmj/__init__.py +5 -0
  5. kinemotion/cmj/analysis.py +621 -0
  6. kinemotion/cmj/api.py +563 -0
  7. kinemotion/cmj/cli.py +324 -0
  8. kinemotion/cmj/debug_overlay.py +457 -0
  9. kinemotion/cmj/joint_angles.py +307 -0
  10. kinemotion/cmj/kinematics.py +360 -0
  11. kinemotion/cmj/metrics_validator.py +767 -0
  12. kinemotion/cmj/validation_bounds.py +341 -0
  13. kinemotion/core/__init__.py +28 -0
  14. kinemotion/core/auto_tuning.py +71 -37
  15. kinemotion/core/cli_utils.py +60 -0
  16. kinemotion/core/debug_overlay_utils.py +385 -0
  17. kinemotion/core/determinism.py +83 -0
  18. kinemotion/core/experimental.py +103 -0
  19. kinemotion/core/filtering.py +9 -6
  20. kinemotion/core/formatting.py +75 -0
  21. kinemotion/core/metadata.py +231 -0
  22. kinemotion/core/model_downloader.py +172 -0
  23. kinemotion/core/pipeline_utils.py +433 -0
  24. kinemotion/core/pose.py +298 -141
  25. kinemotion/core/pose_landmarks.py +67 -0
  26. kinemotion/core/quality.py +393 -0
  27. kinemotion/core/smoothing.py +250 -154
  28. kinemotion/core/timing.py +247 -0
  29. kinemotion/core/types.py +42 -0
  30. kinemotion/core/validation.py +201 -0
  31. kinemotion/core/video_io.py +135 -50
  32. kinemotion/dropjump/__init__.py +1 -1
  33. kinemotion/dropjump/analysis.py +367 -182
  34. kinemotion/dropjump/api.py +665 -0
  35. kinemotion/dropjump/cli.py +156 -466
  36. kinemotion/dropjump/debug_overlay.py +136 -206
  37. kinemotion/dropjump/kinematics.py +232 -255
  38. kinemotion/dropjump/metrics_validator.py +240 -0
  39. kinemotion/dropjump/validation_bounds.py +157 -0
  40. kinemotion/models/__init__.py +0 -0
  41. kinemotion/models/pose_landmarker_lite.task +0 -0
  42. kinemotion-0.67.0.dist-info/METADATA +726 -0
  43. kinemotion-0.67.0.dist-info/RECORD +47 -0
  44. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
  45. kinemotion-0.10.6.dist-info/METADATA +0 -561
  46. kinemotion-0.10.6.dist-info/RECORD +0 -20
  47. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
  48. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
kinemotion/dropjump/cli.py

@@ -1,36 +1,23 @@
  """Command-line interface for drop jump analysis."""

  import csv
- import glob
  import json
  import sys
  from dataclasses import dataclass
  from pathlib import Path
- from typing import Any

  import click
- import numpy as np

- from ..api import VideoConfig, VideoResult, process_videos_bulk
- from ..core.auto_tuning import (
-     AnalysisParameters as AutoTunedParams,
+ from ..core.cli_utils import (
+     collect_video_files,
+     generate_batch_output_paths,
  )
- from ..core.auto_tuning import (
-     QualityPreset,
-     VideoCharacteristics,
-     analyze_video_sample,
-     auto_tune_parameters,
+ from .api import (
+     DropJumpVideoConfig,
+     DropJumpVideoResult,
+     process_dropjump_video,
+     process_dropjump_videos_bulk,
  )
- from ..core.pose import PoseTracker
- from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
- from ..core.video_io import VideoProcessor
- from .analysis import (
-     ContactState,
-     compute_average_foot_position,
-     detect_ground_contact,
- )
- from .debug_overlay import DebugOverlayRenderer
- from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics


  @dataclass
@@ -60,15 +47,6 @@ class AnalysisParameters:
      type=click.Path(),
      help="Path for JSON metrics output (default: stdout)",
  )
- @click.option(
-     "--drop-height",
-     type=float,
-     required=True,
-     help=(
-         "Height of drop box/platform in meters (e.g., 0.40 for 40cm box) - "
-         "REQUIRED for accurate calibration"
-     ),
- )
  @click.option(
      "--quality",
      type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
@@ -158,11 +136,11 @@ class AnalysisParameters:
      default=None,
      help="[EXPERT] Override pose tracking confidence",
  )
- def dropjump_analyze(
+ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
+     # parameters for each option
      video_path: tuple[str, ...],
      output: str | None,
      json_output: str | None,
-     drop_height: float,
      quality: str,
      verbose: bool,
      batch: bool,
@@ -179,10 +157,12 @@ def dropjump_analyze(
      tracking_confidence: float | None,
  ) -> None:
      """
-     Analyze drop-jump video(s) to estimate ground contact time, flight time, and jump height.
+     Analyze drop-jump video(s) to estimate ground contact time, flight time,
+     and jump height.

-     Uses intelligent auto-tuning to select optimal parameters based on video characteristics.
-     Parameters are automatically adjusted for frame rate, tracking quality, and analysis preset.
+     Uses intelligent auto-tuning to select optimal parameters based on video
+     characteristics. Parameters are automatically adjusted for frame rate,
+     tracking quality, and analysis preset.

      VIDEO_PATH: Path(s) to video file(s). Supports glob patterns in batch mode
      (e.g., "videos/*.mp4").
@@ -191,28 +171,19 @@ def dropjump_analyze(

      \b
      # Single video
-     kinemotion dropjump-analyze video.mp4 --drop-height 0.40
+     kinemotion dropjump-analyze video.mp4

      \b
      # Batch mode with glob pattern
-     kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 --workers 4
+     kinemotion dropjump-analyze videos/*.mp4 --batch --workers 4

      \b
      # Batch with output directories
-     kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 \\
+     kinemotion dropjump-analyze videos/*.mp4 --batch \
          --json-output-dir results/ --csv-summary summary.csv
      """
      # Expand glob patterns and collect all video files
-     video_files: list[str] = []
-     for pattern in video_path:
-         expanded = glob.glob(pattern)
-         if expanded:
-             video_files.extend(expanded)
-         elif Path(pattern).exists():
-             # Direct path (not a glob pattern)
-             video_files.append(pattern)
-         else:
-             click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
+     video_files = collect_video_files(video_path)

      if not video_files:
          click.echo("Error: No video files found", err=True)
@@ -222,7 +193,7 @@ def dropjump_analyze(
      use_batch = batch or len(video_files) > 1

      # Group expert parameters
-     params = AnalysisParameters(
+     expert_params = AnalysisParameters(
          drop_start_frame=drop_start_frame,
          smoothing_window=smoothing_window,
          velocity_threshold=velocity_threshold,
@@ -235,13 +206,12 @@ def dropjump_analyze(
      if use_batch:
          _process_batch(
              video_files,
-             drop_height,
              quality,
              workers,
              output_dir,
              json_output_dir,
              csv_summary,
-             params,
+             expert_params,
          )
      else:
          # Single video mode (original behavior)
@@ -249,377 +219,74 @@ def dropjump_analyze(
              video_files[0],
              output,
              json_output,
-             drop_height,
              quality,
              verbose,
-             params,
+             expert_params,
          )


- def _determine_initial_confidence(
-     quality_preset: QualityPreset,
-     expert_params: AnalysisParameters,
- ) -> tuple[float, float]:
-     """Determine initial detection and tracking confidence levels.
-
-     Args:
-         quality_preset: Quality preset enum
-         expert_params: Expert parameter overrides
-
-     Returns:
-         Tuple of (detection_confidence, tracking_confidence)
-     """
-     initial_detection_conf = 0.5
-     initial_tracking_conf = 0.5
-
-     if quality_preset == QualityPreset.FAST:
-         initial_detection_conf = 0.3
-         initial_tracking_conf = 0.3
-     elif quality_preset == QualityPreset.ACCURATE:
-         initial_detection_conf = 0.6
-         initial_tracking_conf = 0.6
-
-     # Override with expert values if provided
-     if expert_params.detection_confidence is not None:
-         initial_detection_conf = expert_params.detection_confidence
-     if expert_params.tracking_confidence is not None:
-         initial_tracking_conf = expert_params.tracking_confidence
-
-     return initial_detection_conf, initial_tracking_conf
-
-
- def _track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
-     """Track pose landmarks in all video frames.
-
-     Args:
-         video: Video processor
-         tracker: Pose tracker
-
-     Returns:
-         Tuple of (frames, landmarks_sequence)
-     """
-     click.echo("Tracking pose landmarks...", err=True)
-     landmarks_sequence = []
-     frames = []
-
-     bar: Any
-     with click.progressbar(length=video.frame_count, label="Processing frames") as bar:
-         while True:
-             frame = video.read_frame()
-             if frame is None:
-                 break
-
-             frames.append(frame)
-             landmarks = tracker.process_frame(frame)
-             landmarks_sequence.append(landmarks)
-
-             bar.update(1)
-
-     tracker.close()
-     return frames, landmarks_sequence
-
-
- def _apply_expert_param_overrides(
-     params: AutoTunedParams, expert_params: AnalysisParameters
- ) -> AutoTunedParams:
-     """Apply expert parameter overrides to auto-tuned parameters.
-
-     Args:
-         params: Auto-tuned parameters
-         expert_params: Expert overrides
-
-     Returns:
-         Modified params object (mutated in place)
-     """
-     if expert_params.smoothing_window is not None:
-         params.smoothing_window = expert_params.smoothing_window
-     if expert_params.velocity_threshold is not None:
-         params.velocity_threshold = expert_params.velocity_threshold
-     if expert_params.min_contact_frames is not None:
-         params.min_contact_frames = expert_params.min_contact_frames
-     if expert_params.visibility_threshold is not None:
-         params.visibility_threshold = expert_params.visibility_threshold
-     return params
-
-
- def _print_auto_tuned_params(
-     video: VideoProcessor,
-     characteristics: VideoCharacteristics,
-     quality_preset: QualityPreset,
-     params: AutoTunedParams,
- ) -> None:
-     """Print auto-tuned parameters in verbose mode.
-
-     Args:
-         video: Video processor
-         characteristics: Video characteristics
-         quality_preset: Quality preset
-         params: Auto-tuned parameters
-     """
-     click.echo("\n" + "=" * 60, err=True)
-     click.echo("AUTO-TUNED PARAMETERS", err=True)
-     click.echo("=" * 60, err=True)
-     click.echo(f"Video FPS: {video.fps:.2f}", err=True)
-     click.echo(
-         f"Tracking quality: {characteristics.tracking_quality} "
-         f"(avg visibility: {characteristics.avg_visibility:.2f})",
-         err=True,
-     )
-     click.echo(f"Quality preset: {quality_preset.value}", err=True)
-     click.echo("\nSelected parameters:", err=True)
-     click.echo(f" smoothing_window: {params.smoothing_window}", err=True)
-     click.echo(f" polyorder: {params.polyorder}", err=True)
-     click.echo(f" velocity_threshold: {params.velocity_threshold:.4f}", err=True)
-     click.echo(f" min_contact_frames: {params.min_contact_frames}", err=True)
-     click.echo(f" visibility_threshold: {params.visibility_threshold}", err=True)
-     click.echo(f" detection_confidence: {params.detection_confidence}", err=True)
-     click.echo(f" tracking_confidence: {params.tracking_confidence}", err=True)
-     click.echo(f" outlier_rejection: {params.outlier_rejection}", err=True)
-     click.echo(f" bilateral_filter: {params.bilateral_filter}", err=True)
-     click.echo(f" use_curvature: {params.use_curvature}", err=True)
-     click.echo("=" * 60 + "\n", err=True)
-
-
- def _smooth_landmark_sequence(
-     landmarks_sequence: list, params: AutoTunedParams
- ) -> list:
-     """Apply smoothing to landmark sequence.
-
-     Args:
-         landmarks_sequence: Raw landmark sequence
-         params: Auto-tuned parameters
-
-     Returns:
-         Smoothed landmarks
-     """
-     if params.outlier_rejection or params.bilateral_filter:
-         if params.outlier_rejection:
-             click.echo("Smoothing landmarks with outlier rejection...", err=True)
-         if params.bilateral_filter:
-             click.echo(
-                 "Using bilateral temporal filter for edge-preserving smoothing...",
-                 err=True,
-             )
-         return smooth_landmarks_advanced(
-             landmarks_sequence,
-             window_length=params.smoothing_window,
-             polyorder=params.polyorder,
-             use_outlier_rejection=params.outlier_rejection,
-             use_bilateral=params.bilateral_filter,
-         )
-     else:
-         click.echo("Smoothing landmarks...", err=True)
-         return smooth_landmarks(
-             landmarks_sequence,
-             window_length=params.smoothing_window,
-             polyorder=params.polyorder,
-         )
-
-
- def _extract_positions_and_visibilities(
-     smoothed_landmarks: list,
- ) -> tuple[np.ndarray, np.ndarray]:
-     """Extract vertical positions and visibilities from landmarks.
-
-     Args:
-         smoothed_landmarks: Smoothed landmark sequence
-
-     Returns:
-         Tuple of (vertical_positions, visibilities)
-     """
-     click.echo("Extracting foot positions...", err=True)
-
-     position_list: list[float] = []
-     visibilities_list: list[float] = []
-
-     for frame_landmarks in smoothed_landmarks:
-         if frame_landmarks:
-             _, foot_y = compute_average_foot_position(frame_landmarks)
-             position_list.append(foot_y)
-
-             # Average visibility of foot landmarks
-             foot_vis = []
-             for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-                 if key in frame_landmarks:
-                     foot_vis.append(frame_landmarks[key][2])
-             visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
-         else:
-             position_list.append(position_list[-1] if position_list else 0.5)
-             visibilities_list.append(0.0)
-
-     return np.array(position_list), np.array(visibilities_list)
-
-
- def _create_debug_video(
-     output: str,
-     video: VideoProcessor,
-     frames: list,
-     smoothed_landmarks: list,
-     contact_states: list[ContactState],
-     metrics: DropJumpMetrics,
- ) -> None:
-     """Generate debug video with overlays.
-
-     Args:
-         output: Output video path
-         video: Video processor
-         frames: Video frames
-         smoothed_landmarks: Smoothed landmarks
-         contact_states: Contact states
-         metrics: Calculated metrics
-     """
-     click.echo(f"Generating debug video: {output}", err=True)
-     if video.display_width != video.width or video.display_height != video.height:
-         click.echo(f"Source video encoded: {video.width}x{video.height}", err=True)
-         click.echo(
-             f"Output dimensions: {video.display_width}x{video.display_height} "
-             f"(respecting display aspect ratio)",
-             err=True,
-         )
-     else:
-         click.echo(
-             f"Output dimensions: {video.width}x{video.height} "
-             f"(matching source video aspect ratio)",
-             err=True,
-         )
-
-     with DebugOverlayRenderer(
-         output,
-         video.width,
-         video.height,
-         video.display_width,
-         video.display_height,
-         video.fps,
-     ) as renderer:
-         render_bar: Any
-         with click.progressbar(
-             length=len(frames), label="Rendering frames"
-         ) as render_bar:
-             for i, frame in enumerate(frames):
-                 annotated = renderer.render_frame(
-                     frame,
-                     smoothed_landmarks[i],
-                     contact_states[i],
-                     i,
-                     metrics,
-                     use_com=False,
-                 )
-                 renderer.write_frame(annotated)
-                 render_bar.update(1)
-
-     click.echo(f"Debug video saved: {output}", err=True)
-
-
  def _process_single(
      video_path: str,
      output: str | None,
      json_output: str | None,
-     drop_height: float,
      quality: str,
      verbose: bool,
      expert_params: AnalysisParameters,
  ) -> None:
-     """Process a single video (original CLI behavior)."""
+     """Process a single video by calling the API."""
      click.echo(f"Analyzing video: {video_path}", err=True)

-     quality_preset = QualityPreset(quality.lower())
-
      try:
-         with VideoProcessor(video_path) as video:
-             click.echo(
-                 f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
-                 f"{video.frame_count} frames",
-                 err=True,
-             )
-
-             # Determine confidence levels
-             detection_conf, tracking_conf = _determine_initial_confidence(
-                 quality_preset, expert_params
-             )
-
-             # Track all frames
-             tracker = PoseTracker(
-                 min_detection_confidence=detection_conf,
-                 min_tracking_confidence=tracking_conf,
-             )
-             frames, landmarks_sequence = _track_all_frames(video, tracker)
-
-             if not landmarks_sequence:
-                 click.echo("Error: No frames processed", err=True)
-                 sys.exit(1)
-
-             # Auto-tune parameters
-             characteristics = analyze_video_sample(
-                 landmarks_sequence, video.fps, video.frame_count
-             )
-             params = auto_tune_parameters(characteristics, quality_preset)
-             params = _apply_expert_param_overrides(params, expert_params)
-
-             # Show parameters if verbose
-             if verbose:
-                 _print_auto_tuned_params(video, characteristics, quality_preset, params)
-
-             # Apply smoothing
-             smoothed_landmarks = _smooth_landmark_sequence(landmarks_sequence, params)
+         # Create AnalysisOverrides if any expert parameters are set
+         from .api import AnalysisOverrides

-             # Extract positions
-             vertical_positions, visibilities = _extract_positions_and_visibilities(
-                 smoothed_landmarks
-             )
-
-             # Detect ground contact
-             contact_states = detect_ground_contact(
-                 vertical_positions,
-                 velocity_threshold=params.velocity_threshold,
-                 min_contact_frames=params.min_contact_frames,
-                 visibility_threshold=params.visibility_threshold,
-                 visibilities=visibilities,
-                 window_length=params.smoothing_window,
-                 polyorder=params.polyorder,
-             )
-
-             # Calculate metrics
-             click.echo("Calculating metrics...", err=True)
-             click.echo(
-                 f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
-                 err=True,
-             )
-             metrics = calculate_drop_jump_metrics(
-                 contact_states,
-                 vertical_positions,
-                 video.fps,
-                 drop_height_m=drop_height,
-                 drop_start_frame=expert_params.drop_start_frame,
-                 velocity_threshold=params.velocity_threshold,
-                 smoothing_window=params.smoothing_window,
-                 polyorder=params.polyorder,
-                 use_curvature=params.use_curvature,
-                 kinematic_correction_factor=1.0,
+         overrides = None
+         if any(
+             [
+                 expert_params.smoothing_window is not None,
+                 expert_params.velocity_threshold is not None,
+                 expert_params.min_contact_frames is not None,
+                 expert_params.visibility_threshold is not None,
+             ]
+         ):
+             overrides = AnalysisOverrides(
+                 smoothing_window=expert_params.smoothing_window,
+                 velocity_threshold=expert_params.velocity_threshold,
+                 min_contact_frames=expert_params.min_contact_frames,
+                 visibility_threshold=expert_params.visibility_threshold,
             )

-         # Output metrics
-         metrics_json = json.dumps(metrics.to_dict(), indent=2)
-         if json_output:
-             Path(json_output).write_text(metrics_json)
-             click.echo(f"Metrics written to: {json_output}", err=True)
-         else:
-             click.echo(metrics_json)
+         # Call the API function (handles all processing logic)
+         metrics = process_dropjump_video(
+             video_path=video_path,
+             quality=quality,
+             output_video=output,
+             json_output=json_output,
+             drop_start_frame=expert_params.drop_start_frame,
+             overrides=overrides,
+             detection_confidence=expert_params.detection_confidence,
+             tracking_confidence=expert_params.tracking_confidence,
+             verbose=verbose,
+         )

-         # Generate debug video if requested
-         if output:
-             _create_debug_video(
-                 output, video, frames, smoothed_landmarks, contact_states, metrics
-             )
+         # Print formatted summary to stdout if no JSON output specified
+         if not json_output:
+             click.echo(json.dumps(metrics.to_dict(), indent=2))

-         click.echo("Analysis complete!", err=True)
+         click.echo("Analysis complete!", err=True)

      except Exception as e:
          click.echo(f"Error: {str(e)}", err=True)
+         if verbose:
+             import traceback
+
+             traceback.print_exc()
          sys.exit(1)


  def _setup_batch_output_dirs(
-     output_dir: str | None, json_output_dir: str | None
+     output_dir: str | None,
+     json_output_dir: str | None,
  ) -> None:
      """Create output directories for batch processing.

@@ -638,48 +305,55 @@ def _setup_batch_output_dirs(

  def _create_video_configs(
      video_files: list[str],
-     drop_height: float,
      quality: str,
      output_dir: str | None,
      json_output_dir: str | None,
      expert_params: AnalysisParameters,
- ) -> list[VideoConfig]:
+ ) -> list[DropJumpVideoConfig]:
      """Build configuration objects for each video.

      Args:
          video_files: List of video file paths
-         drop_height: Drop height in meters
          quality: Quality preset
          output_dir: Debug video output directory
          json_output_dir: JSON metrics output directory
          expert_params: Expert parameter overrides

      Returns:
-         List of VideoConfig objects
+         List of DropJumpVideoConfig objects
      """
-     configs: list[VideoConfig] = []
+     configs: list[DropJumpVideoConfig] = []
      for video_file in video_files:
-         video_name = Path(video_file).stem
+         debug_video, json_file = generate_batch_output_paths(
+             video_file, output_dir, json_output_dir
+         )

-         debug_video = None
-         if output_dir:
-             debug_video = str(Path(output_dir) / f"{video_name}_debug.mp4")
+         # Create AnalysisOverrides if any expert parameters are set
+         from .api import AnalysisOverrides

-         json_file = None
-         if json_output_dir:
-             json_file = str(Path(json_output_dir) / f"{video_name}.json")
+         overrides = None
+         if any(
+             [
+                 expert_params.smoothing_window is not None,
+                 expert_params.velocity_threshold is not None,
+                 expert_params.min_contact_frames is not None,
+                 expert_params.visibility_threshold is not None,
+             ]
+         ):
+             overrides = AnalysisOverrides(
+                 smoothing_window=expert_params.smoothing_window,
+                 velocity_threshold=expert_params.velocity_threshold,
+                 min_contact_frames=expert_params.min_contact_frames,
+                 visibility_threshold=expert_params.visibility_threshold,
+             )

-         config = VideoConfig(
+         config = DropJumpVideoConfig(
              video_path=video_file,
-             drop_height=drop_height,
              quality=quality,
              output_video=debug_video,
              json_output=json_file,
              drop_start_frame=expert_params.drop_start_frame,
-             smoothing_window=expert_params.smoothing_window,
-             velocity_threshold=expert_params.velocity_threshold,
-             min_contact_frames=expert_params.min_contact_frames,
-             visibility_threshold=expert_params.visibility_threshold,
+             overrides=overrides,
              detection_confidence=expert_params.detection_confidence,
              tracking_confidence=expert_params.tracking_confidence,
          )
@@ -688,7 +362,7 @@ def _create_video_configs(
      return configs


- def _compute_batch_statistics(results: list[VideoResult]) -> None:
+ def _compute_batch_statistics(results: list[DropJumpVideoResult]) -> None:
      """Compute and display batch processing statistics.

      Args:
@@ -708,16 +382,10 @@ def _compute_batch_statistics(results: list[VideoResult]) -> None:
      if successful:
          # Calculate average metrics
          with_gct = [
-             r
-             for r in successful
-             if r.metrics and r.metrics.ground_contact_time is not None
-         ]
-         with_flight = [
-             r for r in successful if r.metrics and r.metrics.flight_time is not None
-         ]
-         with_jump = [
-             r for r in successful if r.metrics and r.metrics.jump_height is not None
+             r for r in successful if r.metrics and r.metrics.ground_contact_time is not None
          ]
+         with_flight = [r for r in successful if r.metrics and r.metrics.flight_time is not None]
+         with_jump = [r for r in successful if r.metrics and r.metrics.jump_height is not None]

          if with_gct:
              avg_gct = sum(
747
415
  )
748
416
 
749
417
 
418
+ def _format_time_metric(value: float | None, multiplier: float = 1000.0) -> str:
419
+ """Format time metric for CSV output.
420
+
421
+ Args:
422
+ value: Time value in seconds
423
+ multiplier: Multiplier to convert to milliseconds (default: 1000.0)
424
+
425
+ Returns:
426
+ Formatted string or "N/A" if value is None
427
+ """
428
+ return f"{value * multiplier:.1f}" if value is not None else "N/A"
429
+
430
+
431
+ def _format_distance_metric(value: float | None) -> str:
432
+ """Format distance metric for CSV output.
433
+
434
+ Args:
435
+ value: Distance value in meters
436
+
437
+ Returns:
438
+ Formatted string or "N/A" if value is None
439
+ """
440
+ return f"{value:.3f}" if value is not None else "N/A"
441
+
442
+
443
+ def _create_csv_row_from_result(result: DropJumpVideoResult) -> list[str]:
444
+ """Create CSV row from video processing result.
445
+
446
+ Args:
447
+ result: Video processing result
448
+
449
+ Returns:
450
+ List of formatted values for CSV row
451
+ """
452
+ video_name = Path(result.video_path).name
453
+ processing_time = f"{result.processing_time:.2f}"
454
+
455
+ if result.success and result.metrics:
456
+ return [
457
+ video_name,
458
+ _format_time_metric(result.metrics.ground_contact_time),
459
+ _format_time_metric(result.metrics.flight_time),
460
+ _format_distance_metric(result.metrics.jump_height),
461
+ processing_time,
462
+ "Success",
463
+ ]
464
+ else:
465
+ return [
466
+ video_name,
467
+ "N/A",
468
+ "N/A",
469
+ "N/A",
470
+ processing_time,
471
+ f"Failed: {result.error}",
472
+ ]
473
+
474
+
750
475
  def _write_csv_summary(
751
- csv_summary: str | None, results: list[VideoResult], successful: list[VideoResult]
476
+ csv_summary: str | None,
477
+ results: list[DropJumpVideoResult],
478
+ successful: list[DropJumpVideoResult],
752
479
  ) -> None:
753
480
  """Write CSV summary of batch processing results.
754
481
 
@@ -780,47 +507,13 @@ def _write_csv_summary(

          # Data rows
          for result in results:
-             if result.success and result.metrics:
-                 writer.writerow(
-                     [
-                         Path(result.video_path).name,
-                         (
-                             f"{result.metrics.ground_contact_time * 1000:.1f}"
-                             if result.metrics.ground_contact_time
-                             else "N/A"
-                         ),
-                         (
-                             f"{result.metrics.flight_time * 1000:.1f}"
-                             if result.metrics.flight_time
-                             else "N/A"
-                         ),
-                         (
-                             f"{result.metrics.jump_height:.3f}"
-                             if result.metrics.jump_height
-                             else "N/A"
-                         ),
-                         f"{result.processing_time:.2f}",
-                         "Success",
-                     ]
-                 )
-             else:
-                 writer.writerow(
-                     [
-                         Path(result.video_path).name,
-                         "N/A",
-                         "N/A",
-                         "N/A",
-                         f"{result.processing_time:.2f}",
-                         f"Failed: {result.error}",
-                     ]
-                 )
+             writer.writerow(_create_csv_row_from_result(result))

      click.echo("CSV summary written successfully", err=True)


  def _process_batch(
      video_files: list[str],
-     drop_height: float,
      quality: str,
      workers: int,
      output_dir: str | None,
@@ -829,9 +522,7 @@ def _process_batch(
      expert_params: AnalysisParameters,
  ) -> None:
      """Process multiple videos in batch mode using parallel processing."""
-     click.echo(
-         f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True
-     )
+     click.echo(f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True)
      click.echo("=" * 70, err=True)

      # Setup output directories
@@ -839,20 +530,19 @@ def _process_batch(

      # Create video configurations
      configs = _create_video_configs(
-         video_files, drop_height, quality, output_dir, json_output_dir, expert_params
+         video_files, quality, output_dir, json_output_dir, expert_params
      )

      # Progress callback
      completed = 0

-     def show_progress(result: VideoResult) -> None:
+     def show_progress(result: DropJumpVideoResult) -> None:
          nonlocal completed
          completed += 1
          status = "✓" if result.success else "✗"
          video_name = Path(result.video_path).name
          click.echo(
-             f"[{completed}/{len(configs)}] {status} {video_name} "
-             f"({result.processing_time:.1f}s)",
+             f"[{completed}/{len(configs)}] {status} {video_name} ({result.processing_time:.1f}s)",
              err=True,
          )
          if not result.success:
@@ -860,7 +550,7 @@ def _process_batch(

      # Process all videos
      click.echo("\nProcessing videos...", err=True)
-     results = process_videos_bulk(
+     results = process_dropjump_videos_bulk(
          configs, max_workers=workers, progress_callback=show_progress
      )
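
Note: the refactor above replaces the CLI's inline processing pipeline with calls into the new kinemotion.dropjump.api module. Below is a minimal sketch of that call pattern, built only from the names and keyword arguments visible in this diff; exact signatures, defaults, and return types are assumptions and are not confirmed by the package itself.

    # Sketch only: identifiers and kwargs are taken from the diff above;
    # defaults, optionality, and return types are assumptions.
    from kinemotion.dropjump.api import (
        AnalysisOverrides,
        DropJumpVideoConfig,
        process_dropjump_video,
        process_dropjump_videos_bulk,
    )

    # Single video: the CLI's _process_single() now delegates to this call.
    metrics = process_dropjump_video(
        video_path="jump.mp4",          # hypothetical input file
        quality="balanced",
        output_video=None,              # optional debug-overlay video path
        json_output=None,               # optional JSON metrics path
        drop_start_frame=None,
        overrides=AnalysisOverrides(    # expert overrides; None fields fall back to auto-tuning
            smoothing_window=None,
            velocity_threshold=None,
            min_contact_frames=None,
            visibility_threshold=None,
        ),
        detection_confidence=None,
        tracking_confidence=None,
        verbose=False,
    )
    print(metrics.to_dict())            # to_dict() is what the CLI prints as JSON

    # Batch: one DropJumpVideoConfig per file, processed in parallel workers.
    configs = [
        DropJumpVideoConfig(
            video_path=path,            # hypothetical input files
            quality="balanced",
            output_video=None,
            json_output=None,
            drop_start_frame=None,
            overrides=None,
            detection_confidence=None,
            tracking_confidence=None,
        )
        for path in ["session1.mp4", "session2.mp4"]
    ]
    results = process_dropjump_videos_bulk(configs, max_workers=4)
    for result in results:
        if result.success and result.metrics:
            print(result.video_path, result.metrics.jump_height)
        else:
            print(result.video_path, "failed:", result.error)

Note also that, per the removed --drop-height option and the drop_height parameters deleted throughout this file, the 0.67.0 drop-jump pipeline no longer takes a drop-box height for calibration on this code path.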