kinemotion 0.10.2__py3-none-any.whl → 0.10.12__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to its public registry, and is provided for informational purposes only.

Note: this version of kinemotion has been flagged as potentially problematic.

@@ -4,6 +4,7 @@ import csv
  import glob
  import json
  import sys
+ from dataclasses import dataclass
  from pathlib import Path
  from typing import Any
 
@@ -11,8 +12,12 @@ import click
  import numpy as np
 
  from ..api import VideoConfig, VideoResult, process_videos_bulk
+ from ..core.auto_tuning import (
+ AnalysisParameters as AutoTunedParams,
+ )
  from ..core.auto_tuning import (
  QualityPreset,
+ VideoCharacteristics,
  analyze_video_sample,
  auto_tune_parameters,
  )
@@ -20,11 +25,25 @@ from ..core.pose import PoseTracker
  from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
  from ..core.video_io import VideoProcessor
  from .analysis import (
+ ContactState,
  compute_average_foot_position,
  detect_ground_contact,
  )
  from .debug_overlay import DebugOverlayRenderer
- from .kinematics import calculate_drop_jump_metrics
+ from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
+
+
+ @dataclass
+ class AnalysisParameters:
+ """Expert parameters for analysis customization."""
+
+ drop_start_frame: int | None = None
+ smoothing_window: int | None = None
+ velocity_threshold: float | None = None
+ min_contact_frames: int | None = None
+ visibility_threshold: float | None = None
+ detection_confidence: float | None = None
+ tracking_confidence: float | None = None
 
 
  @click.command(name="dropjump-analyze")
@@ -139,7 +158,7 @@ from .kinematics import calculate_drop_jump_metrics
  default=None,
  help="[EXPERT] Override pose tracking confidence",
  )
- def dropjump_analyze(
+ def dropjump_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for each option
  video_path: tuple[str, ...],
  output: str | None,
  json_output: str | None,
@@ -202,6 +221,17 @@ def dropjump_analyze(
  # Determine if batch mode should be used
  use_batch = batch or len(video_files) > 1
 
+ # Group expert parameters
+ expert_params = AnalysisParameters(
+ drop_start_frame=drop_start_frame,
+ smoothing_window=smoothing_window,
+ velocity_threshold=velocity_threshold,
+ min_contact_frames=min_contact_frames,
+ visibility_threshold=visibility_threshold,
+ detection_confidence=detection_confidence,
+ tracking_confidence=tracking_confidence,
+ )
+
  if use_batch:
  _process_batch(
  video_files,
@@ -211,13 +241,7 @@ def dropjump_analyze(
  output_dir,
  json_output_dir,
  csv_summary,
- drop_start_frame,
- smoothing_window,
- velocity_threshold,
- min_contact_frames,
- visibility_threshold,
- detection_confidence,
- tracking_confidence,
+ expert_params,
  )
  else:
  # Single video mode (original behavior)
@@ -228,14 +252,262 @@ def dropjump_analyze(
  drop_height,
  quality,
  verbose,
- drop_start_frame,
- smoothing_window,
- velocity_threshold,
- min_contact_frames,
- visibility_threshold,
- detection_confidence,
- tracking_confidence,
+ expert_params,
+ )
+
+
+ def _determine_initial_confidence(
+ quality_preset: QualityPreset,
+ expert_params: AnalysisParameters,
+ ) -> tuple[float, float]:
+ """Determine initial detection and tracking confidence levels.
+
+ Args:
+ quality_preset: Quality preset enum
+ expert_params: Expert parameter overrides
+
+ Returns:
+ Tuple of (detection_confidence, tracking_confidence)
+ """
+ initial_detection_conf = 0.5
+ initial_tracking_conf = 0.5
+
+ if quality_preset == QualityPreset.FAST:
+ initial_detection_conf = 0.3
+ initial_tracking_conf = 0.3
+ elif quality_preset == QualityPreset.ACCURATE:
+ initial_detection_conf = 0.6
+ initial_tracking_conf = 0.6
+
+ # Override with expert values if provided
+ if expert_params.detection_confidence is not None:
+ initial_detection_conf = expert_params.detection_confidence
+ if expert_params.tracking_confidence is not None:
+ initial_tracking_conf = expert_params.tracking_confidence
+
+ return initial_detection_conf, initial_tracking_conf
+
+
+ def _track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
+ """Track pose landmarks in all video frames.
+
+ Args:
+ video: Video processor
+ tracker: Pose tracker
+
+ Returns:
+ Tuple of (frames, landmarks_sequence)
+ """
+ click.echo("Tracking pose landmarks...", err=True)
+ landmarks_sequence = []
+ frames = []
+
+ bar: Any
+ with click.progressbar(length=video.frame_count, label="Processing frames") as bar:
+ while True:
+ frame = video.read_frame()
+ if frame is None:
+ break
+
+ frames.append(frame)
+ landmarks = tracker.process_frame(frame)
+ landmarks_sequence.append(landmarks)
+
+ bar.update(1)
+
+ tracker.close()
+ return frames, landmarks_sequence
+
+
+ def _apply_expert_param_overrides(
+ params: AutoTunedParams, expert_params: AnalysisParameters
+ ) -> AutoTunedParams:
+ """Apply expert parameter overrides to auto-tuned parameters.
+
+ Args:
+ params: Auto-tuned parameters
+ expert_params: Expert overrides
+
+ Returns:
+ Modified params object (mutated in place)
+ """
+ if expert_params.smoothing_window is not None:
+ params.smoothing_window = expert_params.smoothing_window
+ if expert_params.velocity_threshold is not None:
+ params.velocity_threshold = expert_params.velocity_threshold
+ if expert_params.min_contact_frames is not None:
+ params.min_contact_frames = expert_params.min_contact_frames
+ if expert_params.visibility_threshold is not None:
+ params.visibility_threshold = expert_params.visibility_threshold
+ return params
+
+
+ def _print_auto_tuned_params(
+ video: VideoProcessor,
+ characteristics: VideoCharacteristics,
+ quality_preset: QualityPreset,
+ params: AutoTunedParams,
+ ) -> None:
+ """Print auto-tuned parameters in verbose mode.
+
+ Args:
+ video: Video processor
+ characteristics: Video characteristics
+ quality_preset: Quality preset
+ params: Auto-tuned parameters
+ """
+ click.echo("\n" + "=" * 60, err=True)
+ click.echo("AUTO-TUNED PARAMETERS", err=True)
+ click.echo("=" * 60, err=True)
+ click.echo(f"Video FPS: {video.fps:.2f}", err=True)
+ click.echo(
+ f"Tracking quality: {characteristics.tracking_quality} "
+ f"(avg visibility: {characteristics.avg_visibility:.2f})",
+ err=True,
+ )
+ click.echo(f"Quality preset: {quality_preset.value}", err=True)
+ click.echo("\nSelected parameters:", err=True)
+ click.echo(f" smoothing_window: {params.smoothing_window}", err=True)
+ click.echo(f" polyorder: {params.polyorder}", err=True)
+ click.echo(f" velocity_threshold: {params.velocity_threshold:.4f}", err=True)
+ click.echo(f" min_contact_frames: {params.min_contact_frames}", err=True)
+ click.echo(f" visibility_threshold: {params.visibility_threshold}", err=True)
+ click.echo(f" detection_confidence: {params.detection_confidence}", err=True)
+ click.echo(f" tracking_confidence: {params.tracking_confidence}", err=True)
+ click.echo(f" outlier_rejection: {params.outlier_rejection}", err=True)
+ click.echo(f" bilateral_filter: {params.bilateral_filter}", err=True)
+ click.echo(f" use_curvature: {params.use_curvature}", err=True)
+ click.echo("=" * 60 + "\n", err=True)
+
+
+ def _smooth_landmark_sequence(
+ landmarks_sequence: list, params: AutoTunedParams
+ ) -> list:
+ """Apply smoothing to landmark sequence.
+
+ Args:
+ landmarks_sequence: Raw landmark sequence
+ params: Auto-tuned parameters
+
+ Returns:
+ Smoothed landmarks
+ """
+ if params.outlier_rejection or params.bilateral_filter:
+ if params.outlier_rejection:
+ click.echo("Smoothing landmarks with outlier rejection...", err=True)
+ if params.bilateral_filter:
+ click.echo(
+ "Using bilateral temporal filter for edge-preserving smoothing...",
+ err=True,
+ )
+ return smooth_landmarks_advanced(
+ landmarks_sequence,
+ window_length=params.smoothing_window,
+ polyorder=params.polyorder,
+ use_outlier_rejection=params.outlier_rejection,
+ use_bilateral=params.bilateral_filter,
+ )
+ else:
+ click.echo("Smoothing landmarks...", err=True)
+ return smooth_landmarks(
+ landmarks_sequence,
+ window_length=params.smoothing_window,
+ polyorder=params.polyorder,
+ )
+
+
+ def _extract_positions_and_visibilities(
+ smoothed_landmarks: list,
+ ) -> tuple[np.ndarray, np.ndarray]:
+ """Extract vertical positions and visibilities from landmarks.
+
+ Args:
+ smoothed_landmarks: Smoothed landmark sequence
+
+ Returns:
+ Tuple of (vertical_positions, visibilities)
+ """
+ click.echo("Extracting foot positions...", err=True)
+
+ position_list: list[float] = []
+ visibilities_list: list[float] = []
+
+ for frame_landmarks in smoothed_landmarks:
+ if frame_landmarks:
+ _, foot_y = compute_average_foot_position(frame_landmarks)
+ position_list.append(foot_y)
+
+ # Average visibility of foot landmarks
+ foot_vis = []
+ for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
+ if key in frame_landmarks:
+ foot_vis.append(frame_landmarks[key][2])
+ visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
+ else:
+ position_list.append(position_list[-1] if position_list else 0.5)
+ visibilities_list.append(0.0)
+
+ return np.array(position_list), np.array(visibilities_list)
+
+
+ def _create_debug_video(
+ output: str,
+ video: VideoProcessor,
+ frames: list,
+ smoothed_landmarks: list,
+ contact_states: list[ContactState],
+ metrics: DropJumpMetrics,
+ ) -> None:
+ """Generate debug video with overlays.
+
+ Args:
+ output: Output video path
+ video: Video processor
+ frames: Video frames
+ smoothed_landmarks: Smoothed landmarks
+ contact_states: Contact states
+ metrics: Calculated metrics
+ """
+ click.echo(f"Generating debug video: {output}", err=True)
+ if video.display_width != video.width or video.display_height != video.height:
+ click.echo(f"Source video encoded: {video.width}x{video.height}", err=True)
+ click.echo(
+ f"Output dimensions: {video.display_width}x{video.display_height} "
+ f"(respecting display aspect ratio)",
+ err=True,
  )
+ else:
+ click.echo(
+ f"Output dimensions: {video.width}x{video.height} "
+ f"(matching source video aspect ratio)",
+ err=True,
+ )
+
+ with DebugOverlayRenderer(
+ output,
+ video.width,
+ video.height,
+ video.display_width,
+ video.display_height,
+ video.fps,
+ ) as renderer:
+ render_bar: Any
+ with click.progressbar(
+ length=len(frames), label="Rendering frames"
+ ) as render_bar:
+ for i, frame in enumerate(frames):
+ annotated = renderer.render_frame(
+ frame,
+ smoothed_landmarks[i],
+ contact_states[i],
+ i,
+ metrics,
+ use_com=False,
+ )
+ renderer.write_frame(annotated)
+ render_bar.update(1)
+
+ click.echo(f"Debug video saved: {output}", err=True)
 
 
  def _process_single(
@@ -245,22 +517,14 @@ def _process_single(
  drop_height: float,
  quality: str,
  verbose: bool,
- drop_start_frame: int | None,
- smoothing_window: int | None,
- velocity_threshold: float | None,
- min_contact_frames: int | None,
- visibility_threshold: float | None,
- detection_confidence: float | None,
- tracking_confidence: float | None,
+ expert_params: AnalysisParameters,
  ) -> None:
  """Process a single video (original CLI behavior)."""
  click.echo(f"Analyzing video: {video_path}", err=True)
 
- # Convert quality string to enum
  quality_preset = QualityPreset(quality.lower())
 
  try:
- # Initialize video processor
  with VideoProcessor(video_path) as video:
  click.echo(
  f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
@@ -268,180 +532,42 @@ def _process_single(
  err=True,
  )
 
- # ================================================================
- # STEP 1: Auto-tune parameters based on video characteristics
- # ================================================================
-
- # Analyze video characteristics from a sample to determine optimal parameters
- # We'll use detection/tracking confidence from quality preset for initial tracking
- initial_detection_conf = 0.5
- initial_tracking_conf = 0.5
-
- if quality_preset == QualityPreset.FAST:
- initial_detection_conf = 0.3
- initial_tracking_conf = 0.3
- elif quality_preset == QualityPreset.ACCURATE:
- initial_detection_conf = 0.6
- initial_tracking_conf = 0.6
-
- # Override with expert values if provided
- if detection_confidence is not None:
- initial_detection_conf = detection_confidence
- if tracking_confidence is not None:
- initial_tracking_conf = tracking_confidence
-
- # Initialize pose tracker
- tracker = PoseTracker(
- min_detection_confidence=initial_detection_conf,
- min_tracking_confidence=initial_tracking_conf,
+ # Determine confidence levels
+ detection_conf, tracking_conf = _determine_initial_confidence(
+ quality_preset, expert_params
  )
 
- # Process all frames
- click.echo("Tracking pose landmarks...", err=True)
- landmarks_sequence = []
- frames = []
-
- bar: Any
- with click.progressbar(
- length=video.frame_count, label="Processing frames"
- ) as bar:
- while True:
- frame = video.read_frame()
- if frame is None:
- break
-
- frames.append(frame)
- landmarks = tracker.process_frame(frame)
- landmarks_sequence.append(landmarks)
-
- bar.update(1)
-
- tracker.close()
+ # Track all frames
+ tracker = PoseTracker(
+ min_detection_confidence=detection_conf,
+ min_tracking_confidence=tracking_conf,
+ )
+ frames, landmarks_sequence = _track_all_frames(video, tracker)
 
  if not landmarks_sequence:
  click.echo("Error: No frames processed", err=True)
  sys.exit(1)
 
- # ================================================================
- # STEP 2: Analyze video characteristics and auto-tune parameters
- # ================================================================
-
+ # Auto-tune parameters
  characteristics = analyze_video_sample(
  landmarks_sequence, video.fps, video.frame_count
  )
-
- # Auto-tune parameters based on video characteristics
  params = auto_tune_parameters(characteristics, quality_preset)
+ params = _apply_expert_param_overrides(params, expert_params)
 
- # Apply expert overrides if provided
- if smoothing_window is not None:
- params.smoothing_window = smoothing_window
- if velocity_threshold is not None:
- params.velocity_threshold = velocity_threshold
- if min_contact_frames is not None:
- params.min_contact_frames = min_contact_frames
- if visibility_threshold is not None:
- params.visibility_threshold = visibility_threshold
-
- # Show selected parameters if verbose
+ # Show parameters if verbose
  if verbose:
- click.echo("\n" + "=" * 60, err=True)
- click.echo("AUTO-TUNED PARAMETERS", err=True)
- click.echo("=" * 60, err=True)
- click.echo(f"Video FPS: {video.fps:.2f}", err=True)
- click.echo(
- f"Tracking quality: {characteristics.tracking_quality} "
- f"(avg visibility: {characteristics.avg_visibility:.2f})",
- err=True,
- )
- click.echo(f"Quality preset: {quality_preset.value}", err=True)
- click.echo("\nSelected parameters:", err=True)
- click.echo(f" smoothing_window: {params.smoothing_window}", err=True)
- click.echo(f" polyorder: {params.polyorder}", err=True)
- click.echo(
- f" velocity_threshold: {params.velocity_threshold:.4f}", err=True
- )
- click.echo(
- f" min_contact_frames: {params.min_contact_frames}", err=True
- )
- click.echo(
- f" visibility_threshold: {params.visibility_threshold}", err=True
- )
- click.echo(
- f" detection_confidence: {params.detection_confidence}", err=True
- )
- click.echo(
- f" tracking_confidence: {params.tracking_confidence}", err=True
- )
- click.echo(f" outlier_rejection: {params.outlier_rejection}", err=True)
- click.echo(f" bilateral_filter: {params.bilateral_filter}", err=True)
- click.echo(f" use_curvature: {params.use_curvature}", err=True)
- click.echo("=" * 60 + "\n", err=True)
-
- # ================================================================
- # STEP 3: Apply smoothing with auto-tuned parameters
- # ================================================================
-
- # Smooth landmarks using auto-tuned parameters
- if params.outlier_rejection or params.bilateral_filter:
- if params.outlier_rejection:
- click.echo(
- "Smoothing landmarks with outlier rejection...", err=True
- )
- if params.bilateral_filter:
- click.echo(
- "Using bilateral temporal filter for edge-preserving smoothing...",
- err=True,
- )
- smoothed_landmarks = smooth_landmarks_advanced(
- landmarks_sequence,
- window_length=params.smoothing_window,
- polyorder=params.polyorder,
- use_outlier_rejection=params.outlier_rejection,
- use_bilateral=params.bilateral_filter,
- )
- else:
- click.echo("Smoothing landmarks...", err=True)
- smoothed_landmarks = smooth_landmarks(
- landmarks_sequence,
- window_length=params.smoothing_window,
- polyorder=params.polyorder,
- )
+ _print_auto_tuned_params(video, characteristics, quality_preset, params)
+
+ # Apply smoothing
+ smoothed_landmarks = _smooth_landmark_sequence(landmarks_sequence, params)
+
+ # Extract positions
+ vertical_positions, visibilities = _extract_positions_and_visibilities(
+ smoothed_landmarks
+ )
 
- # Extract vertical positions from feet
- click.echo("Extracting foot positions...", err=True)
-
- position_list: list[float] = []
- visibilities_list: list[float] = []
-
- for frame_landmarks in smoothed_landmarks:
- if frame_landmarks:
- # Use average foot position
- _, foot_y = compute_average_foot_position(frame_landmarks)
- position_list.append(foot_y)
-
- # Average visibility of foot landmarks
- foot_vis = []
- for key in [
- "left_ankle",
- "right_ankle",
- "left_heel",
- "right_heel",
- ]:
- if key in frame_landmarks:
- foot_vis.append(frame_landmarks[key][2])
- visibilities_list.append(
- float(np.mean(foot_vis)) if foot_vis else 0.0
- )
- else:
- # Use previous position if available, otherwise default
- position_list.append(position_list[-1] if position_list else 0.5)
- visibilities_list.append(0.0)
-
- vertical_positions: np.ndarray = np.array(position_list)
- visibilities: np.ndarray = np.array(visibilities_list)
-
- # Detect ground contact using auto-tuned parameters
+ # Detect ground contact
  contact_states = detect_ground_contact(
  vertical_positions,
  velocity_threshold=params.velocity_threshold,
@@ -463,72 +589,27 @@ def _process_single(
  vertical_positions,
  video.fps,
  drop_height_m=drop_height,
- drop_start_frame=drop_start_frame,
+ drop_start_frame=expert_params.drop_start_frame,
  velocity_threshold=params.velocity_threshold,
  smoothing_window=params.smoothing_window,
  polyorder=params.polyorder,
  use_curvature=params.use_curvature,
- kinematic_correction_factor=1.0, # Always 1.0 now (no experimental correction)
+ kinematic_correction_factor=1.0,
  )
 
- # Output metrics as JSON
- metrics_dict = metrics.to_dict()
- metrics_json = json.dumps(metrics_dict, indent=2)
-
+ # Output metrics
+ metrics_json = json.dumps(metrics.to_dict(), indent=2)
  if json_output:
- output_path = Path(json_output)
- output_path.write_text(metrics_json)
+ Path(json_output).write_text(metrics_json)
  click.echo(f"Metrics written to: {json_output}", err=True)
  else:
  click.echo(metrics_json)
 
  # Generate debug video if requested
  if output:
- click.echo(f"Generating debug video: {output}", err=True)
- if (
- video.display_width != video.width
- or video.display_height != video.height
- ):
- click.echo(
- f"Source video encoded: {video.width}x{video.height}",
- err=True,
- )
- click.echo(
- f"Output dimensions: {video.display_width}x{video.display_height} "
- f"(respecting display aspect ratio)",
- err=True,
- )
- else:
- click.echo(
- f"Output dimensions: {video.width}x{video.height} "
- f"(matching source video aspect ratio)",
- err=True,
- )
- with DebugOverlayRenderer(
- output,
- video.width,
- video.height,
- video.display_width,
- video.display_height,
- video.fps,
- ) as renderer:
- render_bar: Any
- with click.progressbar(
- length=len(frames), label="Rendering frames"
- ) as render_bar:
- for i, frame in enumerate(frames):
- annotated = renderer.render_frame(
- frame,
- smoothed_landmarks[i],
- contact_states[i],
- i,
- metrics,
- use_com=False,
- )
- renderer.write_frame(annotated)
- render_bar.update(1)
-
- click.echo(f"Debug video saved: {output}", err=True)
+ _create_debug_video(
+ output, video, frames, smoothed_landmarks, contact_states, metrics
+ )
 
  click.echo("Analysis complete!", err=True)
 
@@ -537,29 +618,15 @@ def _process_single(
  sys.exit(1)
 
 
- def _process_batch(
- video_files: list[str],
- drop_height: float,
- quality: str,
- workers: int,
- output_dir: str | None,
- json_output_dir: str | None,
- csv_summary: str | None,
- drop_start_frame: int | None,
- smoothing_window: int | None,
- velocity_threshold: float | None,
- min_contact_frames: int | None,
- visibility_threshold: float | None,
- detection_confidence: float | None,
- tracking_confidence: float | None,
+ def _setup_batch_output_dirs(
+ output_dir: str | None, json_output_dir: str | None
  ) -> None:
- """Process multiple videos in batch mode using parallel processing."""
- click.echo(
- f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True
- )
- click.echo("=" * 70, err=True)
+ """Create output directories for batch processing.
 
- # Create output directories if specified
+ Args:
+ output_dir: Debug video output directory
+ json_output_dir: JSON metrics output directory
+ """
  if output_dir:
  Path(output_dir).mkdir(parents=True, exist_ok=True)
  click.echo(f"Debug videos will be saved to: {output_dir}", err=True)
@@ -568,12 +635,32 @@ def _process_batch(
  Path(json_output_dir).mkdir(parents=True, exist_ok=True)
  click.echo(f"JSON metrics will be saved to: {json_output_dir}", err=True)
 
- # Build configurations for each video
+
+ def _create_video_configs(
+ video_files: list[str],
+ drop_height: float,
+ quality: str,
+ output_dir: str | None,
+ json_output_dir: str | None,
+ expert_params: AnalysisParameters,
+ ) -> list[VideoConfig]:
+ """Build configuration objects for each video.
+
+ Args:
+ video_files: List of video file paths
+ drop_height: Drop height in meters
+ quality: Quality preset
+ output_dir: Debug video output directory
+ json_output_dir: JSON metrics output directory
+ expert_params: Expert parameter overrides
+
+ Returns:
+ List of VideoConfig objects
+ """
  configs: list[VideoConfig] = []
  for video_file in video_files:
  video_name = Path(video_file).stem
 
- # Determine output paths
  debug_video = None
  if output_dir:
  debug_video = str(Path(output_dir) / f"{video_name}_debug.mp4")
@@ -588,39 +675,25 @@ def _process_batch(
  quality=quality,
  output_video=debug_video,
  json_output=json_file,
- drop_start_frame=drop_start_frame,
- smoothing_window=smoothing_window,
- velocity_threshold=velocity_threshold,
- min_contact_frames=min_contact_frames,
- visibility_threshold=visibility_threshold,
- detection_confidence=detection_confidence,
- tracking_confidence=tracking_confidence,
+ drop_start_frame=expert_params.drop_start_frame,
+ smoothing_window=expert_params.smoothing_window,
+ velocity_threshold=expert_params.velocity_threshold,
+ min_contact_frames=expert_params.min_contact_frames,
+ visibility_threshold=expert_params.visibility_threshold,
+ detection_confidence=expert_params.detection_confidence,
+ tracking_confidence=expert_params.tracking_confidence,
  )
  configs.append(config)
 
- # Progress callback
- completed = 0
+ return configs
 
- def show_progress(result: VideoResult) -> None:
- nonlocal completed
- completed += 1
- status = "✓" if result.success else "✗"
- video_name = Path(result.video_path).name
- click.echo(
- f"[{completed}/{len(configs)}] {status} {video_name} "
- f"({result.processing_time:.1f}s)",
- err=True,
- )
- if not result.success:
- click.echo(f" Error: {result.error}", err=True)
 
- # Process all videos
- click.echo("\nProcessing videos...", err=True)
- results = process_videos_bulk(
- configs, max_workers=workers, progress_callback=show_progress
- )
+ def _compute_batch_statistics(results: list[VideoResult]) -> None:
+ """Compute and display batch processing statistics.
 
- # Generate summary
+ Args:
+ results: List of video processing results
+ """
  click.echo("\n" + "=" * 70, err=True)
  click.echo("BATCH PROCESSING SUMMARY", err=True)
  click.echo("=" * 70, err=True)
@@ -647,7 +720,6 @@ def _process_batch(
  ]
 
  if with_gct:
- # Type assertion: filtering ensures metrics and ground_contact_time are not None
  avg_gct = sum(
  r.metrics.ground_contact_time * 1000
  for r in with_gct
@@ -656,7 +728,6 @@ def _process_batch(
  click.echo(f"\nAverage ground contact time: {avg_gct:.1f} ms", err=True)
 
  if with_flight:
- # Type assertion: filtering ensures metrics and flight_time are not None
  avg_flight = sum(
  r.metrics.flight_time * 1000
  for r in with_flight
@@ -665,7 +736,6 @@ def _process_batch(
  click.echo(f"Average flight time: {avg_flight:.1f} ms", err=True)
 
  if with_jump:
- # Type assertion: filtering ensures metrics and jump_height are not None
  avg_jump = sum(
  r.metrics.jump_height
  for r in with_jump
@@ -676,63 +746,129 @@ def _process_batch(
  err=True,
  )
 
- # Export CSV summary if requested
- if csv_summary and successful:
- click.echo(f"\nExporting CSV summary to: {csv_summary}", err=True)
- Path(csv_summary).parent.mkdir(parents=True, exist_ok=True)
-
- with open(csv_summary, "w", newline="") as f:
- writer = csv.writer(f)
-
- # Header
- writer.writerow(
- [
- "Video",
- "Ground Contact Time (ms)",
- "Flight Time (ms)",
- "Jump Height (m)",
- "Processing Time (s)",
- "Status",
- ]
- )
 
- # Data rows
- for result in results:
- if result.success and result.metrics:
- writer.writerow(
- [
- Path(result.video_path).name,
- (
- f"{result.metrics.ground_contact_time * 1000:.1f}"
- if result.metrics.ground_contact_time
- else "N/A"
- ),
- (
- f"{result.metrics.flight_time * 1000:.1f}"
- if result.metrics.flight_time
- else "N/A"
- ),
- (
- f"{result.metrics.jump_height:.3f}"
- if result.metrics.jump_height
- else "N/A"
- ),
- f"{result.processing_time:.2f}",
- "Success",
- ]
- )
- else:
- writer.writerow(
- [
- Path(result.video_path).name,
- "N/A",
- "N/A",
- "N/A",
- f"{result.processing_time:.2f}",
- f"Failed: {result.error}",
- ]
- )
-
- click.echo("CSV summary written successfully", err=True)
+ def _write_csv_summary(
+ csv_summary: str | None, results: list[VideoResult], successful: list[VideoResult]
+ ) -> None:
+ """Write CSV summary of batch processing results.
+
+ Args:
+ csv_summary: Path to CSV output file
+ results: All processing results
+ successful: Successful processing results
+ """
+ if not csv_summary or not successful:
+ return
+
+ click.echo(f"\nExporting CSV summary to: {csv_summary}", err=True)
+ Path(csv_summary).parent.mkdir(parents=True, exist_ok=True)
+
+ with open(csv_summary, "w", newline="") as f:
+ writer = csv.writer(f)
+
+ # Header
+ writer.writerow(
+ [
+ "Video",
+ "Ground Contact Time (ms)",
+ "Flight Time (ms)",
+ "Jump Height (m)",
+ "Processing Time (s)",
+ "Status",
+ ]
+ )
+
+ # Data rows
+ for result in results:
+ if result.success and result.metrics:
+ writer.writerow(
+ [
+ Path(result.video_path).name,
+ (
+ f"{result.metrics.ground_contact_time * 1000:.1f}"
+ if result.metrics.ground_contact_time
+ else "N/A"
+ ),
+ (
+ f"{result.metrics.flight_time * 1000:.1f}"
+ if result.metrics.flight_time
+ else "N/A"
+ ),
+ (
+ f"{result.metrics.jump_height:.3f}"
+ if result.metrics.jump_height
+ else "N/A"
+ ),
+ f"{result.processing_time:.2f}",
+ "Success",
+ ]
+ )
+ else:
+ writer.writerow(
+ [
+ Path(result.video_path).name,
+ "N/A",
+ "N/A",
+ "N/A",
+ f"{result.processing_time:.2f}",
+ f"Failed: {result.error}",
+ ]
+ )
+
+ click.echo("CSV summary written successfully", err=True)
+
+
+ def _process_batch(
+ video_files: list[str],
+ drop_height: float,
+ quality: str,
+ workers: int,
+ output_dir: str | None,
+ json_output_dir: str | None,
+ csv_summary: str | None,
+ expert_params: AnalysisParameters,
+ ) -> None:
+ """Process multiple videos in batch mode using parallel processing."""
+ click.echo(
+ f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True
+ )
+ click.echo("=" * 70, err=True)
+
+ # Setup output directories
+ _setup_batch_output_dirs(output_dir, json_output_dir)
+
+ # Create video configurations
+ configs = _create_video_configs(
+ video_files, drop_height, quality, output_dir, json_output_dir, expert_params
+ )
+
+ # Progress callback
+ completed = 0
+
+ def show_progress(result: VideoResult) -> None:
+ nonlocal completed
+ completed += 1
+ status = "✓" if result.success else "✗"
+ video_name = Path(result.video_path).name
+ click.echo(
+ f"[{completed}/{len(configs)}] {status} {video_name} "
+ f"({result.processing_time:.1f}s)",
+ err=True,
+ )
+ if not result.success:
+ click.echo(f" Error: {result.error}", err=True)
+
+ # Process all videos
+ click.echo("\nProcessing videos...", err=True)
+ results = process_videos_bulk(
+ configs, max_workers=workers, progress_callback=show_progress
+ )
+
+ # Display statistics
+ _compute_batch_statistics(results)
+
+ # Export CSV summary if requested
+ successful = [r for r in results if r.success]
+ _write_csv_summary(csv_summary, results, successful)
 
  click.echo("\nBatch processing complete!", err=True)
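The core of this refactor is the new AnalysisParameters dataclass: the seven [EXPERT] CLI overrides are bundled into one object, threaded through _process_single and _process_batch, and applied on top of the auto-tuned values only when they are not None. The standalone sketch below illustrates that override pattern; the TunedParams stand-in, its default values, and the fields() loop are illustrative assumptions, not code from the package.

from dataclasses import dataclass, fields


@dataclass
class AnalysisParameters:
    """Optional expert overrides; None keeps the auto-tuned value."""

    smoothing_window: int | None = None
    velocity_threshold: float | None = None
    min_contact_frames: int | None = None
    visibility_threshold: float | None = None


@dataclass
class TunedParams:
    """Hypothetical stand-in for the auto-tuned parameter object."""

    smoothing_window: int = 7
    velocity_threshold: float = 0.02
    min_contact_frames: int = 3
    visibility_threshold: float = 0.5


def apply_overrides(params: TunedParams, expert: AnalysisParameters) -> TunedParams:
    # Copy each expert value over the auto-tuned one, but only when it was given.
    for f in fields(expert):
        value = getattr(expert, f.name)
        if value is not None:
            setattr(params, f.name, value)
    return params


if __name__ == "__main__":
    tuned = apply_overrides(TunedParams(), AnalysisParameters(smoothing_window=11))
    print(tuned)  # only smoothing_window is overridden; the rest stay auto-tuned

In the module itself the same effect is achieved with explicit if-statements in _apply_expert_param_overrides; the loop above is just a compact equivalent of that pattern.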