kinemotion-0.10.5-py3-none-any.whl → kinemotion-0.10.7-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of kinemotion was flagged as possibly problematic.

kinemotion/dropjump/cli.py

@@ -12,8 +12,12 @@ import click
 import numpy as np
 
 from ..api import VideoConfig, VideoResult, process_videos_bulk
+from ..core.auto_tuning import (
+    AnalysisParameters as AutoTunedParams,
+)
 from ..core.auto_tuning import (
     QualityPreset,
+    VideoCharacteristics,
     analyze_video_sample,
     auto_tune_parameters,
 )
@@ -21,11 +25,12 @@ from ..core.pose import PoseTracker
 from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from ..core.video_io import VideoProcessor
 from .analysis import (
+    ContactState,
     compute_average_foot_position,
     detect_ground_contact,
 )
 from .debug_overlay import DebugOverlayRenderer
-from .kinematics import calculate_drop_jump_metrics
+from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 
 
 @dataclass
@@ -41,6 +46,25 @@ class AnalysisParameters:
     tracking_confidence: float | None = None
 
 
+@dataclass
+class BatchOptions:
+    """Batch processing configuration."""
+
+    batch: bool
+    workers: int
+    output_dir: str | None
+    json_output_dir: str | None
+    csv_summary: str | None
+
+
+@dataclass
+class OutputOptions:
+    """Output configuration for single video processing."""
+
+    output: str | None
+    json_output: str | None
+
+
 @click.command(name="dropjump-analyze")
 @click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
 @click.option(
@@ -197,6 +221,62 @@ def dropjump_analyze(
         kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 \\
             --json-output-dir results/ --csv-summary summary.csv
     """
+    # Group parameters into dataclasses
+    batch_opts = BatchOptions(
+        batch=batch,
+        workers=workers,
+        output_dir=output_dir,
+        json_output_dir=json_output_dir,
+        csv_summary=csv_summary,
+    )
+
+    output_opts = OutputOptions(
+        output=output,
+        json_output=json_output,
+    )
+
+    expert_params = AnalysisParameters(
+        drop_start_frame=drop_start_frame,
+        smoothing_window=smoothing_window,
+        velocity_threshold=velocity_threshold,
+        min_contact_frames=min_contact_frames,
+        visibility_threshold=visibility_threshold,
+        detection_confidence=detection_confidence,
+        tracking_confidence=tracking_confidence,
+    )
+
+    # Execute analysis with grouped parameters
+    _execute_analysis(
+        video_path,
+        drop_height,
+        quality,
+        verbose,
+        batch_opts,
+        output_opts,
+        expert_params,
+    )
+
+
+def _execute_analysis(
+    video_path: tuple[str, ...],
+    drop_height: float,
+    quality: str,
+    verbose: bool,
+    batch_opts: BatchOptions,
+    output_opts: OutputOptions,
+    expert_params: AnalysisParameters,
+) -> None:
+    """Execute drop jump analysis with grouped parameters.
+
+    Args:
+        video_path: Tuple of video path patterns
+        drop_height: Drop height in meters
+        quality: Quality preset string
+        verbose: Verbose output flag
+        batch_opts: Batch processing options
+        output_opts: Output file options
+        expert_params: Expert parameter overrides
+    """
     # Expand glob patterns and collect all video files
     video_files: list[str] = []
     for pattern in video_path:
@@ -214,43 +294,286 @@ def dropjump_analyze(
         sys.exit(1)
 
     # Determine if batch mode should be used
-    use_batch = batch or len(video_files) > 1
-
-    # Group expert parameters
-    params = AnalysisParameters(
-        drop_start_frame=drop_start_frame,
-        smoothing_window=smoothing_window,
-        velocity_threshold=velocity_threshold,
-        min_contact_frames=min_contact_frames,
-        visibility_threshold=visibility_threshold,
-        detection_confidence=detection_confidence,
-        tracking_confidence=tracking_confidence,
-    )
+    use_batch = batch_opts.batch or len(video_files) > 1
 
     if use_batch:
         _process_batch(
             video_files,
             drop_height,
             quality,
-            workers,
-            output_dir,
-            json_output_dir,
-            csv_summary,
-            params,
+            batch_opts.workers,
+            batch_opts.output_dir,
+            batch_opts.json_output_dir,
+            batch_opts.csv_summary,
+            expert_params,
         )
     else:
         # Single video mode (original behavior)
         _process_single(
             video_files[0],
-            output,
-            json_output,
+            output_opts.output,
+            output_opts.json_output,
             drop_height,
             quality,
             verbose,
-            params,
+            expert_params,
         )
 
 
+def _determine_initial_confidence(
+    quality_preset: QualityPreset,
+    expert_params: AnalysisParameters,
+) -> tuple[float, float]:
+    """Determine initial detection and tracking confidence levels.
+
+    Args:
+        quality_preset: Quality preset enum
+        expert_params: Expert parameter overrides
+
+    Returns:
+        Tuple of (detection_confidence, tracking_confidence)
+    """
+    initial_detection_conf = 0.5
+    initial_tracking_conf = 0.5
+
+    if quality_preset == QualityPreset.FAST:
+        initial_detection_conf = 0.3
+        initial_tracking_conf = 0.3
+    elif quality_preset == QualityPreset.ACCURATE:
+        initial_detection_conf = 0.6
+        initial_tracking_conf = 0.6
+
+    # Override with expert values if provided
+    if expert_params.detection_confidence is not None:
+        initial_detection_conf = expert_params.detection_confidence
+    if expert_params.tracking_confidence is not None:
+        initial_tracking_conf = expert_params.tracking_confidence
+
+    return initial_detection_conf, initial_tracking_conf
+
+
+def _track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
+    """Track pose landmarks in all video frames.
+
+    Args:
+        video: Video processor
+        tracker: Pose tracker
+
+    Returns:
+        Tuple of (frames, landmarks_sequence)
+    """
+    click.echo("Tracking pose landmarks...", err=True)
+    landmarks_sequence = []
+    frames = []
+
+    bar: Any
+    with click.progressbar(length=video.frame_count, label="Processing frames") as bar:
+        while True:
+            frame = video.read_frame()
+            if frame is None:
+                break
+
+            frames.append(frame)
+            landmarks = tracker.process_frame(frame)
+            landmarks_sequence.append(landmarks)
+
+            bar.update(1)
+
+    tracker.close()
+    return frames, landmarks_sequence
+
+
+def _apply_expert_param_overrides(
+    params: AutoTunedParams, expert_params: AnalysisParameters
+) -> AutoTunedParams:
+    """Apply expert parameter overrides to auto-tuned parameters.
+
+    Args:
+        params: Auto-tuned parameters
+        expert_params: Expert overrides
+
+    Returns:
+        Modified params object (mutated in place)
+    """
+    if expert_params.smoothing_window is not None:
+        params.smoothing_window = expert_params.smoothing_window
+    if expert_params.velocity_threshold is not None:
+        params.velocity_threshold = expert_params.velocity_threshold
+    if expert_params.min_contact_frames is not None:
+        params.min_contact_frames = expert_params.min_contact_frames
+    if expert_params.visibility_threshold is not None:
+        params.visibility_threshold = expert_params.visibility_threshold
+    return params
+
+
+def _print_auto_tuned_params(
+    video: VideoProcessor,
+    characteristics: VideoCharacteristics,
+    quality_preset: QualityPreset,
+    params: AutoTunedParams,
+) -> None:
+    """Print auto-tuned parameters in verbose mode.
+
+    Args:
+        video: Video processor
+        characteristics: Video characteristics
+        quality_preset: Quality preset
+        params: Auto-tuned parameters
+    """
+    click.echo("\n" + "=" * 60, err=True)
+    click.echo("AUTO-TUNED PARAMETERS", err=True)
+    click.echo("=" * 60, err=True)
+    click.echo(f"Video FPS: {video.fps:.2f}", err=True)
+    click.echo(
+        f"Tracking quality: {characteristics.tracking_quality} "
+        f"(avg visibility: {characteristics.avg_visibility:.2f})",
+        err=True,
+    )
+    click.echo(f"Quality preset: {quality_preset.value}", err=True)
+    click.echo("\nSelected parameters:", err=True)
+    click.echo(f" smoothing_window: {params.smoothing_window}", err=True)
+    click.echo(f" polyorder: {params.polyorder}", err=True)
+    click.echo(f" velocity_threshold: {params.velocity_threshold:.4f}", err=True)
+    click.echo(f" min_contact_frames: {params.min_contact_frames}", err=True)
+    click.echo(f" visibility_threshold: {params.visibility_threshold}", err=True)
+    click.echo(f" detection_confidence: {params.detection_confidence}", err=True)
+    click.echo(f" tracking_confidence: {params.tracking_confidence}", err=True)
+    click.echo(f" outlier_rejection: {params.outlier_rejection}", err=True)
+    click.echo(f" bilateral_filter: {params.bilateral_filter}", err=True)
+    click.echo(f" use_curvature: {params.use_curvature}", err=True)
+    click.echo("=" * 60 + "\n", err=True)
+
+
+def _smooth_landmark_sequence(
+    landmarks_sequence: list, params: AutoTunedParams
+) -> list:
+    """Apply smoothing to landmark sequence.
+
+    Args:
+        landmarks_sequence: Raw landmark sequence
+        params: Auto-tuned parameters
+
+    Returns:
+        Smoothed landmarks
+    """
+    if params.outlier_rejection or params.bilateral_filter:
+        if params.outlier_rejection:
+            click.echo("Smoothing landmarks with outlier rejection...", err=True)
+        if params.bilateral_filter:
+            click.echo(
+                "Using bilateral temporal filter for edge-preserving smoothing...",
+                err=True,
+            )
+        return smooth_landmarks_advanced(
+            landmarks_sequence,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
+            use_outlier_rejection=params.outlier_rejection,
+            use_bilateral=params.bilateral_filter,
+        )
+    else:
+        click.echo("Smoothing landmarks...", err=True)
+        return smooth_landmarks(
+            landmarks_sequence,
+            window_length=params.smoothing_window,
+            polyorder=params.polyorder,
+        )
+
+
+def _extract_positions_and_visibilities(
+    smoothed_landmarks: list,
+) -> tuple[np.ndarray, np.ndarray]:
+    """Extract vertical positions and visibilities from landmarks.
+
+    Args:
+        smoothed_landmarks: Smoothed landmark sequence
+
+    Returns:
+        Tuple of (vertical_positions, visibilities)
+    """
+    click.echo("Extracting foot positions...", err=True)
+
+    position_list: list[float] = []
+    visibilities_list: list[float] = []
+
+    for frame_landmarks in smoothed_landmarks:
+        if frame_landmarks:
+            _, foot_y = compute_average_foot_position(frame_landmarks)
+            position_list.append(foot_y)
+
+            # Average visibility of foot landmarks
+            foot_vis = []
+            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
+                if key in frame_landmarks:
+                    foot_vis.append(frame_landmarks[key][2])
+            visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
+        else:
+            position_list.append(position_list[-1] if position_list else 0.5)
+            visibilities_list.append(0.0)
+
+    return np.array(position_list), np.array(visibilities_list)
+
+
+def _create_debug_video(
+    output: str,
+    video: VideoProcessor,
+    frames: list,
+    smoothed_landmarks: list,
+    contact_states: list[ContactState],
+    metrics: DropJumpMetrics,
+) -> None:
+    """Generate debug video with overlays.
+
+    Args:
+        output: Output video path
+        video: Video processor
+        frames: Video frames
+        smoothed_landmarks: Smoothed landmarks
+        contact_states: Contact states
+        metrics: Calculated metrics
+    """
+    click.echo(f"Generating debug video: {output}", err=True)
+    if video.display_width != video.width or video.display_height != video.height:
+        click.echo(f"Source video encoded: {video.width}x{video.height}", err=True)
+        click.echo(
+            f"Output dimensions: {video.display_width}x{video.display_height} "
+            f"(respecting display aspect ratio)",
+            err=True,
+        )
+    else:
+        click.echo(
+            f"Output dimensions: {video.width}x{video.height} "
+            f"(matching source video aspect ratio)",
+            err=True,
+        )
+
+    with DebugOverlayRenderer(
+        output,
+        video.width,
+        video.height,
+        video.display_width,
+        video.display_height,
+        video.fps,
+    ) as renderer:
+        render_bar: Any
+        with click.progressbar(
+            length=len(frames), label="Rendering frames"
+        ) as render_bar:
+            for i, frame in enumerate(frames):
+                annotated = renderer.render_frame(
+                    frame,
+                    smoothed_landmarks[i],
+                    contact_states[i],
+                    i,
+                    metrics,
+                    use_com=False,
+                )
+                renderer.write_frame(annotated)
+                render_bar.update(1)
+
+    click.echo(f"Debug video saved: {output}", err=True)
+
+
 def _process_single(
     video_path: str,
     output: str | None,
@@ -263,11 +586,9 @@ def _process_single(
     """Process a single video (original CLI behavior)."""
     click.echo(f"Analyzing video: {video_path}", err=True)
 
-    # Convert quality string to enum
     quality_preset = QualityPreset(quality.lower())
 
     try:
-        # Initialize video processor
         with VideoProcessor(video_path) as video:
             click.echo(
                 f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
@@ -275,180 +596,42 @@ def _process_single(
                 err=True,
             )
 
-            # ================================================================
-            # STEP 1: Auto-tune parameters based on video characteristics
-            # ================================================================
-
-            # Analyze video characteristics from a sample to determine optimal parameters
-            # We'll use detection/tracking confidence from quality preset for initial tracking
-            initial_detection_conf = 0.5
-            initial_tracking_conf = 0.5
-
-            if quality_preset == QualityPreset.FAST:
-                initial_detection_conf = 0.3
-                initial_tracking_conf = 0.3
-            elif quality_preset == QualityPreset.ACCURATE:
-                initial_detection_conf = 0.6
-                initial_tracking_conf = 0.6
-
-            # Override with expert values if provided
-            if expert_params.detection_confidence is not None:
-                initial_detection_conf = expert_params.detection_confidence
-            if expert_params.tracking_confidence is not None:
-                initial_tracking_conf = expert_params.tracking_confidence
-
-            # Initialize pose tracker
-            tracker = PoseTracker(
-                min_detection_confidence=initial_detection_conf,
-                min_tracking_confidence=initial_tracking_conf,
+            # Determine confidence levels
+            detection_conf, tracking_conf = _determine_initial_confidence(
+                quality_preset, expert_params
             )
 
-            # Process all frames
-            click.echo("Tracking pose landmarks...", err=True)
-            landmarks_sequence = []
-            frames = []
-
-            bar: Any
-            with click.progressbar(
-                length=video.frame_count, label="Processing frames"
-            ) as bar:
-                while True:
-                    frame = video.read_frame()
-                    if frame is None:
-                        break
-
-                    frames.append(frame)
-                    landmarks = tracker.process_frame(frame)
-                    landmarks_sequence.append(landmarks)
-
-                    bar.update(1)
-
-            tracker.close()
+            # Track all frames
+            tracker = PoseTracker(
+                min_detection_confidence=detection_conf,
+                min_tracking_confidence=tracking_conf,
+            )
+            frames, landmarks_sequence = _track_all_frames(video, tracker)
 
             if not landmarks_sequence:
                 click.echo("Error: No frames processed", err=True)
                 sys.exit(1)
 
-            # ================================================================
-            # STEP 2: Analyze video characteristics and auto-tune parameters
-            # ================================================================
-
+            # Auto-tune parameters
            characteristics = analyze_video_sample(
                 landmarks_sequence, video.fps, video.frame_count
             )
-
-            # Auto-tune parameters based on video characteristics
             params = auto_tune_parameters(characteristics, quality_preset)
+            params = _apply_expert_param_overrides(params, expert_params)
 
-            # Apply expert overrides if provided
-            if expert_params.smoothing_window is not None:
-                params.smoothing_window = expert_params.smoothing_window
-            if expert_params.velocity_threshold is not None:
-                params.velocity_threshold = expert_params.velocity_threshold
-            if expert_params.min_contact_frames is not None:
-                params.min_contact_frames = expert_params.min_contact_frames
-            if expert_params.visibility_threshold is not None:
-                params.visibility_threshold = expert_params.visibility_threshold
-
-            # Show selected parameters if verbose
+            # Show parameters if verbose
             if verbose:
-                click.echo("\n" + "=" * 60, err=True)
-                click.echo("AUTO-TUNED PARAMETERS", err=True)
-                click.echo("=" * 60, err=True)
-                click.echo(f"Video FPS: {video.fps:.2f}", err=True)
-                click.echo(
-                    f"Tracking quality: {characteristics.tracking_quality} "
-                    f"(avg visibility: {characteristics.avg_visibility:.2f})",
-                    err=True,
-                )
-                click.echo(f"Quality preset: {quality_preset.value}", err=True)
-                click.echo("\nSelected parameters:", err=True)
-                click.echo(f" smoothing_window: {params.smoothing_window}", err=True)
-                click.echo(f" polyorder: {params.polyorder}", err=True)
-                click.echo(
-                    f" velocity_threshold: {params.velocity_threshold:.4f}", err=True
-                )
-                click.echo(
-                    f" min_contact_frames: {params.min_contact_frames}", err=True
-                )
-                click.echo(
-                    f" visibility_threshold: {params.visibility_threshold}", err=True
-                )
-                click.echo(
-                    f" detection_confidence: {params.detection_confidence}", err=True
-                )
-                click.echo(
-                    f" tracking_confidence: {params.tracking_confidence}", err=True
-                )
-                click.echo(f" outlier_rejection: {params.outlier_rejection}", err=True)
-                click.echo(f" bilateral_filter: {params.bilateral_filter}", err=True)
-                click.echo(f" use_curvature: {params.use_curvature}", err=True)
-                click.echo("=" * 60 + "\n", err=True)
-
-            # ================================================================
-            # STEP 3: Apply smoothing with auto-tuned parameters
-            # ================================================================
-
-            # Smooth landmarks using auto-tuned parameters
-            if params.outlier_rejection or params.bilateral_filter:
-                if params.outlier_rejection:
-                    click.echo(
-                        "Smoothing landmarks with outlier rejection...", err=True
-                    )
-                if params.bilateral_filter:
-                    click.echo(
-                        "Using bilateral temporal filter for edge-preserving smoothing...",
-                        err=True,
-                    )
-                smoothed_landmarks = smooth_landmarks_advanced(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                    use_outlier_rejection=params.outlier_rejection,
-                    use_bilateral=params.bilateral_filter,
-                )
-            else:
-                click.echo("Smoothing landmarks...", err=True)
-                smoothed_landmarks = smooth_landmarks(
-                    landmarks_sequence,
-                    window_length=params.smoothing_window,
-                    polyorder=params.polyorder,
-                )
+                _print_auto_tuned_params(video, characteristics, quality_preset, params)
+
+            # Apply smoothing
+            smoothed_landmarks = _smooth_landmark_sequence(landmarks_sequence, params)
+
+            # Extract positions
+            vertical_positions, visibilities = _extract_positions_and_visibilities(
+                smoothed_landmarks
+            )
 
-            # Extract vertical positions from feet
-            click.echo("Extracting foot positions...", err=True)
-
-            position_list: list[float] = []
-            visibilities_list: list[float] = []
-
-            for frame_landmarks in smoothed_landmarks:
-                if frame_landmarks:
-                    # Use average foot position
-                    _, foot_y = compute_average_foot_position(frame_landmarks)
-                    position_list.append(foot_y)
-
-                    # Average visibility of foot landmarks
-                    foot_vis = []
-                    for key in [
-                        "left_ankle",
-                        "right_ankle",
-                        "left_heel",
-                        "right_heel",
-                    ]:
-                        if key in frame_landmarks:
-                            foot_vis.append(frame_landmarks[key][2])
-                    visibilities_list.append(
-                        float(np.mean(foot_vis)) if foot_vis else 0.0
-                    )
-                else:
-                    # Use previous position if available, otherwise default
-                    position_list.append(position_list[-1] if position_list else 0.5)
-                    visibilities_list.append(0.0)
-
-            vertical_positions: np.ndarray = np.array(position_list)
-            visibilities: np.ndarray = np.array(visibilities_list)
-
-            # Detect ground contact using auto-tuned parameters
+            # Detect ground contact
             contact_states = detect_ground_contact(
                 vertical_positions,
                 velocity_threshold=params.velocity_threshold,
@@ -475,67 +658,22 @@ def _process_single(
                 smoothing_window=params.smoothing_window,
                 polyorder=params.polyorder,
                 use_curvature=params.use_curvature,
-                kinematic_correction_factor=1.0,  # Always 1.0 now (no experimental correction)
+                kinematic_correction_factor=1.0,
             )
 
-            # Output metrics as JSON
-            metrics_dict = metrics.to_dict()
-            metrics_json = json.dumps(metrics_dict, indent=2)
-
+            # Output metrics
+            metrics_json = json.dumps(metrics.to_dict(), indent=2)
             if json_output:
-                output_path = Path(json_output)
-                output_path.write_text(metrics_json)
+                Path(json_output).write_text(metrics_json)
                 click.echo(f"Metrics written to: {json_output}", err=True)
             else:
                 click.echo(metrics_json)
 
             # Generate debug video if requested
             if output:
-                click.echo(f"Generating debug video: {output}", err=True)
-                if (
-                    video.display_width != video.width
-                    or video.display_height != video.height
-                ):
-                    click.echo(
-                        f"Source video encoded: {video.width}x{video.height}",
-                        err=True,
-                    )
-                    click.echo(
-                        f"Output dimensions: {video.display_width}x{video.display_height} "
-                        f"(respecting display aspect ratio)",
-                        err=True,
-                    )
-                else:
-                    click.echo(
-                        f"Output dimensions: {video.width}x{video.height} "
-                        f"(matching source video aspect ratio)",
-                        err=True,
-                    )
-                with DebugOverlayRenderer(
-                    output,
-                    video.width,
-                    video.height,
-                    video.display_width,
-                    video.display_height,
-                    video.fps,
-                ) as renderer:
-                    render_bar: Any
-                    with click.progressbar(
-                        length=len(frames), label="Rendering frames"
-                    ) as render_bar:
-                        for i, frame in enumerate(frames):
-                            annotated = renderer.render_frame(
-                                frame,
-                                smoothed_landmarks[i],
-                                contact_states[i],
-                                i,
-                                metrics,
-                                use_com=False,
-                            )
-                            renderer.write_frame(annotated)
-                            render_bar.update(1)
-
-                click.echo(f"Debug video saved: {output}", err=True)
+                _create_debug_video(
+                    output, video, frames, smoothed_landmarks, contact_states, metrics
+                )
 
             click.echo("Analysis complete!", err=True)
 
@@ -544,23 +682,15 @@ def _process_single(
         sys.exit(1)
 
 
-def _process_batch(
-    video_files: list[str],
-    drop_height: float,
-    quality: str,
-    workers: int,
-    output_dir: str | None,
-    json_output_dir: str | None,
-    csv_summary: str | None,
-    expert_params: AnalysisParameters,
+def _setup_batch_output_dirs(
+    output_dir: str | None, json_output_dir: str | None
 ) -> None:
-    """Process multiple videos in batch mode using parallel processing."""
-    click.echo(
-        f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True
-    )
-    click.echo("=" * 70, err=True)
+    """Create output directories for batch processing.
 
-    # Create output directories if specified
+    Args:
+        output_dir: Debug video output directory
+        json_output_dir: JSON metrics output directory
+    """
     if output_dir:
         Path(output_dir).mkdir(parents=True, exist_ok=True)
         click.echo(f"Debug videos will be saved to: {output_dir}", err=True)
@@ -569,12 +699,32 @@ def _process_batch(
         Path(json_output_dir).mkdir(parents=True, exist_ok=True)
         click.echo(f"JSON metrics will be saved to: {json_output_dir}", err=True)
 
-    # Build configurations for each video
+
+def _create_video_configs(
+    video_files: list[str],
+    drop_height: float,
+    quality: str,
+    output_dir: str | None,
+    json_output_dir: str | None,
+    expert_params: AnalysisParameters,
+) -> list[VideoConfig]:
+    """Build configuration objects for each video.
+
+    Args:
+        video_files: List of video file paths
+        drop_height: Drop height in meters
+        quality: Quality preset
+        output_dir: Debug video output directory
+        json_output_dir: JSON metrics output directory
+        expert_params: Expert parameter overrides
+
+    Returns:
+        List of VideoConfig objects
+    """
     configs: list[VideoConfig] = []
     for video_file in video_files:
         video_name = Path(video_file).stem
 
-        # Determine output paths
         debug_video = None
         if output_dir:
             debug_video = str(Path(output_dir) / f"{video_name}_debug.mp4")
@@ -599,29 +749,15 @@ def _process_batch(
         )
         configs.append(config)
 
-    # Progress callback
-    completed = 0
+    return configs
 
-    def show_progress(result: VideoResult) -> None:
-        nonlocal completed
-        completed += 1
-        status = "✓" if result.success else "✗"
-        video_name = Path(result.video_path).name
-        click.echo(
-            f"[{completed}/{len(configs)}] {status} {video_name} "
-            f"({result.processing_time:.1f}s)",
-            err=True,
-        )
-        if not result.success:
-            click.echo(f" Error: {result.error}", err=True)
 
-    # Process all videos
-    click.echo("\nProcessing videos...", err=True)
-    results = process_videos_bulk(
-        configs, max_workers=workers, progress_callback=show_progress
-    )
+def _compute_batch_statistics(results: list[VideoResult]) -> None:
+    """Compute and display batch processing statistics.
 
-    # Generate summary
+    Args:
+        results: List of video processing results
+    """
     click.echo("\n" + "=" * 70, err=True)
     click.echo("BATCH PROCESSING SUMMARY", err=True)
     click.echo("=" * 70, err=True)
@@ -648,7 +784,6 @@ def _process_batch(
     ]
 
     if with_gct:
-        # Type assertion: filtering ensures metrics and ground_contact_time are not None
         avg_gct = sum(
             r.metrics.ground_contact_time * 1000
             for r in with_gct
@@ -657,7 +792,6 @@ def _process_batch(
         click.echo(f"\nAverage ground contact time: {avg_gct:.1f} ms", err=True)
 
     if with_flight:
-        # Type assertion: filtering ensures metrics and flight_time are not None
        avg_flight = sum(
             r.metrics.flight_time * 1000
             for r in with_flight
@@ -666,7 +800,6 @@ def _process_batch(
         click.echo(f"Average flight time: {avg_flight:.1f} ms", err=True)
 
     if with_jump:
-        # Type assertion: filtering ensures metrics and jump_height are not None
         avg_jump = sum(
             r.metrics.jump_height
             for r in with_jump
@@ -677,63 +810,129 @@ def _process_batch(
             err=True,
         )
 
-    # Export CSV summary if requested
-    if csv_summary and successful:
-        click.echo(f"\nExporting CSV summary to: {csv_summary}", err=True)
-        Path(csv_summary).parent.mkdir(parents=True, exist_ok=True)
-
-        with open(csv_summary, "w", newline="") as f:
-            writer = csv.writer(f)
-
-            # Header
-            writer.writerow(
-                [
-                    "Video",
-                    "Ground Contact Time (ms)",
-                    "Flight Time (ms)",
-                    "Jump Height (m)",
-                    "Processing Time (s)",
-                    "Status",
-                ]
-            )
 
-            # Data rows
-            for result in results:
-                if result.success and result.metrics:
-                    writer.writerow(
-                        [
-                            Path(result.video_path).name,
-                            (
-                                f"{result.metrics.ground_contact_time * 1000:.1f}"
-                                if result.metrics.ground_contact_time
-                                else "N/A"
-                            ),
-                            (
-                                f"{result.metrics.flight_time * 1000:.1f}"
-                                if result.metrics.flight_time
-                                else "N/A"
-                            ),
-                            (
-                                f"{result.metrics.jump_height:.3f}"
-                                if result.metrics.jump_height
-                                else "N/A"
-                            ),
-                            f"{result.processing_time:.2f}",
-                            "Success",
-                        ]
-                    )
-                else:
-                    writer.writerow(
-                        [
-                            Path(result.video_path).name,
-                            "N/A",
-                            "N/A",
-                            "N/A",
-                            f"{result.processing_time:.2f}",
-                            f"Failed: {result.error}",
-                        ]
-                    )
-
-        click.echo("CSV summary written successfully", err=True)
+def _write_csv_summary(
+    csv_summary: str | None, results: list[VideoResult], successful: list[VideoResult]
+) -> None:
+    """Write CSV summary of batch processing results.
+
+    Args:
+        csv_summary: Path to CSV output file
+        results: All processing results
+        successful: Successful processing results
+    """
+    if not csv_summary or not successful:
+        return
+
+    click.echo(f"\nExporting CSV summary to: {csv_summary}", err=True)
+    Path(csv_summary).parent.mkdir(parents=True, exist_ok=True)
+
+    with open(csv_summary, "w", newline="") as f:
+        writer = csv.writer(f)
+
+        # Header
+        writer.writerow(
+            [
+                "Video",
+                "Ground Contact Time (ms)",
+                "Flight Time (ms)",
+                "Jump Height (m)",
+                "Processing Time (s)",
+                "Status",
+            ]
+        )
+
+        # Data rows
+        for result in results:
+            if result.success and result.metrics:
+                writer.writerow(
+                    [
+                        Path(result.video_path).name,
+                        (
+                            f"{result.metrics.ground_contact_time * 1000:.1f}"
+                            if result.metrics.ground_contact_time
+                            else "N/A"
+                        ),
+                        (
+                            f"{result.metrics.flight_time * 1000:.1f}"
+                            if result.metrics.flight_time
+                            else "N/A"
+                        ),
+                        (
+                            f"{result.metrics.jump_height:.3f}"
+                            if result.metrics.jump_height
+                            else "N/A"
+                        ),
+                        f"{result.processing_time:.2f}",
+                        "Success",
+                    ]
+                )
+            else:
+                writer.writerow(
+                    [
+                        Path(result.video_path).name,
+                        "N/A",
+                        "N/A",
+                        "N/A",
+                        f"{result.processing_time:.2f}",
+                        f"Failed: {result.error}",
+                    ]
+                )
+
+    click.echo("CSV summary written successfully", err=True)
+
+
+def _process_batch(
+    video_files: list[str],
+    drop_height: float,
+    quality: str,
+    workers: int,
+    output_dir: str | None,
+    json_output_dir: str | None,
+    csv_summary: str | None,
+    expert_params: AnalysisParameters,
+) -> None:
+    """Process multiple videos in batch mode using parallel processing."""
+    click.echo(
+        f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True
+    )
+    click.echo("=" * 70, err=True)
+
+    # Setup output directories
+    _setup_batch_output_dirs(output_dir, json_output_dir)
+
+    # Create video configurations
+    configs = _create_video_configs(
+        video_files, drop_height, quality, output_dir, json_output_dir, expert_params
+    )
+
+    # Progress callback
+    completed = 0
+
+    def show_progress(result: VideoResult) -> None:
+        nonlocal completed
+        completed += 1
+        status = "✓" if result.success else "✗"
+        video_name = Path(result.video_path).name
+        click.echo(
+            f"[{completed}/{len(configs)}] {status} {video_name} "
+            f"({result.processing_time:.1f}s)",
+            err=True,
+        )
+        if not result.success:
+            click.echo(f" Error: {result.error}", err=True)
+
+    # Process all videos
+    click.echo("\nProcessing videos...", err=True)
+    results = process_videos_bulk(
+        configs, max_workers=workers, progress_callback=show_progress
+    )
+
+    # Display statistics
+    _compute_batch_statistics(results)
+
+    # Export CSV summary if requested
+    successful = [r for r in results if r.success]
+    _write_csv_summary(csv_summary, results, successful)
 
     click.echo("\nBatch processing complete!", err=True)

kinemotion-0.10.5.dist-info/METADATA → kinemotion-0.10.7.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.10.5
+Version: 0.10.7
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion

kinemotion-0.10.5.dist-info/RECORD → kinemotion-0.10.7.dist-info/RECORD

@@ -9,12 +9,12 @@ kinemotion/core/smoothing.py,sha256=FON4qKtsSp1-03GnJrDkEUAePaACn4QPMJF0eTIYqR0,
 kinemotion/core/video_io.py,sha256=z8Z0qbNaKbcdB40KnbNOBMzab3BbgnhBxp-mUBYeXgM,6577
 kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
 kinemotion/dropjump/analysis.py,sha256=HfJt2t9IsMBiBUz7apIzdxbRH9QqzlFnDVVWcKhU3ow,23291
-kinemotion/dropjump/cli.py,sha256=C6v6E3g1W-KNFc0xUzSjg4wKve1WsPxKvUBJV7LiMNI,26468
+kinemotion/dropjump/cli.py,sha256=THFJqVLFdFIXgrKSHw2MVc5DuOsqHKJQVT_x_C7l_18,29443
 kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
 kinemotion/dropjump/kinematics.py,sha256=RM_O8Kdc6aEiPIu_99N4cu-4EhYSQxtBGASJF_dmQaU,19081
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.10.5.dist-info/METADATA,sha256=I5dXmUcnNNtKS43uCbC_zbMLBMZAg_QpOMhwpnFSYcw,20333
-kinemotion-0.10.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kinemotion-0.10.5.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.10.5.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.10.5.dist-info/RECORD,,
+kinemotion-0.10.7.dist-info/METADATA,sha256=HDg72JCd-k5WKdgm8o7YfXj2Pdtu-DW_cs-KvGHm_xc,20333
+kinemotion-0.10.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kinemotion-0.10.7.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.10.7.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.10.7.dist-info/RECORD,,
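
For reference, the sha256 values in the RECORD hunk follow the wheel RECORD convention: a urlsafe base64 digest with the padding stripped. A changed entry such as cli.py's can therefore be re-checked against an unpacked 0.10.7 wheel with a small sketch like the one below (the relative path assumes it is run from the unpacked wheel root).

import base64
import hashlib


def record_hash(path: str) -> str:
    """Return a RECORD-style hash string: sha256=<urlsafe base64 digest, no padding>."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


# Expected to print the value shown above for the 0.10.7 wheel:
# sha256=THFJqVLFdFIXgrKSHw2MVc5DuOsqHKJQVT_x_C7l_18
print(record_hash("kinemotion/dropjump/cli.py"))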